Dataset schema (column name, dtype, and observed value statistics):

Column                                      Dtype     Stats
hexsha                                      string    length 40
size                                        int64     6 to 14.9M
ext                                         string    1 distinct value ("ipynb")
lang                                        string    1 distinct value ("Jupyter Notebook")
max_stars_repo_path                         string    length 6 to 260
max_stars_repo_name                         string    length 6 to 119
max_stars_repo_head_hexsha                  string    length 40 to 41
max_stars_repo_licenses                     list      -
max_stars_count                             int64     1 to 191k
max_stars_repo_stars_event_min_datetime     string    length 24
max_stars_repo_stars_event_max_datetime     string    length 24
max_issues_repo_path                        string    length 6 to 260
max_issues_repo_name                        string    length 6 to 119
max_issues_repo_head_hexsha                 string    length 40 to 41
max_issues_repo_licenses                    list      -
max_issues_count                            int64     1 to 67k
max_issues_repo_issues_event_min_datetime   string    length 24
max_issues_repo_issues_event_max_datetime   string    length 24
max_forks_repo_path                         string    length 6 to 260
max_forks_repo_name                         string    length 6 to 119
max_forks_repo_head_hexsha                  string    length 40 to 41
max_forks_repo_licenses                     list      -
max_forks_count                             int64     1 to 105k
max_forks_repo_forks_event_min_datetime     string    length 24
max_forks_repo_forks_event_max_datetime     string    length 24
avg_line_length                             float64   2 to 1.04M
max_line_length                             int64     2 to 11.2M
alphanum_fraction                           float64   0 to 1
cells                                       list      -
cell_types                                  list      -
cell_type_groups                            list      -
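Each row of the dump below is one notebook file described by the columns above. As a minimal sketch of how these fields might be used downstream, the snippet below filters rows on a few of the numeric quality columns. It assumes rows arrive as Python dicts keyed by the schema's column names (for example from a JSONL export or from the Hugging Face `datasets` library; the dump itself does not say where the data is hosted), and the thresholds are illustrative, not part of the dataset.

```python
def keep_row(row):
    """Quality filter for one dataset row (a dict keyed by the columns above).

    Threshold values are illustrative assumptions, not dataset defaults.
    """
    return (
        row["alphanum_fraction"] >= 0.3       # drop rows that are mostly non-text payload
        and row["max_line_length"] <= 10_000  # drop notebooks with huge embedded blobs
        and row["avg_line_length"] <= 500
    )


def iter_clean(rows):
    """Yield only the rows that pass the quality filter."""
    for row in rows:
        if keep_row(row):
            yield row


# Hypothetical usage: `rows` could be produced by json.loads over a JSONL
# dump or by iterating datasets.load_dataset(...); both yield dicts with
# exactly these column names as keys.
```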
Row 1:
hexsha: ecae8020de8e85694dfcac02757f51cf3f873f56
size: 59,330
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: devel/.ipynb_checkpoints/Fitting Plastic and Elastic Regions-checkpoint.ipynb
max_stars_repo_name: snesnehne/MatPy
max_stars_repo_head_hexsha: 291debff0796124c34ddfad2270976dcd2f445e7
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2018-05-18T10:18:58.000Z
max_stars_repo_stars_event_max_datetime: 2020-08-05T13:52:32.000Z
max_issues_repo_path: devel/.ipynb_checkpoints/Fitting Plastic and Elastic Regions-checkpoint.ipynb
max_issues_repo_name: snesnehne/MatPy
max_issues_repo_head_hexsha: 291debff0796124c34ddfad2270976dcd2f445e7
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 2
max_issues_repo_issues_event_min_datetime: 2016-08-05T19:36:16.000Z
max_issues_repo_issues_event_max_datetime: 2021-09-14T13:51:28.000Z
max_forks_repo_path: devel/.ipynb_checkpoints/Fitting Plastic and Elastic Regions-checkpoint.ipynb
max_forks_repo_name: snesnehne/MatPy
max_forks_repo_head_hexsha: 291debff0796124c34ddfad2270976dcd2f445e7
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 7
max_forks_repo_forks_event_min_datetime: 2017-05-21T16:43:34.000Z
max_forks_repo_forks_event_max_datetime: 2021-09-22T07:34:16.000Z
avg_line_length: 68.352535
max_line_length: 23,776
alphanum_fraction: 0.728063
cells:
[ [ [ "from parser import stress_strain as model\nimport material_analytics\n\nreload(material_analytics)\nimport graph_suite as plot\n\ndata = model('ref/HSRS/22').get_experimental_data()\n\n\nyielding = material_analytics.yield_stress(data)[0]\n\n\"\"\"Finds the yield index\"\"\"\nyield_index = 0\nfor index, point in enumerate(data):\n \n if (point == yielding).all():\n yield_index = index\n break\n\n\"\"\"Separates data into plastic and elastic regions\"\"\"\nelastic = data[:yield_index+1]\nelastic = elastic[elastic[:,0].argsort()] # sorts elastic by xs\n\nplastic = data[yield_index+1:]\n\nplot.plotmult2D(elastic, plastic,marker1 = 'bo', marker2 = 'ro')", "_____no_output_____" ], [ "elasticslopes = material_analytics.get_slopes(elastic)\n\nfor index, val in enumerate(elastic):\n print val, \"slope is:\", elasticslopes[index-1]\n\n#print material_analytics.get_slopes(elastic)", " [ 0. 0.38665151] slope is: 100.258124938\n[ 6.90235609e-06 -3.03527578e-01] slope is: -99991.8119738\n[ 0.00207038 1.53675264] slope is: 891.836110007\n[ 0.00318079 1.5367697 ] slope is: 0.0153676210086\n[ 0.0038187 -0.30353915] slope is: -2884.89138561\n[ 0.00403542 3.60711247] slope is: 18044.7278833\n[ 0.00502074 -3.06508516] slope is: -6771.61166445\n[ 0.00580736 2.22702922] slope is: 6727.62979004\n[ 0.00673739 4.9886397 ] slope is: 2969.39381296\n[ 0.00801604 0.84660961] slope is: -3239.36259184\n[ 0.00829301 -0.30355273] slope is: -4152.70835421\n[ 0.00856098 3.14709837] slope is: 12877.1398033\n[ 0.009495 5.67902188] slope is: 2710.78692785\n[ 0.01015951 -0.3035584 ] slope is: -9002.91381502\n[ 1.01827365e-02 3.99299053e+02] slope is: 17206672.975\n[ 0.01085796 4.98884527] slope is: -583969.565508\n[ 0.01198191 4.29863957] slope is: -614.088618163\n[ 0.01260613 4.06834854] slope is: -368.925836131\n[ 0.01331199 3.37720703] slope is: -979.145640549\n[ 0.01335389 1.53692605] slope is: -43921.520918\n[ 0.01412908 4.29873187] slope is: 3562.74972129\n[ 0.01482264 2.91751139] slope is: -1991.50333506\n[ 0.01497645 4.29876829] slope is: 8980.5079714\n[ 0.01558322 1.0769984 ] slope is: -5309.63617923\n[ 0.0158238 -2.37512801] slope is: -14349.3200392\n[ 0.01624702 0.38671434] slope is: 6525.80722818\n[ 0.01684661 0.15638902] slope is: -384.137889679\n[ 0.01702841 0.84668592] slope is: 3797.03976156\n[ 0.01718668 5.44913029] slope is: 29079.7200011\n[ 0.01733861 1.53698729] slope is: -25749.4548562\n[ 0.01742363 2.91758727] slope is: 16239.1430811\n[ 0.01812746 5.44720632] slope is: 3594.06399619\n[ 0.0231042 -1.91373233] slope is: -1479.06713769\n[ 2.33657006e-02 3.69746336e+02] slope is: 1421274.1679\n[ 0.02820966 1.99673514] slope is: -75919.2866622\n[ 0.03392703 1.76641027] slope is: -40.2850767029\n[ 0.04061022 -0.76379498] slope is: -378.592332586\n[ 4.37217757e-02 4.34811338e+02] slope is: 139986.382752\n[ 0.04687485 3.60556754] slope is: -136757.458306\n[ 0.05500686 1. 
] slope is: -320.40844999\n[ 5.70314638e-02 3.40173453e+02] slope is: 167526.171261\n[ 0.06320476 4.98515176] slope is: -54296.5228614\n[ 0.07169296 3.83479661] slope is: -135.523936075\n[ 7.63260231e-02 4.83638355e+02] slope is: 103560.820858\n[ 0.08134057 6.82299987] slope is: -95086.5176802\n[ 9.13452029e-02 3.14958671e+02] slope is: 30799.2836786\n[ 0.09150423 8.89094989] slope is: -1924668.54934\n[ 1.08419422e-01 5.32436899e+02] slope is: 30951.2201438\n[ 1.25329005e-01 2.89956164e+02] slope is: -14339.8415086\n[ 0.12700156 1.76925521] slope is: -172303.510935\n[ 1.39649856e-01 5.48583230e+02] slope is: 43232.2192192\n[ 0.14936046 6.60812572] slope is: -55812.685433\n[ 1.59006040e-01 2.60326837e+02] slope is: 26304.1550895\n[ 1.69887493e-01 5.70926100e+02] slope is: 28543.9143739\n[ 0.17252417 5.68828789] slope is: -214375.003184\n[ 1.91664697e-01 2.36207193e+02] slope is: 12043.4988365\n[ 0.19571199 7.99494049] slope is: -56386.3703981\n[ 1.99288712e-01 5.83387271e+02] slope is: 160871.451453\n[ 0.21968124 11.686137 ] slope is: -28034.8346806\n[ 0.22378475 218.98856412] slope is: 50518.3374469\n[ 2.28138245e-01 6.01124127e+02] slope is: 87776.7034939\n[ 0.24370953 9.38250509] slope is: -38002.1090803\n[ 0.25398979 210.28975931] slope is: 19543.0125604\n[ 2.56362411e-01 6.22528387e+02] slope is: 173748.119443\n[ 0.26758515 10.76880222] slope is: -54510.7162794\n[ 0.28318526 179.66415492] slope is: 10826.5509838\n[ 2.84918641e-01 6.45293441e+02] slope is: 268624.474843\n[ 0.29080709 10.31007365] slope is: -107835.390074\n[ 0.31061207 163.78855645] slope is: 7749.49148516\n[ 0.3129049 15.62007812] slope is: -64622.5566191\n[ 3.14119557e-01 6.81109301e+02] slope is: 547880.395362\n[ 0.33401691 19.31687671] slope is: -33260.3253428\n[ 0.33574011 143.28439709] slope is: 71940.2555946\n[ 3.44370920e-01 7.08422393e+02] slope is: 65479.1422221\n[ 0.35322259 20.70651937] slope is: -77693.3519624\n[ 0.35903655 133.8502572 ] slope is: 19460.6877216\n[ 0.37165378 18.86254573] slope is: -9113.54751691\n[ 3.76273117e-01 7.34556211e+02] slope is: 154934.363388\n[ 0.37901726 114.01558928] slope is: -226133.037102\n[ 0.38813323 25.33260665] slope is: -9728.31026996\n[ 0.39670175 102.25538212] slope is: 8977.36635412\n[ 0.40312001 29.72611435] slope is: -11300.4501352\n[ 4.09425679e-01 7.53557412e+02] slope is: 114790.625435\n[ 0.41122132 93.49078747] slope is: -367593.542772\n[ 0.4150876 31.57757496] slope is: -16013.6602056\n[ 0.42254496 81.48669383] slope is: 6692.59201711\n[ 0.42472043 37.35681902] slope is: -20285.2458469\n[ 0.43051481 74.56162045] slope is: 6420.847055\n[ 0.4319358 42.2123715] slope is: -22765.2399933\n[ 0.43555402 64.3974151 ] slope is: 6131.47814563\n[ 0.43614017 49.83999047] slope is: -24835.5117711\n[ 0.43744706 55.61757909] slope is: 4420.86900854\n[ 4.44848029e-01 7.71611334e+02] slope is: 96743.2983424\n[ 4.82364896e-01 7.91701881e+02] slope is: 535.507052464\n[ 5.23385912e-01 8.07399575e+02] slope is: 382.674425135\n[ 5.66767767e-01 8.28328118e+02] slope is: 482.426191787\n[ 6.14150306e-01 8.39823764e+02] slope is: 242.613548031\n[ 6.64133402e-01 8.56544363e+02] slope is: 334.52507168\n[ 7.19691997e-01 8.68408206e+02] slope is: 213.537497783\n[ 7.78460520e-01 8.81594549e+02] slope is: 224.377638372\n[ 8.42515074e-01 8.87642149e+02] slope is: 94.4132813073\n[ 0.94576549 898.12537392] slope is: 101.532028268\n[ 1.05243884 899.44660331] slope is: 12.3857503764\n[ 1.16290374 897.08782126] slope is: -21.3532269151\n[ 1.27586995 897.88715462] slope is: 7.07586191938\n[ 
1.39127975 900.2614981 ] slope is: 20.5731531389\n[ 1.51028567 892.61588504] slope is: -64.2456536754\n[ 1.63038123 893.8101834 ] slope is: 9.94456764625\n[ 1.75297856 892.03282867] slope is: -14.4974991654\n[ 1.87595586 887.54974346] slope is: -36.4545734785\n[ 1.99905135 881.72090114] slope is: -47.3522035041\n[ 2.12256188 878.83566656] slope is: -23.3602310996\n[ 2.24642216 868.29725722] slope is: -85.0830392773\n[ 2.3703703 864.97274245] slope is: -26.8218217635\n[ 2.49453809 861.21071336] slope is: -30.2979466177\n[ 2.61786954 857.67815366] slope is: -28.6428128304\n[ 2.74022937 855.96049394] slope is: -14.0377743717\n[ 2.86230911 855.81133995] slope is: -1.22177509025\n[ 2.98384194 851.42243289] slope is: -36.1129360297\n[ 3.1045271 851.73500787] slope is: 2.59000333471\n[ 3.22532444 863.84592233] slope is: 100.258124938\n" ] ], [ [ "## Straight up interpolation isn't the best option as we have some noise", "_____no_output_____" ] ], [ [ "from scipy import interpolate\nimport numpy as np\n\nelasticx = elastic[:,0]\nelasticy = elastic[:,1]\n\nfinterp = interpolate.UnivariateSpline(elasticx,elasticy)\nxnew = np.arange(0, max(elasticx), 0.1)\nynew = finterp(xnew) # use interpolation function returned by `interp1d`\n\ninterpdata = material_analytics.combine_data(xnew,ynew)\n\nplot.plot2D(interpdata)\n#plot.plotmult2D(elastic,interpdata, marker2='r-')", "_____no_output_____" ], [ "# trying to fit log stuff to our elastic region\nelastic = material_analytics.log_prep(elastic)\nelasticlog = material_analytics.log_approx(elastic)\n\nfittedelastic = material_analytics.samplepoints(elasticlog,[0.1,max(elastic[:,0])],1000)\nplot.plotmult2D(elastic,fittedelastic)", "_____no_output_____" ] ], [ [ "_____________________________________________________________________________________________________________", "_____no_output_____" ] ], [ [ "from parser import stress_strain as model\nimport material_analytics\n\nreload(material_analytics)\nimport graph_suite as plot\n\ndata = model('ref/HSRS/22').get_experimental_data()\n\nyielding = material_analytics.yield_stress(data)[0]\n\n\"\"\"Finds the yield index\"\"\"\nyield_index = 0\nfor index, point in enumerate(data):\n \n if (point == yielding).all():\n yield_index = index\n break\n\n\"\"\"Separates data into plastic and elastic regions\"\"\"\nelastic = data[:yield_index+1]\nelastic = elastic[elastic[:,0].argsort()] # sorts elastic by xs\n\nplastic = data[yield_index+1:]", "_____no_output_____" ] ], [ [ "## This attempts to smooth the curve, but because of horizontal error, we might be better off using splines now that we did an argsort ", "_____no_output_____" ] ], [ [ "# http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay (Written by scipy)\n# trying to use this to smooth the elastic curve to find the high yield point\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\n r\"\"\"Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\n The Savitzky-Golay filter removes high frequency noise from data.\n It has the advantage of preserving the original shape and\n features of the signal better than other types of filtering\n approaches, such as moving averages techniques.\n Parameters\n ----------\n y : array_like, shape (N,)\n the values of the time history of the signal.\n window_size : int\n the length of the window. 
Must be an odd integer number.\n order : int\n the order of the polynomial used in the filtering.\n Must be less then `window_size` - 1.\n deriv: int\n the order of the derivative to compute (default = 0 means only smoothing)\n Returns\n -------\n ys : ndarray, shape (N)\n the smoothed signal (or it's n-th derivative).\n Notes\n -----\n The Savitzky-Golay is a type of low-pass filter, particularly\n suited for smoothing noisy data. The main idea behind this\n approach is to make for each point a least-square fit with a\n polynomial of high order over a odd-sized window centered at\n the point.\n Examples\n --------\n t = np.linspace(-4, 4, 500)\n y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)\n ysg = savitzky_golay(y, window_size=31, order=4)\n import matplotlib.pyplot as plt\n plt.plot(t, y, label='Noisy signal')\n plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')\n plt.plot(t, ysg, 'r', label='Filtered signal')\n plt.legend()\n plt.show()\n References\n ----------\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\n Data by Simplified Least Squares Procedures. Analytical\n Chemistry, 1964, 36 (8), pp 1627-1639.\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery\n Cambridge University Press ISBN-13: 9780521880688\n \"\"\"\n import numpy as np\n from math import factorial\n \n try:\n window_size = np.abs(np.int(window_size))\n order = np.abs(np.int(order))\n except ValueError, msg:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomials order\")\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n # precompute coefficients\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n # pad the signal at the extremes with\n # values taken from the signal itself\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m[::-1], y, mode='valid')\n\nelasticstrain = elastic[:,0]\nelasticstress = elastic[:,1]\nelasticstress = savitzky_golay(elasticstress,101,4)\n\nelasticsmoothed = material_analytics.combine_data(elasticstrain,elasticstress)\n\nplot.plotmult2D(elastic,elasticsmoothed, marker1='o')", "_____no_output_____" ], [ "# -------------------------------------------------------------------------", "_____no_output_____" ], [ "from parser import stress_strain as model\nimport material_analytics\n\nreload(material_analytics)\nimport graph_suite as plot\n\ndata = model('ref/HSRS/22').get_experimental_data()\n\nyielding = material_analytics.yield_stress(data)[0]\n\n\"\"\"Finds the yield index\"\"\"\nyield_index = 0\nfor index, point in enumerate(data):\n \n if (point == yielding).all():\n yield_index = index\n break\n\n\"\"\"Separates data into plastic and elastic regions\"\"\"\nelastic = data[:yield_index+1]\nplastic = data[yield_index+1:]\n\n\"\"\"\nFinds the upper yield point (lower yield point is *yielding*). 
\nWe're taking the first element ([0]) because it returns the \nfirst element that meets the criteria in parentheses.\n\"\"\"\nupperyieldpoint_index = np.where(elastic==max(elastic[:,1]))[0][0]\nupperyieldpoint = elastic[upperyieldpoint_index]\n\n\"\"\"We estimate the region until the first upper yield point with a linear model\"\"\"\nlin_elastic_region = elastic[:upperyieldpoint_index]\nlin_elastic_samples = material_analytics.predictlinear(lin_elastic_region, step=0.1)\n\nplot.plotmult2D(lin_elastic_samples, lin_elastic_region)", "_____no_output_____" ], [ "\"\"\"\nIf the upper yield point is the default yield point, \nthen the material doesn't exhibit the yield point phenomenon.\n\nOtherwise, we establish the domain in which the yield point \nphenomenon occurs, within which we will be selecting nearest\nneighbors as a method of approximation.\n\"\"\"\n\nyieldpointphenom_region = None\nyieldpointphenom = True\n\nif upperyieldpoint_index==yield_index:\n yieldpointphenom = False \n\nif yieldpointphenom:\n yieldpointphenom_region = [upperyieldpoint_index,yield_index]", "_____no_output_____" ], [ "\"\"\"Great fit, we fit a log to plastic region\"\"\"\nplastic_reg = material_analytics.log_approx(plastic)\nplot.plotmult2D(plastic,material_analytics.samplepoints(plastic_reg,[yielding[0],max(data[:,0])],100))\n\n\"\"\"Outside of these three regions, we assume that the material is broken\"\"\"", "_____no_output_____" ], [ "from parser import stress_strain as model\nimport material_analytics\n\nreload(material_analytics)\nimport graph_suite as plot\n\ndata = model('ref/850-300.dat').get_experimental_data()\n\nyielding = material_analytics.yield_stress(data)[0]\n\n\"\"\"Finds the yield index\"\"\"\nyield_index = 0\nfor index, point in enumerate(data):\n \n if (point == yielding).all():\n yield_index = index\n break\n\n\"\"\"Separates data into plastic and elastic regions\"\"\"\nelastic = data[:yield_index+1]\nplastic = data[yield_index+1:]\n\n\"\"\"\nFinds the upper yield point (lower yield point is *yielding*). 
\nWe're taking the first element ([0]) because it returns the \nfirst element that meets the criteria in parentheses.\n\"\"\"\nupperyieldpoint_index = np.where(elastic==max(elastic[:,1]))[0][0]\nupperyieldpoint = elastic[upperyieldpoint_index]\n\n\"\"\"We estimate the region until the first upper yield point with a linear model\"\"\"\nlin_elastic_region = elastic[:upperyieldpoint_index]\nlin_elastic_samples = material_analytics.predictlinear(lin_elastic_region, step=0.1)\n\nplot.plotmult2D(lin_elastic_samples, lin_elastic_region)\n\n\"\"\"\nIf the upper yield point is the default yield point, \nthen the material doesn't exhibit the yield point phenomenon.\n\nOtherwise, we establish the domain in which the yield point \nphenomenon occurs, within which we will be selecting nearest\nneighbors as a method of approximation.\n\"\"\"\n\nyieldpointphenom_region = None\nyieldpointphenom = True\n\nif upperyieldpoint_index==yield_index:\n yieldpointphenom = False \n\nif yieldpointphenom:\n yieldpointphenom_region = [upperyieldpoint_index,yield_index]\n print \"Yield Point phenomenon is occuring!\"\n \nelse:\n print \"Yield Point phenomenon is not occuring!\"\n\n\"\"\"Great fit, we fit a log to plastic region\"\"\"\nplastic_reg = material_analytics.log_approx(plastic)\nplot.plotmult2D(plastic,material_analytics.samplepoints(plastic_reg,[yielding[0],max(data[:,0])],100))\n\n\"\"\"Outside of these three regions, we assume that the material has fractured\"\"\"\n", "Yield Point phenomenon is not occuring!\n" ], [ "data1 = model('ref/850-300.dat').get_experimental_data()\nyield1 = material_analytics.yield_stress(data1)\nplot.plotmult2D(data1,yield1)", "_____no_output_____" ], [ "#Given a dataset and a strain value, predicts what the stress will be at that point\ndef predict_value(data, strain):\n \n yielding = material_analytics.yield_stress(data)[0]\n\n \"\"\"Finds the yield index\"\"\"\n yield_index = 0\n for index, point in enumerate(data):\n\n if (point == yielding).all():\n yield_index = index\n break\n\n \"\"\"Separates data into plastic and elastic regions\"\"\"\n elastic = data[:yield_index+1]\n plastic = data[yield_index+1:]\n\n \"\"\"\n Finds the upper yield point (lower yield point is the *yielding* variable). \n We're taking the first element ([0]) because it returns the \n first element that meets the criteria in parentheses.\n \n It's a two-dimensional array so we have to do this twice.\n \"\"\"\n upperyieldpoint_index = np.where(elastic==max(elastic[:,1]))[0][0]\n upperyieldpoint = elastic[upperyieldpoint_index]\n\n \"\"\"We estimate the region until the first upper yield point with a linear model\"\"\"\n lin_elastic_region = elastic[:upperyieldpoint_index]\n lin_elastic_model = material_analytics.linfit(lin_elastic_region)\n \n \"\"\"\n If the upper yield point is the only yield point, \n then the material doesn't exhibit the yield point phenomenon.\n\n Otherwise, we establish the domain in which the yield point \n phenomenon occurs, within which we will be selecting nearest\n neighbors as a method of approximation. 
The yield point pheno-\n menon occurs when there are two distinct yield points, and \n that when plastic deformation begins, stress is immediately \n relieved.\n \"\"\"\n yieldpointphenom_region = None\n yieldpointphenom = True\n\n if upperyieldpoint_index==yield_index:\n yieldpointphenom = False \n\n if yieldpointphenom:\n yieldpointphenom_region = [upperyieldpoint_index,yield_index]\n\n \"\"\"We must determine which domain contains the strain point requested\"\"\"\n \n start_yield = upperyieldpoint[0]\n \n if strain < 0 or strain > max(data[:,0]):\n \n \"\"\"(Out of range)\"\"\"\n return np.nan\n \n elif strain < start_yield:\n \n \"\"\"Linear approximation (elastic region)\"\"\"\n return np.array([strain, lin_elastic_model.predict(strain)])[None,:] \n \n elif yieldpointphenom and strain >= start_yield and strain < yielding[0]:\n \n \"\"\"Picks the nearest neighbor in this zone\"\"\"\n yieldpoints_inregion = elastic[elastic[0]>=start_yield and elastic[0]<yielding[0]]\n \n for val in yieldpoints_inregion:\n if val[0] > strain:\n return np.array([strain,val [1]])[None,:] \n \n elif not yieldpointphenom or strain >= yielding[0]:\n \n \"\"\"We fit a logarithmic curve to approximate the plastic region\"\"\"\n plastic_reg = material_analytics.log_approx(plastic)\n return np.array([strain,plastic_reg(strain)])[None,:] \n \n ", "_____no_output_____" ], [ "% matplotlib inline\ndata1 = model('ref/850-300.dat').get_experimental_data()\nplot.plotmult2D(data1,predict_value(data1,0.05))\n", "Linear Approximation\n" ], [ "from parser import stress_strain as model\nimport material_analytics\nimport graph_suite as plot\n\n#reload(material_analytics)\n\ndata1 = model('ref/HSRS/22').get_experimental_data()\nplot.plotmult2D(data1,material_analytics.predict_stress(data1, 1.5))", "_____no_output_____" ] ], [ [ "## Fixing the nearest neighbors situation", "_____no_output_____" ] ], [ [ "import numpy as np\n\n#Given a dataset and a strain value, predicts what the stress will be at that point\ndef predict_stress(data, strain):\n \n yielding = material_analytics.yield_stress(data)[0]\n\n \"\"\"Finds the yield index\"\"\"\n yield_index = 0\n for index, point in enumerate(data):\n\n if (point == yielding).all():\n yield_index = index\n break\n\n \"\"\"Separates data into plastic and elastic regions\"\"\"\n elastic = data[:yield_index+1]\n plastic = data[yield_index+1:]\n\n \"\"\"\n Finds the upper yield point (lower yield point is the *yielding* variable). \n We're taking the first element ([0]) because it returns the \n first element that meets the criteria in parentheses.\n \n It's a two-dimensional array so we have to do this twice.\n \"\"\"\n upperyieldpoint_index = np.where(elastic==max(elastic[:,1]))[0][0]\n upperyieldpoint = elastic[upperyieldpoint_index]\n\n \"\"\"We estimate the region until the first upper yield point with a linear model\"\"\"\n lin_elastic_region = elastic[:upperyieldpoint_index]\n lin_elastic_model = material_analytics.linfit(lin_elastic_region)\n \n \"\"\"\n If the upper yield point is the only yield point, \n then the material doesn't exhibit the yield point phenomenon.\n\n Otherwise, we establish the domain in which the yield point \n phenomenon occurs, within which we will be selecting nearest\n neighbors as a method of approximation. 
The yield point pheno-\n menon occurs when there are two distinct yield points, and \n that when plastic deformation begins, stress is immediately \n relieved.\n \"\"\"\n yieldpointphenom_region = None\n yieldpointphenom = True\n\n if upperyieldpoint_index==yield_index:\n yieldpointphenom = False \n\n if yieldpointphenom:\n yieldpointphenom_region = [upperyieldpoint_index,yield_index]\n\n \"\"\"We must determine which domain contains the strain point requested\"\"\"\n \n start_yield = upperyieldpoint[0]\n \n if strain < 0 or strain > max(data[:,0]):\n \n \"\"\"(Out of range)\"\"\"\n return np.nan\n \n elif strain < start_yield:\n \n \"\"\"Linear approximation (elastic region)\"\"\"\n return np.array([strain, lin_elastic_model.predict(strain)])[None,:] \n \n elif yieldpointphenom and strain >= start_yield and strain < yielding[0]:\n \n \"\"\"Picks the nearest neighbor in this zone\"\"\"\n yieldpoints_inregion = elastic[np.where(np.logical_and(elastic[:,0] >= start_yield,\n elastic[:,0] < yielding[0]) )]\n \n for val in yieldpoints_inregion:\n if val[0] > strain:\n return np.array([strain,val [1]])[None,:] \n \n elif not yieldpointphenom or strain >= yielding[0]:\n \n \"\"\"We fit a logarithmic curve to approximate the plastic region\"\"\"\n plastic_reg = material_analytics.log_approx(plastic)\n return np.array([strain,plastic_reg(strain)])[None,:] \n \ndata1 = model('ref/HSRS/22').get_experimental_data()\nplot.plotmult2D(data1,predict_stress(data1, 1.5))", "_____no_output_____" ] ] ]
cell_types: [ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
cell_type_groups: [ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
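In Row 1 above, `cells` holds groups of consecutive cells (each cell a [source, output] pair), `cell_types` gives one type per group, and `cell_type_groups` repeats the type of every cell inside each group. Assuming that layout, which is inferred only from this example row and not from any official dataset documentation, a row can be turned back into a minimal .ipynb roughly as sketched below; stored outputs are dropped, and `row` stands for a dict shaped like the record above.

```python
def row_to_notebook(row):
    """Rebuild a minimal nbformat-4 notebook dict from one dataset row.

    Assumes row["cells"] is a list of groups, each group a list of
    [source, output] pairs, and row["cell_types"] names the type of each
    group ("code" or "markdown") -- an inference from the example row.
    """
    nb_cells = []
    for group, kind in zip(row["cells"], row["cell_types"]):
        for cell in group:
            source = cell[0]  # cell[1] holds the stored output marker/text
            if kind == "code":
                nb_cells.append({
                    "cell_type": "code",
                    "metadata": {},
                    "execution_count": None,
                    "outputs": [],  # stored outputs are discarded here
                    "source": source,
                })
            else:
                nb_cells.append({
                    "cell_type": "markdown",
                    "metadata": {},
                    "source": source,
                })
    return {"cells": nb_cells, "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


# Hypothetical usage:
#   import json
#   with open("reconstructed.ipynb", "w") as f:
#       json.dump(row_to_notebook(row), f, indent=1)
```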
Row 2:
hexsha: ecae8c440cb0a1d1e11cf366998ec36be42a51e7
size: 304,323
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: ExploreSegmentCluster-NeighborhoodsToronto.ipynb
max_stars_repo_name: jcussi/coursera_capstone
max_stars_repo_head_hexsha: f086e25a485340a1cf40ad634775a7f95a355818
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ExploreSegmentCluster-NeighborhoodsToronto.ipynb
max_issues_repo_name: jcussi/coursera_capstone
max_issues_repo_head_hexsha: f086e25a485340a1cf40ad634775a7f95a355818
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ExploreSegmentCluster-NeighborhoodsToronto.ipynb
max_forks_repo_name: jcussi/coursera_capstone
max_forks_repo_head_hexsha: f086e25a485340a1cf40ad634775a7f95a355818
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 450.848889
max_line_length: 102,488
alphanum_fraction: 0.704035
cells:
[ [ [ "# Segmenting and Clustering Neighborhoods in Toronto\n\n## Week 3 - Assignment\n\n#### In this assignment, you will be required to explore, segment, and cluster the neighborhoods in the city of Toronto based on the postalcode and borough information.. However, unlike New York, the neighborhood data is not readily available on the internet. What is interesting about the field of data science is that each project can be challenging in its unique way, so you need to learn to be agile and refine the skill to learn new libraries and tools quickly depending on the project.\n\n#### For the Toronto neighborhood data, a Wikipedia page exists that has all the information we need to explore and cluster the neighborhoods in Toronto. You will be required to scrape the Wikipedia page and wrangle the data, clean it, and then read it into a pandas dataframe so that it is in a structured format like the New York dataset.\n\n#### Once the data is in a structured format, you can replicate the analysis that we did to the New York City dataset to explore and cluster the neighborhoods in the city of Toronto.\n\n#### Your submission will be a link to your Jupyter Notebook on your Github repository.\n\n### Note: You may not be able to view the maps of \"Folium\", therefore you can also see the following link:\nhttps://dataplatform.cloud.ibm.com/analytics/notebooks/v2/5f577cf6-ea52-4cec-aa5d-62c3a875ac40/view?access_token=d9361f6f7a8029b1fe68c114f3d604db3488a61a5440e39cefbeb9209d9f4294\n\n##### Author: Jhimy Cussi", "_____no_output_____" ] ], [ [ "#!conda install -c conda-forge folium=0.5.0 --yes", "Collecting package metadata (current_repodata.json): ...working... done\nSolving environment: ...working... failed with initial frozen solve. Retrying with flexible solve.\nCollecting package metadata (repodata.json): ...working... done\nSolving environment: ...working... 
\nWarning: 4 possible package resolutions (only showing differing packages):\n - anaconda/win-64::ca-certificates-2020.6.24-0, anaconda/win-64::certifi-2020.6.20-py38_0\n - anaconda/win-64::ca-certificates-2020.6.24-0, defaults/win-64::certifi-2020.6.20-py38_0\n - anaconda/win-64::certifi-2020.6.20-py38_0, defaults/win-64::ca-certificates-2020.6.24-0\n - defaults/win-64::ca-certificates-2020.6.24-0, defaults/win-64::certifi-2020.6.20-py38_0done\n\n## Package Plan ##\n\n environment location: C:\\Users\\jhimy\\anaconda3\n\n added / updated specs:\n - folium=0.5.0\n\n\nThe following packages will be downloaded:\n\n package | build\n ---------------------------|-----------------\n altair-4.1.0 | py_1 614 KB conda-forge\n branca-0.4.2 | pyhd8ed1ab_0 26 KB conda-forge\n folium-0.5.0 | py_0 45 KB conda-forge\n openssl-1.1.1g | he774522_0 5.7 MB conda-forge\n vincent-0.4.4 | py_1 28 KB conda-forge\n ------------------------------------------------------------\n Total: 6.4 MB\n\nThe following NEW packages will be INSTALLED:\n\n altair conda-forge/noarch::altair-4.1.0-py_1\n branca conda-forge/noarch::branca-0.4.2-pyhd8ed1ab_0\n folium conda-forge/noarch::folium-0.5.0-py_0\n vincent conda-forge/noarch::vincent-0.4.4-py_1\n\nThe following packages will be SUPERSEDED by a higher-priority channel:\n\n openssl anaconda --> conda-forge\n\n\n\nDownloading and Extracting Packages\n\nopenssl-1.1.1g | 5.7 MB | | 0% \nopenssl-1.1.1g | 5.7 MB | | 0% \nopenssl-1.1.1g | 5.7 MB | 1 | 1% \nopenssl-1.1.1g | 5.7 MB | 3 | 3% \nopenssl-1.1.1g | 5.7 MB | 6 | 7% \nopenssl-1.1.1g | 5.7 MB | 9 | 9% \nopenssl-1.1.1g | 5.7 MB | #1 | 12% \nopenssl-1.1.1g | 5.7 MB | #4 | 14% \nopenssl-1.1.1g | 5.7 MB | #6 | 16% \nopenssl-1.1.1g | 5.7 MB | #8 | 19% \nopenssl-1.1.1g | 5.7 MB | ## | 21% \nopenssl-1.1.1g | 5.7 MB | ##2 | 23% \nopenssl-1.1.1g | 5.7 MB | ##5 | 25% \nopenssl-1.1.1g | 5.7 MB | ##7 | 27% \nopenssl-1.1.1g | 5.7 MB | ##9 | 29% \nopenssl-1.1.1g | 5.7 MB | ###1 | 32% \nopenssl-1.1.1g | 5.7 MB | ###3 | 34% \nopenssl-1.1.1g | 5.7 MB | ###5 | 36% \nopenssl-1.1.1g | 5.7 MB | ###8 | 38% \nopenssl-1.1.1g | 5.7 MB | #### | 40% \nopenssl-1.1.1g | 5.7 MB | ####1 | 42% \nopenssl-1.1.1g | 5.7 MB | ####4 | 44% \nopenssl-1.1.1g | 5.7 MB | ####6 | 46% \nopenssl-1.1.1g | 5.7 MB | ####7 | 48% \nopenssl-1.1.1g | 5.7 MB | ####9 | 50% \nopenssl-1.1.1g | 5.7 MB | #####2 | 52% \nopenssl-1.1.1g | 5.7 MB | #####3 | 54% \nopenssl-1.1.1g | 5.7 MB | #####6 | 56% \nopenssl-1.1.1g | 5.7 MB | #####7 | 58% \nopenssl-1.1.1g | 5.7 MB | #####9 | 60% \nopenssl-1.1.1g | 5.7 MB | ######1 | 62% \nopenssl-1.1.1g | 5.7 MB | ######2 | 63% \nopenssl-1.1.1g | 5.7 MB | ######4 | 64% \nopenssl-1.1.1g | 5.7 MB | ######5 | 66% \nopenssl-1.1.1g | 5.7 MB | ######7 | 67% \nopenssl-1.1.1g | 5.7 MB | ######8 | 68% \nopenssl-1.1.1g | 5.7 MB | ######9 | 70% \nopenssl-1.1.1g | 5.7 MB | #######1 | 71% \nopenssl-1.1.1g | 5.7 MB | #######2 | 73% \nopenssl-1.1.1g | 5.7 MB | #######3 | 74% \nopenssl-1.1.1g | 5.7 MB | #######5 | 75% \nopenssl-1.1.1g | 5.7 MB | #######6 | 77% \nopenssl-1.1.1g | 5.7 MB | #######8 | 78% \nopenssl-1.1.1g | 5.7 MB | #######9 | 80% \nopenssl-1.1.1g | 5.7 MB | ######## | 81% \nopenssl-1.1.1g | 5.7 MB | ########2 | 82% \nopenssl-1.1.1g | 5.7 MB | ########3 | 84% \nopenssl-1.1.1g | 5.7 MB | ########5 | 85% \nopenssl-1.1.1g | 5.7 MB | ########6 | 86% \nopenssl-1.1.1g | 5.7 MB | ########8 | 88% \nopenssl-1.1.1g | 5.7 MB | ########9 | 89% \nopenssl-1.1.1g | 5.7 MB | ######### | 91% \nopenssl-1.1.1g | 5.7 MB | #########1 | 92% \nopenssl-1.1.1g | 5.7 MB | 
#########2 | 93% \nopenssl-1.1.1g | 5.7 MB | #########3 | 94% \nopenssl-1.1.1g | 5.7 MB | #########4 | 95% \nopenssl-1.1.1g | 5.7 MB | #########5 | 96% \nopenssl-1.1.1g | 5.7 MB | #########7 | 97% \nopenssl-1.1.1g | 5.7 MB | #########7 | 98% \nopenssl-1.1.1g | 5.7 MB | #########9 | 99% \nopenssl-1.1.1g | 5.7 MB | ########## | 100% \nopenssl-1.1.1g | 5.7 MB | ########## | 100% \n\nbranca-0.4.2 | 26 KB | | 0% \nbranca-0.4.2 | 26 KB | ######1 | 62% \nbranca-0.4.2 | 26 KB | ########## | 100% \n\naltair-4.1.0 | 614 KB | | 0% \naltair-4.1.0 | 614 KB | 2 | 3% \naltair-4.1.0 | 614 KB | # | 10% \naltair-4.1.0 | 614 KB | #8 | 18% \naltair-4.1.0 | 614 KB | ##8 | 29% \naltair-4.1.0 | 614 KB | ###3 | 34% \naltair-4.1.0 | 614 KB | ####1 | 42% \naltair-4.1.0 | 614 KB | ####9 | 50% \naltair-4.1.0 | 614 KB | #####7 | 57% \naltair-4.1.0 | 614 KB | ######5 | 65% \naltair-4.1.0 | 614 KB | #######2 | 73% \naltair-4.1.0 | 614 KB | ########3 | 83% \naltair-4.1.0 | 614 KB | #########1 | 91% \naltair-4.1.0 | 614 KB | #########9 | 99% \naltair-4.1.0 | 614 KB | ########## | 100% \n\nvincent-0.4.4 | 28 KB | | 0% \nvincent-0.4.4 | 28 KB | #####7 | 58% \nvincent-0.4.4 | 28 KB | ########## | 100% \n\nfolium-0.5.0 | 45 KB | | 0% \nfolium-0.5.0 | 45 KB | ###5 | 35% \nfolium-0.5.0 | 45 KB | ########## | 100% \nfolium-0.5.0 | 45 KB | ########## | 100% \nPreparing transaction: ...working... done\nVerifying transaction: ...working... done\nExecuting transaction: ...working... done\n" ], [ "import pandas as pd\nimport numpy as np\nfrom IPython.display import display_html\nfrom geopy.geocoders import Nominatim # convert an address into latitude \n\n# Matplotlib and associated plotting modules\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\n\n# import k-means from clustering stage\nfrom sklearn.cluster import KMeans\n\n#!conda install -c conda-forge folium=0.5.0 --yes # uncomment this line if you haven't completed the Foursquare API lab\nimport folium # map rendering library\n\nfrom bs4 import BeautifulSoup # this module helps in web scrapping.\nimport requests # this module helps us to download a web page\n\nprint('Libraries imported.')", "Libraries imported.\n" ] ], [ [ "## 1) Explore", "_____no_output_____" ] ], [ [ "url = \"https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M\"\ndata = requests.get(url).text\nsoup = BeautifulSoup(data,\"html5lib\")\ntables = soup.find_all('table')\nlen(tables)", "_____no_output_____" ], [ "# Verify the correcta table.\ntab = str(tables[0])\ndisplay_html(tab, raw=True)", "_____no_output_____" ], [ "print(tables[0].prettify())", " <br/>\n (\n <a href=\"/wiki/University_of_Toronto\" title=\"University of Toronto\">\n University of Toronto\n </a>\n / Harbord)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M6S\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n West Toronto\n <br/>\n (\n <a href=\"/wiki/Runnymede,_Toronto\" title=\"Runnymede, Toronto\">\n Runnymede\n </a>\n /\n <a href=\"/wiki/Swansea,_Toronto\" title=\"Swansea, Toronto\">\n Swansea\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M7S\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M8S\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M9S\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not 
assigned\n </i>\n </span>\n </p>\n </td>\n </tr>\n <tr>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M1T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Scarborough,_Toronto\" title=\"Scarborough, Toronto\">\n Scarborough\n </a>\n <br/>\n (Clarks Corners /\n <a class=\"mw-redirect\" href=\"/wiki/Tam_O%27Shanter,_Ontario\" title=\"Tam O'Shanter, Ontario\">\n Tam O'Shanter\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Sullivan,_Toronto\" title=\"Sullivan, Toronto\">\n Sullivan\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M2T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M3T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M4T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n Central Toronto\n <br/>\n (\n <a href=\"/wiki/Moore_Park,_Toronto\" title=\"Moore Park, Toronto\">\n Moore Park\n </a>\n /\n <a href=\"/wiki/Summerhill,_Toronto\" title=\"Summerhill, Toronto\">\n Summerhill\n </a>\n East)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M5T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n (\n <a href=\"/wiki/Kensington_Market\" title=\"Kensington Market\">\n Kensington Market\n </a>\n /\n <a href=\"/wiki/Chinatown,_Toronto\" title=\"Chinatown, Toronto\">\n Chinatown\n </a>\n /\n <a href=\"/wiki/Grange_Park_(Toronto)\" title=\"Grange Park (Toronto)\">\n Grange Park\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M6T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M7T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M8T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M9T\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n </tr>\n <tr>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M1V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Scarborough,_Toronto\" title=\"Scarborough, Toronto\">\n Scarborough\n </a>\n <br/>\n (\n <a class=\"mw-redirect\" href=\"/wiki/Milliken,_Toronto\" title=\"Milliken, Toronto\">\n Milliken\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Agincourt_North\" title=\"Agincourt North\">\n Agincourt North\n </a>\n /\n <a href=\"/wiki/Steeles,_Toronto\" title=\"Steeles, Toronto\">\n Steeles\n </a>\n East /\n <a href=\"/wiki/L%27Amoreaux\" title=\"L'Amoreaux\">\n L'Amoreaux\n </a>\n East)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M2V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M3V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M4V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n Central Toronto\n <br/>\n 
(\n <a href=\"/wiki/Summerhill,_Toronto\" title=\"Summerhill, Toronto\">\n Summerhill\n </a>\n West /\n <a class=\"mw-redirect\" href=\"/wiki/Rathnelly\" title=\"Rathnelly\">\n Rathnelly\n </a>\n /\n <a href=\"/wiki/South_Hill,_Toronto\" title=\"South Hill, Toronto\">\n South Hill\n </a>\n /\n <a href=\"/wiki/Forest_Hill,_Toronto\" title=\"Forest Hill, Toronto\">\n Forest Hill\n </a>\n SE /\n <a href=\"/wiki/Deer_Park,_Toronto\" title=\"Deer Park, Toronto\">\n Deer Park\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M5V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n (\n <a href=\"/wiki/CN_Tower\" title=\"CN Tower\">\n CN Tower\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/King_and_Spadina\" title=\"King and Spadina\">\n King and Spadina\n </a>\n /\n <a href=\"/wiki/Railway_Lands\" title=\"Railway Lands\">\n Railway Lands\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Harbourfront_(Toronto)\" title=\"Harbourfront (Toronto)\">\n Harbourfront\n </a>\n West / Bathurst Quay /\n <a class=\"mw-redirect\" href=\"/wiki/South_Niagara\" title=\"South Niagara\">\n South Niagara\n </a>\n /\n <a href=\"/wiki/Billy_Bishop_Toronto_City_Airport\" title=\"Billy Bishop Toronto City Airport\">\n Island airport\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M6V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M7V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M8V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n (\n <a href=\"/wiki/New_Toronto\" title=\"New Toronto\">\n New Toronto\n </a>\n /\n <a href=\"/wiki/Mimico\" title=\"Mimico\">\n Mimico\n </a>\n South / Humber Bay Shores)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M9V\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n (\n <a class=\"mw-redirect\" href=\"/wiki/South_Steeles\" title=\"South Steeles\">\n South Steeles\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Silverstone,_Toronto\" title=\"Silverstone, Toronto\">\n Silverstone\n </a>\n / Humbergate /\n <a class=\"mw-redirect\" href=\"/wiki/Jamestown,_Toronto\" title=\"Jamestown, Toronto\">\n Jamestown\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Mount_Olive,_Toronto\" title=\"Mount Olive, Toronto\">\n Mount Olive\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Beaumond_Heights\" title=\"Beaumond Heights\">\n Beaumond Heights\n </a>\n /\n <a href=\"/wiki/Thistletown\" title=\"Thistletown\">\n Thistletown\n </a>\n / Albion Gardens)\n </span>\n </p>\n </td>\n </tr>\n <tr>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M1W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Scarborough,_Toronto\" title=\"Scarborough, Toronto\">\n Scarborough\n </a>\n <br/>\n (\n <a href=\"/wiki/Steeles,_Toronto\" title=\"Steeles, Toronto\">\n Steeles\n </a>\n West /\n <a href=\"/wiki/L%27Amoreaux\" title=\"L'Amoreaux\">\n L'Amoreaux\n </a>\n West)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M2W\n </b>\n <br/>\n <span 
style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M3W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M4W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n (\n <a href=\"/wiki/Rosedale,_Toronto\" title=\"Rosedale, Toronto\">\n Rosedale\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M5W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n Stn A PO Boxes\n <br/>\n 25 The Esplanade\n <br/>\n (Enclave of M5E)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M6W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M7W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M8W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n (\n <a href=\"/wiki/Alderwood,_Toronto\" title=\"Alderwood, Toronto\">\n Alderwood\n </a>\n /\n <a href=\"/wiki/Long_Branch,_Toronto\" title=\"Long Branch, Toronto\">\n Long Branch\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M9W\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n Northwest\n <br/>\n (\n <a class=\"mw-redirect\" href=\"/wiki/Clairville,_Toronto\" title=\"Clairville, Toronto\">\n Clairville\n </a>\n /\n <a href=\"/wiki/Humberwood\" title=\"Humberwood\">\n Humberwood\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Woodbine_Downs\" title=\"Woodbine Downs\">\n Woodbine Downs\n </a>\n / West Humber / Kipling Heights /\n <a href=\"/wiki/Rexdale\" title=\"Rexdale\">\n Rexdale\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Elms,_Toronto\" title=\"Elms, Toronto\">\n Elms\n </a>\n / Tandridge / Old Rexdale)\n </span>\n </p>\n </td>\n </tr>\n <tr>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M1X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Scarborough,_Toronto\" title=\"Scarborough, Toronto\">\n Scarborough\n </a>\n <br/>\n (\n <a class=\"mw-redirect\" href=\"/wiki/Upper_Rouge\" title=\"Upper Rouge\">\n Upper Rouge\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M2X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M3X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M4X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n (\n <a href=\"/wiki/St._James_Town\" title=\"St. James Town\">\n St. 
James Town\n </a>\n /\n <a class=\"mw-redirect\" href=\"/wiki/Cabbagetown_(Toronto)\" title=\"Cabbagetown (Toronto)\">\n Cabbagetown\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M5X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n (\n <a href=\"/wiki/First_Canadian_Place\" title=\"First Canadian Place\">\n First Canadian Place\n </a>\n /\n <a href=\"/wiki/PATH_(Toronto)\" title=\"PATH (Toronto)\">\n Underground city\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M6X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M7X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M8X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n (\n <a href=\"/wiki/The_Kingsway,_Toronto\" title=\"The Kingsway, Toronto\">\n The Kingsway\n </a>\n / Montgomery Road /\n <a href=\"/wiki/Old_Mill,_Toronto\" title=\"Old Mill, Toronto\">\n Old Mill\n </a>\n North)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M9X\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n </tr>\n <tr>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M1Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M2Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M3Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M4Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Downtown_Toronto\" title=\"Downtown Toronto\">\n Downtown Toronto\n </a>\n <br/>\n (\n <a href=\"/wiki/Church_and_Wellesley\" title=\"Church and Wellesley\">\n Church and Wellesley\n </a>\n )\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M5Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M6Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M7Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n East Toronto\n <br/>\n <a class=\"mw-redirect\" href=\"/wiki/Business_reply_mail\" title=\"Business reply mail\">\n Business reply mail\n </a>\n Processing Centre\n <br/>\n 969 Eastern\n <br/>\n (Enclave of M4L)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M8Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n (\n <a href=\"/wiki/Old_Mill,_Toronto\" title=\"Old Mill, Toronto\">\n Old Mill\n </a>\n South / King's Mill Park /\n <a class=\"mw-redirect\" href=\"/wiki/Sunnylea\" title=\"Sunnylea\">\n Sunnylea\n </a>\n /\n <a href=\"/wiki/Humber_Bay\" title=\"Humber Bay\">\n Humber Bay\n </a>\n /\n <a 
href=\"/wiki/Mimico\" title=\"Mimico\">\n Mimico\n </a>\n NE /\n <a class=\"mw-redirect\" href=\"/wiki/The_Queensway_(Toronto)\" title=\"The Queensway (Toronto)\">\n The Queensway\n </a>\n East / Royal York South East / Kingsway Park South East)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M9Y\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n </tr>\n <tr>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M1Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M2Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M3Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M4Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M5Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M6Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M7Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top;\">\n <p>\n <b>\n M8Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <a href=\"/wiki/Etobicoke\" title=\"Etobicoke\">\n Etobicoke\n </a>\n <br/>\n (\n <a href=\"/wiki/Mimico\" title=\"Mimico\">\n Mimico\n </a>\n NW /\n <a href=\"/wiki/The_Queensway\" title=\"The Queensway\">\n The Queensway\n </a>\n West / South of Bloor / Kingsway Park South West / Royal York South West)\n </span>\n </p>\n </td>\n <td style=\"vertical-align:top; color:#ccc;\">\n <p>\n <b>\n M9Z\n </b>\n <br/>\n <span style=\"font-size:85%;\">\n <i>\n Not assigned\n </i>\n </span>\n </p>\n </td>\n </tr>\n </tbody>\n</table>\n\n" ], [ "# Building the dataframe.\ndata = pd.DataFrame(columns=[\"postcode\", \"borough\", \"neighbourhood\"])\n\nfor row in tables[0].tbody.find_all(\"tr\"):\n for cell in row.find_all(\"td\"):\n postcode = cell.b.text\n #print(postcode)\n i = cell.i\n if i:\n borough = i.text\n neighbourhood = i.text\n #print(borough)\n else:\n i = 0\n for ca in cell.find_all(\"a\"):\n if (i == 0):\n borough = ca[\"title\"]\n else:\n neighbourhood = ca[\"title\"]\n i = i + 1\n #print(ca[\"title\"])\n data = data.append({\"postcode\":postcode, \"borough\":borough, \"neighbourhood\":neighbourhood}, ignore_index=True)\n\n# Print result\ndata", "_____no_output_____" ], [ "data.shape", "_____no_output_____" ] ], [ [ "### Data preprocesing and cleaning", "_____no_output_____" ] ], [ [ "# Eliminating those columns from the \"Borough\" field that have the value 'Not assigned'.\ndf = data[data.borough != 'Not assigned']\n\n# Combining all same Postalcodes.\ndf = df.groupby(['postcode','borough'], sort=False).agg(', '.join)\ndf.reset_index(inplace=True)\n\n# Replacing, if a cell has a borough but a Not assigned neighborhood, then the neighborhood will be the same as the borough.\ndf['neighbourhood'] = np.where(df['neighbourhood'] == 'Not assigned', df['borough'], 
df['neighbourhood'])\n\ndf.head()\n", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "## 2) Segment", "_____no_output_____" ], [ "### Importing data in format CSV containing latitude and longitude of Canada", "_____no_output_____" ] ], [ [ "geodata = pd.read_csv('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/labs_v1/Geospatial_Coordinates.csv')\ngeodata.head()", "_____no_output_____" ] ], [ [ "### Joining Toronto neighbourhood table and geoespacial data", "_____no_output_____" ] ], [ [ "# Rename columnt 'Postal Code' to 'postcode'\ngeodata.rename(columns={'Postal Code':'postcode', 'Latitude':'latitude', 'Longitude':'longitude'}, inplace=True)\n\n# Merge\ndf = pd.merge(df, geodata, on='postcode')\ndf.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "## 3) Cluster the neighborhoods", "_____no_output_____" ], [ "### Explore and cluster the neighborhoods in Toronto. Therefore, considering only boroughs that contain the word Toronto", "_____no_output_____" ] ], [ [ "dfc = df[df['borough'].str.contains('Toronto',regex=False)]\ndfc.head()", "_____no_output_____" ], [ "dfc.shape", "_____no_output_____" ] ], [ [ "### Create a map of Toronto with neighborhoods superimposed on top.", "_____no_output_____" ] ], [ [ "address = 'Toronto'\n\ngeolocator = Nominatim(user_agent=\"ny_explorer\")\nlocation = geolocator.geocode(address)\nlatitude = location.latitude\nlongitude = location.longitude\nprint('The geograpical coordinate of New York City are {}, {}.'.format(latitude, longitude))", "The geograpical coordinate of New York City are 43.6534817, -79.3839347.\n" ], [ "map_toronto = folium.Map(location=[latitude, longitude], zoom_start=10)\n\nfor lat,lng,borough,neighbourhood in zip(dfc['latitude'],dfc['longitude'],dfc['borough'],dfc['neighbourhood']):\n label = '{}, {}'.format(neighbourhood, borough)\n label = folium.Popup(label, parse_html=True)\n folium.CircleMarker(\n [lat,lng],\n radius=5,\n popup=label,\n color='blue',\n fill=True,\n fill_color='#3186cc',\n fill_opacity=0.7,\n parse_html=False).add_to(map_toronto)\nmap_toronto", "_____no_output_____" ] ], [ [ "### Run k-means to cluster the neighborhood", "_____no_output_____" ] ], [ [ "# set number of clusters\nkclusters = 5\n\ntoronto_clustering = dfc.drop(['postcode', 'borough', 'neighbourhood'], 1)\n\n# run k-means clustering\nkmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_clustering)\n\n# Add labels in datset dfc.\ndfc.insert(0, 'cluster_label', kmeans.labels_)\n\n# check cluster labels generated for each row in the dataframe\nkmeans.labels_[0:10] ", "_____no_output_____" ], [ "dfc.head()", "_____no_output_____" ] ], [ [ "### Map for cluster.", "_____no_output_____" ] ], [ [ "map_toronto_cluster = folium.Map(location=[latitude, longitude],zoom_start=10)\n\n# set color scheme for the clusters\nx = np.arange(kclusters)\nys = [i + x + (i*x)**2 for i in range(kclusters)]\ncolors_array = cm.rainbow(np.linspace(0, 1, len(ys)))\nrainbow = [colors.rgb2hex(i) for i in colors_array]\n\n# add markers to the map\nmarkers_colors = []\nfor lat, lon, neighbourhood, cluster in zip(dfc['latitude'],dfc['longitude'],dfc['neighbourhood'], dfc['cluster_label']):\n label = folium.Popup(' Cluster ' + str(cluster), parse_html=True)\n folium.CircleMarker(\n [lat, lon],\n radius=5,\n popup=label,\n color=rainbow[cluster-1],\n fill=True,\n fill_color=rainbow[cluster-1],\n fill_opacity=0.7).add_to(map_toronto_cluster)\n 
\nmap_toronto_cluster", "_____no_output_____" ] ] ]
cell_types: [ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
cell_type_groups: [ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
Row 3:
hexsha: ecae8c618262bebb8cc9f9a80669fe5aa917bffc
size: 59,296
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Multi-label CNN.ipynb
max_stars_repo_name: aayn/codeforces-clean
max_stars_repo_head_hexsha: 2152e3a7e52b3fb067d4068ee69f9edac2925d49
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Multi-label CNN.ipynb
max_issues_repo_name: aayn/codeforces-clean
max_issues_repo_head_hexsha: 2152e3a7e52b3fb067d4068ee69f9edac2925d49
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Multi-label CNN.ipynb
max_forks_repo_name: aayn/codeforces-clean
max_forks_repo_head_hexsha: 2152e3a7e52b3fb067d4068ee69f9edac2925d49
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 55.939623
max_line_length: 1,992
alphanum_fraction: 0.618457
cells:
[ [ [ "# Final Experiments - Multi-label CNNText", "_____no_output_____" ], [ "## Utilities and Imports", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n\nimport itertools\nimport random\nfrom collections import Counter\nimport numpy as np\nimport pickle\nfrom operator import itemgetter\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\n%matplotlib inline\n# matplotlib.rcParams['figure.figsize'] = [5, 10]\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix, f1_score\nfrom sklearn.metrics import precision_recall_fscore_support, hamming_loss\nfrom sklearn.svm import LinearSVC, SVC\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom fastai import text as ft\nfrom fastai import dataloader as fd\nfrom fastai import dataset as fs\nfrom fastai import learner as fl\nfrom fastai import core as fc\nfrom fastai import metrics as fm\n\n\nfrom skai.runner import TextRunner, Adam_lambda\nfrom skai.mwrapper import MWrapper, SKModel\nfrom skai.utils import multi_to_text_out, vote_pred\nfrom skai.utils import get_classification_type, weights_init, multilabel_prediction, prf_report\nfrom skai.dataset import TokenDataset, SimpleDataset\nfrom skai.metrics import f1_micro_skai\n\n\ndef mapt(f, *iters):\n return tuple(map(f, *iters))\n\ndef mapl(f, *iters):\n return list(map(f, *iters))\n\ndef manually_remove_problems(data):\n \"\"\" remove problem from data if it has a certain tag\"\"\"\n final_data = {}\n remove = ['*special']\n for i in data:\n if set(data[i][1][0]).intersection(set(remove)) == set():\n if data[i][0][0] != '':\n final_data[i] = data[i]\n return final_data\n\ndef get_single_label_problems(data):\n '''returns a dict of all problems which only have one label'''\n single_label_problems = {}\n for i in data:\n if len(data[i][1][0]) == 1:\n single_label_problems[i] = data[i]\n return single_label_problems\n\ndef get_classwise_distribution(data):\n class_count = {}\n for i in data:\n for cls in data[i][1][0]:\n if cls in class_count:\n class_count[cls] +=1 \n else:\n class_count[cls] = 1\n return class_count\n\n\ndef get_topk_single_label_problems(data,k):\n \"\"\" get top k by frequency single label problems\"\"\"\n class_dict = get_classwise_distribution(data)\n print(class_dict)\n class_dict = dict(sorted(class_dict.items(), key=itemgetter(1), reverse=True)[:k])\n print(set(class_dict.keys()))\n\n topk_data = {}\n for i in data:\n if set(data[i][1][0]).intersection(set(class_dict.keys())) != set():\n topk_data[i] = data[i]\n \n return topk_data\n\ndef make_text_dataset(rdata):\n Xtext, ytext = [], []\n for url, data in rdata.items():\n try:\n ytext.append(data[1][0][0])\n except IndexError:\n continue\n Xtext.append(data[0][0])\n return Xtext, ytext\n\ndef make_multi_text_dataset(rdata):\n Xtext, ytext = [], []\n for url, data in rdata.items():\n try:\n ytext.append(data[1][0])\n except IndexError:\n continue\n Xtext.append(data[0][0])\n return Xtext, ytext\n\ndef make_statement_dataset(rdata):\n Xtext, ytext = [], []\n for url, data in rdata.items():\n try:\n ytext.append(data[1][0][0])\n except IndexError:\n continue\n Xtext.append(data[0][2])\n return Xtext, ytext\n\ndef make_non_statement_dataset(rdata):\n Xtext, 
ytext = [], []\n for url, data in rdata.items():\n try:\n ytext.append(data[1][0][0])\n except IndexError:\n continue\n Xtext.append(f'{data[0][3]}\\n{data[0][4]}\\n{data[0][5]}')\n return Xtext, ytext\n\ndef make_multi_statement_dataset(rdata):\n Xtext, ytext = [], []\n for url, data in rdata.items():\n try:\n ytext.append(data[1][0])\n except IndexError:\n continue\n Xtext.append(data[0][2])\n return Xtext, ytext\n\ndef get_class_list(labels):\n return list(set(labels))\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=True,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n fig = plt.gcf()\n fig.set_size_inches(22, 16)\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0.0, vmax=1.0)\n# plt.title(title, fontsize)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, fontsize=32)\n plt.yticks(tick_marks, classes, fontsize=32)\n\n print(cm.max())\n fmt = '.2f' if normalize else 'd'\n thresh = 0.5\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n fontsize=32)\n\n plt.tight_layout()\n plt.ylabel('True label', fontsize=32)\n plt.xlabel('Predicted label', fontsize=32)", "/home/aayn/anaconda3/envs/fastai/lib/python3.6/site-packages/sklearn/ensemble/weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. 
It will be removed in a future NumPy release.\n from numpy.core.umath_tests import inner1d\n" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "top10m = pickle.load(open('data/10multi_26aug.pkl', 'rb'))\ntop20m = pickle.load(open('data/20multi_26aug.pkl', 'rb'))\n\ntop10m, top20m = mapt(make_multi_text_dataset, [top10m, top20m])", "_____no_output_____" ], [ "print(len(top10m[0]))", "3737\n" ], [ "print(top10m[1][0])", "['binary search', 'data structures', 'brute force', 'dp']\n" ], [ "m20_labels = mapt(lambda x: x[0], Counter(x for labels in top20m[1] for x in labels).most_common())\nm10_labels = mapt(lambda x: x[0], Counter(x for labels in top10m[1] for x in labels).most_common())", "_____no_output_____" ] ], [ [ "## CNN Experiments", "_____no_output_____" ] ], [ [ "class CNN_Text(nn.Module):\n def __init__(self, embed_num, class_num, channel_in=1, \n kernel_sizes=[3, 4, 5], kernel_num=512, embed_dim=300):\n super().__init__()\n self.kernel_num = kernel_num\n self.embed = nn.Embedding(embed_num, embed_dim)\n \n convs = [nn.Conv1d(1, kernel_num, (ks, embed_dim))\n for ks in kernel_sizes]\n self.convs = nn.ModuleList(convs)\n# self.bn1 = nn.BatchNorm2d(kernel_num)\n self.fc1 = nn.Linear(len(kernel_sizes) * kernel_num, class_num)\n self.sigmoid = nn.Sigmoid()\n# self.tanh = nn.Tanh()\n \n def conv_and_pool(self, x, conv):\n x = F.relu(conv(x)).squeeze(3) # (N, Co, W)\n x = F.max_pool1d(x, x.size(2)).squeeze(2)\n return x\n \n def forward(self, x):\n x = self.embed(x)\n x = x.unsqueeze(1)\n \n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs]\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x]\n x = torch.cat(x, 1)\n \n out = self.sigmoid(self.fc1(x))\n return out", "_____no_output_____" ] ], [ [ "### 10-multi", "_____no_output_____" ] ], [ [ "trunner = TextRunner([None], top10m[0], top10m[1], 'top10m')\nin_dim = len(trunner.alldata.tvectorizer.itos)\nXall, yall = trunner.dataset", "Checkpoint reached: raw data cleaned.\nmultilabel classification.\n" ], [ "runs = 1\nout_dim = 10\n\nall_preds, all_targs = [], []\n\nfor i in range(runs):\n outer_cv = KFold(n_splits=10, shuffle=True, random_state=i+41)\n \n outer_cv.get_n_splits(Xall, yall)\n for j, (nontest_i, test_i) in enumerate(outer_cv.split(Xall, yall)):\n X_train, y_train = Xall[nontest_i], yall[nontest_i]\n X_test, y_test = Xall[test_i], yall[test_i]\n \n textcnn = MWrapper(CNN_Text(in_dim, out_dim),\n f'{i}_cnntext10m_{j}')\n textcnn.model.apply(weights_init)\n\n dl_train = fd.DataLoader(SimpleDataset(X_train, y_train),\n batch_size=32, num_workers=2,\n pad_idx=1, transpose=False)\n dl_val = fd.DataLoader(SimpleDataset(X_test, y_test),\n batch_size=32, num_workers=2,\n pad_idx=1, transpose=False)\n modeldata = fs.ModelData(str(textcnn.path), dl_train, dl_val)\n learner = fl.Learner.from_model_data(textcnn.model,\n modeldata,\n opt_fn=Adam_lambda())\n learner.metrics = [f1_micro_skai]\n learner.fit(5e-4, 10, best_save_name='best')\n \n dl_test = fd.DataLoader(SimpleDataset(X_test, y_test),\n batch_size=32, num_workers=2,\n pad_idx=1, transpose=False)\n learner.load('best')\n preds, targs = learner.predict_dl(dl_test)\n preds = multilabel_prediction(preds, 0.5)\n \n all_preds.append(preds)\n all_targs.append(targs)", "Note: Model directory for 0_cnntext10m_0 exists.\n" ], [ "all_preds = np.array(all_preds)\nall_targs = np.array(all_targs)\n\nall_preds = np.concatenate(all_preds, axis=0)\nall_targs = np.concatenate(all_targs, axis=0)", "_____no_output_____" ], [ "# pickle.dump([all_preds, all_targs], 
open('data/results/cnn_10m.pkl', 'wb'))\nall_preds, all_targs = pickle.load(open('data/results/cnn_10m.pkl', 'rb'))", "_____no_output_____" ], [ "print(all_preds[5])\nprint(all_targs[5])", "[0 0 0 0 0 0 0 0 0 0]\n[1 0 0 1 0 0 0 0 0 0]\n" ], [ "hl = hamming_loss(all_targs, all_preds)\nmicro_f1 = f1_score(all_targs, all_preds, average='micro')\nmacro_f1 = f1_score(all_targs, all_preds, average='macro')\nprf_report(all_targs, all_preds, labels=m10_labels)", "Label\tPrecsion\tRecall\tFScore\tSupport\nimplementation\t0.6141248720573184\t0.48231511254019294\t0.540297163439892\t1244\ndp\t0.48358585858585856\t0.47696139476961397\t0.48025078369905955\t803\nmath\t0.5240384615384616\t0.27735368956743\t0.3627287853577371\t786\ngreedy\t0.4393162393162393\t0.33727034120734906\t0.3815887156644395\t762\nbrute force\t0.2958199356913183\t0.14959349593495935\t0.19870410367170627\t615\ndata structures\t0.6443298969072165\t0.41118421052631576\t0.5020080321285141\t608\nconstructive algorithms\t0.47058823529411764\t0.12933025404157045\t0.20289855072463767\t433\ndfs and similar\t0.5539906103286385\t0.3163538873994638\t0.40273037542662116\t373\nbinary search\t0.28169014084507044\t0.054945054945054944\t0.09195402298850576\t364\nsortings\t0.3389830508474576\t0.056179775280898875\t0.09638554216867469\t356\n" ], [ "print(f'Hamming loss = {hl}\\nMicro_F1 = {micro_f1}l\\nMacro_F1 = {macro_f1}')", "Hamming loss = 0.16716617607706716\nMicro_F1 = 0.3920194647201946l\nMacro_F1 = 0.32595460752697875\n" ] ], [ [ "### 20-multi", "_____no_output_____" ] ], [ [ "trunner = TextRunner([None], top20m[0], top20m[1], 'top20m')\nin_dim = len(trunner.alldata.tvectorizer.itos)\nXall, yall = trunner.dataset", "Checkpoint reached: raw data cleaned.\nmultilabel classification.\n" ], [ "runs = 1\nout_dim = 20\n\nall_preds, all_targs = [], []\n\nfor i in range(runs):\n outer_cv = KFold(n_splits=10, shuffle=True, random_state=i+41)\n \n outer_cv.get_n_splits(Xall, yall)\n for j, (nontest_i, test_i) in enumerate(outer_cv.split(Xall, yall)):\n X_train, y_train = Xall[nontest_i], yall[nontest_i]\n X_test, y_test = Xall[test_i], yall[test_i]\n \n textcnn = MWrapper(CNN_Text(in_dim, out_dim),\n f'{i}_cnntext20m_{j}')\n textcnn.model.apply(weights_init)\n\n dl_train = fd.DataLoader(SimpleDataset(X_train, y_train),\n batch_size=32, num_workers=2,\n pad_idx=1, transpose=False)\n dl_val = fd.DataLoader(SimpleDataset(X_test, y_test),\n batch_size=32, num_workers=2,\n pad_idx=1, transpose=False)\n modeldata = fs.ModelData(str(textcnn.path), dl_train, dl_val)\n learner = fl.Learner.from_model_data(textcnn.model,\n modeldata,\n opt_fn=Adam_lambda())\n learner.metrics = [f1_micro_skai]\n learner.fit(5e-4, 10, best_save_name='best')\n \n dl_test = fd.DataLoader(SimpleDataset(X_test, y_test),\n batch_size=32, num_workers=2,\n pad_idx=1, transpose=False)\n learner.load('best')\n preds, targs = learner.predict_dl(dl_test)\n preds = multilabel_prediction(preds, 0.5)\n \n all_preds.append(preds)\n all_targs.append(targs)\n \n print(f1_score(np.concatenate(np.array(all_targs), axis=0), \n np.concatenate(np.array(all_preds), axis=0), average='micro'))", "Note: Model directory for 0_cnntext20m_0 exists.\n" ], [ "all_preds = np.array(all_preds)\nall_targs = np.array(all_targs)\n\nall_preds = np.concatenate(all_preds, axis=0)\nall_targs = np.concatenate(all_targs, axis=0)", "_____no_output_____" ], [ "# pickle.dump([all_preds, all_targs], open('data/results/cnn_20m.pkl', 'wb'))\nall_preds, all_targs = pickle.load(open('data/results/cnn_20m.pkl', 'rb'))", 
"_____no_output_____" ], [ "print(all_preds[7])", "[1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]\n" ], [ "hl = hamming_loss(all_targs, all_preds)\nmicro_f1 = f1_score(all_targs, all_preds, average='micro')\nmacro_f1 = f1_score(all_targs, all_preds, average='macro')\nprf_report(all_targs, all_preds, labels=m20_labels)", "Label\tPrecsion\tRecall\tFScore\tSupport\nimplementation\t0.5350414468726451\t0.5707395498392283\t0.5523142746013224\t1244\ndp\t0.5245614035087719\t0.3723536737235367\t0.4355426074289876\t803\nmath\t0.5609243697478992\t0.33969465648854963\t0.42313787638668776\t786\ngreedy\t0.5062893081761006\t0.21128608923884515\t0.29814814814814816\t762\nbrute force\t0.28012048192771083\t0.15121951219512195\t0.19640971488912357\t615\ndata structures\t0.5545112781954887\t0.48519736842105265\t0.5175438596491229\t608\nconstructive algorithms\t0.4782608695652174\t0.22863741339491916\t0.309375\t433\ndfs and similar\t0.574468085106383\t0.07238605898123325\t0.12857142857142856\t373\nbinary search\t0.45\t0.024725274725274724\t0.046875\t364\nsortings\t0.3717948717948718\t0.08146067415730338\t0.1336405529953917\t356\ngraphs\t0.6607142857142857\t0.32456140350877194\t0.43529411764705883\t342\ntrees\t0.8306451612903226\t0.3678571428571429\t0.5099009900990099\t280\nstrings\t0.4959016393442623\t0.5238095238095238\t0.5094736842105263\t231\nnumber theory\t0.48717948717948717\t0.08597285067873303\t0.14615384615384616\t221\ncombinatorics\t0.4824561403508772\t0.25821596244131456\t0.3363914373088685\t213\ngeometry\t0.6929133858267716\t0.45595854922279794\t0.55\t193\ntwo pointers\t0.0\t0.0\t0.0\t158\nbitmasks\t0.0\t0.0\t0.0\t153\ndsu\t0.5\t0.007042253521126761\t0.01388888888888889\t142\nprobabilities\t0.6935483870967742\t0.43434343434343436\t0.5341614906832298\t99\n" ], [ "print(f'Hamming loss = {hl}\\nMicro_F1 = {micro_f1}l\\nMacro_F1 = {macro_f1}')", "Hamming loss = 0.10232323232323233\nMicro_F1 = 0.3843816469158311l\nMacro_F1 = 0.3038411458830821\n" ] ], [ [ "## Problem-Algorithm Separate Analyis", "_____no_output_____" ] ], [ [ "# Demo of how to separate tags into categories\nexm = all_preds[0:5]\nprint(exm)\n\nprob_idxs = (2, 5, 10, 11, 12, 13, 14 , 15, 19)\nalg_idxs = (0, 1, 3, 4, 6, 7, 8, 9, 16, 17, 18)\n\nprob_targs = [exm[:, i] for i in prob_idxs]\nprob_targs = np.concatenate([prob_targs]).transpose(1, 0)\nprint(prob_targs)\n\n# np.concatenate([[exm[:, 1]], [exm[:, 3]]]).transpose(1, 0)", "[[1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [1 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n [0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0]\n [1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0]]\n[[0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0 0 0]\n [0 0 0 0 0 0 1 0 0]\n [0 0 0 0 1 0 0 0 0]]\n" ], [ "# Actual problem-algorithm splitting\nprob_idxs = (2, 5, 10, 11, 12, 13, 14 , 15, 19)\nalg_idxs = (0, 1, 3, 4, 6, 7, 8, 9, 16, 17, 18)\n\nprobcat_targs = [all_targs[:, i] for i in prob_idxs]\nprobcat_targs = np.concatenate([probcat_targs]).transpose(1, 0)\nprint(probcat_targs)\n\nprobcat_preds = [all_preds[:, i] for i in prob_idxs]\nprobcat_preds = np.concatenate([probcat_preds]).transpose(1, 0)\nprint(probcat_preds)", "[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 1 0 0]\n ...\n [0 0 0 ... 0 1 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 1 0 ... 
0 0 0]]\n" ], [ "probcat_hl = hamming_loss(probcat_targs, probcat_preds)\nprobcat_micro_f1 = f1_score(probcat_targs, probcat_preds, average='micro')\nprobcat_macro_f1 = f1_score(probcat_targs, probcat_preds, average='macro')\n\nprint(f'Hamming loss = {probcat_hl}\\nMicro_F1 = {probcat_micro_f1}l\\nMacro_F1 = {probcat_macro_f1}')", "Hamming loss = 0.07449494949494949\nMicro_F1 = 0.4535912739246759l\nMacro_F1 = 0.4402285891264833\n" ], [ "# Actual problem-algorithm splitting\nprob_idxs = (2, 5, 10, 11, 12, 13, 14 , 15, 19)\nalg_idxs = (0, 1, 3, 4, 6, 7, 8, 9, 16, 17, 18)\n\nalgcat_targs = [all_targs[:, i] for i in alg_idxs]\nalgcat_targs = np.concatenate([algcat_targs]).transpose(1, 0)\nprint(algcat_targs)\n\nalgcat_preds = [all_preds[:, i] for i in alg_idxs]\nalgcat_preds = np.concatenate([algcat_preds]).transpose(1, 0)\nprint(algcat_preds)", "[[1 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 1 0 ... 0 0 0]\n [0 1 0 ... 0 0 0]]\n[[1 0 0 ... 0 0 0]\n [1 0 1 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [1 0 0 ... 0 0 0]\n [0 1 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n" ], [ "algcat_hl = hamming_loss(algcat_targs, algcat_preds)\nalgcat_micro_f1 = f1_score(algcat_targs, algcat_preds, average='micro')\nalgcat_macro_f1 = f1_score(algcat_targs, algcat_preds, average='macro')\n\nprint(f'Hamming loss = {algcat_hl}\\nMicro_F1 = {algcat_micro_f1}l\\nMacro_F1 = {algcat_macro_f1}')", "Hamming loss = 0.12509182736455463\nMicro_F1 = 0.34388922335942207l\nMacro_F1 = 0.19225141959302644\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ecae904ddfb82c855d1bffd4550be1daa24f0444
48,183
ipynb
Jupyter Notebook
Exemplo_02.ipynb
julianovale/project_trains
73f698ab9618363b93777ab7337be813bf14d688
[ "MIT" ]
null
null
null
Exemplo_02.ipynb
julianovale/project_trains
73f698ab9618363b93777ab7337be813bf14d688
[ "MIT" ]
null
null
null
Exemplo_02.ipynb
julianovale/project_trains
73f698ab9618363b93777ab7337be813bf14d688
[ "MIT" ]
null
null
null
34.367332
235
0.237822
[ [ [ "<a href=\"https://colab.research.google.com/github/julianovale/project_trains/blob/master/Exemplo_02.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "from sympy import I, Matrix, symbols, Symbol, eye\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\n", "_____no_output_____" ], [ "# Routes\nR1 = Matrix([[0,\"R1_p1\",0],[0,0,\"R1_v1\"],[0,0,0]])\nR2 = Matrix([[0,\"R2_p1\",0],[0,0,\"R2_v1\"],[0,0,0]])", "_____no_output_____" ], [ "# Sections (signals)\nT1 = Matrix([[0, \"p1\"],[\"v1\", 0]])", "_____no_output_____" ], [ "def kronSum(A,B):\n    m = np.size(A,1)\n    n = np.size(B,1)\n    A = np.kron(A,np.eye(n))\n    B = np.kron(np.eye(m),B)\n    return A + B", "_____no_output_____" ], [ "momento_inicio = datetime.now() \n'''\nRoute algebra\n'''\nrotas = kronSum(R1,R2)\n\n'''\nSection algebra\nsecoes = kronSum(T1,T2)\nsecoes = kronSum(secoes,T3)\nsecoes = kronSum(secoes,T4)\nsecoes = kronSum(secoes,T5)\n'''\n\n'''\nSystem algebra\n'''\nsistema = np.kron(rotas, T1) # remember to switch to \"secoes\" if there are several signals\n\n# compute processing time\ntempo_processamento = datetime.now() - momento_inicio", "_____no_output_____" ], [ "sistema = pd.DataFrame(data=sistema,index=list(range(1,np.size(sistema,0)+1)), columns=list(range(1,np.size(sistema,1)+1)))", "_____no_output_____" ], [ "sistema.shape", "_____no_output_____" ], [ "print(tempo_processamento)", "0:00:00.012708\n" ], [ "sistema", "_____no_output_____" ], [ "momento_inicio = datetime.now() \ncolunas = ['de_noh', 'para_noh', 'aresta']\ngrafo = pd.DataFrame(columns=colunas)\nr = 1\nc = 1\nfor j in range(np.size(sistema,0)):\n    for i in range(np.size(sistema,0)):\n        if sistema.loc[r,c]==0 and c < np.size(sistema,0):\n            c += 1\n        elif c < np.size(sistema,0):\n            grafo.loc[len(grafo)+1] = (r, c, sistema.loc[r,c]) \n            c += 1\n        else:\n            c = 1\n            r += 1\ntempo_processamento = datetime.now() - momento_inicio\nprint(tempo_processamento)", "0:00:00.082682\n" ], [ "grafo['aresta'] = grafo['aresta'].astype('str')\ngrafo", "_____no_output_____" ], [ "new = grafo[\"aresta\"].str.split(\"*\", n = -1, expand = True) \ngrafo[\"aresta\"]=new[1]\ngrafo[\"semaforo_secao\"]=new[2]\nnew = grafo[\"aresta\"].str.split(\"_\", n = -1, expand = True) \ngrafo[\"semaforo_trem\"]=new[1]\ngrafo\n", "_____no_output_____" ], [ "grafo = pd.DataFrame(data=grafo)", "_____no_output_____" ], [ "# STEP 1\nalcancavel = [1]\nN = np.size(grafo,0)\nfor i in range(N):\n    de = grafo.loc[i+1]['de_noh']\n    para = grafo.loc[i+1]['para_noh']\n    if de in alcancavel:\n        alcancavel.append(para)\n    else:\n        i += 1\n\n    alcancavel.sort()", "_____no_output_____" ], [ "grafo01 = grafo[grafo.de_noh.isin(alcancavel)]", "_____no_output_____" ], [ "grafo01", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecae952d72912a32e0363cba5f0bc18f16462834
4,510
ipynb
Jupyter Notebook
measfunc/setup_named_qdac_channel_parameters.ipynb
qdev-dk/MeasFunc
2ac720891c1e033a1a8e2cd0b49e6e11da53511c
[ "MIT" ]
null
null
null
measfunc/setup_named_qdac_channel_parameters.ipynb
qdev-dk/MeasFunc
2ac720891c1e033a1a8e2cd0b49e6e11da53511c
[ "MIT" ]
null
null
null
measfunc/setup_named_qdac_channel_parameters.ipynb
qdev-dk/MeasFunc
2ac720891c1e033a1a8e2cd0b49e6e11da53511c
[ "MIT" ]
null
null
null
53.690476
161
0.538359
[ [ [ "gate_to_bbox_mapping = {\n 'top_confinement': 14,\n 'bottom_confinement': 16,\n 'source_gate': 21,\n 'source_ohmic': 22,\n 'barrier_source_Tplunger': 11,\n 'Tplunger': 12,\n 'barrier_Tplunger_drain': 10,\n 'drain_gate': 13,\n 'drain_ohmic': 6,\n 'SET_confinement': 24,\n 'SET_plunger1_gate': 20,\n 'SET_plunger1_ohmic': 23,\n 'SET_left_barrier': 1,\n 'SET_right_barrier': 2,\n 'SET_plunger2_gate': 18,\n 'SET_plunger2_ohmic': 7}", "_____no_output_____" ], [ "safety_limits = {'V_gate': np.array([-0.5, 3.75]),\n 'V_SET_barrier': np.array([-0.5, 1.5]),\n 'V_barrier_Tplunger_drain': np.array([-0.5, 3.0]), \n 'V_JellyBean_gate': np.array([-0.5, 2.5]), \n 'V_confinement': np.array([-1.1, 0.1]), \n 'V_ohmic': np.array([-0.025, 0.025])}\n\nfor qdac_A_ch in qdac_A.channels:\n qdac_A_ch_index = int(qdac_A_ch._name.split(\"chan\")[1])\n if (qdac_A_ch_index in gate_to_bbox_mapping.values()):\n gate_name = list(gate_to_bbox_mapping.keys())[list(gate_to_bbox_mapping.values()).index(qdac_A_ch_index)]\n vars()[\"I_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"I_\"+gate_name, \n label=\"I_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"A\", get_cmd=qdac_A_ch.i) \n if (\"ohmic\" in gate_name):\n vars()[\"V_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"V_\"+gate_name, \n label=\"V_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"V\", get_cmd=qdac_A_ch.v, \n set_cmd=qdac_A_ch.v, vals=Numbers(safety_limits['V_ohmic'].min(), safety_limits['V_ohmic'].max())) \n elif (\"confinement\" in gate_name):\n vars()[\"V_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"V_\"+gate_name, \n label=\"V_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"V\", get_cmd=qdac_A_ch.v, \n set_cmd=qdac_A_ch.v, vals=Numbers(safety_limits['V_confinement'].min(), safety_limits['V_confinement'].max())) \n else:\n if (gate_name == \"SET_left_barrier\") or (gate_name == \"SET_right_barrier\"):\n vars()[\"V_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"V_\"+gate_name, \n label=\"V_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"V\", get_cmd=qdac_A_ch.v, \n set_cmd=qdac_A_ch.v, vals=Numbers(safety_limits['V_SET_barrier'].min(), safety_limits['V_SET_barrier'].max())) \n elif (gate_name == \"barrier_Tplunger_drain\"):\n vars()[\"V_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"V_\"+gate_name, \n label=\"V_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"V\", get_cmd=qdac_A_ch.v, \n set_cmd=qdac_A_ch.v, vals=Numbers(safety_limits['V_barrier_Tplunger_drain'].min(), safety_limits['V_barrier_Tplunger_drain'].max()))\n elif ((gate_name == \"Tplunger\") or (gate_name == \"barrier_source_Tplunger\")):\n vars()[\"V_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"V_\"+gate_name, \n label=\"V_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"V\", get_cmd=qdac_A_ch.v, \n set_cmd=qdac_A_ch.v, vals=Numbers(safety_limits['V_JellyBean_gate'].min(), safety_limits['V_JellyBean_gate'].max()))\n else:\n vars()[\"V_\"+gate_name] = qc.Parameter(instrument=qdac_A, name=\"V_\"+gate_name, \n label=\"V_\"+gate_name+\"_ch_\"+str(qdac_A_ch_index), unit=\"V\", get_cmd=qdac_A_ch.v, \n set_cmd=qdac_A_ch.v, vals=Numbers(safety_limits['V_gate'].min(), safety_limits['V_gate'].max()))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ecae9be6c7cc552683ea29bedbe115187962378c
767,347
ipynb
Jupyter Notebook
week_07/Project_week_07.ipynb
xhguo86/spiced_academy_backup-
7b65a94d0a03149bb9fc71e35a799074b4412925
[ "MIT" ]
null
null
null
week_07/Project_week_07.ipynb
xhguo86/spiced_academy_backup-
7b65a94d0a03149bb9fc71e35a799074b4412925
[ "MIT" ]
null
null
null
week_07/Project_week_07.ipynb
xhguo86/spiced_academy_backup-
7b65a94d0a03149bb9fc71e35a799074b4412925
[ "MIT" ]
null
null
null
175.674679
107,664
0.842779
[ [ [ "# Time Series Workflow", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## 0) Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.linear_model import LinearRegression\n# from statsmodels.api import OLS, add_constant, qqplot\nfrom statsmodels.tsa.ar_model import AutoReg, ar_select_order\nfrom statsmodels.graphics.tsaplots import plot_pacf\n\n# from cleanup import clean_data\n# from engineer import feature_engineer\n# Set figure size to (14,6)\nplt.rcParams['figure.figsize'] = (14,6)", "_____no_output_____" ] ], [ [ "## 1) Define the Business Goal\n\nBuild a model that can predict tomorrows temperature, given the temprature until today, as precisely as possible.", "_____no_output_____" ], [ "## 2) Get the Data\n\n### 2.1) Load the Data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('TG_STAID002759.csv', index_col=1, parse_dates=True)\ndf.head()", "_____no_output_____" ] ], [ [ "### 2.2) Clean the Data", "_____no_output_____" ] ], [ [ "def clean_data(df):\n df['TG'].replace(-9999, np.NaN, inplace=True)\n df['TG2']=df['TG']/10\n df = df.drop(['SOUID', 'TG','Q_TG'], axis=1)\n df = df.dropna()\n return df", "_____no_output_____" ], [ "df = clean_data(df)\ndf.head()", "_____no_output_____" ] ], [ [ "## 3) Train-Test-Split", "_____no_output_____" ] ], [ [ "df_train = df[:-365]\n\ndf_test = df[-365:]", "_____no_output_____" ] ], [ [ "## 4) Visualize the Data", "_____no_output_____" ] ], [ [ "df_train.plot()", "_____no_output_____" ] ], [ [ "## 5) Feature Engineer", "_____no_output_____" ] ], [ [ "df_train.head()\n#iterator = iter(range(len(df_train)))", "_____no_output_____" ], [ "def plot_temp(df, title='Monthly mean temperature over Time', ylim=True):\n '''\n Custom plotting function for plotting the flights dataset\n \n Parameters\n ----------\n df : pd.DataFrame\n The data to plot.\n title : str\n The title of the plot\n ylim : bool\n Whether to fix the minimum value of y; default is True\n \n Returns\n -------\n Plots the data\n '''\n df.plot()\n plt.title(title)\n plt.ylabel('Temperature')\n if ylim:\n plt.ylim(ymin=0)\n plt.show()", "_____no_output_____" ], [ "# year trend \ndf_train['timestep'] = range(len(df_train))\n\nX = df_train[['timestep']] #matrix (or DataFrame)\ny = df_train['TG2'] #vector (or Series)\nm = LinearRegression()\nm.fit(X, y)\nm.coef_", "<ipython-input-526-137cc0d45e3e>:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df_train['timestep'] = range(len(df_train))\n" ], [ "#df_train['year_trend'] = m.predict(X)\n#df_train[['TG2', 'year_trend']].plot()", "_____no_output_____" ], [ "def feature_engineer(df_train):\n #Extract the Timestep and the Seasonal Dummies\n df_train['timestep'] = range(len(df_train))\n \n seasonal_dummies = pd.get_dummies(df_train.index.month,\n prefix='month',\n drop_first=True).set_index(df_train.index)\n\n df_train_fe = df_train.join(seasonal_dummies)\n \n # define remainder \n m = LinearRegression()\n y_train = df_train_fe.copy().iloc[:,0]\n X_train = df_train_fe.copy().iloc[:,1:]\n m.fit(X_train, y_train)\n df_train_fe['trend_seasonal'] = m.predict(X_train)\n df_train_fe['remainder'] = df_train_fe['TG2'] - df_train_fe['trend_seasonal']\n \n # add lag(4)\n df_train_fe['lag1'] = 
df_train_fe['remainder'].shift(1)\n df_train_fe['lag2'] = df_train_fe['remainder'].shift(2)\n df_train_fe['lag3'] = df_train_fe['remainder'].shift(3)\n df_train_fe['lag4'] = df_train_fe['remainder'].shift(4)\n df_train_fe.dropna(inplace=True)\n \n # For assign X\n df_train_fe = df_train_fe.drop(columns=['trend_seasonal', 'remainder'])\n \n return df_train_fe", "_____no_output_____" ], [ "df_train_fe = feature_engineer(df_train)\ndf_train_fe.head()", "<ipython-input-528-8f40635a29d2>:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n df_train['timestep'] = range(len(df_train))\n" ], [ "# Now assign X and y\ny_train = df_train_fe.copy().iloc[:,0]\n\nX_train = df_train_fe.copy().iloc[:,1:]", "_____no_output_____" ], [ "X_train", "_____no_output_____" ] ], [ [ "## 6) Train a model", "_____no_output_____" ] ], [ [ "m = LinearRegression()\nm.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "**You will see other models this week!**", "_____no_output_____" ], [ "## 7) Cross-Validate and Optimize Hyperparameters", "_____no_output_____" ] ], [ [ "df_train_fe['pred'] = m.predict(X_train)\ndf_train_fe.head()", "_____no_output_____" ], [ "plot_temp(df_train_fe[['TG2', 'pred']])", "_____no_output_____" ], [ "# analysis remainder 2nd round\ndf_train_fe['remainder'] = df_train_fe['TG2'] - df_train_fe['pred']\ndf_train_fe['remainder'].plot()", "_____no_output_____" ], [ "# another way of ploting\nplot_temp(df_train_fe['remainder'], title='Remainder after modelling trend and seasonality', ylim=False)", "_____no_output_____" ], [ "#Inspect the remainder to decide how many lags to include\nfrom statsmodels.graphics.tsaplots import plot_pacf\nplot_pacf(df_train_fe['remainder']);", "_____no_output_____" ], [ "# Let ar_select_order select the number of lags for the remainder\nselected_order = ar_select_order(endog=df_train_fe['remainder'], maxlag=12)", "/home/guo/anaconda3/lib/python3.8/site-packages/statsmodels/tsa/base/tsa_model.py:216: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n warnings.warn('A date index has been provided, but it has no'\n/home/guo/anaconda3/lib/python3.8/site-packages/statsmodels/tsa/base/tsa_model.py:216: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. 
forecasting.\n warnings.warn('A date index has been provided, but it has no'\n" ], [ "# Inspect the number of lags chosen\nselected_order.ar_lags", "_____no_output_____" ], [ "# statsmodels also has an AutoReg which does a bit of the modelling for you\n#from statsmodels.tsa.ar_model import AutoReg\n#ar_model = AutoReg(endog=df_train_fe['remainder'], lags=4).fit()\n# Look at the model summary\n#ar_model.summary()", "_____no_output_____" ], [ "X_full = df_train_fe.drop(columns=['TG2', 'pred', 'remainder'])\ny_full = df_train_fe['TG2']", "_____no_output_____" ], [ "X_full.head()", "_____no_output_____" ], [ "m_full = LinearRegression()\nm_full.fit(X_full, y_full)", "_____no_output_____" ], [ "# Create a new predictions column\ndf_train_fe['predictions_full_model'] = m_full.predict(X_full)", "_____no_output_____" ], [ "plot_temp(df_train_fe[['TG2', 'predictions_full_model']])", "_____no_output_____" ], [ "# Inspect correlations\ncorrelations = round(df_train_fe.corr(), 2)\nsns.heatmap(correlations, annot=True)", "_____no_output_____" ], [ "# cross validation\nfrom sklearn.model_selection import TimeSeriesSplit, cross_val_score\n# Create a TimeSeriesSplit object\nts_split = TimeSeriesSplit(n_splits=5)\nts_split.split(X_full, y_full)\n# Split the training data into folds\nfor i, (train_index, validation_index) in enumerate(ts_split.split(X_full, y_full)):\n print(f'The training data for the {i+1}th iteration are the observations {train_index}')\n print(f'The validation data for the {i+1}th iteration are the observations {validation_index}')\n print()", "The training data for the 1th iteration are the observations [ 0 1 2 ... 8749 8750 8751]\nThe validation data for the 1th iteration are the observations [ 8752 8753 8754 ... 17496 17497 17498]\n\nThe training data for the 2th iteration are the observations [ 0 1 2 ... 17496 17497 17498]\nThe validation data for the 2th iteration are the observations [17499 17500 17501 ... 26243 26244 26245]\n\nThe training data for the 3th iteration are the observations [ 0 1 2 ... 26243 26244 26245]\nThe validation data for the 3th iteration are the observations [26246 26247 26248 ... 34990 34991 34992]\n\nThe training data for the 4th iteration are the observations [ 0 1 2 ... 34990 34991 34992]\nThe validation data for the 4th iteration are the observations [34993 34994 34995 ... 43737 43738 43739]\n\nThe training data for the 5th iteration are the observations [ 0 1 2 ... 43737 43738 43739]\nThe validation data for the 5th iteration are the observations [43740 43741 43742 ... 
52484 52485 52486]\n\n" ], [ "# Create the time series split\ntime_series_split = ts_split.split(X_full, y_full) ", "_____no_output_____" ], [ "# Do the cross validation\nresult = cross_val_score(estimator=m_full, X=X_full, y=y_full, cv=time_series_split)\nresult", "_____no_output_____" ], [ "result.mean()", "_____no_output_____" ], [ "import numpy as np\nfrom matplotlib import pyplot as plt\nimport statsmodels\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport pandas as pd\n\nimport statsmodels\nfrom statsmodels.tsa.ar_model import AutoReg\nfrom statsmodels.tsa import stattools\nimport seaborn as sns\n\ndef print_adf(data):\n    \n    \"\"\" Prints the results of the augmented Dickey Fuller Test\"\"\"\n    \n    adf_stats, p, used_lag, n_obs, levels, information_criterion = \\\n        statsmodels.tsa.stattools.adfuller(data)\n    \n    print(f\"\"\"adf_stats: {adf_stats}\n    p: {p} \n    used lag: {used_lag} \n    number of observations: {n_obs}\n    CI 99%: {levels['1%']}\n    CI 95%: {levels['5%']}\n    CI 90%: {levels['10%']}\n    information criterion (AIC): {information_criterion}\n    \"\"\")", "_____no_output_____" ], [ "print_adf(df_train_fe['remainder'])", "adf_stats: -33.7013113767831\n    p: 0.0 \n    used lag: 34 \n    number of observations: 52452\n    CI 99%: -3.4304746781830016\n    CI 95%: -2.8615951052531075\n    CI 90%: -2.5667993306938484\n    information criterion (AIC): 236568.0230080661\n    \n" ], [ "ar_model = AutoReg(endog=df_train_fe['remainder'], lags=4).fit()\nar_model.summary()", "/home/guo/anaconda3/lib/python3.8/site-packages/statsmodels/tsa/base/tsa_model.py:216: ValueWarning: A date index has been provided, but it has no associated frequency information and so will be ignored when e.g. forecasting.\n  warnings.warn('A date index has been provided, but it has no'\n" ], [ "df_train_fe.remainder.plot()", "_____no_output_____" ] ], [ [ "## 8) Test", "_____no_output_____" ] ], [ [ "df_test_fe = feature_engineer(df_test)\n\ny_test = df_test_fe.copy().iloc[:,0]\nX_test = df_test_fe.copy().iloc[:,1:]", "_____no_output_____" ], [ "r2 = round(m.score(X_test, y_test), 2)", "_____no_output_____" ], [ "print(f'The R-squared of our model is {r2}')", "_____no_output_____" ], [ "# test 100 years later", "_____no_output_____" ], [ "# full m model\ndf['timestep'] = range(len(df))\n\nfrom sklearn.linear_model import LinearRegression\nbaseline_model = LinearRegression()\n\ndf_past = feature_engineer(df)", "_____no_output_____" ], [ "df_past", "_____no_output_____" ], [ "# define X, y\n#X_past = df_past.drop(columns=['TG2'])\nX_past = df_past.drop(columns=['TG2','lag1','lag2','lag3','lag4'])\n# X_past = df_past.drop(columns=['TG2'])\ny_past = df_past['TG2']", "_____no_output_____" ], [ "baseline_model.fit(X_past, y_past)", "_____no_output_____" ], [ "#df.tail()\nX_past\n# create predict future timeline table\n# plot", "_____no_output_____" ], [ "df_future = pd.DataFrame(index=pd.date_range(start='Apr 2021', end='Jan 2100', freq='D'))\n", "_____no_output_____" ], [ "last_timestep = max(df['timestep'])\nlast_timestep", "_____no_output_____" ], [ "df_future['timestep'] = range(last_timestep + 1, last_timestep + 1 + len(df_future))", "_____no_output_____" ], [ "# get dummies\nfuture_seasonal_dummies = pd.get_dummies(df_future.index.month, prefix='month',\n                                         drop_first=True).set_index(df_future.index)\n# merge table\ndf_future = pd.concat([df_future, future_seasonal_dummies], axis=1)\n# \ndf_future.head()", "_____no_output_____" ], [ "baseline_model.predict(df_future)", "_____no_output_____" ], [ "# no lag 
included\nplt.plot(df['TG2'], 'b-')\nplt.plot(df_future.index, baseline_model.predict(df_future), 'ro-')", "_____no_output_____" ], [ "# include lags\nm_remainder = LinearRegression()", "_____no_output_____" ], [ "X_past", "_____no_output_____" ], [ "df_past['TG2']", "_____no_output_____" ], [ "X_past['pred_past'] = baseline_model.predict(X_past)", "_____no_output_____" ], [ "X_past['remainder'] = df_past['TG2'] - X_past['pred_past']", "_____no_output_____" ], [ "m_remainder.fit(X_past[['remainder']].shift(1)[1:], X_past['remainder'][1:])", "_____no_output_____" ], [ "X_past[['remainder']]", "_____no_output_____" ], [ "X_past['pred_past'] = baseline_model.predict(X_past)", "_____no_output_____" ], [ "m_remainder.predict(X_past[['remainder']])", "_____no_output_____" ], [ "X_past", "_____no_output_____" ], [ "from statsmodels.tsa.ar_model import AutoReg\nar_model = AutoReg(endog=X_past['remainder'], lags=3).fit()\nar_model.summary()", "_____no_output_____" ], [ "ar_model.predict(start='2020-03-31', end='2025-03-31')", "_____no_output_____" ], [ "predicted_remainder = np.zeros(len(df_future))\npredicted_remainder[0] = m_remainder.predict(X_past[['remainder']][-1:])", "_____no_output_____" ], [ "predicted_remainder", "_____no_output_____" ], [ "df_future['pred_past_reminder'] = baseline_model.predict(df_future)", "_____no_output_____" ], [ "df_future", "_____no_output_____" ], [ "predicted_remainder = df_future['pred_past_reminder'] ", "_____no_output_____" ], [ "# lag included\nplt.plot(df['TG2'], 'b-')\nplt.plot(df_future.index, baseline_model.predict(df_future), 'ro-')\nplt.plot(df_future.index, baseline_model.predict(df_future) + predicted_remainder, 'ro-')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecaeab4eb83cf8b137e777a5111cc40dbeacf9b5
181,799
ipynb
Jupyter Notebook
Code/Assignment-8/extended_analysis_ADHD_Bipolar_yatbear.ipynb
Upward-Spiral-Science/spect-team
b5876fd76fc1da376b5d1fc6fd9337f620df142c
[ "Apache-2.0" ]
null
null
null
Code/Assignment-8/extended_analysis_ADHD_Bipolar_yatbear.ipynb
Upward-Spiral-Science/spect-team
b5876fd76fc1da376b5d1fc6fd9337f620df142c
[ "Apache-2.0" ]
3
2016-02-11T21:18:53.000Z
2016-04-27T03:50:34.000Z
Code/Assignment-8/extended_analysis_ADHD_Bipolar_yatbear.ipynb
Upward-Spiral-Science/spect-team
b5876fd76fc1da376b5d1fc6fd9337f620df142c
[ "Apache-2.0" ]
null
null
null
200.439912
60,978
0.897282
[ [ [ "### Extended Analysis: ADHD and Bipolar Disorder Identification - by yatbear\n\nThis notebook contains several analyses I conducted before for the depression prediction (e.g. PCA, K-S test, five classification methods), which are reapplied on the ADHD and Bipolar Identification problem. New analyses (primarily in the prediction section) are also added.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\ndf_patient = pd.read_csv('patient_info.csv')\ndf_disorders = pd.read_csv('disorders.csv')\ndf_questionnaire = pd.read_csv('questionnaire.csv')\ndf_base_concen = pd.read_csv('base_concen.csv')", "_____no_output_____" ] ], [ [ "### Descriptive Analysis", "_____no_output_____" ], [ "#### Q1. How many mutually exclusive ADHD and Bipolar samples do we have?", "_____no_output_____" ] ], [ [ "c1, c2 = 0, 0\nadhd_id, bipolar_id = list(), list()\n\nfor i, a, b in df_disorders[['Patient_ID', 'ADHD', 'Bipolar']].values:\n if a == 1 and b == 0:\n c1 += 1\n adhd_id.append(i)\n elif a == 0 and b == 1:\n c2 += 1\n bipolar_id.append(i)\n \nprint 'Number of ADHD patients (without Bipolar) is', c1\nprint 'Number of Bipolar patients (without ADHD) is', c2", "Number of ADHD patients (without Bipolar) is 1040\nNumber of Bipolar patients (without ADHD) is 307\n" ], [ "# Extract Learning Disablity Screening Questionnair data\ndf_questionnaire.drop(['Patient_ID', 'LDS_Respondent'], axis=1, inplace=True)\nlds = list()\nfor col in df_questionnaire.columns:\n if 'LDS' in col:\n lds.append(col)\ndf_lds = df_questionnaire[lds]", "_____no_output_____" ], [ "# Separate baseline from concentration rCBF values\ndf_base_concen.drop(['Baseline_header_id', 'Concentration_header_id'], axis=1, inplace=True)\nbase, concen = list(), list()\nfor col in df_base_concen.columns:\n if 'Baseline' in col:\n base.append(col)\n elif 'Concentration' in col:\n concen.append(col)\ndf_base = df_base_concen[base]\ndf_concen = df_base_concen[concen]", "_____no_output_____" ], [ "# Get basic personal info and rCBF values\ndf_patient = df_patient[['Patient_ID', 'Age', 'Gender_id', 'race_id', 'location_id']]\ndf = pd.concat([df_patient, df_lds, df_base, df_concen], axis=1)", "_____no_output_____" ], [ "# Original positive and negative features\ndf_adhd = df.loc[df['Patient_ID'].isin(adhd_id)].drop(['Patient_ID'], axis=1)\ndf_bipolar = df.loc[df['Patient_ID'].isin(bipolar_id)].drop(['Patient_ID'], axis=1)\ndf_adhd.shape, df_bipolar.shape", "_____no_output_____" ] ], [ [ "### Exploratory Analysis", "_____no_output_____" ], [ "#### Principal Component Analysis", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\n# Plot explained variance ratio\ndef plot(ex_var_ratio):\n plt.plot(ex_var_ratio)\n plt.ylabel('Explained Variance Ratio')\n plt.xlabel('Number of Principal Components')\n\ndef pca(X, n):\n pca = PCA(n_components=n)\n pca_X = pca.fit_transform(X)\n \n print '\\nExplained Variance Ratios:'\n print pca.explained_variance_ratio_\n print '\\nSum of Explained Variance Ratios:', \n print np.sum(pca.explained_variance_ratio_)\n\n return pca_X, pca.explained_variance_ratio_", "_____no_output_____" ], [ "from sklearn.decomposition import SparsePCA\n\n# Compute explained variance ratio of transformed data\ndef compute_explained_variance_ratio(transformed_data):\n explained_variance = np.var(transformed_data, axis=0)\n explained_variance_ratio = explained_variance / np.sum(explained_variance)\n explained_variance_ratio = 
np.sort(explained_variance_ratio)[::-1]\n return explained_variance_ratio\n\ndef sparse_pca(X, n):\n spca = SparsePCA(n_components=n) \n spca_transform = spca.fit_transform(X)\n ex_var_ratio = compute_explained_variance_ratio(spca_transform)\n return spca_transform, ex_var_ratio", "_____no_output_____" ] ], [ [ "#### Q2. How many dimensions can capture more than 95% variance of the LDS data (using PCA)?\n- Sixty.", "_____no_output_____" ] ], [ [ "pca_lds, ex_var_ratio = pca(df_lds.get_values(), 60)\nplot(ex_var_ratio)", "\nExplained Variance Ratios:\n[ 0.25319082 0.06375808 0.05584385 0.04520017 0.0364009 0.03038621\n 0.02536708 0.02317602 0.02144985 0.01871833 0.01728378 0.01623972\n 0.0158757 0.01371272 0.01336975 0.0127428 0.01239438 0.01094738\n 0.01017911 0.00986827 0.00941541 0.00924407 0.00904353 0.00849211\n 0.00822588 0.00804994 0.00774795 0.00745031 0.00736814 0.00722003\n 0.00709658 0.00678852 0.00666003 0.00633405 0.00626434 0.0062329\n 0.00601061 0.00589109 0.00575607 0.00569043 0.00555617 0.00544859\n 0.00541802 0.00534806 0.00520097 0.00512579 0.0050675 0.00493996\n 0.00486919 0.00479003 0.00472562 0.00459259 0.0044639 0.00445442\n 0.00442158 0.00417666 0.00412479 0.00408071 0.00394264 0.0038429 ]\n\nSum of Explained Variance Ratios: 0.945676990608\n" ] ], [ [ "#### Therefore, use Sparse PCA to reduce the dimension of the LDS questionnaire instead, keep 10 dimensions only.", "_____no_output_____" ] ], [ [ "spca_lds, ex_var_ratio = sparse_pca(df_lds.get_values(), 10)\nplot(ex_var_ratio)", "/Library/Python/2.7/site-packages/sklearn/linear_model/least_angle.py:162: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n elif Gram == 'auto':\n" ] ], [ [ "#### Q3. How many dimensions can capture more than 99% variance of the baseline rCBF values (using PCA)?\n- One. The first principal component captures 99.73% variance. But here three dimensions are kept for further analysis.", "_____no_output_____" ] ], [ [ "pca_base, ex_var_ratio = pca(df_base.get_values(), 3)\nplot(ex_var_ratio)", "\nExplained Variance Ratios:\n[ 9.97327057e-01 1.93945335e-03 2.41780411e-04]\n\nSum of Explained Variance Ratios: 0.99950829102\n" ] ], [ [ "#### Q4. How many dimensions can capture more than 99% variance of the concentration rCBF values (using PCA)?\n- One. The first principal component captures 99.79% variance. 
But here three dimensions are kept for further analysis.", "_____no_output_____" ] ], [ [ "pca_concen, ex_var_ratio = pca(df_concen.get_values(), 3)\nplot(ex_var_ratio)", "\nExplained Variance Ratios:\n[ 9.97899257e-01 1.49942535e-03 1.96982752e-04]\n\nSum of Explained Variance Ratios: 0.999595665312\n" ], [ "# Put everything together\ndf_lds = pd.DataFrame(spca_lds)\ndf_base = pd.DataFrame(pca_base)\ndf_concen = pd.DataFrame(pca_concen)\ndf_reduced = pd.concat([df_patient, df_lds, df_base, df_concen], axis=1)", "_____no_output_____" ], [ "# Reduced ADHD and Bipolar features\ndf_adhd = df_reduced.loc[df_reduced['Patient_ID'].isin(adhd_id)].drop(['Patient_ID'], axis=1)\ndf_bipolar = df_reduced.loc[df_reduced['Patient_ID'].isin(bipolar_id)].drop(['Patient_ID'], axis=1)\n\nprint 'Reduced ADHD sample size:', df_adhd.shape\nprint 'Reduced Bipolar sample size:', df_bipolar.shape", "Reduced ADHD sample size: (1040, 20)\nReduced Bipolar sample size: (307, 20)\n" ] ], [ [ "#### Thus, number of features are reduced from 457 to 20.", "_____no_output_____" ] ], [ [ "# Have a look at the distribution of reduced data\ndf_adhd.plot(kind='hist', alpha=0.5, legend=None, title='ADHD After Dimension Reduction')\ndf_bipolar.plot(kind='hist', alpha=0.5, legend=None, title='Bipolar After Dimension Reduction')", "_____no_output_____" ] ], [ [ "### Inferential Analysis", "_____no_output_____" ] ], [ [ "adhd = df_adhd.get_values().T\nbipolar = df_bipolar.get_values().T", "_____no_output_____" ] ], [ [ "#### Q5. Are the reduced features from the ADHD and Bipolar samples drawn from the same continuous distribution?\n\n#### Kolmogorov-Smirnov Goodness-of-Fit Test\n\nUse K-S test to test if 2 independent samples (ADHD and Bipolar) are drawn from the same continuous distribution. 
All 20 features (resulted from the dimensionality reduction) are tested.", "_____no_output_____" ] ], [ [ "from scipy.stats import pearsonr\nfrom scipy.stats import chisquare\nfrom scipy.stats import ks_2samp\nfrom scipy.stats import anderson_ksamp\nfrom skbio.stats.power import subsample_power\n\npearsonr_test = lambda x: pearsonr(x[0], x[1])[1]\nchi_test = lambda x: chisquare(x[0], x[1])[1]\nks_test = lambda x: ks_2samp(x[0], x[1])[1]\nanderson_ksamp_test = lambda x: anderson_ksamp(x)[2]\n\n# Computer power of a sub sample set\ndef compute_sub_power(test, samples):\n pwr, counts = subsample_power(test=test,\n samples=samples,\n max_counts=300,\n min_counts=30,\n counts_interval=10,\n draw_mode=\"ind\",\n alpha_pwr=0.05)\n return pwr, counts", "_____no_output_____" ] ], [ [ "#### Compute p-values on the reduced data", "_____no_output_____" ] ], [ [ "p_vals = list()\nfor a, b in zip(adhd, bipolar):\n p_vals.append(round(ks_2samp(a, b)[1], 5))\nprint p_vals", "[0.91579, 1e-05, 0.57799, 0.02654, 0.93906, 6e-05, 0.17441, 0.0468, 0.02826, 0.04088, 0.19297, 0.50919, 0.00082, 0.00713, 0.233, 0.06367, 0.0, 0.21373, 0.64792, 1e-05]\n" ] ], [ [ "#### Sample from the reduced data and plot power curves", "_____no_output_____" ] ], [ [ "from mpl_toolkits.axes_grid1 import Grid\n\nplt.close('all')\nfig = plt.figure()\nfig.set_size_inches(11, 8.5)\n\ngrid = Grid(fig, rect=111, nrows_ncols=(4, 5),\n axes_pad=0.25, label_mode='L',\n )\n \ndef plot_power(i, ax):\n a, b = adhd[i], bipolar[i]\n samples = [np.array(a), np.array(b)]\n pwr, counts = compute_sub_power(ks_test, samples) \n ax.plot(counts, pwr.mean(0))\n \nfor i, ax in enumerate(grid):\n if i < 20:\n plot_power(i, ax)\n title = 'p = ' + str(p_vals[i])\n ax.set_title(title)\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "As we can see from the plotted power curves above, ten features (with large p-values) obviously follow the null distribution. For those whose p-values are slightly smaller than the alpha power(0.05), it is hard to be tell whether the corresponding powers will eventually approximate 1.0 due to the lack of data, but we can still see the trend of increasing.", "_____no_output_____" ], [ "### Predictive Analysis", "_____no_output_____" ], [ "Predict ADHD disorder against Bipolar disorder using 10-fold Cross Validation. \n\nSince we have 1040 ADHD samples out of 1347 samples, the chance is 77.2%, we want the prediction accuracy much larger than the chance.", "_____no_output_____" ], [ "#### Q6 - Q14. 
What are the performances of different classification methods on the current features?\n\n- Logistic Regression Classifier\n- K Nearest Neighbors Classifier\n- Support Vector Machine (linear kernel)\n- Linear Discriminant Analysis\n- Quadratic Discriminant Analysis\n- Random Forest Classifier\n- Gradient Boosting Classifier\n- Extra Trees Classifier\n- AdaBoost Classifier", "_____no_output_____" ] ], [ [ "from sklearn import cross_validation\nfrom sklearn.cross_validation import LeaveOneOut\nfrom sklearn.cross_validation import KFold\n\n# Train the given classifier\ndef train_clf(clf, train_feats, train_labels):\n # Supervised training\n clf.fit(train_feats, train_labels)\n \n# Test the given classifier and calculate accuracy\ndef test_clf(clf, test_feats, test_labels):\n # Predict using test set\n predicted = clf.predict(test_feats)\n # Compute accuracy\n acc = np.mean(predicted == test_labels)\n return predicted, acc\n\n# Compute accuracy of a model trained with a specific number(n) of samples \ndef compute_acc(clf, n):\n train_clf(clf, train_X[:n], train_y[:n])\n predict_y, acc = test_clf(clf, test_X, test_y)\n return acc\n\n# Leave one out cross validation\ndef loo_cv(clf, X, y):\n loo = LeaveOneOut(len(X))\n scores = cross_validation.cross_val_score(clf, X, y, cv=loo)\n return scores.mean(), scores.std()\n\n# K-fold cross validation\ndef kf_cv(clf, X, y, k=10):\n kf = KFold(len(X), n_folds=k)\n scores = cross_validation.cross_val_score(clf, X, y, cv=kf)\n return scores.mean(), scores.std()", "_____no_output_____" ], [ "df_all = pd.concat([df_adhd, df_bipolar], axis=0)\nX = df_all.values\ny = [0] * df_adhd.shape[0] + [1] * df_bipolar.shape[0]\n\nprint 'Double check data size:', X.shape, len(y)", "Double check data size: (1347, 20) 1347\n" ], [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.lda import LDA\nfrom sklearn.qda import QDA\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\n# Logistic Regression\nlg = LogisticRegression(penalty='l2')\nacc_lg, acc_std_lg = kf_cv(lg, X, y)\n\n# K Nearest Neighbors\nknn = KNeighborsClassifier(n_neighbors=7)\nacc_knn, acc_std_knn = kf_cv(knn, X, y)\n\n# Support Vector Machine - Linear Kernel\nsvc = LinearSVC()\nacc_svm, acc_std_svm = kf_cv(svc, X, y)\n\n# Linear Discriminant Analysis\nlda = LDA()\nacc_lda, acc_std_lda = kf_cv(lda, X, y)\n\n# Quadratic Discriminant Analysis\nqda = QDA()\nacc_qda, acc_std_qda = kf_cv(qda, X, y)\n\n# Random Forest\nrf = RandomForestClassifier(n_estimators=30) \nacc_rf, acc_std_rf = kf_cv(rf, X, y) \n\n# Gradient Boosting \ngb = GradientBoostingClassifier(n_estimators=20, max_depth=3)\nacc_gb, acc_std_gb = kf_cv(gb, X, y)\n\n# Extra Trees \net = ExtraTreesClassifier(n_estimators=40, max_depth=5)\nacc_et, acc_std_et = kf_cv(et, X, y)\n\n# AdaBoost\nada = AdaBoostClassifier()\nacc_ada, acc_std_ada = kf_cv(ada, X, y)", "_____no_output_____" ], [ "print 'Logistic Regression accuracy is %0.4f (+/- %0.3f)' % (acc_lg, acc_std_lg)\nprint 'K Nearest Neighbors accuracy is %0.4f (+/- %0.3f)' % (acc_knn, acc_std_knn)\nprint 'Support Vector Machine (Linear Kernel) accuracy is %0.4f (+/- %0.3f)' % (acc_svm, acc_std_svm)\nprint 'Linear Discriminant Analysis accuracy is %0.4f (+/- %0.3f)' % (acc_lda, acc_std_lda)\nprint 'Quadratic Discriminant Analysis accuracy is 
%0.4f (+/- %0.3f)' % (acc_qda, acc_std_qda)\nprint 'Random Forest accuracy is %0.4f (+/- %0.3f)' % (acc_rf, acc_std_rf)\nprint 'Gradient Boosting accuracy is %0.4f (+/- %0.3f)' % (acc_gb, acc_std_gb)\nprint 'Extra Trees accuracy is %0.4f (+/- %0.3f)' % (acc_et, acc_std_et)\nprint 'AdaBoost accuracy is %0.4f (+/- %0.3f)' % (acc_ada, acc_std_ada)", "Logistic Regression accuracy is 0.7664 (+/- 0.393)\nK Nearest Neighbors accuracy is 0.7191 (+/- 0.352)\nSupport Vector Machine (Linear Kernel) accuracy is 0.6534 (+/- 0.217)\nLinear Discriminant Analysis accuracy is 0.7524 (+/- 0.378)\nQuadratic Discriminant Analysis accuracy is 0.7126 (+/- 0.302)\nRandom Forest accuracy is 0.7502 (+/- 0.379)\nGradient Boosting accuracy is 0.7657 (+/- 0.382)\nExtra Trees accuracy is 0.7709 (+/- 0.395)\nAdaBoost accuracy is 0.7214 (+/- 0.323)\n" ], [ "from __future__ import division\n\n# Visualize classifier performance\nacc = [acc_lg, acc_knn, acc_svm, acc_lda, acc_qda, acc_rf, acc_gb, acc_et, acc_ada]\nclf_names = ['Logistic Regression', 'KNN', 'SVM', 'LDA', 'QDA', \\\n 'Random Forest', 'Gradient Boosting', 'Extra Trees', 'AdaBoost']\nx = range(len(acc))\n\nwidth = 0.6/1.5\nplt.bar(x, acc, width)\nchance = df_adhd.shape[0] / df_all.shape[0]\n\nplt.title('Classifier Performance')\nplt.xticks(x, clf_names, rotation=50)\nplt.ylabel('Accuracy')\nplt.axhline(chance, color='red', linestyle='--') # plot chance", "_____no_output_____" ] ], [ [ "### Conclusion\n\nFrom above we can see that current prediction accuracies yielded by different classifiers are not better than chance. \n\n- Next Step:\n - Analyze domain-specific (brain SPECT) information.\n - Get more data samples.\n - Use more features (perhaps disorder diagnosis).\n - Look into the feature engineering step (especially PCA).\n - Separate the genders?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ecaec8a0b1bc764c76cc0d2d3d6e1f8e383543ee
500,706
ipynb
Jupyter Notebook
04_00_feature_graph.ipynb
urielcaire/mult
45c0cba69153442be2cee6309d46d55086445e5c
[ "Apache-2.0" ]
null
null
null
04_00_feature_graph.ipynb
urielcaire/mult
45c0cba69153442be2cee6309d46d55086445e5c
[ "Apache-2.0" ]
null
null
null
04_00_feature_graph.ipynb
urielcaire/mult
45c0cba69153442be2cee6309d46d55086445e5c
[ "Apache-2.0" ]
null
null
null
616.633005
476,312
0.939473
[ [ [ "import pandas as pd\nimport os\n\nBASE_DIR = 'output/nmla/selected_markers/'\n\ngenes, clins = None, None\n\ngene_name = pd.read_csv('output/ensg_x_gene_name.csv', sep=',')\n\nfor filename in os.listdir(BASE_DIR):\n if '.csv' in filename:\n \n df = pd.read_csv(os.path.join(BASE_DIR, filename), sep=',')\n \n if 'gene' in filename:\n genes = df if genes is None else pd.concat([genes, df])\n else:\n clins = df if clins is None else pd.concat([clins, df])\n\nclins = clins.reset_index(drop=True)\ngenes = genes.reset_index(drop=True)\n\nclins = clins.groupby('clinical_marker').agg(['mean', 'count'])\ngenes = genes.groupby('gene').agg(['mean', 'count'])\n\nfor d in [clins, genes]:\n \n d.columns = ['_'.join(c) for c in d.columns]\n \n d.index.set_names('marker', inplace=True)\n \n d.reset_index(inplace=True)\n\n d.drop(columns=['pvalue_count'], inplace=True)\n\n d.rename(columns={'entropy_count': 'count'}, inplace=True)\n\ngene_name = pd.read_csv('output/ensg_x_gene_name.csv', sep=',')\n\ngenes = genes.merge(gene_name, left_on='marker', right_on='marker')\n\ngenes = genes.drop(columns=['marker']).rename(columns={'genemania': 'marker'})\n \ngenes['type'] = 'Gene'\nclins['type'] = 'Clinical'\n\nnodes = pd.concat([clins, genes])\n\nnodes['pvalue_mean'] = 1 - (nodes['pvalue_mean'] - nodes['pvalue_mean'] .min()) / (nodes['pvalue_mean'] .max() - nodes['pvalue_mean'] .min())\n\npatterns = {'ENSG[0]+': '',\n 'percent_plasma_cells_': '', \n 'percent_plama_cells_': '', \n 'first_line_transplant': 'transplant',\n 'beta_2_.+': 'beta_2',\n 'percent_': ''\n }\n\nfor k, v in patterns.items():\n nodes['marker'] = nodes['marker'].str.replace(k, v)\n\nprint(nodes.shape)\nnodes = nodes[['marker', 'count', 'entropy_mean', 'pvalue_mean', 'type']]\nnodes.head()", "(81, 5)\n" ], [ "genes.shape", "_____no_output_____" ], [ "ensg_mapping = {}\n\nfor i, row in gene_name.iterrows():\n ensg_mapping[row[0]] = row[1]\n \nensg_mapping", "_____no_output_____" ], [ "f1 = sorted([filename for filename in os.listdir(BASE_DIR) if '.csv' in filename and 'gene' in filename])\nf2 = sorted([filename for filename in os.listdir(BASE_DIR) if '.csv' in filename and 'clin' in filename])\n \nedges = {c: [] for c in ['n1', 'n2', 'count']}\n\nfor ff1, ff2 in zip(f1, f2):\n \n gen, cli = pd.read_csv(os.path.join(BASE_DIR, ff1)), pd.read_csv(os.path.join(BASE_DIR, ff2))\n \n for c1 in gen['gene'].tolist():\n for c2 in cli['clinical_marker'].tolist():\n try:\n edges['n1'].append(ensg_mapping[c1])\n edges['n2'].append(c2)\n edges['count'].append(1)\n except KeyError:\n sss.add(c1)\n \n for c1 in gen['gene'].tolist():\n for c2 in gen['gene'].tolist():\n if c1 != c2:\n edges['n1'].append(ensg_mapping[c1])\n edges['n2'].append(ensg_mapping[c2])\n edges['count'].append(1)\n \n for c1 in cli['clinical_marker'].tolist():\n for c2 in cli['clinical_marker'].tolist():\n if c1 != c2:\n edges['n1'].append(c1)\n edges['n2'].append(c2)\n edges['count'].append(1)\n\nedges = pd.DataFrame(edges).groupby(['n1', 'n2']).sum().reset_index()\n\nfor c in ['n1', 'n2']:\n for k, v in patterns.items():\n edges[c] = edges[c].str.replace(k, v)\n\nprint(edges.shape)\n\nedges.head()\n", "(2973, 3)\n" ], [ "import networkx as nx\nimport pandas as pd\nimport itertools\nimport copy", "_____no_output_____" ], [ "g = nx.Graph()\n\n# Add edges and edge attributes\nfor i, elrow in edges.iterrows():\n g.add_edge(elrow[0], elrow[1], attr_dict=elrow[2:].to_dict())\n \n# Edge list example\nprint(elrow[0]) # node1\nprint(elrow[1]) # node2\nprint(elrow[2:].to_dict()) # edge 
attribute dict", "wbc_x10_10_9_l\nldh\n{'count': 1}\n" ], [ "# Add node attributes\nfor i, nlrow in nodes.iterrows():\n g.node[nlrow['marker']].update(nlrow[1:].to_dict())\nprint(nlrow)", "marker RP11-329B9.5\ncount 14\nentropy_mean 2.06567\npvalue_mean 0.997263\ntype Gene\nName: 69, dtype: object\n" ], [ "print('# of edges: {}'.format(g.number_of_edges()))\nprint('# of nodes: {}'.format(g.number_of_nodes()))", "# of edges: 1752\n# of nodes: 81\n" ], [ "export = {'n1': [], 'n2': [], 'weight': []}\n\nfor e in g.edges(data=True):\n export['n1'].append(e[0])\n export['n2'].append(e[1])\n export['weight'].append(e[2]['attr_dict']['count'])\n \nexport = pd.DataFrame(export)\n \nexport.to_csv('output/graph/edges.csv', sep=',', index=False)\n\nexport.head()", "_____no_output_____" ], [ "export = {'node': [], 'weight': [], 'entropy_mean': [], 'pvalue_mean': [], 'type': []}\n\nfor e in g.nodes(data=True):\n export['node'].append(e[0])\n export['weight'].append(e[1]['count'])\n export['entropy_mean'].append(e[1]['entropy_mean'])\n export['pvalue_mean'].append(e[1]['pvalue_mean'])\n export['type'].append(e[1]['type'])\n \nexport = pd.DataFrame(export)\n \nexport.to_csv('output/graph/node.csv', sep=',', index=False)\n\nexport.head()", "_____no_output_____" ], [ "import pickle as pkl\n\nwith open('output/initial_graph_position.pkl', 'rb') as file:\n initial_pos = pkl.load(file)", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(12, 8))\n\npos = nx.spring_layout(g, k = 5.1, scale=10, iterations=100, weight='count')\npos_ = copy.deepcopy(pos)\n\nfor k in pos:\n pos[k][1] = pos[k][1] + .8\n\nnode_color = {'Clinical': 'tab:blue', 'Gene': 'tab:red'}\n\nfor t in ['Clinical', 'Gene']:\n nodelist = [n[0] for n in g.nodes(data=True) if n[1]['type'] == t]\n node_size = [n[1]['count'] * 50 + 200 for n in g.nodes(data=True) if n[1]['type'] == t]\n linewidths = [n[1]['pvalue_mean'] * 2 for n in g.nodes(data=True) if n[1]['type'] == t]\n labels = {n[0]: n[0] for n in g.nodes(data=True) if n[1]['type'] == t}\n nx.draw_networkx_labels(g, pos_, labels=labels, verticalalignment='top', bbox=dict(boxstyle='square,pad=0', alpha=.65, edgecolor='white', facecolor='white'), font_size=10)\n nx.draw_networkx_nodes(g, pos, nodelist=nodelist, node_size=node_size, edgecolors='black', linewidths=linewidths, node_color=node_color[t], node_shape=None)\n \nnx.draw_networkx_edges(g, pos, edge_color='gray', width=[e[2]['attr_dict']['count'] / 10 for e in g.edges(data=True)])\n\nax = plt.gca()\nax.axis('off')\n\nplt.show()\n\nfig.savefig('images/feature_graph.png', dpi=100, bbox_inches='tight', transparent=\"True\", pad_inches=0)", "C:\\Anaconda3\\lib\\site-packages\\networkx\\drawing\\nx_pylab.py:611: MatplotlibDeprecationWarning: isinstance(..., numbers.Number)\n if cb.is_numlike(alpha):\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecaedc35b7ed9dcc9fe552f3615035ddb9d87914
58,220
ipynb
Jupyter Notebook
DataSci/Quant_Ed.ipynb
Tshort76/reference
ad47f9e33e72b451e0d26bfeaf96b6ddfd0cd4a5
[ "MIT" ]
null
null
null
DataSci/Quant_Ed.ipynb
Tshort76/reference
ad47f9e33e72b451e0d26bfeaf96b6ddfd0cd4a5
[ "MIT" ]
null
null
null
DataSci/Quant_Ed.ipynb
Tshort76/reference
ad47f9e33e72b451e0d26bfeaf96b6ddfd0cd4a5
[ "MIT" ]
null
null
null
113.933464
21,836
0.824785
[ [ [ "# Linear Algebra (Refresher)\n\nLinear algebra is the study of vectors and vector spaces. The term linear is used to describe the case where every variable has a degree of at most 1 and its coefficient is a numeric value. For instance, $y=mx+b$ is a linear equation but $y=mx^2+b$ and $y=zx+b$ are not, since the variable $x$ has a degree greater than 1 and a variable coefficient, respectively.", "_____no_output_____" ], [ "## Vectors\n\nA vector is a list of numbers, i.e. $[1, 2, 4, 6]$ , that have specific operations defined for them. The simplest of these operations are defined as component-level operations such as:\n\n - Addition : $[1,2] + [5,8] = [(1 + 5), (2 + 8)] = [6,10]$\n - Multiplication by a scalar : $ 3 * [1,2] = [3,6]$\n \nWe can also think of a vector as the sum of the scalar multiples of unit vector components. In most cases, these unit vector components are the natural basis, $e_1=[1,0,0,...0], e_3=[0,0,1,0,0,...,0]$. We can therefore describe a vector as the sum of scalar multiples of these basis vectors. For example, the vector $[3,2]$ can also be written as $3e_1 + 2e_2$.\n\n### Dot Product\n\n$$ [1,2] \\bullet [5,8] = 1(5) + 2(8) = 21$$\n\nThe dot (or inner) product of 2 vectors, defined as the sum of the products of the corresponding components in each vector, has some very useful properties. Let us look at some of these:\n\nA vector 'dotted' with itself is equal to the vector's magnitude squared - $a \\bullet a = |a|^2$\n\n##### The angle between vectors\n\nMost usefully, we can recall the cosine rule : $c^2 = a^2 + b^2 - 2ab\\cos\\theta$, which tells us the relationship between 2 vectors, a and b, the angle between them, $\\theta$, and the third side of the triangle, c, that would be formed by connecting a and b. We can rewrite this, in terms of our new dot product operator, as \n$$|a-b|^2 = |a|^2 + |b|^2 - 2|a||b|\\cos\\theta$$\n\nand we rewrite the left side as\n\n$$ = (a-b)\\bullet(a-b) = a \\bullet a - a \\bullet b - a \\bullet b + b \\bullet b$$\n$$ = |a|^2 - 2(a \\bullet b) + |b|^2$$\n\nPlugging this back into our first equation and cancelling out terms yields:\n\n$$a \\bullet b = |a||b|\\cos\\theta$$\n\n__So the dot product can be used to determine the alignment of 2 vectors__!\n\nIf two vectors are pointed in the same direction (one is a positive scalar multiple of another), then $\\cos0 = 1$ and therefore the dot product of the two vectors is $|a||b|$. Likewise, if the vectors are orthogonal then $\\cos90 = 0$ and $a \\bullet b = 0$.\n\n#### Projections\n\nThe projection of vector $a$ onto vector $b$ can be visualized with the following picture.\n\n![title](resources/vector_projection.jpg)\n\nwhere the projection is the magnitude of the vector that starts at the origin of $b$ and ends at the dotted line. Since we have a right triangle, we can determine the projection $\\rho$ as \n\n$$ \\cos\\theta = \\frac{adj}{hyp} = \\rho /|a|$$\n\n$$\\frac{a \\bullet b}{|a||b|} = \\frac{\\rho}{|a|}$$\n\n$$ \\rho = \\frac{a \\bullet b}{|b|} $$\n\nAnother important idea is that of a vector projection, which is simply the scalar projection multiplied by the unit vector of b ($\\frac{b}{|b|}$). So the vector projection of a onto b is given by:\n\n$$ = \\frac{a \\bullet b}{|b|} * \\frac{b}{|b|}$$ \n\nAs motivation for the next section, we can see that if we were to take the vector projection of $a$ onto $b$ then we have $c$ such that $a$ can be represented in component form as $... + cb$. 
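As a quick numerical check of the dot product, angle, and projection formulas above, here is a small NumPy sketch (NumPy is an assumption and is not used in the original cell) with the same vectors $[1,2]$ and $[5,8]$:

```python
import numpy as np

a = np.array([1.0, 2.0])
b = np.array([5.0, 8.0])

dot = np.dot(a, b)                                  # 1*5 + 2*8 = 21
cos_theta = dot / (np.linalg.norm(a) * np.linalg.norm(b))
theta_deg = np.degrees(np.arccos(cos_theta))        # angle between a and b

scalar_proj = dot / np.linalg.norm(b)               # rho = (a . b) / |b|
vector_proj = (dot / np.linalg.norm(b) ** 2) * b    # ((a . b) / |b|^2) * b

print(dot, round(theta_deg, 2), round(scalar_proj, 3), vector_proj)
```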
So we can write $ cb = \\frac{a \\bullet b}{|b|} * \\frac{b}{|b|}$ and therefore\n\n$$ c = \\frac{cb}{b} = \\frac{1}{b} * \\frac{a \\bullet b}{|b|} * \\frac{b}{|b|} = \\frac{a \\bullet b}{|b|^2}$$\n\n#### Basis vectors\n\nAs hinted at above, we can represent a vector in terms of a new set of basis vectors. If the basis vectors are orthogonal to each other, then we can use the dot product to do this. The vector $r$ can be represented in terms of orthogonal basis vectors $a$ and $b$ simply by using the unit projection of $r$ onto each basis vector as the associated component coefficient. Formally, $r$ represented in these terms as (using results from above):\n\n$$ [\\frac{r \\bullet a}{a \\bullet a} , \\frac{r \\bullet b}{b \\bullet b}]$$\n\n### Basis\n\nA _basis_ is a set of $n$ vectors that are linearly independent (1 vector cannot be represented as a linear combination of some set of the other vectors) and span the entire vector space (any vector in the space can be represented as a linear combination of the _basis_ vectors). If these hold, then the vector space of that basis is said to be $n$ dimensional.", "_____no_output_____" ], [ "## Matrices\n\nA matrix is a 2 dimensional vector of real (?) numbers which represent the coefficients in a system of simultaneous equations. For instance,\n\n$$ 3x + 2y = 12$$\n$$ 4x + 8y = 18$$\n\ncan be represented in matrix notation as: \n\n$$\\left[ {\\begin{array}{cc}\n 3 & 2\\\\\n 4 & 8\\\\\n \\end{array} } \\right] \\left[ {\\begin{array}{c}\n x\\\\\n y\\\\\n \\end{array} } \\right] = \\left[ {\\begin{array}{c}\n 12\\\\\n 18\\\\\n \\end{array} } \\right]$$\n \nTo think about what a matrix represents, from a geometric perspective, lets look at what happens when we multiply the standard basis' by a matrix\n\n$$\\left[ {\\begin{array}{cc}\n a & b\\\\\n c & d\\\\\n \\end{array} } \\right] \\left[ {\\begin{array}{c}\n 1\\\\\n 0\\\\\n \\end{array} } \\right] = \\left[ {\\begin{array}{c}\n a\\\\\n c\\\\\n \\end{array} } \\right] \\quad , \\quad \\left[ {\\begin{array}{cc}\n a & b\\\\\n c & d\\\\\n \\end{array} } \\right] \\left[ {\\begin{array}{c}\n 0\\\\\n 1\\\\\n \\end{array} } \\right] = \\left[ {\\begin{array}{c}\n b\\\\\n d\\\\\n \\end{array} } \\right]$$\n \nWe see that the ith column of the matrix tells us what happens to the unit vector along the ith axis of our space. This is an interesting feature as we see that it makes it pretty easy to understand how to rotate our space in a predictable manner. For instance, say we have a picture that we want to rotate by 90 degrees, counter clockwise. We can achieve this by applying a single matrix transformation to each vector (column) of pixels in the picture. What is the matrix transformation? Well, we want the x-axis unit vector $[1,0]$ transformed to $[0,1]$ (a 90 degree rotation) and the y-axis transformed to $[-1,0]$ (a 90 degree rotation). Thus, the transformation matrix that represents a 90 degree counter clockwise rotation is the matrix composed of these transformed axis vectors, \n\n$$\\left[ {\\begin{array}{cc}\n 0 & -1\\\\\n 1 & 0\\\\\n \\end{array} } \\right]$$\n \n\n_So we can also say that a matrix is a transformation of vector space and that the columns of a transformation matrix are the axes of the new basis vectors of the transformed space!_\n\n#### Quick notes on solving simulataneous equations with matrices\n\nTo solve a particular system of simultaneous equations, reduce the system to row echelon form. 
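As a quick numerical check of the system above, a short NumPy sketch (NumPy is not used in the original notes) solves the same two equations:

```python
import numpy as np

A = np.array([[3.0, 2.0],
              [4.0, 8.0]])
r = np.array([12.0, 18.0])

v = np.linalg.solve(A, r)   # [x, y]
print(v)                    # [3.75, 0.375]
print(A @ v)                # recovers [12., 18.]
```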
To solve the system for all r such that $Av = r$, we can find the inverse of matrix $A$, $A^{-1}$ and multiply both sides by it: \n\n$$ A^{-1}(Av) = A^{-1}r = v$$\n\n### Determinants\n\nThe determinant of a matrix is the scale/factor by which a matrix transformation changes the size of a vector space. For instance, the determinant of the matrix\n\n$$\\left[ {\\begin{array}{cc}\n a & 0\\\\\n 0 & d\\\\\n \\end{array} } \\right]$$\n\nis $a*d$ since this tranformation increases the size of our vector space by a factor of $a*d$ (see picture below).\n\n![title](resources/determinant_1.jpg)\n\nIn the general case of a matrix \n$$\\left[ {\\begin{array}{cc}\n a & b\\\\\n c & d\\\\\n \\end{array} } \\right]$$\n\nthe determinant can be shown (draw out the resulting parallelogram and compute its area as the area of the surrounding box minus the areas of the triangles and squares outside of the parallelogram) to be: \n\n$$determinant = ad - cb$$\n\nWe note that the determinant can be used to derive the identity matrix and also to determine the linear independence of the rows of a matrix transformation (a determinant of 0 means that there is a linear dependence and that the matrix is therefore _singular_ ).\n\n#### Orthogonal Matrices\n\nAn orthogonal matrix is a matrix that consists of pairwise orthogonal columns. It is orthonormal if each column is a unit vector (length of 1). For an orthonormal matrix, $A^T=A^{-1}$ and the determinant of the A is $\\pm1$.\n", "_____no_output_____" ], [ "## Key Points\n\n - The columns of a matrix transformation represent the new basis vectors for the transformed space\n - The determinant of a matrix is the scaling of the transformation, in terms of the size of the transformed space\n - Assume that we want to apply some sort of transformation, which is easily representable in the basis space X, to a basis space Y in which the transformation easy to intuit(vector space). If we know the transformation (matrix) A such that AX = Y, then we can get the desired transformation in Y by transforming the transformation represented in X basis form using matrix A. (or something like that)", "_____no_output_____" ], [ "# Time Series Analysis", "_____no_output_____" ], [ "## Fourier Series\n\nhttps://www.khanacademy.org/science/electrical-engineering/ee-signals/ee-fourier-series/v/ee-fourier-series-intro\n\nA fourier series attempts to represent a periodic function as a weighted sum of cosines and sines of different frequencies. Formally, we approximate our function using : $$f(t) = a_0 + a_1\\cos{t} + a_2\\cos{2t} + ... + a_n\\cos{nt} + b_1\\sin{t} + b_2\\sin{2t} + ... + b_n\\sin{nt}$$ Where $n$ approaches infinity", "_____no_output_____" ], [ "## Autocorrelation\n\nTo detect periodicity, we can employ a technique known as autocorrelation. In a nutshell, this involves finding the correlation between the data and a version of itself that has been shifted (so original_data[i] is being compared to original_data[i+SHIFT] ). Peaks in the plot of correlation vs shift should show candidate periodicity values. 
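A self-contained sketch of this shift-and-correlate idea on a synthetic sine wave with a known period (separate from the project data used further below; the peak-picking heuristic is only illustrative):

```python
import numpy as np

period = 50
t = np.arange(1000)
signal = np.sin(2 * np.pi * t / period) + 0.1 * np.random.randn(len(t))

shifts = np.arange(1, 200)
corr = np.array([np.correlate(signal[:-s], signal[s:])[0] for s in shifts])

# Skip the main lobe around zero shift: take the first peak after the
# correlation has dipped below zero.
first_neg = np.argmax(corr < 0)
best_shift = shifts[first_neg + np.argmax(corr[first_neg:])]
print(best_shift)   # lands at (or very close to) 50, the true period
```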
Here is a visualization that I snagged from http://qingkaikong.blogspot.com/ :\n\n<img src=\"resources/images/autocorrelation.png\" alt=\"Autocorrelation\">\n\n##### Code\nRunning this on a pandas Series with numpy (index alignment was a pain so I went to numpy)\n```\nosig = norm_df['ABLd'][0:4000].values\n\nx_vals = range(50,3000,25)\ny_vals = []\n\nfor i in x_vals:\n nsig = norm_df['ABLd'][i:(i+4000)]\n y_vals.append(np.correlate(osig,nsig.values))\n \nplt.plot(x_vals,y_vals);\n```", "_____no_output_____" ], [ "# Miscellaneous", "_____no_output_____" ], [ "## Markov Chain Monte Carlo", "_____no_output_____" ], [ "### Monte Carlo", "_____no_output_____" ], [ "Monte Carlo (sampling) is a technique for metric estimation via random sampling. The intuition behind the technique is that one can estimate the true (population) distribution of a random variable as the distribution of that random variable in a large set of independent samples. This is rooted in the law of large numbers, and we can intuit that the more samples that we make, the better the estimation. This allows us to solves problems that have a probabilistic interpretation.\n\nAs an example, let us consider the problem of estimating the value of $\\pi$. To use the Monte Carlo method, we must interpret the problem probabistically. To start, we note that if a unit circle (radius 1) is bounded by a square (sides with length 2), then the area of the square is 4 and the area of the circle is $\\pi$. In other words, the ratio of the areas of the two geometric shapes is $\\pi:4$ and thus the probability that a point that lies within the square also lies within the circle is $\\pi/4$. With our probabilistic interpretation in hand, we can estimate $\\pi$ by randomly sampling $n$ points from within the square and recording the number which are also inside the circle $n_C$. Our estimate is: $$\\pi \\approx \\dfrac{4*n_C}{n}$$", "_____no_output_____" ], [ "### Example\nWhat percent of points in a square are closer to the center than to the edges", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n# square is (0,0),(0,1),(1,1),(1,0)\n# center is (0.5,0.5)\n\ndist_to_center = lambda p1 : ((p1[0]-0.5)**2 + (p1[1]-0.5)**2)**0.5\ndist_to_edge = lambda x,y: min([x,y,1-x,1-y])\ncloser_to_center = lambda p : 1 if (dist_to_center(p) < dist_to_edge(*p)) else 0\n\nnum_trials = 100000\n\nvals = []\npoints = []\nfor i in range(num_trials):\n p = [random.random(),random.random()] #generate a random point\n points.append(p)\n vals.append(closer_to_center(p))\n\nprint(sum(vals)/len(vals)) # Print the ratio\n\n# plot to get an idea of the boundary of that internal shape\nxz = [x[0] for x in points]\nyz = [x[1] for x in points]\ncolors = ['#e6194B' if x else '#ffe119' for x in vals]\nplt.scatter(xz,yz,c=colors)\n\nplt.show()", "0.219089\n" ] ], [ [ "## Likelihood", "_____no_output_____" ], [ "Likelihood is a function of the parameters of a statistical model, given specific observed data, and plays a key role in estimating a parameter from a set of statistics. Likelihood describes the _plausibility_, but __not the probability__, of a model parameter value given an observation. It is important to note that likelihood is directly proportional to probability but is not equivalent. 
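A small numerical sketch of that distinction (the data values are made up, and SciPy is assumed to be available even though it is not imported in this notebook): the likelihood of a candidate parameter value can be evaluated and maximized, but it does not integrate to 1 over the parameter, so it is not a probability distribution over the parameter.

```python
import numpy as np
from scipy.stats import norm

data = np.array([1.2, 0.7, 1.9, 1.4])        # made-up observations
mus = np.linspace(-2.0, 4.0, 601)            # candidate values of the unknown mean

# Likelihood of each candidate mean, assuming unit variance.
lik = np.array([norm.pdf(data, loc=m, scale=1.0).prod() for m in mus])

print(mus[lik.argmax()])      # most plausible mean, matching the sample mean (1.3)
print(np.trapz(lik, mus))     # not equal to 1, so this is not a density over mu
```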
When used to estimate a model parameter given some observations, the log likelihood is preferred since its derivative is often easier to compute.\n\nCommon syntax for this likelihood is:\n\n$${L}(\\theta \\mid x)=f_{\\theta }(x)$$\n\nAlthough it looks like a continuous probability function, it should not be considered one as all of the values do not sum to 1. To illustrate, let us look at an example:\n\nAssume that we are trying to determine $p_H$, the probability that a coin will land 'heads up' on a flip, given some observations. To start, let us assume that we have observed HH (two heads up flips). The likelihood function for this situation is constructed as follows: For every possible value of $p_H$, determine the probability of our observation given that value of $p_H$. For example:\n\n$${P}(HH \\mid p_H = 0.5) = 0.5^2 = 0.25$$\n\nand therefore the likelihood that the model parameter $p_H$ equals 0.5 is :\n\n$${L}(p_H = 0.5 \\mid HH)=0.25$$\n\nBelow are two graphs of likelihood with different observations. We note that the sum under the graph, total likelihood, is not 1 and decreases with more observations since every hypothesis except $p_H = 1$ is less likely and $p_H = 1$ is just as likely as ever. We note here that even though the integral decreases, $p_H = 1$ becomes more likely _than competing hypothesis_ and we can therefore use likelihood to estimate a good model parameter.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\np_H = [(x*0.05) for x in range(21)]\nl_HH = [(x*0.05)**2 for x in range(21)]\nl_HHHH = [(x*0.05)**4 for x in range(21)]\nl_HHHHHH = [(x*0.05)**6 for x in range(21)]\n\nfig, ax = plt.subplots()\nax.plot(p_H, l_HH, label='HH')\nax.plot(p_H, l_HHHH, label='HHHH')\nax.plot(p_H, l_HHHHHH, label='HHHHHH')\nplt.xlabel('$p_H$')\nplt.ylabel('Likelihood')\nax.legend()\nplt.show()\n", "_____no_output_____" ] ], [ [ "## Embeddings\n\nAn embedding is a mapping of a discrete — categorical — variable to a vector of continuous numbers. In the context of neural networks, embeddings are learned low-dimensional representations of discrete data as continuous vectors. Neural network embeddings have 3 main use cases:\n\n - Assessing similarity between objects (e.g. for use in clustering and nearest neighbor searches)\n - As input to machine learning models\n - For visualization of concepts and relations between categories\n \nNeural embeddings overcome the two main limitations of the simple one-hot encoding method, namely by offering a fixed dimensionality that is independent of the cardinality of the variable (one-hot encoded vectors are equal in length to the cardinality of the variable) is reduced \n\n-William Koehrsen, Neural Network Embeddings Explained, Towards Data Science", "_____no_output_____" ], [ "### Word2Vec\nWord2Vec is an algorithm for determining useful word embeddings; typically embeddings such that the relationship between two (embedded) words in vector space captures interesting relationships between the semantic relationship of the words. The quintessential example is: embed(king) - embed(royalty) = embed(man)\n\nThe technique has several variants but the Continuous Bag of Words (CBOW) variant can be used to understand the basics. I want to take a little time to explain the concepts behind this idea because I find it to be so clever.\n1. Humans have discovered powerful mechanisms, Artificial Neural Networks (ANNs), for supervised learning.\n2. There exist large corpuses of digitized writing on the internet (e.g. 
wikipedia)\n\nThe key conceptual insight for Word2Vec, in my opinion, was that we could treat a large text corpus as a training set by attempting to predict a word from a set of context. We could then use that to train an ANN and subsequently use some intermediate layer of the ANN as a feature vector (embedding) for the word!\n\nIn practice, we might use a sliding window to move over a corpus and predict the next word in the corpus based on the words in the current window. We would one-hot encode the words to train an ANN with softmax. Once the network was trained, we could feed a word into it and use the output of the middle layer as the vector/feature/embedded representation for the word.", "_____no_output_____" ], [ "## Power Law\n\nThe power, or scaling, law states that a relative change in one quantity results in a *proportional* relative change in another. The simplest example of the law in action is a square; if you double the length of a side then the area will quadruple. Formally, a power law distribution has the form $y = kx^{\\alpha}$, where:\n\n- x and y are variables of interest,\n- $\\alpha$ is the law’s exponent,\n- k is a constant.\n\nThe power law describes phenomenon where most of the density of a distribution lies on either (exclusive) extreme.\n\nCommon examples of phenomena with this type of distribution:\n- Distribution of income\n- Magnitude of earthquakes\n- Size of cities according to population\n- Size of corporations\n- Trading volumes on the stock market\n- Word frequencies\n\nThe Pareto Principle is a special case of this distribution and states that 80% of effects result from 20% of causes.", "_____no_output_____" ] ] ]
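A tiny numerical sketch of the Pareto special case mentioned in the last cell above (it relies on the standard Pareto Lorenz-curve result, which is not derived in the notebook, and α = 1.16 is the value usually quoted for the 80/20 rule):

```python
# Share of the total held by the top fraction q of a classical Pareto
# population with exponent alpha: share = q ** ((alpha - 1) / alpha)
alpha = 1.16
q = 0.20
share = q ** ((alpha - 1) / alpha)
print(round(share, 2))   # ~0.8, i.e. the top 20% account for roughly 80% of the total
```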
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ecaeec0621e8363601e4d602b4768befcd64b6a0
184,337
ipynb
Jupyter Notebook
Tag Classification/1.4 Tags Classification - Class LG.ipynb
priya6971/Research-Paper-Title-Generation-and-Tag-Classification
2ed6dc59b86742d888f611f8a500a8e9377db083
[ "MIT" ]
3
2021-05-21T18:05:42.000Z
2021-11-10T13:57:31.000Z
Tag Classification/1.4 Tags Classification - Class LG.ipynb
priya6971/Research-Paper-Title-Generation-and-Tag-Classification
2ed6dc59b86742d888f611f8a500a8e9377db083
[ "MIT" ]
null
null
null
Tag Classification/1.4 Tags Classification - Class LG.ipynb
priya6971/Research-Paper-Title-Generation-and-Tag-Classification
2ed6dc59b86742d888f611f8a500a8e9377db083
[ "MIT" ]
1
2021-12-17T19:50:33.000Z
2021-12-17T19:50:33.000Z
47.607696
32,282
0.57052
[ [ [ "# **NATURAL LANGUAGE PROCESSING:**\n## **Project Title :** **Title Generation**\n### -by\n* **Arkadipta De (AI20MTECH14002)**\n* **Venkatesh E (AI20MTECH14005)**\n* **Priya Bhatia (AI20MTECH14015)**\n\n#### **Guide : Dr Maunendra Sankar Desarkar, Dr Srijith**", "_____no_output_____" ], [ "**Description:** Given the abstract of a technical paper, generate a suitable title for it\n\n**Data Source Link :**[Arxiv Dataset](https://www.kaggle.com/neelshah18/arxivdataset)\n\n\n\n* Collection of 31000+ paper meta data.\n* This data contains all paper related to ML, CL, NER, AI and CV field publish between 1992 to 2018-Feb.", "_____no_output_____" ], [ "### **IMPORTING LIBRARIES :**\n", "_____no_output_____" ] ], [ [ "# For using in Google Colab\nfrom google.colab import drive\ndrive.mount(\"/content/gdrive\")", "Mounted at /content/gdrive\n" ], [ "!pip install transformers", "Collecting transformers\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d8/b2/57495b5309f09fa501866e225c84532d1fd89536ea62406b2181933fb418/transformers-4.5.1-py3-none-any.whl (2.1MB)\n\u001b[K |████████████████████████████████| 2.1MB 8.6MB/s \n\u001b[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers) (20.9)\nRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (1.19.5)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from transformers) (3.10.1)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers) (2019.12.20)\nCollecting tokenizers<0.11,>=0.10.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ae/04/5b870f26a858552025a62f1649c20d29d2672c02ff3c3fb4c688ca46467a/tokenizers-0.10.2-cp37-cp37m-manylinux2010_x86_64.whl (3.3MB)\n\u001b[K |████████████████████████████████| 3.3MB 33.7MB/s \n\u001b[?25hRequirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.7/dist-packages (from transformers) (4.41.1)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from transformers) (3.0.12)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from transformers) (2.23.0)\nCollecting sacremoses\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/75/ee/67241dc87f266093c533a2d4d3d69438e57d7a90abb216fa076e7d475d4a/sacremoses-0.0.45-py3-none-any.whl (895kB)\n\u001b[K |████████████████████████████████| 901kB 41.7MB/s \n\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers) (2.4.7)\nRequirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->transformers) (3.7.4.3)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < \"3.8\"->transformers) (3.4.1)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (2020.12.5)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->transformers) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in 
/usr/local/lib/python3.7/dist-packages (from requests->transformers) (2.10)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.15.0)\nRequirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (7.1.2)\nRequirement already satisfied: joblib in /usr/local/lib/python3.7/dist-packages (from sacremoses->transformers) (1.0.1)\nInstalling collected packages: tokenizers, sacremoses, transformers\nSuccessfully installed sacremoses-0.0.45 tokenizers-0.10.2 transformers-4.5.1\n" ], [ "!pip install sentencepiece", "Collecting sentencepiece\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f5/99/e0808cb947ba10f575839c43e8fafc9cc44e4a7a2c8f79c60db48220a577/sentencepiece-0.1.95-cp37-cp37m-manylinux2014_x86_64.whl (1.2MB)\n\r\u001b[K |▎ | 10kB 17.4MB/s eta 0:00:01\r\u001b[K |▌ | 20kB 19.2MB/s eta 0:00:01\r\u001b[K |▉ | 30kB 10.9MB/s eta 0:00:01\r\u001b[K |█ | 40kB 9.4MB/s eta 0:00:01\r\u001b[K |█▍ | 51kB 7.8MB/s eta 0:00:01\r\u001b[K |█▋ | 61kB 8.2MB/s eta 0:00:01\r\u001b[K |██ | 71kB 8.4MB/s eta 0:00:01\r\u001b[K |██▏ | 81kB 8.2MB/s eta 0:00:01\r\u001b[K |██▌ | 92kB 8.0MB/s eta 0:00:01\r\u001b[K |██▊ | 102kB 7.5MB/s eta 0:00:01\r\u001b[K |███ | 112kB 7.5MB/s eta 0:00:01\r\u001b[K |███▎ | 122kB 7.5MB/s eta 0:00:01\r\u001b[K |███▌ | 133kB 7.5MB/s eta 0:00:01\r\u001b[K |███▉ | 143kB 7.5MB/s eta 0:00:01\r\u001b[K |████ | 153kB 7.5MB/s eta 0:00:01\r\u001b[K |████▍ | 163kB 7.5MB/s eta 0:00:01\r\u001b[K |████▋ | 174kB 7.5MB/s eta 0:00:01\r\u001b[K |█████ | 184kB 7.5MB/s eta 0:00:01\r\u001b[K |█████▏ | 194kB 7.5MB/s eta 0:00:01\r\u001b[K |█████▌ | 204kB 7.5MB/s eta 0:00:01\r\u001b[K |█████▊ | 215kB 7.5MB/s eta 0:00:01\r\u001b[K |██████ | 225kB 7.5MB/s eta 0:00:01\r\u001b[K |██████▎ | 235kB 7.5MB/s eta 0:00:01\r\u001b[K |██████▌ | 245kB 7.5MB/s eta 0:00:01\r\u001b[K |██████▉ | 256kB 7.5MB/s eta 0:00:01\r\u001b[K |███████ | 266kB 7.5MB/s eta 0:00:01\r\u001b[K |███████▍ | 276kB 7.5MB/s eta 0:00:01\r\u001b[K |███████▋ | 286kB 7.5MB/s eta 0:00:01\r\u001b[K |████████ | 296kB 7.5MB/s eta 0:00:01\r\u001b[K |████████▏ | 307kB 7.5MB/s eta 0:00:01\r\u001b[K |████████▍ | 317kB 7.5MB/s eta 0:00:01\r\u001b[K |████████▊ | 327kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████ | 337kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████▎ | 348kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████▌ | 358kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████▉ | 368kB 7.5MB/s eta 0:00:01\r\u001b[K |██████████ | 378kB 7.5MB/s eta 0:00:01\r\u001b[K |██████████▍ | 389kB 7.5MB/s eta 0:00:01\r\u001b[K |██████████▋ | 399kB 7.5MB/s eta 0:00:01\r\u001b[K |███████████ | 409kB 7.5MB/s eta 0:00:01\r\u001b[K |███████████▏ | 419kB 7.5MB/s eta 0:00:01\r\u001b[K |███████████▍ | 430kB 7.5MB/s eta 0:00:01\r\u001b[K |███████████▊ | 440kB 7.5MB/s eta 0:00:01\r\u001b[K |████████████ | 450kB 7.5MB/s eta 0:00:01\r\u001b[K |████████████▎ | 460kB 7.5MB/s eta 0:00:01\r\u001b[K |████████████▌ | 471kB 7.5MB/s eta 0:00:01\r\u001b[K |████████████▉ | 481kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████████ | 491kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 501kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████████▋ | 512kB 7.5MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 522kB 7.5MB/s eta 0:00:01\r\u001b[K |██████████████▏ | 532kB 7.5MB/s eta 0:00:01\r\u001b[K |██████████████▍ | 542kB 7.5MB/s eta 0:00:01\r\u001b[K |██████████████▊ | 552kB 7.5MB/s eta 0:00:01\r\u001b[K |███████████████ | 563kB 7.5MB/s eta 0:00:01\r\u001b[K |███████████████▎ | 573kB 7.5MB/s eta 0:00:01\r\u001b[K 
|██████████████████████████████▊ | 1.2MB 7.5MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 1.2MB 7.5MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▎| 1.2MB 7.5MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▋| 1.2MB 7.5MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 1.2MB 7.5MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 1.2MB 7.5MB/s \n\u001b[?25hInstalling collected packages: sentencepiece\nSuccessfully installed sentencepiece-0.1.95\n" ], [ "import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport seaborn as sns \n\nimport random\n\nimport json, re\nfrom tqdm import tqdm_notebook\nfrom uuid import uuid4\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset, RandomSampler, SequentialSampler\n\nfrom transformers import glue_compute_metrics\nfrom transformers import Trainer, TrainingArguments\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\nfrom transformers import BertTokenizer, BertModel, BertConfig, BertForSequenceClassification\nfrom transformers import RobertaConfig, RobertaModel, RobertaTokenizer, RobertaForSequenceClassification\n\nfrom sklearn.metrics import f1_score, accuracy_score,confusion_matrix \nfrom sklearn.metrics import classification_report as Report\n\nfrom tqdm.notebook import tqdm\n\nimport itertools \nfrom sklearn.utils import shuffle\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ], [ "print(\"GPU Torch Available = {}\".format(torch.cuda.is_available()))\nprint(\"Torch Version = {}\".format(torch.__version__))", "GPU Torch Available = True\nTorch Version = 1.8.1+cu101\n" ], [ "# to display the rows and columns of dataset properly \npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', -1)", "_____no_output_____" ] ], [ [ "### **DATA LOADING :**", "_____no_output_____" ] ], [ [ "# loading the data \ndef Load_Excel_Data(path1,path2):\n \"\"\"\n Input : path1 and path2 - file path of excel file of X_train and y_train\n Returns : dataframe \n \"\"\"\n df1=pd.read_excel(path1)\n df2=pd.read_excel(path2)\n df=pd.concat([df1,df2],axis=1)\n df.columns=['Abstract','Domain_Labels']\n return df \n\ndef Get_Sampled_Dataset(df):\n vals=list(df['Domain_Labels'].value_counts())\n min_val=min(vals)\n label_1=df[df['Domain_Labels']==1]\n label_0=df[df['Domain_Labels']==0]\n df0 = label_0.sample(n=min_val,random_state=42)\n new_df=pd.concat([df0,label_1],axis=0)\n new_df=shuffle(new_df)\n return new_df\n\ndef Train_Val_Test(label,undersampling=0):\n \"\"\"\n Input : label - return train, val, test data for given label \n undersampling = 0(default - don't perform undersampling on train data), else : perform undersampling on train dataset \n Returns : dataframe - train, val and test \n \"\"\"\n train_data = Load_Excel_Data(\"/content/gdrive/My Drive/Title Generation NLP/Dataset/X_train.xlsx\",\"/content/gdrive/My Drive/Title Generation NLP/Dataset/y_train_\"+label+\".xlsx\")\n val_data = Load_Excel_Data(\"/content/gdrive/My Drive/Title Generation NLP/Dataset/X_val.xlsx\",\"/content/gdrive/My Drive/Title Generation NLP/Dataset/y_val_\"+label+\".xlsx\")\n test_data = 
Load_Excel_Data(\"/content/gdrive/My Drive/Title Generation NLP/Dataset/X_test.xlsx\",\"/content/gdrive/My Drive/Title Generation NLP/Dataset/y_test_\"+label+\".xlsx\")\n if undersampling!=0:\n train_data=Get_Sampled_Dataset(train_data)\n return train_data, val_data, test_data \n", "_____no_output_____" ] ], [ [ "**Class Label : LG**", "_____no_output_____" ] ], [ [ "# No undersampling \ntrain_LG, val_LG, test_LG = Train_Val_Test(\"LG\")", "_____no_output_____" ], [ "# shape of data \nprint(\"Train Shape : \",train_LG.shape)\nprint(\"Validation Shape : \",val_LG.shape)\nprint(\"Test Shape : \",test_LG.shape)", "Train Shape : (25107, 2)\nValidation Shape : (6277, 2)\nTest Shape : (7847, 2)\n" ] ], [ [ "### **MODEL BUILDING :**", "_____no_output_____" ] ], [ [ "def Result_Evaluator(data_val,model):\n \"\"\"\n This function evaluates the f1-score and accuracy for given data \n Input : model - Bert Base/RoBerta Base \n \"\"\"\n model.eval()\n loss_val_total = 0\n predictions, true_vals = [], []\n \n for batch in data_val:\n batch = tuple(b.to(device) for b in batch)\n inputs = {'input_ids':batch[0],'attention_mask': batch[1],'labels':batch[2],}\n\n with torch.no_grad(): \n outputs = model(**inputs)\n \n loss = outputs[0]\n logits = outputs[1]\n loss_val_total += loss.item()\n\n logits = logits.detach().cpu().numpy()\n label_ids = inputs['labels'].cpu().numpy()\n predictions.append(logits)\n true_vals.append(label_ids)\n \n loss_val_avg = loss_val_total/len(data_val) \n predictions = np.concatenate(predictions, axis=0)\n true_vals = np.concatenate(true_vals, axis=0)\n \n return loss_val_avg, predictions, true_vals", "_____no_output_____" ] ], [ [ "#### **Performance Measure :**", "_____no_output_____" ] ], [ [ "def f1_score_func(preds, labels):\n \"\"\"\n This function returns the f1-score for given and predicted labels \n Input : preds - predicted label \n labels - original label \n Returns : F1 - Score \n \"\"\"\n preds_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return f1_score(labels_flat, preds_flat, average='weighted')\n\ndef Accuracy_Score(preds,labels):\n \"\"\"\n This function returns the Accuracy-score for given and predicted labels \n Input : preds - predicted label \n labels - original label \n Returns : Accuracy - Score \n \"\"\"\n preds_flat=np.argmax(preds,axis=1).flatten()\n labels_flat=labels.flatten()\n return accuracy_score(labels_flat,preds_flat)", "_____no_output_____" ], [ "def Tag_Classifier_Modelling(tokenizer,model,folder_path,file_path):\n \"\"\"\n Perform Modelling using the given tokenizer \n Input : tokenizer - Bert Base/Roberta Base tokenizer \n model - Bert Base/Roberta Base model \n folder_path,file_path - folder and file path to store models \n \"\"\"\n encoded_data_train = tokenizer.batch_encode_plus(train_LG.Abstract.values, \n add_special_tokens=True, \n return_attention_mask=True, \n pad_to_max_length=True,\n max_length=256, \n return_tensors='pt')\n \n encoded_data_val = tokenizer.batch_encode_plus(val_LG.Abstract.values, \n add_special_tokens=True, \n return_attention_mask=True, \n pad_to_max_length=True, \n max_length=256, \n return_tensors='pt')\n \n encoded_data_test = tokenizer.batch_encode_plus(test_LG.Abstract.values, \n add_special_tokens=True, \n return_attention_mask=True, \n pad_to_max_length=True, \n max_length=256, \n return_tensors='pt')\n \n input_ids_train = encoded_data_train['input_ids']\n attention_masks_train = encoded_data_train['attention_mask']\n labels_train = 
torch.tensor(train_LG.Domain_Labels.values)\n \n input_ids_val = encoded_data_val['input_ids']\n attention_masks_val = encoded_data_val['attention_mask']\n labels_val = torch.tensor(val_LG.Domain_Labels.values)\n \n input_ids_test = encoded_data_test['input_ids']\n attention_masks_test = encoded_data_test['attention_mask']\n labels_test = torch.tensor(test_LG.Domain_Labels.values)\n \n dataset_train = TensorDataset(input_ids_train, attention_masks_train, labels_train)\n dataset_val = TensorDataset(input_ids_val, attention_masks_val, labels_val)\n dataset_test = TensorDataset(input_ids_test, attention_masks_test, labels_test)\n \n batch_size = 4\n \n dataloader_train = DataLoader(dataset_train, \n sampler=RandomSampler(dataset_train), \n batch_size=batch_size)\n \n dataloader_validation = DataLoader(dataset_val, \n sampler=SequentialSampler(dataset_val), \n batch_size=batch_size)\n \n dataloader_test = DataLoader(dataset_test, \n sampler=SequentialSampler(dataset_test), \n batch_size=batch_size)\n \n optimizer = AdamW(model.parameters(),lr=1e-5, eps=1e-8)\n \n epochs = 5\n \n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,num_training_steps=len(dataloader_train)*epochs)\n \n val_f1_list=[]\n \n for epoch in tqdm(range(1, epochs+1)):\n model.train()\n loss_train_total = 0\n model = model.to(device)\n\n progress_bar = tqdm(dataloader_train, desc='Epoch {:1d}'.format(epoch), leave=False, disable=False)\n for batch in progress_bar:\n model.zero_grad()\n batch = tuple(b.to(device) for b in batch)\n inputs = {'input_ids':batch[0],'attention_mask': batch[1],'labels':batch[2],} \n outputs = model(**inputs)\n \n loss = outputs[0]\n loss_train_total += loss.item()\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n optimizer.step()\n scheduler.step()\n \n progress_bar.set_postfix({'training_loss': '{:.3f}'.format(loss.item()/len(batch))})\n \n # Model Save\n model_save_path = folder_path\n torch.save(model.state_dict(), model_save_path + file_path +str(epoch)+ '.pth') \n\n tqdm.write(f'\\nEpoch {epoch}')\n \n loss_train_avg = loss_train_total/len(dataloader_train) \n tqdm.write(f'Training loss: {loss_train_avg}')\n \n val_loss, predictions, true_vals = Result_Evaluator(dataloader_validation,model)\n val_f1 = f1_score_func(predictions, true_vals)\n val_acc=Accuracy_Score(predictions,true_vals)\n tqdm.write(f'Validation loss: {val_loss}')\n tqdm.write(f'F1 Score (Weighted): {val_f1}')\n tqdm.write(f'Accuracy Score: {val_acc}')\n val_f1_list.append(val_f1)\n\n maxi_f1=np.argmax(val_f1_list)+1 \n model.load_state_dict(torch.load(model_save_path +file_path+str(maxi_f1)+'.pth', map_location=torch.device('cpu')))\n\n _, predictions, true_vals = Result_Evaluator(dataloader_test,model)\n\n return predictions,true_vals", "_____no_output_____" ], [ "tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\nmodel = BertForSequenceClassification.from_pretrained(\"bert-base-cased\",\n num_labels=2,\n output_attentions=False,\n output_hidden_states=False)", "_____no_output_____" ], [ "seed_val = 17\nrandom.seed(seed_val)\nnp.random.seed(seed_val)\ntorch.manual_seed(seed_val)\ntorch.cuda.manual_seed_all(seed_val)\n\ntest_pred, test_true_vals = Tag_Classifier_Modelling(tokenizer,model,\"/content/gdrive/My Drive/Title Generation NLP/Bert_Models/\",\"Bert_LG_Final_Epoch_\")", "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. 
Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n" ], [ "# Test data results \ntest_f1 = f1_score_func(test_pred, test_true_vals)\ntest_acc=Accuracy_Score(test_pred,test_true_vals)\ntqdm.write(f'F1 Score (Weighted): {test_f1}')\ntqdm.write(f'Accuracy Score: {test_acc}')", "F1 Score (Weighted): 0.5557375370821629\nAccuracy Score: 0.6003568242640499\n" ], [ "def plot_confusion_matrix(cm,\n target_names,\n title='Confusion matrix',\n cmap=None,\n normalize=False):\n \"\"\"\n given a sklearn confusion matrix (cm), make a nice plot\n\n Arguments\n ---------\n cm: confusion matrix from sklearn.metrics.confusion_matrix\n target_names: given classification classes such as [0, 1, 2]\n the class names, for example: ['high', 'medium', 'low']\n title: the text to display at the top of the matrix\n cmap: the gradient of the values displayed from matplotlib.pyplot.cm\n see http://matplotlib.org/examples/color/colormaps_reference.html\n plt.get_cmap('jet') or plt.cm.Blues\n\n normalize: If False, plot the raw numbers\n If True, plot the proportions\n\n Usage\n -----\n plot_confusion_matrix(cm = cm, # confusion matrix created by\n # sklearn.metrics.confusion_matrix\n normalize = True, # show proportions\n target_names = y_labels_vals, # list of names of the classes\n title = best_estimator_name) # title of graph\n\n Citiation\n ---------\n http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n\n \"\"\"\n accuracy = np.trace(cm) / np.sum(cm).astype('float')\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title,fontsize=20)\n plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=0,fontsize=20)\n plt.yticks(tick_marks, target_names,fontsize=20)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",fontsize=30,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",fontsize=30,\n color=\"white\" if cm[i, j] > thresh else \"black\" )\n\n\n plt.tight_layout()\n plt.ylabel('True label',fontsize=16)\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass),fontsize=16)\n plt.show()\n\ndef plot(neglabel,poslabel,preds,labels):\n \"\"\"\n input : neglabel - false label\n poslabel - true label \n y_true - true y_labels \n y_pred - predicted y_labels\n plots : confusion matrix for the y_true and y_pred that is being passed \n \"\"\"\n y_pred = np.argmax(preds, axis=1).flatten()\n y_true = labels.flatten()\n # Classification Report\n print(Report(y_true,y_pred))\n # Label Names\n target_names = [neglabel,poslabel]\n # Confusion Matrix\n cm = confusion_matrix(y_true, y_pred)\n # Actual Plot (Downloadble)\n plot_confusion_matrix(cm, target_names)\n\nplot(\"Non-LG\",\"LG\",test_pred, test_true_vals)", " precision recall f1-score support\n\n 0 0.65 0.83 0.73 5100\n 1 0.35 0.17 0.23 2747\n\n accuracy 0.60 7847\n macro avg 0.50 0.50 0.48 7847\nweighted avg 
0.55 0.60 0.56 7847\n\n" ], [ "# save the test true labels and test predictions \nnp.save(\"LG_test_pred\",test_pred)\nnp.save(\"LG_test_true_labels\",test_true_vals)", "_____no_output_____" ] ] ]
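A possible follow-up sketch (not part of the original notebook): reloading one of the saved checkpoints and classifying a single new abstract. The checkpoint path, chosen epoch, and example abstract are hypothetical.

```python
import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=2)
model.load_state_dict(torch.load('Bert_LG_Final_Epoch_1.pth', map_location='cpu'))  # hypothetical path
model.eval()

abstract = "We propose a regularization scheme for training deep networks ..."  # hypothetical input
enc = tokenizer(abstract, truncation=True, padding='max_length', max_length=256, return_tensors='pt')

with torch.no_grad():
    logits = model(**enc).logits
print('LG' if logits.argmax(dim=1).item() == 1 else 'Non-LG')
```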
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecaeecb1f84bbf5b31fb7ecbf93cf91483134fef
19,085
ipynb
Jupyter Notebook
research/mapillary_vistas/train.ipynb
Nhat-Minh-Hoang-Tran-BSC2021/Intel-Convolutional_Neural_Networks
bbc2c1adb38bfd7c066a4fe6b5a0e0dab6df21cb
[ "MIT" ]
2
2019-04-02T02:36:41.000Z
2021-01-18T15:20:39.000Z
research/mapillary_vistas/train.ipynb
Nhat-Minh-Hoang-Tran-BSC2021/Intel-Convolutional_Neural_Networks
bbc2c1adb38bfd7c066a4fe6b5a0e0dab6df21cb
[ "MIT" ]
1
2020-05-20T08:40:00.000Z
2020-05-20T08:40:00.000Z
research/mapillary_vistas/train.ipynb
Nhat-Minh-Hoang-Tran-BSC2021/Intel-Convolutional_Neural_Networks
bbc2c1adb38bfd7c066a4fe6b5a0e0dab6df21cb
[ "MIT" ]
2
2019-04-22T09:19:13.000Z
2020-02-12T18:01:32.000Z
39.760417
448
0.590097
[ [ [ "## Mounting Google Drive locally", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/gdrive')", "_____no_output_____" ] ], [ [ "## Installing pycocotools\n\nThis library will be used for computing the evaluation metrics following the COCO metric for intersection over union (IoU).", "_____no_output_____" ] ], [ [ "%%shell\n\n# Install pycocotools\ngit clone https://github.com/cocodataset/cocoapi.git\ncd cocoapi/PythonAPI\npython setup.py build_ext install", "_____no_output_____" ] ], [ [ "## Import necessary packages", "_____no_output_____" ] ], [ [ "import os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\n\nimport torchvision\nfrom torchvision import models\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\nimport numpy as np\nfrom PIL import Image", "_____no_output_____" ] ], [ [ "## Writing a custom dataset for Mapillary Vistas\n\nThe [torchvision reference scripts for training object detection, instance segmentation and person keypoint detection](https://github.com/pytorch/vision/tree/v0.3.0/references/detection) allows for easily supporting adding new custom datasets.\nThe dataset should inherit from the standard `torch.utils.data.Dataset` class, and implement `__len__` and `__getitem__`.\n\nThe only specificity that we require is that the dataset `__getitem__` should return:\n\n* image: a PIL Image of size (H, W)\n* target: a dict containing the following fields\n * `boxes` (`FloatTensor[N, 4]`): the coordinates of the `N` bounding boxes in `[x0, y0, x1, y1]` format, ranging from `0` to `W` and `0` to `H`\n * `labels` (`Int64Tensor[N]`): the label for each bounding box\n * `image_id` (`Int64Tensor[1]`): an image identifier. It should be unique between all the images in the dataset, and is used during evaluation\n * `area` (`Tensor[N]`): The area of the bounding box. This is used during evaluation with the COCO metric, to separate the metric scores between small, medium and large boxes.\n * `iscrowd` (`UInt8Tensor[N]`): instances with `iscrowd=True` will be ignored during evaluation.\n * (optionally) `masks` (`UInt8Tensor[N, H, W]`): The segmentation masks for each one of the objects\n * (optionally) `keypoints` (`FloatTensor[N, K, 3]`): For each one of the `N` objects, it contains the `K` keypoints in `[x, y, visibility]` format, defining the object. `visibility=0` means that the keypoint is not visible. Note that for data augmentation, the notion of flipping a keypoint is dependent on the data representation, and you should probably adapt `references/detection/transforms.py` for your new keypoint representation\n\nIf your model returns the above methods, they will make it work for both training and evaluation, and will use the evaluation scripts from pycocotools.\n\nAdditionally, if you want to use aspect ratio grouping during training (so that each batch only contains images with similar aspect ratio), then it is recommended to also implement a `get_height_and_width` method, which returns the height and the width of the image. 
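A minimal sketch of such a method (an assumption, not part of the original notebook), written to match the attributes of the dataset class defined below; it reads only the image header via PIL so the large Vistas images are not decoded:

```python
def get_height_and_width(self, idx):
    # Open lazily: PIL reads the header without decoding the pixel data.
    img_path = os.path.join(self.root, 'training', 'images', self.imgs[idx])
    with Image.open(img_path) as img:
        width, height = img.size   # PIL reports (width, height)
    return height, width
```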
If this method is not provided, we query all elements of the dataset via `__getitem__` , which loads the image in memory and is slower than if a custom method is provided.\n", "_____no_output_____" ] ], [ [ "%%shell\n\nunzip /content/gdrive/My\\ Drive/datasets/mapillary-vistas-dataset_public_v1.1.zip \\\n -d /content/mapillary_vistas", "_____no_output_____" ], [ "class MapillaryVistasDataset(Dataset):\n def __init__(self, root, transforms):\n self.root = root\n self.transforms = transforms\n # load all image files, sorting them to\n # ensure that they are aligned\n self.imgs = list(sorted(os.listdir(os.path.join(root, 'training', 'images'))))\n self.masks = list(sorted(os.listdir(os.path.join(root, 'training', 'instances'))))\n\n def __getitem__(self, idx):\n # load images ad masks\n img_path = os.path.join(self.root, 'training', 'images', self.imgs[idx])\n mask_path = os.path.join(self.root, 'training', 'labels', self.masks[idx])\n img = Image.open(img_path).convert(\"RGB\")\n # note that we haven't converted the mask to RGB,\n # because each color corresponds to a different instance\n # with 0 being background\n mask = Image.open(mask_path)\n # convert the PIL Image into a numpy array\n mask = np.array(mask)\n # instances are encoded as different colors\n obj_ids = np.unique(mask)\n# # first id is the background, so remove it\n# obj_ids = obj_ids[1:]\n\n # split the color-encoded mask into a set\n # of binary masks\n masks = mask == obj_ids[:, None, None]\n\n # get bounding box coordinates for each mask\n num_objs = len(obj_ids)\n boxes = []\n for i in range(num_objs):\n pos = np.where(masks[i])\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n boxes.append([xmin, ymin, xmax, ymax])\n\n # convert everything into a torch.Tensor\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n # there is only one class\n labels = torch.ones((num_objs,), dtype=torch.int64)\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n\n image_id = torch.tensor([idx])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n", "_____no_output_____" ] ], [ [ "## Defining your model\n\nIn this tutorial, we will be using [Mask R-CNN](https://arxiv.org/abs/1703.06870), which is based on top of [Faster R-CNN](https://arxiv.org/abs/1506.01497). 
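Before looking at the model itself, a rough sanity check of the dataset class above can be useful (a sketch, assuming the archive really was extracted to `/content/mapillary_vistas` as in the earlier cell):

```python
dataset = MapillaryVistasDataset('/content/mapillary_vistas', transforms=None)
img, target = dataset[0]

print(img.size)                                    # PIL image, (width, height)
print(target['boxes'].shape, target['labels'].shape, target['masks'].shape)
```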
Faster R-CNN is a model that predicts both bounding boxes and class scores for potential objects in the image.\n\n<p align=center>\n <img src=https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image03.png>\n</p>\n\nMask R-CNN adds an extra branch into Faster R-CNN, which also predicts segmentation masks for each instance.\n\n<p align=center>\n <img src=https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image04.png>\n</p>", "_____no_output_____" ], [ "### 1 - Finetuning from a pretrained model\n\n```python\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n\n# load a model pre-trained pre-trained on COCO\nmodel = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n\n# replace the classifier with a new one, that has\n# num_classes which is user-defined\nnum_classes = 2 # 1 class (person|) + background\n# get number of input features for the classifier\nin_features = model.roi_heads.box_predictor.cls_score.in_features\n# replace the pre-trained head with a new one\nmodel.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n```", "_____no_output_____" ], [ "### 2 - Modifying the model to add a different backbone\n\n```python\nfrom torchvision.models.detection import FasterRCNN\nfrom torchvision.models.detection.rpn import AnchorGenerator\n\n# load a pre-trained model for classification and return\n# only the features\nbackbone = torchvision.models.mobilenet_v2(pretrained=True).features\n# FasterRCNN needs to know the number of\n# output channels in a backbone. For mobilenet_v2, it's 1280\n# so we need to add it here\nbackbone.out_channels = 1280\n\n# let's make the RPN generate 5 x 3 anchors per spatial\n# location, with 5 different sizes and 3 different aspect\n# ratios. We have a Tuple[Tuple[int]] because each feature\n# map could potentially have different sizes and\n# aspect ratios\nanchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n aspect_ratios=((0.5, 1.0, 2.0),))\n\n# let's define what are the feature maps that we will\n# use to perform the region of interest cropping, as well as\n# the size of the crop after rescaling.\n# if your backbone returns a Tensor, featmap_names is expected to\n# be [0]. 
More generally, the backbone should return an\n# OrderedDict[Tensor], and in featmap_names you can choose which\n# feature maps to use.\nroi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],\n output_size=7,\n sampling_ratio=2)\n\n# put the pieces together inside a FasterRCNN model\nmodel = FasterRCNN(backbone,\n num_classes=152,\n rpn_anchor_generator=anchor_generator,\n box_roi_pool=roi_pooler)\n```", "_____no_output_____" ], [ "### An Instance segmentation model for PennFudan Dataset\n\nIn our case, we want to fine-tune from a pre-trained model, given that our dataset is very small, so we will be following approach number 1.\n\nHere we want to also compute the instance segmentation masks, so we will be using Mask R-CNN:\n", "_____no_output_____" ] ], [ [ "import torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\n\n\ndef get_model_instance_segmentation(num_classes):\n # load an instance segmentation model pre-trained pre-trained on COCO\n model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n\n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n # now get the number of input features for the mask classifier\n in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels\n hidden_layer = 256\n # and replace the mask predictor with a new one\n model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,\n hidden_layer,\n num_classes)\n\n return model", "_____no_output_____" ] ], [ [ "That's it, this will make model be ready to be trained and evaluated on our custom dataset.\n\n## Training and evaluation functions\n\nIn `references/detection/,` we have a number of helper functions to simplify training and evaluating detection models.\nHere, we will use `references/detection/engine.py`, `references/detection/utils.py` and `references/detection/transforms.py`.\n\nLet's copy those files (and their dependencies) in here so that they are available in the notebook", "_____no_output_____" ] ], [ [ "%%shell\n\n# Download TorchVision repo to use some files from\n# references/detection\ngit clone -b v0.3.0 https://github.com/pytorch/vision.git\ncd vision\n\ncp references/detection/utils.py ../\ncp references/detection/transforms.py ../\ncp references/detection/coco_eval.py ../\ncp references/detection/engine.py ../\ncp references/detection/coco_utils.py ../", "_____no_output_____" ] ], [ [ "## Putting everything together\n\nLet's write some helper functions for data augmentation / transformation, which leverages the functions in `refereces/detection` that we have just copied:", "_____no_output_____" ] ], [ [ "from engine import train_one_epoch, evaluate\nimport utils\nimport transforms as T\n\n\ndef get_transform(train):\n transforms = []\n # converts the image, a PIL image, into a PyTorch Tensor\n transforms.append(T.ToTensor())\n if train:\n # during training, randomly flip the training images\n # and ground-truth for data augmentation\n transforms.append(T.RandomHorizontalFlip(0.5))\n return T.Compose(transforms)", "_____no_output_____" ] ], [ [ "**Note that**: we do not need to add a mean/std normalization nor image rescaling in the data transforms, as those are handled internally by the Mask R-CNN model.\n\nLet’s now write the main function which 
performs the training and the validation:", "_____no_output_____" ] ], [ [ "from engine import train_one_epoch, evaluate\nimport utils\n\n\ndef main():\n # train on the GPU or on the CPU, if a GPU is not available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # the Mapillary Vistas label set used here defines 66 classes (not just background and person)\n num_classes = 66\n # use our dataset and defined transformations\n dataset = MapillaryVistasDataset('/media/storage/datasets/mapillary-vistas-dataset_public_v1.1', get_transform(train=True))\n dataset_test = MapillaryVistasDataset('/media/storage/datasets/mapillary-vistas-dataset_public_v1.1', get_transform(train=False))\n\n # split the dataset in train and test set\n indices = torch.randperm(len(dataset)).tolist()\n dataset = torch.utils.data.Subset(dataset, indices[:-50])\n dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])\n\n # define training and validation data loaders\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=4, shuffle=True, num_workers=4,\n collate_fn=utils.collate_fn)\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=1, shuffle=False, num_workers=4,\n collate_fn=utils.collate_fn)\n\n # get the model using our helper function\n model = get_model_instance_segmentation(num_classes)\n\n # move model to the right device\n model.to(device)\n\n # construct an optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=0.005,\n momentum=0.9, weight_decay=0.0005)\n # and a learning rate scheduler\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n step_size=3,\n gamma=0.1)\n\n # let's train it for 10 epochs\n num_epochs = 10\n\n for epoch in range(num_epochs):\n # train for one epoch, printing every 10 iterations\n train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)\n # update the learning rate\n lr_scheduler.step()\n # evaluate on the test dataset\n evaluate(model, data_loader_test, device=device)\n\n print(\"That's it!\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecaefc776974263fcd81a10829205a784f217d42
64,642
ipynb
Jupyter Notebook
DSA (1).ipynb
krish1511/Data-Structures-and-Algorithms
58df16a05d93cb662f6433c29cf5c6f9a3405a0b
[ "MIT" ]
1
2020-06-30T19:36:28.000Z
2020-06-30T19:36:28.000Z
DSA (1).ipynb
krish1511/Data-Structures-and-Algorithms
58df16a05d93cb662f6433c29cf5c6f9a3405a0b
[ "MIT" ]
null
null
null
DSA (1).ipynb
krish1511/Data-Structures-and-Algorithms
58df16a05d93cb662f6433c29cf5c6f9a3405a0b
[ "MIT" ]
null
null
null
32.256487
129
0.437564
[ [ [ "# Bubble Sort", "_____no_output_____" ], [ "## Bubble sort using for loop ", "_____no_output_____" ] ], [ [ "def bubbleSort(nums):\n for i in range(len(nums)-1 , 0 , -1):\n for j in range(i):\n if nums[j] > nums[j+1]:\n nums[j] , nums[j+1] = nums[j+1] , nums[j]\n print(nums)\n \nnums=[6,4,2,7,1,3]\nbubbleSort(nums)", "[1, 2, 3, 4, 6, 7]\n" ], [ "def bubbleSort(nums):\n for i in range(len(nums)-1):\n for j in range(len(nums)-1):\n if nums[j] > nums[j+1]:\n nums[j] , nums[j+1] = nums[j+1] , nums[j]\n \n print(nums)\n \n \nnums=[3,9,4,2,1,5]\nbubbleSort(nums)\n", "[1, 2, 3, 4, 5, 9]\n" ] ], [ [ "## bubble sort using while loop", "_____no_output_____" ] ], [ [ "def bubbleSort(nums):\n i = 0\n while i < len(nums)-1:\n j = 0\n while j < len(nums)-1:\n if nums[j] > nums[j+1]:\n nums[j] , nums[j+1] = nums[j+1] , nums[j]\n j+=1\n i+=1\n \n print(nums)\n \n \nnums=[3,9,4,2,1,5]\nbubbleSort(nums)\n", "[1, 2, 3, 4, 5, 9]\n" ] ], [ [ "# Selection Sort ", "_____no_output_____" ], [ "## Selection Sort using for loop", "_____no_output_____" ] ], [ [ "def selectionSort(nums):\n for i in range(len(nums)):\n minPos = i\n for j in range(i,len(nums)):\n if nums[j] < nums[minPos]:\n minPos = j\n nums[i] , nums[minPos] = nums[minPos] , nums[i]\n print(nums)\nnums = [9,3,1,2,7,4]\nselectionSort(nums)", "[1, 2, 3, 4, 7, 9]\n" ] ], [ [ "## Selection Sort using while loop ", "_____no_output_____" ] ], [ [ "def selectionSort(nums):\n i = 0\n while i < len(nums):\n minPos = i\n for j in range(i , len(nums)):\n if nums[j] < nums[minPos]:\n minPos = j\n nums[i] , nums[minPos] = nums[minPos] , nums[i]\n \n i+=1\n print(nums)\nnums = [9,6,4,7,2,1]\nselectionSort(nums)", "[1, 2, 4, 6, 7, 9]\n" ] ], [ [ "# InsertionSort", "_____no_output_____" ] ], [ [ "def insertSort(nums):\n for i in range(1, len(nums)):\n cur_ele = nums[i]\n pos = i\n while cur_ele < nums[pos - 1] and pos > 0:\n nums[pos] = nums[pos - 1]\n pos = pos - 1\n nums[pos] = cur_ele\n print(nums)\nnums = [6,8,1,2,3,7]\ninsertSort(nums)", "[1, 2, 3, 6, 7, 8]\n" ] ], [ [ "# MergingSort", "_____no_output_____" ] ], [ [ "def mergeSort(lst):\n # Dividing the list\n if len(lst) > 1:\n mid = len(lst) // 2\n left = lst[:mid]\n right = lst[mid:]\n # dividing the list into two[left and right]\n mergeSort(left)\n mergeSort(right)\n \n # merging the compared values\n i=0\n j=0\n k=0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n lst[k] = left[i]\n i+=1\n k+=1\n else:\n lst[k] = right[j]\n j+=1\n k+=1\n # whether any values are left out\n while i < len(left):\n lst[k] = left[i]\n i+=1\n k+=1\n while j < len(right):\n lst[k] = right[j]\n j+=1\n k+=1\n \n \nnums = int(input('enter the elments you want in list'))\nlst = [int(input()) for i in range(nums)]\nmergeSort(lst)\nprint(lst)", "enter the elments you want in list3\n3\n5\n1\n[1, 3, 5]\n" ], [ "def mergesort(lst):\n # to divide the list\n if len(lst) > 1:\n mid = len(lst) // 2\n left = lst[:mid]\n right = lst[mid:]\n mergesort(left)\n mergesort(right)\n \n # to merge the list\n i = 0\n j = 0\n k = 0\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n lst[k] = left[i]\n i+=1\n \n else:\n lst[k] = right[j]\n j+=1\n k+=1\n # merge the left out values\n while i < len(left):\n lst[k] = left[i]\n i+=1\n k+=1\n while j < len(right):\n lst[k] = right[j]\n j+=1\n k+=1\n \n \nlst = [9,2,4,1,3,7,8]\nmergesort(lst)\nprint(lst)", "[1, 2, 3, 4, 7, 8, 9]\n" ] ], [ [ "# CountingSort", "_____no_output_____" ] ], [ [ "def countingsort(lst, max):\n count = [0] * (max + 1) #7 len(lst)+1 zeros\n \n 
for i in lst:\n count[i] +=1 # it count the repeated(incremented) values\n \n start = 0 # list starting from 0\n for i in range(len(count)): # empty list has to be given of max+1 times\n for j in range(count[i]): # count[i] = according the element count, it increases the count of repeated values\n lst[start] = i #lst[2] = 2 times\n start += 1\n return lst\nlst = [2,9,6,5,2,3]\ncountingsort(lst, max(lst))\n \n ", "_____no_output_____" ] ], [ [ "## AVL", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.height = 1\nclass AVL:\n def getHeight(self, root):\n if not root:\n return 0\n return root.height\n \n def getBalance(self, root):\n if not root:\n return 0\n return self.getHeight(root.left) - self.getHeight(root.right)\n def rightRotate(self, z):\n y = z.left\n T3 = y.right\n # right rotate\n y.right = z\n z.left = T3\n \n z.height = 1 + max(self.getHeight(z.left) , self.getHeight(z.right))\n y.height = 1 + max(self.getHeight(y.left) , self.getHeight(y.right))\n \n return y\n def leftRotate(self, z):\n y = z.right\n T2 = y.left\n # left rotate\n y.left = z\n z.right = T2\n \n z.height = 1 + max(self.getHeight(z.left) , self.getHeight(z.right))\n y.height = 1 + max(self.getHeight(y.left) , self.getHeight(y.right))\n \n return y\n def insert(self, root, data):\n if not root:\n return Node(data)\n elif data < root.data:\n if root.left is None:\n root.left = Node(data)\n else:\n root.left = self.insert(root.left,data)\n else:\n if root.right is None:\n root.right = Node(data)\n else:\n root.right = self.insert(root.right,data)\n # updating heights\n root.height = 1 + max(self.getHeight(root.left) , self.getHeight(root.right))\n # balancing Nodes\n balance = self.getBalance(root)\n # unbalancing nodes\n if balance > 1 and data < root.left.data:\n return self.rightRotate(root)\n if balance < -1 and data > root.right.data:\n return self.leftRotate(root)\n if balance > 1 and data > root.left.data:\n root.left = self.leftRotate(root.left)\n return self.rightRotate(root)\n if balance < -1 and data < root.right.data:\n root.right = self.rightRotate(root.right)\n return self.leftRotate(root)\n return root\n \n def delete(self, root,data):\n if not root:\n return root\n elif data < root.data:\n root.left = self.delete(root.left, data)\n elif data > root.data:\n root.right = self.delete(root.right, data)\n else:\n if root.left is None:\n temp = root.right\n root = None\n return temp\n elif root.right is None:\n temp = root.left\n root = None\n return temp\n \n temp = self.inOrderSuccessor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data)\n if root is None:\n return root\n \n root.height = 1 + max(self.getHeight(root.left) , self.getHeight(root.right))\n \n balance = self.getBalance(root)\n \n if balance > 1 and self.getBalance(root.left) >= 0:\n return self.rightRotate(root)\n if balance < -1 and self.getBalance(root.right) <= 0:\n return self.leftRotate(root)\n if balance > 1 and self.getBalance(root.left) < 0:\n root.left = self.leftRotate(root.left)\n return self.rightRotate(root)\n if balance < -1 and self.getBalance(root.right) > 0:\n root.right = self.rightRotate(root.right)\n return self.leftRotate(root)\n return root\n \n def inOrderSuccessor(self, root):\n myVal = root\n while myVal.left is not None:\n myVal = myVal.left\n return myVal\n \n def preOrder(self, root):\n if not root:\n return 0\n print('{0}'.format(root.data),end=' ')\n self.preOrder(root.left)\n 
self.preOrder(root.right)\navt = AVL()\nroot = None\nnums = [9, 5, 10, 0, 6, 11, -1, 1, 2] \n\nfor num in nums:\n root = avt.insert(root,num)\n \navt.preOrder(root)\nprint()\ndata = 10\nroot = avt.delete(root, data)\n\navt.preOrder(root)\n ", "9 1 0 -1 5 2 6 10 11 \n1 0 -1 9 5 2 6 11 " ] ], [ [ "## CircularQueue", "_____no_output_____" ] ], [ [ "class CircularQueue:\n def __init__(self, size):\n self.size = size\n self.queue = [None for i in range(size)]\n self.rear = self.front = -1\n def enqueue(self, data):\n if (self.rear + 1) % self.size == self.front:\n print('queue is full')\n return\n elif self.rear == -1 and self.front == -1:\n self.rear = 0\n self.front = 0\n self.queue[self.rear] = data\n else:\n self.rear = (self.rear + 1) % self.size\n self.queue[self.rear] = data\n def dequeue(self):\n if self.rear == -1 and self.front == -1:\n print('queue is empty')\n return\n elif self.rear == self.front:\n temp = self.queue[self.front]\n self.rear = -1\n self.front = -1\n return temp\n else:\n temp = self.queue[self.front]\n self.front = (self.front + 1) % self.size\n return temp\n def display(self):\n if (self.rear + 1) % self.size == self.front:\n print('queue is full')\n return\n if self.rear == -1 and self.front == -1:\n print('queue is empty')\n return\n elif self.rear >= self.front:\n print('elements in the circularqueue are :',end=' ')\n for i in range(self.front , self.rear + 1):\n print(self.queue[i],end=' ')\n print()\n else:\n print('elements in the circularqueue are :',end=' ')\n for i in range(self.front , self.size):\n print(self.queue[i],end=' ')\n for i in range(0, self.rear + 1):\n print(self.queue[i],end=' ')\n print()\n \ncq = CircularQueue(6)\ncq.enqueue(10)\ncq.enqueue(20)\ncq.enqueue(30)\ncq.enqueue(40)\ncq.display()\nprint('delete elements:',cq.dequeue())\nprint('delete elements:',cq.dequeue())\ncq.display()\ncq.enqueue(50)\ncq.enqueue(60)\ncq.display()\n ", "elements in the circularqueue are : 10 20 30 40 \ndelete elements: 10\ndelete elements: 20\nelements in the circularqueue are : 30 40 \nelements in the circularqueue are : 30 40 50 60 \n" ] ], [ [ "## BinarySearchTree [BST]", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\nclass BST:\n def __init__(self, root = None):\n self.root = root\n def insert(self, data):\n if self.root is None:\n self.root = Node(data)\n else:\n self._insert(self.root, data)\n def _insert(self, cur_node, data):\n if data < cur_node.data:\n if cur_node.left is None:\n cur_node.left = Node(data)\n else:\n self._insert(cur_node.left, data)\n else:\n if cur_node.right is None:\n cur_node.right = Node(data)\n else:\n self._insert(cur_node.right, data)\n \n def delete(self, cur_node, data):\n if cur_node is None:\n return cur_node\n elif data < cur_node.data :\n cur_node.left = self.delete(cur_node.left, data)\n elif data > cur_node.data:\n cur_node.right = self.delete(cur_node.right, data)\n else:\n if cur_node.left is None:\n temp = cur_node.right\n cur_node = None\n return temp\n elif cur_node.right is None:\n temp = cur_node.left \n cur_node = None\n return temp\n else:\n temp = self.inOrderSuccessor(cur_node.right)\n cur_node.data = temp.data\n cur_node.right = self.delete(cur_node.right, temp.data)\n return cur_node\n \n def inOrderSuccessor(self, cur_node):\n myVal = cur_node\n while myVal.left is not None:\n myVal = myVal.left\n return myVal\n def search(self, cur_node, data):\n if cur_node is None:\n print('data not found')\n return False\n \n elif 
cur_node.data == data :\n print(data found)\n return True\n \n elif data < cur_node.data :\n cur_node.left = self.search(cur_node.left, data)\n else:\n cur_node.right = self.search(cur_node.right,data)\n \n def inOrder(self, cur_node):\n if cur_node:\n self.inOrder(cur_node.left)\n print('{0}'.format(cur_node.data),end= ' ')\n self.inOrder(cur_node.right)\n def preOrder(self, cur_node):\n if cur_node:\n print('{0}'.format(cur_node.data),end= ' ')\n self.preOrder(cur_node.left)\n self.preOrder(cur_node.right)\n def postOrder(self, cur_node):\n if cur_node:\n self.postOrder(cur_node.left)\n self.postOrder(cur_node.right)\n print('{0}'.format(cur_node.data),end= ' ')\n def print(self):\n if self.root is not None:\n self._print(self, cur_node)\n def _print(self, cur_node):\n if cur_node:\n self._print(self, cur_node.left)\n print(cur_node.data)\n self._print(self, cur_node.right)\n ", "_____no_output_____" ] ], [ [ "## AVL", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.height = 1\nclass AVL:\n def getHeight(self , root):\n if not root:\n return 0\n return root.height\n def getBalance(self , root):\n if not root:\n return 0\n return self.getHeight(root.left) - self.getHeight(root.right)\n def rightRotate(self, z):\n y = z.left\n T3 = y.right\n # right rotate\n y.right = z\n z.left = T3\n z.height = 1 + max(self.getHeight(z.left) , self.getHeight(z.right))\n y.height = 1 + max(self.getHeight(y.left) , self.getHeight(y.right))\n return y\n def leftRotate(self, z):\n y = z.right\n T2 = y.left\n # left rotate\n y.left = z\n z.right = T2\n z.height = 1 + max(self.getHeight(z.left) , self.getHeight(z.right))\n y.height = 1 + max(self.getHeight(y.left) , self.getHeight(y.right))\n return y\n def insert(self, root, data):\n if not root:\n return Node(data)\n elif data < root.data :\n if root.left is None:\n root.left = Node(data)\n else:\n root.left = self.insert(root.left, data)\n else:\n if root.right is None:\n root.right = Node(data)\n else:\n root.right = self.insert(root.right, data)\n \n \n # updating height\n root.height = 1 + max(self.getHeight(root.left) , self.getHeight(root.right))\n # balancing factor\n balance = self.getBalance(root)\n # unbalancing factor\n if balance > 1 and data < root.left.data:\n return self.rightRotate(root)\n if balance < -1 and data > root.right.data:\n return self.leftRotate(root)\n if balance > 1 and data > root.left.data:\n root.left = self.leftRotate(root.left)\n return self.rightRotate(root)\n if balance < -1 and data < root.right.data:\n root.right = self.rightRotate(root.right)\n return self.leftRotate(root)\n return root\n \n def delete(self, root , data):\n if not root:\n return 0\n elif data < root.data:\n root.left = self.delete(root.left , data)\n elif data > root.data:\n root.right = self.delete(root.right, data)\n else:\n # case with 0 child or 1 child\n if root.left is None:\n temp = root.right\n root = None\n return temp\n elif root.right is None:\n temp = root.left\n root = None\n return temp\n else:\n temp = self.inOrderSuccessor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data)\n if root is None:\n return root\n \n # updating height\n root.height = 1 + max(self.getHeight(root.left) , self.getHeight(root.right))\n # balancing factor\n balance = self.getBalance(root)\n # unbalancing factor\n if balance > 1 and self.getBalance(root.left) >= 0:\n return self.rightRotate(root)\n if balance < -1 and 
self.getBalance(root.right) <= 0:\n return self.leftRotate(root)\n if balance > 1 and self.getBalance(root.left) < 0:\n root.left = self.leftRotate(root.left)\n return self.rightRotate(root)\n if balance < -1 and self.getBalance(root.right) > 0:\n root.right = self.rightRotate(root.right)\n return self.leftRotate(root)\n return root\n \n def inOrderSuccessor(self, root):\n myVal = root\n while myVal.left is not None:\n myVal = myVal.left\n return myVal\n def preOrder(self, root):\n if not root:\n return 0\n print('{0}'.format(root.data), end=' ')\n self.preOrder(root.left)\n self.preOrder(root.right)\n\n \navl = AVL()\nroot = None\nnums = [9, 5, 10, 0, 6, 11, -1, 1, 2] \nfor i in nums:\n root = avl.insert(root, i)\navl.preOrder(root)\nprint()\ndata = 10\nroot= avl.delete(root, data)\n\navl.preOrder(root)\n ", "9 1 0 -1 5 2 6 10 11 \n1 0 -1 9 5 2 6 11 " ] ], [ [ "## SinglyLinkedList", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.isVisited = False\nclass SinglyLinkedList:\n def __init__(self):\n self.head = None\n def listLength(self):\n length = 0\n currentNode = self.head\n while currentNode is not None:\n currentNode = currentNode.next\n length += 1\n return length\n \n def insertHead(self, newNode):\n temporaryNode = self.head\n self.head = newNode\n newNode.next = temporaryNode\n del temporaryNode\n def insertAt(self, newNode, position):\n if position is 0:\n self.insertHead(newNode)\n return\n if position < 0 or position > self.listLength():\n print('position is invalid')\n return\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n previousNode.next = newNode\n newNode.next = currentNode\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentPos += 1\n def insertEnd(self, newNode):\n if self.head is None:\n self.head = newNode\n return\n lastNode = self.head\n while lastNode.next is not None:\n lastNode = lastNode.next\n lastNode.next = newNode\n def deleteHead(self):\n temporaryNode = self.head\n self.head = self.head.next\n temporaryNode.next = self.head\n del temporaryNode\n def deleteAt(self, position):\n if position is 0:\n self.deleteHead()\n return\n if position < 0 or position > self.listLength()-1:\n print('position is invalid')\n return\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n previousNode.next = currentNode.next\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentPos += 1\n \n def deleteEnd(self):\n if self.head is None:\n print('list is empty')\n return\n lastNode = self.head\n while lastNode.next is not None:\n previousNode = lastNode\n lastNode = lastNode.next\n previousNode.next = lastNode.next\n previousNode.next = None\n def printMiddle(self):\n length = self.listLength()\n if length !=0:\n if length ==1:\n print(self.head.data)\n return\n \n currentNode = self.head\n mid = length // 2\n currentPos = 0\n while True:\n if currentPos == mid:\n print('middle node is',currentNode.data)\n break\n currentNode = currentNode.next\n currentPos +=1\n def countNodes(self):\n currentNode = self.head\n odd = 0\n even = 0\n while currentNode is not None:\n if currentNode.data % 2 is not 0:\n odd += 1\n else:\n even += 1\n \n currentNode = currentNode.next\n print('odd', odd)\n print('even',even)\n \n def printList(self):\n if self.head is None:\n print('list is empty')\n return\n currentNode =self.head\n while True:\n print(currentNode.data)\n currentNode = currentNode.next\n if 
currentNode is None:\n break\ndef detectCycle(linkedList):\n lastNode = linkedList.head\n lastNode.isVisited = False\n while True:\n if lastNode.next.isVisited is True:\n lastNode.next = None\n break\n lastNode = lastNode.next\n lastNode.isVisited = True\n\ndef swap(linkedList, firstNode, secondNode):\n currentNode = linkedList.head\n previousFirst = None\n previousSecond = None\n while True:\n if currentNode.data == firstNode:\n nodeOne = currentNode\n break\n previousFirst = currentNode\n currentNode = currentNode.next\n currentNode = linkedList.head\n while True:\n if currentNode.data == secondNode:\n nodeTwo = currentNode\n break\n previousSecond = currentNode\n currentNode = currentNode.next\n #previousFirst=4,nodeOne=3,nodeOne.next=5,previousSecond=2,nodeTwo=7,nodeTwo.next=1\n previousFirst.next = nodeTwo\n nodeTwoNext = nodeTwo.next\n nodeTwo.next = nodeOne.next\n previousSecond.next = nodeOne\n nodeOne.next = nodeTwoNext\n\ndef swapNode(linkedList,previousNode, largestNode, secondLargestNode): #3,4,5,1\n largestNode.next = secondLargestNode.next\n secondLargestNode.next = largestNode\n if linkedList.head is largestNode:\n linkedList.head = secondLargestNode\n return\n previousNode.next = secondLargestNode\n \ndef sort(linkedList):\n length = linkedList.listLength()\n for i in range(length - 1 , 0 , -1):\n largestNode = linkedList.head\n previousNode = None\n for j in range(i):\n if largestNode.data > largestNode.next.data:\n swapNode(linkedList,previousNode, largestNode , largestNode.next)\n else:\n previousNode = largestNode\n largestNode = largestNode.next\n \ndef removeDuplicates(linkedList):\n lastNode = linkedList.head\n while True:\n if lastNode.data == lastNode.next.data:\n previousNode.next = lastNode.next\n break\n previousNode = lastNode\n lastNode = lastNode.next\n \ndef mergedList(firstList, secondList, mergeList):\n currentFirst = firstList.head\n currentSecond = secondList.head\n while True:\n if currentFirst is None:\n mergeList.insertEnd(currentSecond)\n return\n if currentSecond is None:\n mergeList.insertEnd(currentFirst)\n return\n if currentFirst.data < currentSecond.data:\n currentFirstNext = currentFirst.next\n currentFirst.next = None\n mergeList.insertEnd(currentFirst)\n currentFirst = currentFirstNext\n else:\n currentSecondNext = currentSecond.next\n currentSecond.next = None\n mergeList.insertEnd(currentSecond)\n currentSecond = currentSecondNext\n''' \nfirstList = SinglyLinkedList()\nnode1 = Node(1)\nnode2 = Node(3)\nnode3 = Node(4)\nfirstList.insertEnd(node1)\nfirstList.insertEnd(node2)\nfirstList.insertEnd(node3)\n\nfirstList.printList()\n\nsecondList = SinglyLinkedList()\nnode4 = Node(2)\nnode5 = Node(7)\nnode6 = Node(9)\nsecondList.insertEnd(node4)\nsecondList.insertEnd(node5)\nsecondList.insertEnd(node6)\n\nsecondList.printList()\n\nmergeList = SinglyLinkedList()\nmergedList(firstList, secondList, mergeList)\ndel firstList\ndel secondList\nprint('merge list')\nmergeList.printList()\n'''\nlinkedList = SinglyLinkedList()\nnode1 = Node(1)\nnode2 = Node(2)\nnode3 = Node(3)\nnode4 = Node(3)\nnode5 = Node(5)\nlinkedList.insertEnd(node1)\nlinkedList.insertEnd(node2)\nlinkedList.insertEnd(node3)\nlinkedList.insertEnd(node4)\nlinkedList.insertEnd(node5)\n#linkedList.deleteAt(-1)\n#node3.next = node2\n#detectCycle(linkedList)\n#swap(linkedList,3,7)\n#sort(linkedList)\n#removeDuplicates(linkedList)\n#linkedList.printMiddle()\nlinkedList.countNodes()\nlinkedList.printList()\n", "odd 4\neven 1\n1\n2\n3\n3\n5\n" ] ], [ [ "## CircularSinglyLinkedList ", 
"_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\nclass CircularSinglyLinkedList:\n def __init__(self):\n self.head = None\n def insertHead(self, newNode):\n lastNode = self.head\n while lastNode.next is not self.head:\n lastNode = lastNode.next\n temporaryNode = self.head\n self.head = newNode\n newNode.next = temporaryNode\n lastNode.next = self.head\n del temporaryNode\n def insertAt(self, newNode, position):\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n previousNode.next = newNode\n newNode.next = currentNode\n currentNode.next = self.head\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentPos += 1\n \n def insertEnd(self, newNode):\n if self.head is None:\n self.head = newNode\n newNode.next = self.head\n return\n lastNode = self.head\n while lastNode.next is not self.head:\n lastNode = lastNode.next\n lastNode.next = newNode\n newNode.next = self.head\n def deleteHead(self):\n lastNode = self.head\n while lastNode.next is not self.head:\n lastNode = lastNode.next\n temporaryNode = self.head\n self.head = self.head.next\n temporaryNode.next = self.head\n lastNode.next = self.head\n del temporaryNode\n def deleteAt(self, position):\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n previousNode.next = currentNode.next\n currentNode.next = self.head\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentPos += 1\n def deleteEnd(self):\n lastNode = self.head\n while lastNode.next is not self.head:\n previousNode = lastNode\n lastNode = lastNode.next\n previousNode.next = lastNode.next\n def printList(self):\n if self.head is None:\n print('list is empty')\n return\n currentNode = self.head\n while True:\n print(currentNode.data)\n currentNode = currentNode.next\n if currentNode is self.head:\n break\n print(currentNode.data)\nlinkedList = CircularSinglyLinkedList()\nnode1 = Node('sai')\nnode2 = Node('krish')\nnode3 = Node('sushma')\nlinkedList.insertEnd(node1)\nlinkedList.insertEnd(node2)\nlinkedList.insertEnd(node3)\nlinkedList.deleteAt(1)\nlinkedList.printList()", "sai\nsushma\nsai\n" ] ], [ [ "## CircularDoublyLinkedList", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.previous = None\nclass CircularDoublyLinkedList:\n def __init__(self):\n self.head = None\n def insertHead(self, newNode):\n lastNode = self.head\n while lastNode.next is not self.head:\n lastNode = lastNode.next\n temporaryNode = self.head\n self.head = newNode\n newNode.next = temporaryNode\n lastNode.next = self.head\n self.head.previous = lastNode\n del temporaryNode\n def insertAt(self, newNode, position):\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n previousNode.next = newNode\n newNode.next = currentNode\n currentNode.next = self.head\n self.head.previous = currentNode\n currentNode.previous = newNode\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentPos += 1\n \n def insertEnd(self, newNode):\n if self.head is None:\n self.head = newNode\n newNode.next = self.head\n newNode.previous = self.head\n return\n lastNode = self.head\n while lastNode.next is not self.head:\n lastNode = lastNode.next\n lastNode.next = newNode\n newNode.next = self.head\n self.head.previous = newNode\n newNode.previous = lastNode\n \n def deleteEnd(self):\n lastNode = self.head\n while True:\n if 
lastNode.next.next is self.head: #sai,krish,sushma\n \n lastNode.next = None\n self.head.previous = lastNode\n lastNode.previous = None\n lastNode.next = self.head\n break\n \n lastNode = lastNode.next\n \n def deleteAt(self, position):\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position: # sai,krish,sushma\n currentNode.previous.next = currentNode.next\n currentNode.next.previous = currentNode.previous\n currentNode.next = self.head\n currentNode.previous = self.head\n break\n currentNode = currentNode.next\n currentPos += 1\n \n def deleteHead(self):\n lastNode =self.head\n while lastNode.next is not self.head:\n lastNode = lastNode.next\n temporaryNode = self.head\n self.head = self.head.next\n temporaryNode.next = self.head\n lastNode.next = self.head\n self.head.previous = lastNode\n lastNode.previous = self.head\n \n def printList(self):\n if self.head is None:\n print('list is empty')\n return\n currentNode = self.head\n while True:\n print(currentNode.data)\n currentNode = currentNode.next\n if currentNode is self.head:\n break\n print(currentNode.previous.data)\nlinkedList = CircularDoublyLinkedList()\nnode1 = Node('sai')\nnode2 = Node('krish')\nnode3 = Node('sushma')\nlinkedList.insertEnd(node1)\nlinkedList.insertEnd(node2)\nlinkedList.insertEnd(node3)\nlinkedList.deleteAt(1)\nlinkedList.printList()", "sai\nsushma\nsushma\n" ] ], [ [ "## Doubly LinkedList", "_____no_output_____" ] ], [ [ "class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n self.previous = None\nclass DoublyLinkedList:\n \n def __init__(self):\n self.head = None\n def listLength(self):\n length = 0\n currentNode = self.head\n while currentNode is not None:\n length += 1\n currentNode = currentNode.next\n return length\n def insertHead(self, newNode):\n temporaryNode = self.head\n self.head = newNode\n newNode.next = temporaryNode\n temporaryNode.previous = newNode\n del temporaryNode\n def insertAt(self, newNode, position):\n \n currentNode = self.head\n currentpos = 0\n while True:\n if currentpos == position:\n previousNode.next = newNode\n newNode.next = currentNode\n currentNode.previous = newNode\n newNode.previous = self.head\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentpos += 1\n '''\n if position is 0:\n self.insertHead(newNode)\n return \n if position is self.listLength():\n self.insertEnd(newNode)\n return\n if position < 0 or position > self.listLength():\n print('position is invalid')\n return\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n currentNode.previous.next = newNode\n newNode.previous = currentNode.previous\n newNode.next = currentNode\n currentNode.previous = newNode\n break\n currentNode = currentNode.next\n currentPos +=1\n '''\n def insertEnd(self, newNode):\n if self.head is None:\n self.head = newNode\n return\n lastNode = self.head\n while lastNode.next is not None:\n lastNode = lastNode.next\n lastNode.next = newNode\n newNode.previous = lastNode\n def deleteHead(self):\n '''\n temporaryNode = self.head\n self.head = self.head.next\n temporaryNode.next = self.head\n self.head.previous = temporaryNode.previous\n del temporaryNode\n '''\n self.head = self.head.next\n self.head.previous.next = None\n self.head.previous= None\n \n def deleteAt(self, position):\n if position is 0:\n self.deleteHead()\n return\n if position < 0 or position > self.listLength()-1:\n print('position is invalid')\n return\n currentNode = self.head\n currentPos = 0\n while 
True:\n if currentPos == position:\n previousNode.next = currentNode.next #sai,krish,sushma\n currentNode.previous = None\n break\n previousNode = currentNode\n currentNode = currentNode.next\n currentPos += 1\n '''\n currentNode = self.head\n currentPos = 0\n while True:\n if currentPos == position:\n currentNode.previous.next = currentNode.next\n currentNode.next.previous = currentNode.previous\n currentNode.next = None\n currentNode.previous = None\n break\n currentNode = currentNode.next\n currentPos += 1\n ''' \n def deleteEnd(self):\n lastNode = self.head\n while True:\n if lastNode.next is None: #sai,krish\n previousNode.next = None\n lastNode.previous = previousNode\n break\n previousNode = lastNode\n lastNode = lastNode.next\n \n def deleteNode(self, data):\n currentNode = self.head\n while currentNode is not None:\n if currentNode.data == data:\n # head node deletion\n if currentNode is self.head:\n self.head = self.head.next\n currentNode.next = None\n if self.head is not None:\n self.head.previous = None\n return\n if currentNode.next is None:\n currentNode.previous.next = currentNode.next\n currentNode.previous = None\n currentNode.next = None\n return\n currentNode.previous.next = currentNode.next\n currentNode.next.previous = currentNode.previous\n currentNode.next = None\n currentNode.previous = None\n return\n currentNode = currentNode.next\n print('not found')\n \n def swapHead(self):\n if self.head is None:\n print('list is empty')\n return\n if self.head.next is None:\n return\n lastNode = self.head\n while lastNode.next is not None: #10,20,30\n lastNode = lastNode.next\n self.head.next.previous = lastNode\n lastNode.previous.next = self.head\n lastNode.next = self.head.next\n self.head.previous = lastNode.previous\n lastNode.previous = None\n self.head.next = None\n self.head = lastNode\n \n def printList(self):\n if self.head is None:\n print('list is empty')\n return\n currentNode = self.head\n print('printing from starting')\n while True:\n print(currentNode.data)\n if currentNode.next is None:\n traversalNode = currentNode\n currentNode = currentNode.next\n if currentNode is None:\n break\n print('printing from ending')\n while True:\n if traversalNode is None:\n break\n print(traversalNode.data)\n traversalNode = traversalNode.previous\n \ndef findMiddle(linkedList):\n length = linkedList.listLength()\n currentNode = linkedList.head\n currentPos = 0\n mid = length //2\n while True:\n if currentPos == mid:\n if currentNode.previous.data > currentNode.next.data:\n print('greater')\n else:\n print('not greater')\n break\n currentNode = currentNode.next\n currentPos += 1\n \ndef findOdd(linkedList):\n currentNode = linkedList.head.next\n while currentNode is not None:\n if currentNode.previous.data % 2 is not 0:\n currentNode.data = currentNode.data // 2\n currentNode = currentNode.next\n\ndef reverse(linkedList):\n #2->13->10->20->15->None || None<-2<-13<-10<-20<-15\n #15->20->10->13->2->None || None<-15<-20<-10<-13<-2\n currentNode = linkedList.head\n while currentNode is not None:\n currentNodeNext = currentNode.next\n currentNode.next = currentNode.previous\n currentNode.previous = currentNodeNext\n \n if currentNode.previous is None:\n linkedList.head = currentNode\n currentNode = currentNodeNext\ndef removeDuplicates(linkedList):\n countNode = {}\n currentNode = linkedList.head\n while True:\n if currentNode.data not in countNode.keys(): #13,5,13,2,13\n countNode[currentNode.data] = 1\n else:\n countNode[currentNode.data] += 1\n \n if currentNode.next is None:\n 
\n while True:\n if currentNode.previous is None:\n break\n previousNode = currentNode.previous\n if countNode[currentNode.data] > 1:\n currentNode.previous.next = currentNode.next\n if currentNode.next is not None:\n currentNode.next.previous = currentNode.previous\n return\n currentNode.previous = None\n currentNode.next = None\n countNode[currentNode.data] -=1\n currentNode = previousNode\n break\n currentNode = currentNode.next\n \ndef palindrome(linkedList):\n firstNode = linkedList.head\n secondNode = linkedList.head\n while secondNode.next is not None:\n secondNode = secondNode.next\n while True:\n if firstNode == secondNode:\n print('palindrome')\n return\n elif firstNode.data == secondNode.data:\n firstNode = firstNode.next\n secondNode = secondNode.previous\n else:\n print('not palindrome')\n return\n \nlinkedList = DoublyLinkedList()\nnode1 = Node(10)\nnode2 = Node(5)\nnode3 = Node(20)\nlinkedList.insertEnd(node1)\nlinkedList.insertEnd(node2)\nlinkedList.insertEnd(node3)\n#findMiddle(linkedList)\n#findOdd(linkedList)\n#reverse(linkedList)\n#removeDuplicates(linkedList)\n#palindrome(linkedList)\n#linkedList.deleteNode(20)\nlinkedList.swapHead()\nlinkedList.printList()", "printing from starting\n20\n5\n10\nprinting from ending\n10\n5\n20\n" ] ], [ [ "## Insertion Sort", "_____no_output_____" ] ], [ [ "def insertion_sort(lst):\n for i in range(1, len(lst)):\n cur_ele = lst[i]\n pos = i\n while pos >=1 and cur_ele < lst[pos-1]:\n lst[pos] = lst[pos-1]\n pos -= 1\n lst[pos] = cur_ele\n return lst\nlst = [9,5,8,7,1,2]\ninsertion_sort(lst)", "_____no_output_____" ] ], [ [ "## Shell_Sort", "_____no_output_____" ] ], [ [ "def shell_sort(lst):\n gap = len(lst) // 2\n while gap > 0:\n for i in range(gap, len(lst)):\n cur_ele = lst[i]\n pos = i\n while pos >=gap and cur_ele < lst[pos - gap]:\n lst[pos] = lst[pos - gap]\n pos -= gap\n lst[pos] = cur_ele\n gap = gap // 2\n return lst\nlst = [9,3,4,7,1,2]\nshell_sort(lst)", "_____no_output_____" ] ], [ [ "## Linear Search", "_____no_output_____" ] ], [ [ "pos = -1\ndef linearSearch(lst, num):\n \n i = 0\n while i < len(lst):\n if lst[i] == num:\n globals ()['pos'] = i\n return True\n i +=1\n return False\nlst = [9,3,1,4,5,6]\nnum = 4 \nif linearSearch(lst,num):\n print('found',pos)\nelse:\n print('not found')", "found 3\n" ], [ "pos = -1\ndef linearSearch(lst, n):\n for i in range(len(lst)):\n if lst[i] == n:\n globals()['pos'] = i\n return True\n return False\nlst = [9,3,1,4,7,5]\nn = 5\nif linearSearch(lst,n):\n print('found',pos)\nelse:\n print('not found')", "found 5\n" ] ], [ [ "## Binary Search", "_____no_output_____" ] ], [ [ "\ndef binarySearch(lst, n):\n l = 0\n u = len(lst)-1\n while l <= u:\n mid = (l + u) // 2\n if lst[mid] == n:\n globals()['pos'] = mid\n return True\n else:\n if lst[mid] < n:\n l = mid + 1\n else:\n u = mid - 1\n return False\nlst = [1,6,7,8,9]\nn = 10\nif binarySearch(lst, n):\n print('found at',pos)\nelse:\n print('not found')", "not found\n" ], [ "def binarySearch(lst, n):\n l = 0\n u = len(lst) \n for i in range(u):\n if l <= i :\n mid = (l+u) // 2\n if lst[mid] == n:\n globals()['pos'] = mid\n return True\n else:\n if lst[mid] <n:\n l = mid + 1\n else:\n u = mid - 1\n \n return False\nlst = [2,6,7,8,9,10]\nn = 10\nif binarySearch(lst, n):\n print('found at',pos)\nelse:\n print('not found')", "found at 5\n" ], [ "arr1 = []\nn = int(input('enter the list size you want'))\n\n\nfor i in range(n):\n x = int (input('enter the element in the array'))\n arr1.append(x)\nlst = sorted(arr1)\nprint(lst)\n\ndef 
binarySearch(lst, n):\n l = 0\n u = len(lst) - 1\n for i in range(u):\n if l <= i:\n mid = (l + u) //2\n if lst[mid] == n:\n globals()['pos'] = mid\n return True\n else:\n if lst[mid] < n:\n l = mid + 1\n else:\n u = mid - 1\n return False\nnum = int(input('enter the number you want to search'))\nif binarySearch(lst, num):\n print('found at',pos)\nelse:\n print('not found')\n ", "enter the list size you want3\nenter the element in the array6\nenter the element in the array9\nenter the element in the array7\n[6, 7, 9]\nenter the number you want to search1\nnot found\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ecaf02a80608cc1becec02e80aaec4d78f83053f
13,842
ipynb
Jupyter Notebook
Chapter03/Handling missing values in pandas.ipynb
dragonyy/Mastering-Exploratory-Analysis-with-pandas
d1a6ff1054f303b0b12aa03649c83706f27e779b
[ "MIT" ]
13
2018-11-04T20:02:41.000Z
2021-11-21T01:15:30.000Z
Chapter03/Handling missing values in pandas.ipynb
dragonyy/Mastering-Exploratory-Analysis-with-pandas
d1a6ff1054f303b0b12aa03649c83706f27e779b
[ "MIT" ]
null
null
null
Chapter03/Handling missing values in pandas.ipynb
dragonyy/Mastering-Exploratory-Analysis-with-pandas
d1a6ff1054f303b0b12aa03649c83706f27e779b
[ "MIT" ]
30
2018-10-25T22:41:06.000Z
2022-02-11T14:45:55.000Z
22.398058
89
0.412513
[ [ [ "%%html\n<style>\n.output_wrapper, .output {\n height:auto !important;\n max-height:300px; /* your desired max-height here */\n}\n.output_scroll {\n box-shadow:none !important;\n webkit-box-shadow:none !important;\n}\n</style>", "_____no_output_____" ], [ "from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"", "_____no_output_____" ] ], [ [ "## Import Pandas", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "## Read in the dataset\n\nTitanic Survival Dataset from https://www.kaggle.com/c/titanic/data", "_____no_output_____" ] ], [ [ "data = pd.read_csv('data-titanic.csv')\ndata.head()", "_____no_output_____" ] ], [ [ "## Missing Records", "_____no_output_____" ], [ "### Find out total records in the dataset", "_____no_output_____" ] ], [ [ "data.shape", "_____no_output_____" ] ], [ [ "### Number of valid records per column", "_____no_output_____" ] ], [ [ "data.count()", "_____no_output_____" ] ], [ [ "## Dropping missing records", "_____no_output_____" ], [ "### Drop all records that have one or more missing values", "_____no_output_____" ] ], [ [ "data_missing_dropped = data.dropna()\ndata_missing_dropped.shape", "_____no_output_____" ] ], [ [ "### Drop only those rows that have all records missing", "_____no_output_____" ] ], [ [ "data_all_missing_dropped = data.dropna(how=\"all\")\ndata_all_missing_dropped.shape", "_____no_output_____" ] ], [ [ "## Fill in missing data", "_____no_output_____" ], [ "### Fill in missing data with zeros", "_____no_output_____" ] ], [ [ "data_filled_zeros = data.fillna(0)\ndata_filled_zeros.count()", "_____no_output_____" ] ], [ [ "### Fill in missing data with a mean of the values from other rows", "_____no_output_____" ] ], [ [ "data_filled_in_mean = data.copy()\ndata_filled_in_mean.Age.fillna(data.Age.mean(), inplace=True)\ndata_filled_in_mean.count()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecaf048eb1a3a9013663ceaee1ca7d288b6f1b90
193,831
ipynb
Jupyter Notebook
notebooks/examples.ipynb
csinva/data-viz-utils
0041d35f33f2866bb295e8f0c975344addc1e6a9
[ "MIT" ]
14
2020-01-09T05:27:54.000Z
2021-12-12T13:32:56.000Z
notebooks/examples.ipynb
csinva/data-viz-utils
0041d35f33f2866bb295e8f0c975344addc1e6a9
[ "MIT" ]
null
null
null
notebooks/examples.ipynb
csinva/data-viz-utils
0041d35f33f2866bb295e8f0c975344addc1e6a9
[ "MIT" ]
3
2021-03-18T03:24:02.000Z
2021-03-23T07:22:01.000Z
737
58,412
0.952907
[ [ [ "%load_ext autoreload\n%autoreload 2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport seaborn as sns\nimport dvu\nimport seaborn as sns\nfrom sklearn import datasets\nfrom sklearn import decomposition\nfrom sklearn.datasets import load_diabetes\nimport pandas as pd\nfrom os.path import join as oj\ndvu.set_style()\ncolors = ['#6E8E96', '#D3787D', '#AC3931']\nout_dir = '../docs/img'", "_____no_output_____" ] ], [ [ "# generate some data", "_____no_output_____" ] ], [ [ "data_all = load_diabetes()\ndf = pd.DataFrame(data_all.data, columns=data_all.feature_names)\ndf.head()\n\nd = df.copy() # d is same data, but with a categorical var\nremap = {-0.044641636506989: 'male', 0.0506801187398187: 'female'}\nd['sex'] = [remap[v] for v in df.sex]", "_____no_output_____" ] ], [ [ "**extended heatmap**", "_____no_output_____" ] ], [ [ "mat = np.random.randn(10, 10)\ncond1 = mat.sum(axis=0)\ncond2 = mat.sum(axis=1)\ndvu.heatmap_extended(mat, cond1, cond2, show_cbar=True, annot=False)\nplt.savefig(oj(out_dir, 'plot_heatmap_extended.png'), dpi=300)", "_____no_output_____" ] ], [ [ "**pc plot**", "_____no_output_____" ] ], [ [ "pca = decomposition.PCA()\npca.fit(df)\ncomps = pca.components_.transpose()\nplt.figure(figsize=(6, 5))\ndvu.plot_pcs(pca, list(df.columns))\nplt.savefig(oj(out_dir, 'plot_pcs.png'), dpi=300)\nplt.show()", "/Users/chandan/Desktop/data-viz-utils/data-viz-utils/dvu.py:158: UserWarning: FixedFormatter should only be used together with FixedLocator\n ax.set_yticklabels(list(feat_names))\n/usr/local/Cellar/ipython/6.4.0_1/libexec/vendor/lib/python3.7/site-packages/ipykernel_launcher.py:6: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n \n/usr/local/Cellar/ipython/6.4.0_1/libexec/lib/python3.7/site-packages/IPython/core/pylabtools.py:125: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.\n fig.canvas.print_figure(bytes_io, **kw)\n" ] ], [ [ "**grouped jointplot**", "_____no_output_____" ] ], [ [ "dvu.jointplot_grouped('age', 'bmi', 'sex', d)\nplt.savefig(oj(out_dir, 'plot_joint_grouped.png'), dpi=300)", "_____no_output_____" ] ], [ [ "**auto-label lines**", "_____no_output_____" ] ], [ [ "x = np.arange(30)\ny1 = np.sin(x)\ny2 = np.cos(x)\ny3 = np.tanh(x)\nplt.plot(x, y1, label='sin')\nplt.plot(x, y2, label='cos')\nplt.plot(x, y3, label='tanh')\nplt.xlabel('Time')\n\n# original legend\nplt.legend()\nplt.savefig(oj(out_dir, 'plot_labeled_lines_orig.png'), dpi=300)\nplt.gca().get_legend().remove()\n\n# dvu line legend\ndvu.line_legend(fontsize=18.2, adjust_text_labels=True, extra_spacing=0.15)\nplt.savefig(oj(out_dir, 'plot_labeled_lines.png'), dpi=300)\n \n# dark mode legend\ndvu.invert_plot()\nplt.savefig(oj(out_dir, 'plot_labeled_lines_dark.png'), dpi=300)", "_____no_output_____" ] ], [ [ "**2-legend plot**", "_____no_output_____" ] ], [ [ "# cs_d = {'Male': 'red', 'Female': 'blue'}\n# cs = ['#91bfdb','#dec34b','#fc8d59']\nmarkersize_rescaling = 0.01\nxlab = 'age'\nylab = 'bmi'\ncolorlab = 'sex'\nsizelab = 'bp'\nx = df[xlab]\ny = d[ylab]\nc = d[colorlab] #[cs_d[x] for x in d['sex']] # d['Severity 3-day'] #\ns = (d[sizelab] - d[sizelab].min() + 0.01) / markersize_rescaling\n\ndvu.scatter_2_legends(x, y, c, s,\n xlab, ylab, colorlab, sizelab,\n markersize_rescaling, figsize=(7, 5))\nplt.savefig(oj(out_dir, 'plot_scatter_2_legends.png'), dpi=300)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecaf0e17cce8c561584e4c0e72d15ecd3d856e42
1,530
ipynb
Jupyter Notebook
04_3_InverseVI_multi_class_cdc17/03_Tiergarten/01_prepare_demand_data_Tiergarten.ipynb
jingzbu/InverseVITraffic
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
[ "MIT" ]
null
null
null
04_3_InverseVI_multi_class_cdc17/03_Tiergarten/01_prepare_demand_data_Tiergarten.ipynb
jingzbu/InverseVITraffic
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
[ "MIT" ]
null
null
null
04_3_InverseVI_multi_class_cdc17/03_Tiergarten/01_prepare_demand_data_Tiergarten.ipynb
jingzbu/InverseVITraffic
c0d33d91bdd3c014147d58866c1a2b99fb8a9608
[ "MIT" ]
null
null
null
23.181818
91
0.452288
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecaf133f40e9e8b7e61d3c04d17e39eb86b48608
23,537
ipynb
Jupyter Notebook
code/analysis/STRING preprocessed and adjacency matrix .ipynb
shreyaraghavendra/BoltBio
b9b152170f466e9024efb4837f3b09ddf91689f5
[ "Apache-2.0" ]
null
null
null
code/analysis/STRING preprocessed and adjacency matrix .ipynb
shreyaraghavendra/BoltBio
b9b152170f466e9024efb4837f3b09ddf91689f5
[ "Apache-2.0" ]
9
2021-11-16T05:22:45.000Z
2022-02-01T10:34:07.000Z
code/analysis/STRING preprocessed and adjacency matrix .ipynb
shreyaraghavendra/BoltBio
b9b152170f466e9024efb4837f3b09ddf91689f5
[ "Apache-2.0" ]
3
2021-11-19T11:38:22.000Z
2022-01-10T09:29:16.000Z
69.430678
13,124
0.798148
[ [ [ "1. STRING Pre Processing\n\n\n\n```\nOriginal dataset downloaded from \nhttps://stringdb-static.org/download/protein.links.v11.5/9606.protein.links.v11.5.txt.gz \nusing the homo sapiens filter.\n\nPre-processed data with HUGO symbols and relevant nodes downloaded from the NDEx compendium web server \nhttp://www.ndexbio.org/#/networkset/e8ebbdde-86dc-11e7-a10d-0ac135e8bacf?accesskey=7fbd23635b798321954e66c63526c46397a3f45b40298cf43f22d07d4feed0fa\n\nhttps://github.com/schulter/EMOGI/blob/master/network_preprocessing/preprocessing_STRINGdb.ipynb\nhttps://github.com/idekerlab/Network_Evaluation_Tools/blob/master/Network%20Processing%20Notebooks/STRING%20Processing.ipynb\n\nCombined score in STRING PPI:\nhttp://version10.string-db.org/help/faq/\n```\n\n\n\n", "_____no_output_____" ] ], [ [ "import pandas as pd\n!pip install mygene\nimport mygene\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\ndef get_gene_symbols_from_proteins(list_of_ensembl_ids):\n # get Ensembl IDs for gene names\n mg = mygene.MyGeneInfo()\n res = mg.querymany(list_of_ensembl_ids,\n scopes='ensembl.protein',\n fields='symbol',\n species='human', returnall=True\n )\n def get_symbol_and_ensembl(d):\n if 'symbol' in d:\n return [d['query'], d['symbol']]\n else:\n return [d['query'], None]\n\n node_names = [get_symbol_and_ensembl(d) for d in res['out']]\n # now, retrieve the names and IDs from a dictionary and put in DF\n node_names = pd.DataFrame(node_names, columns=['Ensembl_ID', 'Symbol']).set_index('Ensembl_ID')\n node_names.dropna(axis=0, inplace=True)\n return node_names", "Defaulting to user installation because normal site-packages is not writeable\nRequirement already satisfied: mygene in /home/gee3/.local/lib/python3.8/site-packages (3.2.2)\nRequirement already satisfied: biothings-client>=0.2.6 in /home/gee3/.local/lib/python3.8/site-packages (from mygene) (0.2.6)\nRequirement already satisfied: requests>=2.3.0 in /home/gee3/.local/lib/python3.8/site-packages (from biothings-client>=0.2.6->mygene) (2.27.1)\nRequirement already satisfied: certifi>=2017.4.17 in /home/gee3/.local/lib/python3.8/site-packages (from requests>=2.3.0->biothings-client>=0.2.6->mygene) (2021.10.8)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /home/gee3/.local/lib/python3.8/site-packages (from requests>=2.3.0->biothings-client>=0.2.6->mygene) (1.26.8)\nRequirement already satisfied: idna<4,>=2.5 in /home/gee3/.local/lib/python3.8/site-packages (from requests>=2.3.0->biothings-client>=0.2.6->mygene) (3.3)\nRequirement already satisfied: charset-normalizer~=2.0.0 in /home/gee3/.local/lib/python3.8/site-packages (from requests>=2.3.0->biothings-client>=0.2.6->mygene) (2.0.10)\n" ], [ "string_ppis = pd.read_csv('9606.protein.links.v11.5.txt', sep=' ')\nfig = plt.figure(figsize=(14, 8))\nsns.distplot(string_ppis.combined_score, kde_kws={'bw': 20})", "/home/gee3/.local/lib/python3.8/site-packages/seaborn/distributions.py:2619: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n/home/gee3/.local/lib/python3.8/site-packages/seaborn/distributions.py:1699: FutureWarning: The `bw` parameter is deprecated in favor of `bw_method` and `bw_adjust`. 
Using 20 for `bw_method`, but please see the docs for the new parameters and update your code.\n warnings.warn(msg, FutureWarning)\n" ], [ "high_conf_string_ppis = string_ppis[string_ppis.combined_score >= 850].copy() # same as 0.85 threshold (they multiplied by 1000 to have ints)\nhigh_conf_string_ppis.loc[:, 'protein1'] = [i[1] for i in high_conf_string_ppis.protein1.str.split('.')]\nhigh_conf_string_ppis.loc[:, 'protein2'] = [i[1] for i in high_conf_string_ppis.protein2.str.split('.')]\n# high_conf_string_ppis.to_csv('../data/networks/string_ENSP_ids_highconf.tsv', sep='\\t', compression='gzip')", "_____no_output_____" ], [ "ens_names = high_conf_string_ppis.protein1.append(high_conf_string_ppis.protein2).unique()\nens_to_symbol = get_gene_symbols_from_proteins(ens_names)", "querying 1-1000...done.\nquerying 1001-2000...done.\nquerying 2001-3000...done.\nquerying 3001-4000...done.\nquerying 4001-5000...done.\nquerying 5001-6000...done.\nquerying 6001-7000...done.\nquerying 7001-8000...done.\nquerying 8001-9000...done.\nquerying 9001-10000...done.\nquerying 10001-11000...done.\nquerying 11001-12000...done.\nquerying 12001-12885...done.\nFinished.\n350 input query terms found no hit:\n\t['ENSP00000035383', 'ENSP00000062104', 'ENSP00000204615', 'ENSP00000205890', 'ENSP00000215739', 'ENS\n" ], [ "p1_incl = high_conf_string_ppis.join(ens_to_symbol, on='protein1', how='inner', rsuffix='_p1')\nboth_incl = p1_incl.join(ens_to_symbol, on='protein2', how='inner', rsuffix='_p2')\nstring_edgelist_symbols = both_incl.drop(['protein1', 'protein2'], axis=1)\nstring_edgelist_symbols.columns = ['confidence', 'partner1', 'partner2']\nstring_ppi_final = string_edgelist_symbols[['partner1', 'partner2', 'confidence']]", "_____no_output_____" ], [ "G = nx.from_pandas_edgelist(string_ppi_final, source='partner1', target='partner2')\nprint (\"Network with {} nodes and {} edges\".format(G.number_of_nodes(), G.number_of_edges()))", "Network with 12412 nodes and 139565 edges\n" ], [ "string_ppi_final.to_csv('string_preprocessed.tsv', sep='\\t', compression='gzip')", "_____no_output_____" ], [ "import networkx as nx", "_____no_output_____" ], [ "import scipy", "_____no_output_____" ], [ "import numpy as npb", "_____no_output_____" ], [ "am = nx.adjacency_matrix(G, nodelist=None, weight='weight')", "_____no_output_____" ], [ "dm = am.toarray()", "_____no_output_____" ], [ "filename='adjacency_matrix.txt'\nnp.savetxt(filename,dm)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecaf1d2ff3d80176f503a571c97ba85e8557bffa
5,778
ipynb
Jupyter Notebook
demo/CELImplementation.ipynb
kapilt/cel-python
4ed717892b7506107f93898e389fa4659db6e978
[ "Apache-2.0" ]
null
null
null
demo/CELImplementation.ipynb
kapilt/cel-python
4ed717892b7506107f93898e389fa4659db6e978
[ "Apache-2.0" ]
null
null
null
demo/CELImplementation.ipynb
kapilt/cel-python
4ed717892b7506107f93898e389fa4659db6e978
[ "Apache-2.0" ]
null
null
null
22.053435
134
0.488404
[ [ [ "# CEL -- Common Expression Language", "_____no_output_____" ], [ "## Agenda\n\n- Why CEL?\n\n- About CEL\n\n- Processing\n\n- Implementation Details", "_____no_output_____" ], [ "## Why CEL?\n\nhttps://github.com/cloud-custodian/cloud-custodian/issues/5759", "_____no_output_____" ], [ "## About CEL\n\n- Keep it small & fast.\n\n- Make it extensible.\n\n- Developer-friendly. Similar to C/C++/Java/JavaScript.", "_____no_output_____" ], [ "## Processing\n\n1. Text of the expression. ``355. / 113.``\n\n2. Abstract Syntax Tree. Created by a `lark` parser. ``Tree('expr', [Tree('literal', [Token]), Tree('literal', [Token])])``.\n\n3. Compiler to produce an executable form. This is Python. Nothing much to do here. \n\n4. Context with variables. None for this example.\n\n5. Evaluation to apply executable expression to variables to get a response.", "_____no_output_____" ] ], [ [ "import celpy\nenv = celpy.Environment()\nast = env.compile(\"355. / 113.\")\nprgm = env.program(ast)\nprgm.evaluate({})", "_____no_output_____" ] ], [ [ "## Implementation\n\n- Lark used to do two things: Lexical Scanning and Parsing.\n\n - Lexical Scanning locates language tokens: int, float, identifier, operator, etc.\n \n - Parsing recognizes higher-level (possibly recursive) constructs.\n \n- An \"evaluator\" is a Lark ``Interpreter`` subclass.\n\n - Operators mapped to functions with possible run-time overrides.\n\n- Sits on top of ``celtypes`` module with Python implementations of the CEL data types.\n\n - Provides GO-like semantics", "_____no_output_____" ], [ "## Lark EBNF Rules\n\n```\nexpr : conditionalor [\"?\" conditionalor \":\" expr]\n\nconditionalor : [conditionalor \"||\"] conditionaland\n\nconditionaland : [conditionaland \"&&\"] relation\n\nrelation : [relation_lt | relation_le | relation_ge | relation_gt | relation_eq | relation_ne | relation_in] addition\n```", "_____no_output_____" ], [ "## Lark EBNF Tokens\n\n```\nINT_LIT : /-?/ /0x/ HEXDIGIT+ | /-?/ DIGIT+\n\nUINT_LIT : INT_LIT /[uU]/\n```", "_____no_output_____" ], [ "## Rule Implementation\n\n```\n func = self.functions[\"_?_:_\"]\n cond_value, left, right = cast(Tuple[Result, Result, Result], self.visit_children(tree))\n try:\n return func(cond_value, left, right)\n except TypeError as ex:\n logger.debug(f\"{func.__name__}({left}, {right}) --> {ex}\")\n err = (\n f\"found no matching overload for _?_:_ \"\n f\"applied to '({type(cond_value)}, {type(left)}, {type(right)})'\"\n )\n value = CELEvalError(err, ex.__class__, ex.args, tree=tree)\n value.__cause__ = ex\n return value\n```", "_____no_output_____" ], [ "## celtypes\n\nA lot of this.\n\n```\nclass DoubleType(float):\n def __truediv__(self, other: Any) -> 'DoubleType':\n if cast(float, other) == 0.0:\n return DoubleType(\"inf\")\n else:\n return DoubleType(super().__truediv__(other))\n\n```", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ecaf2acb42ac3abfffeccf714f9797286bfa7d43
193,938
ipynb
Jupyter Notebook
2.0-amazon-sagemaker/2.1-image-classification-transfer-learning/Image-classification-transfer-learning.ipynb
obernardocosta/aws-ml-demos
f3929d0e1e334b376ae3a8312067d9f7093184cb
[ "Apache-2.0" ]
1
2020-08-31T13:25:00.000Z
2020-08-31T13:25:00.000Z
2.0-amazon-sagemaker/2.1-image-classification-transfer-learning/Image-classification-transfer-learning.ipynb
bcosta12/aws-ml-demos
f3929d0e1e334b376ae3a8312067d9f7093184cb
[ "Apache-2.0" ]
null
null
null
2.0-amazon-sagemaker/2.1-image-classification-transfer-learning/Image-classification-transfer-learning.ipynb
bcosta12/aws-ml-demos
f3929d0e1e334b376ae3a8312067d9f7093184cb
[ "Apache-2.0" ]
1
2022-02-18T18:51:24.000Z
2022-02-18T18:51:24.000Z
233.098558
39,361
0.904634
[ [ [ "# Image classification transfer learning demo\n\n1. [Introduction](#Introduction)\n2. [Prerequisites and Preprocessing](#Prequisites-and-Preprocessing)\n3. [Fine-tuning the Image classification model](#Fine-tuning-the-Image-classification-model)\n4. [Set up hosting for the model](#Set-up-hosting-for-the-model)\n 1. [Import model into hosting](#Import-model-into-hosting)\n 2. [Create endpoint configuration](#Create-endpoint-configuration)\n 3. [Create endpoint](#Create-endpoint)\n5. [Perform Inference](#Perform-Inference)\n", "_____no_output_____" ], [ "## Introduction\n\nWelcome to our end-to-end example of distributed image classification algorithm in transfer learning mode. In this demo, we will use the Amazon sagemaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on imagenet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/). \n\nTo get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.", "_____no_output_____" ], [ "## Prequisites and Preprocessing\n\n### Permissions and environment variables\n\nHere we set up the linkage and authentication to AWS services. There are three parts to this:\n\n* The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook\n* The S3 bucket that you want to use for training and model data\n* The Amazon sagemaker image classification docker image which need not be changed", "_____no_output_____" ] ], [ [ "%%time\nimport boto3\nimport re\nfrom sagemaker import get_execution_role\n\nrole = get_execution_role()\n\nbucket = 'amazon-sagemaker-demo1' # customize to your bucket\n\ncontainers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',\n 'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',\n 'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',\n 'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}\ntraining_image = containers[boto3.Session().region_name]\nprint(training_image)", "811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest\nCPU times: user 954 ms, sys: 127 ms, total: 1.08 s\nWall time: 1.03 s\n" ] ], [ [ "## Fine-tuning the Image classification model\n\nThe caltech 256 dataset consist of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category. \n\nThe image classification algorithm can take two types of input formats. The first is a [recordio format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. 
In this example, we will use the recordio format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).", "_____no_output_____" ] ], [ [ "import os\nimport urllib.request\nimport boto3\n\ndef download(url):\n filename = url.split(\"/\")[-1]\n if not os.path.exists(filename):\n urllib.request.urlretrieve(url, filename)\n\n \ndef upload_to_s3(channel, file):\n s3 = boto3.resource('s3')\n data = open(file, \"rb\")\n key = channel + '/' + file\n s3.Bucket(bucket).put_object(Key=key, Body=data)\n\n\n# # caltech-256\ndownload('http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec')\ndownload('http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec')\nupload_to_s3('validation', 'caltech-256-60-val.rec')\nupload_to_s3('train', 'caltech-256-60-train.rec')", "_____no_output_____" ] ], [ [ "Once we have the data available in the correct format for training, the next step is to actually train the model using the data. Before training the model, we need to setup the training parameters. The next section will explain the parameters in detail.", "_____no_output_____" ], [ "## Training parameters\nThere are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:\n\n* **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the \"InputDataConfig\" section. The main parameters that need to be set is the \"ContentType\" which can be set to \"application/x-recordio\" or \"application/x-image\" based on the input data format and the S3Uri which specifies the bucket and the folder where the data is present. \n* **Output specification**: This is specified in the \"OutputDataConfig\" section. We just need to specify the path where the output can be stored after training\n* **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If \"InstanceCount\" is more than 1, then training can be run in a distributed manner. \n\nApart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are:\n\n* **num_layers**: The number of layers (depth) for the network. We use 18 in this samples but other values such as 50, 152 can be used.\n* **num_training_samples**: This is the total number of training samples. It is set to 15420 for caltech dataset with the current split\n* **num_classes**: This is the number of output classes for the new dataset. Imagenet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class\n* **epochs**: Number of training epochs\n* **learning_rate**: Learning rate for training\n* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run", "_____no_output_____" ], [ "After setting training parameters, we kick off training, and poll for status until training is completed, which in this example, takes between 10 to 12 minutes per epoch on a p2.xlarge machine. The network typically converges after 10 epochs. ", "_____no_output_____" ] ], [ [ "# The algorithm supports multiple network depth (number of layers). 
They are 18, 34, 50, 101, 152 and 200\n# For this training, we will use 18 layers\nnum_layers = 18\n# we need to specify the input image shape for the training data\nimage_shape = \"3,224,224\"\n# we also need to specify the number of training samples in the training set\n# for caltech it is 15420\nnum_training_samples = 15420\n# specify the number of output classes\nnum_classes = 257\n# batch size for training\nmini_batch_size = 128\n# number of epochs\nepochs = 2\n# learning rate\nlearning_rate = 0.01\ntop_k=2\n# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be \n# initialized with pre-trained weights\nuse_pretrained_model = 1", "_____no_output_____" ] ], [ [ "# Training\nRun the training using Amazon sagemaker CreateTrainingJob API", "_____no_output_____" ] ], [ [ "%%time\nimport time\nimport boto3\nfrom time import gmtime, strftime\n\n\ns3 = boto3.client('s3')\n# create unique job name \njob_name_prefix = 'sagemaker-imageclassification-notebook'\ntimestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\njob_name = job_name_prefix + timestamp\ntraining_params = \\\n{\n # specify the training docker image\n \"AlgorithmSpecification\": {\n \"TrainingImage\": training_image,\n \"TrainingInputMode\": \"File\"\n },\n # SPOT\n \"EnableManagedSpotTraining\": True,\n \"RoleArn\": role,\n \"OutputDataConfig\": {\n \"S3OutputPath\": 's3://{}/{}/output'.format(bucket, job_name_prefix)\n },\n \"ResourceConfig\": {\n \"InstanceCount\": 1,\n \"InstanceType\": \"ml.p2.8xlarge\",\n \"VolumeSizeInGB\": 50\n },\n \"TrainingJobName\": job_name,\n \"HyperParameters\": {\n \"image_shape\": image_shape,\n \"num_layers\": str(num_layers),\n \"num_training_samples\": str(num_training_samples),\n \"num_classes\": str(num_classes),\n \"mini_batch_size\": str(mini_batch_size),\n \"epochs\": str(epochs),\n \"learning_rate\": str(learning_rate),\n \"use_pretrained_model\": str(use_pretrained_model)\n },\n \"StoppingCondition\": {\n \"MaxRuntimeInSeconds\": 3600,\n \"MaxWaitTimeInSeconds\": 3600\n },\n#Training data should be inside a subdirectory called \"train\"\n#Validation data should be inside a subdirectory called \"validation\"\n#The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)\n \"InputDataConfig\": [\n {\n \"ChannelName\": \"train\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": 's3://{}/train/'.format(bucket),\n \"S3DataDistributionType\": \"FullyReplicated\"\n }\n },\n \"ContentType\": \"application/x-recordio\",\n \"CompressionType\": \"None\"\n },\n {\n \"ChannelName\": \"validation\",\n \"DataSource\": {\n \"S3DataSource\": {\n \"S3DataType\": \"S3Prefix\",\n \"S3Uri\": 's3://{}/validation/'.format(bucket),\n \"S3DataDistributionType\": \"FullyReplicated\"\n }\n },\n \"ContentType\": \"application/x-recordio\",\n \"CompressionType\": \"None\"\n }\n ]\n}\nprint('Training job name: {}'.format(job_name))\nprint('\\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))", "Training job name: sagemaker-imageclassification-notebook-2019-10-16-17-27-11\n\nInput Data Location: {'S3DataType': 'S3Prefix', 'S3Uri': 's3://amazon-sagemaker-demo1/train/', 'S3DataDistributionType': 'FullyReplicated'}\nCPU times: user 5.59 ms, sys: 0 ns, total: 5.59 ms\nWall time: 5.46 ms\n" ], [ "%%time\n# create the Amazon SageMaker training job\nsagemaker = 
boto3.client(service_name='sagemaker')\nsagemaker.create_training_job(**training_params)\n\n# confirm that the training job has started\nstatus = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']\nprint('Training job current status: {}'.format(status))\n\ntry:\n # wait for the job to finish and report the ending status\n sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)\n training_info = sagemaker.describe_training_job(TrainingJobName=job_name)\n status = training_info['TrainingJobStatus']\n print(\"Training job ended with status: \" + status)\nexcept:\n print('Training failed to start')\n # if exception is raised, that means it has failed\n message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']\n print('Training failed with the following error: {}'.format(message))", "Training job current status: InProgress\nTraining job ended with status: Completed\nCPU times: user 98.7 ms, sys: 6.95 ms, total: 106 ms\nWall time: 8min\n" ], [ "training_info = sagemaker.describe_training_job(TrainingJobName=job_name)\nstatus = training_info['TrainingJobStatus']\nprint(\"Training job ended with status: \" + status)", "Training job ended with status: Completed\n" ] ], [ [ "If you see the message,\n\n> `Training job ended with status: Completed`\n\nthen that means training sucessfully completed and the output model was stored in the output path specified by `training_params['OutputDataConfig']`.\n\nYou can also view information about and the status of a training job using the AWS SageMaker console. Just click on the \"Jobs\" tab.", "_____no_output_____" ], [ "# Inference\n\n***\n\nA trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the topic mixture representing a given document.\n\nThis section involves several steps,\n\n1. [Create Model](#CreateModel) - Create model for the training output\n1. [Create Endpoint Configuration](#CreateEndpointConfiguration) - Create a configuration defining an endpoint.\n1. [Create Endpoint](#CreateEndpoint) - Use the configuration to create an inference endpoint.\n1. [Perform Inference](#Perform Inference) - Perform inference on some input data using the endpoint.", "_____no_output_____" ], [ "## Create Model\n\nWe now create a SageMaker Model from the training output. 
Using the model we can create an Endpoint Configuration.", "_____no_output_____" ] ], [ [ "%%time\nimport boto3\nfrom time import gmtime, strftime\nimport uuid\nsage = boto3.Session().client(service_name='sagemaker') \n\nmodel_name=\"test-image-classification-model\"+'-'+(str(uuid.uuid4()).split('-')[0])\nprint(model_name)\ninfo = sage.describe_training_job(TrainingJobName=job_name)\nmodel_data = info['ModelArtifacts']['S3ModelArtifacts']\nprint(model_data)\ncontainers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',\n 'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',\n 'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',\n 'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}\nhosting_image = containers[boto3.Session().region_name]\nprimary_container = {\n 'Image': hosting_image,\n 'ModelDataUrl': model_data,\n}\n\ncreate_model_response = sage.create_model(\n ModelName = model_name,\n ExecutionRoleArn = role,\n PrimaryContainer = primary_container)\n\nprint(create_model_response['ModelArn'])", "test-image-classification-model-744f8b96\ns3://amazon-sagemaker-demo1/sagemaker-imageclassification-notebook/output/sagemaker-imageclassification-notebook-2019-10-16-17-27-11/output/model.tar.gz\narn:aws:sagemaker:us-east-1:853202365172:model/test-image-classification-model-744f8b96\nCPU times: user 72.3 ms, sys: 0 ns, total: 72.3 ms\nWall time: 380 ms\n" ] ], [ [ "### Create Endpoint Configuration\nAt launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.\n\nIn addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.", "_____no_output_____" ] ], [ [ "from time import gmtime, strftime\n\ntimestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\nendpoint_config_name = job_name_prefix + '-epc-' + timestamp\nendpoint_config_response = sage.create_endpoint_config(\n EndpointConfigName = endpoint_config_name,\n ProductionVariants=[{\n 'InstanceType':'ml.m4.xlarge',\n 'InitialInstanceCount':1,\n 'ModelName':model_name,\n 'VariantName':'AllTraffic'}])\n\nprint('Endpoint configuration name: {}'.format(endpoint_config_name))\nprint('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))", "Endpoint configuration name: sagemaker-imageclassification-notebook-epc--2019-10-16-17-37-08\nEndpoint configuration arn: arn:aws:sagemaker:us-east-1:853202365172:endpoint-config/sagemaker-imageclassification-notebook-epc--2019-10-16-17-37-08\n" ] ], [ [ "### Create Endpoint\nLastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. 
This takes 9-11 minutes to complete.", "_____no_output_____" ] ], [ [ "%%time\nimport time\n\ntimestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())\nendpoint_name = job_name_prefix + '-ep-' + timestamp\nprint('Endpoint name: {}'.format(endpoint_name))\n\nendpoint_params = {\n 'EndpointName': endpoint_name,\n 'EndpointConfigName': endpoint_config_name,\n}\nendpoint_response = sagemaker.create_endpoint(**endpoint_params)\nprint('EndpointArn = {}'.format(endpoint_response['EndpointArn']))", "Endpoint name: sagemaker-imageclassification-notebook-ep--2019-10-16-17-37-13\nEndpointArn = arn:aws:sagemaker:us-east-1:853202365172:endpoint/sagemaker-imageclassification-notebook-ep--2019-10-16-17-37-13\nCPU times: user 14.9 ms, sys: 0 ns, total: 14.9 ms\nWall time: 223 ms\n" ] ], [ [ "Finally, now the endpoint can be created. It may take sometime to create the endpoint...", "_____no_output_____" ] ], [ [ "%%time\n# get the status of the endpoint\nresponse = sagemaker.describe_endpoint(EndpointName=endpoint_name)\nstatus = response['EndpointStatus']\nprint('EndpointStatus = {}'.format(status))\n\n\n# wait until the status has changed\nsagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)\n\n# print the status of the endpoint\nendpoint_response = sagemaker.describe_endpoint(EndpointName=endpoint_name)\nstatus = endpoint_response['EndpointStatus']\nprint('Endpoint creation ended with EndpointStatus = {}'.format(status))\n\nif status != 'InService':\n raise Exception('Endpoint creation failed.')", "EndpointStatus = InService\nEndpoint creation ended with EndpointStatus = InService\nCPU times: user 20 ms, sys: 0 ns, total: 20 ms\nWall time: 244 ms\n" ] ], [ [ "If you see the message,\n\n> `Endpoint creation ended with EndpointStatus = InService`\n\nthen congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the \"Endpoints\" tab in the AWS SageMaker console.\n\nWe will finally create a runtime object from which we can invoke the endpoint.", "_____no_output_____" ], [ "## Perform Inference\nFinally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.\n", "_____no_output_____" ] ], [ [ "import boto3\nruntime = boto3.Session().client(service_name='runtime.sagemaker') ", "_____no_output_____" ] ], [ [ "### Download test image", "_____no_output_____" ] ], [ [ "!wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg\n!wget -O /tmp/soccer-ball.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/193.soccer-ball/193_0009.jpg\n!wget -O /tmp/chess-board.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/037.chess-board/037_0018.jpg\n!wget -O /tmp/cartman.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/032.cartman/032_0013.jpg\n!wget -O /tmp/bear.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/009.bear/009_0002.jpg\nfile_names = ['/tmp/bear.jpg', '/tmp/soccer-ball.jpg', '/tmp/chess-board.jpg', '/tmp/cartman.jpg', '/tmp/test.jpg']", "--2019-10-16 19:05:40-- http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg\nResolving www.vision.caltech.edu (www.vision.caltech.edu)... 34.208.54.77\nConnecting to www.vision.caltech.edu (www.vision.caltech.edu)|34.208.54.77|:80... 
connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 23750 (23K) [image/jpeg]\nSaving to: ‘/tmp/test.jpg’\n\n/tmp/test.jpg 100%[===================>] 23.19K --.-KB/s in 0.07s \n\n2019-10-16 19:05:40 (320 KB/s) - ‘/tmp/test.jpg’ saved [23750/23750]\n\n--2019-10-16 19:05:41-- http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/193.soccer-ball/193_0009.jpg\nResolving www.vision.caltech.edu (www.vision.caltech.edu)... 34.208.54.77\nConnecting to www.vision.caltech.edu (www.vision.caltech.edu)|34.208.54.77|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 28851 (28K) [image/jpeg]\nSaving to: ‘/tmp/soccer-ball.jpg’\n\n/tmp/soccer-ball.jp 100%[===================>] 28.17K --.-KB/s in 0.08s \n\n2019-10-16 19:05:41 (365 KB/s) - ‘/tmp/soccer-ball.jpg’ saved [28851/28851]\n\n--2019-10-16 19:05:41-- http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/037.chess-board/037_0018.jpg\nResolving www.vision.caltech.edu (www.vision.caltech.edu)... 34.208.54.77\nConnecting to www.vision.caltech.edu (www.vision.caltech.edu)|34.208.54.77|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 29502 (29K) [image/jpeg]\nSaving to: ‘/tmp/chess-board.jpg’\n\n/tmp/chess-board.jp 100%[===================>] 28.81K --.-KB/s in 0.07s \n\n2019-10-16 19:05:41 (408 KB/s) - ‘/tmp/chess-board.jpg’ saved [29502/29502]\n\n--2019-10-16 19:05:41-- http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/032.cartman/032_0013.jpg\nResolving www.vision.caltech.edu (www.vision.caltech.edu)... 34.208.54.77\nConnecting to www.vision.caltech.edu (www.vision.caltech.edu)|34.208.54.77|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 12570 (12K) [image/jpeg]\nSaving to: ‘/tmp/cartman.jpg’\n\n/tmp/cartman.jpg 100%[===================>] 12.28K --.-KB/s in 0s \n\n2019-10-16 19:05:41 (219 MB/s) - ‘/tmp/cartman.jpg’ saved [12570/12570]\n\n--2019-10-16 19:05:42-- http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/009.bear/009_0002.jpg\nResolving www.vision.caltech.edu (www.vision.caltech.edu)... 34.208.54.77\nConnecting to www.vision.caltech.edu (www.vision.caltech.edu)|34.208.54.77|:80... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 23992 (23K) [image/jpeg]\nSaving to: ‘/tmp/bear.jpg’\n\n/tmp/bear.jpg 100%[===================>] 23.43K --.-KB/s in 0.07s \n\n2019-10-16 19:05:42 (328 KB/s) - ‘/tmp/bear.jpg’ saved [23992/23992]\n\n" ], [ "import json\nimport numpy as np\n\n# test image\nfrom IPython.display import Image, display\nfor file_name in file_names:\n display(Image(file_name))\n with open(file_name, 'rb') as f:\n payload = f.read()\n payload = bytearray(payload)\n response = runtime.invoke_endpoint(EndpointName=endpoint_name, \n ContentType='application/x-image', \n Body=payload)\n result = response['Body'].read()\n # result will be in json format and convert it to ndarray\n result = json.loads(result)\n # the result will output the probabilities for all classes\n # find the class with maximum probability and print the class index\n index = np.argmax(result)\n object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 
'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']\n print(\"Result: label - \" + object_categories[index] + \", probability - \" + str(result[index]))", "_____no_output_____" ] ], [ [ "### Clean up\n\nWhen we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint.", "_____no_output_____" ] ], [ [ "sage.delete_endpoint(EndpointName=endpoint_name)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ecaf44efb701284110f933d19e713b514b664d56
4,839
ipynb
Jupyter Notebook
notebooks/augmentations.ipynb
neptune-ml/open-solution-salt-identification
394f16b23b6e30543aee54701f81a06b5dd92a98
[ "MIT" ]
80
2018-08-29T17:51:41.000Z
2020-01-14T03:20:37.000Z
notebooks/augmentations.ipynb
neptune-ai/open-solution-salt-identification
394f16b23b6e30543aee54701f81a06b5dd92a98
[ "MIT" ]
66
2018-08-29T09:58:51.000Z
2019-07-21T13:55:11.000Z
notebooks/augmentations.ipynb
neptune-ml/open-solution-salt-identification
394f16b23b6e30543aee54701f81a06b5dd92a98
[ "MIT" ]
39
2018-08-30T03:07:53.000Z
2019-08-25T21:21:12.000Z
24.815385
88
0.493284
[ [ [ "%matplotlib inline\nimport sys\nsys.path.append('../')\n\nimport numpy as np\nimport glob\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom common_blocks.augmentation import iaa, PadFixed, RandomCropFixedSize\nfrom common_blocks.utils import plot_list", "_____no_output_____" ], [ "IMG_DIR = 'YOUR/DIR'\nIMG_DIR = '/mnt/ml-team/minerva/open-solutions/salt/data/train/images'\n\nIMG_IDX = 592\nimg_filepath = sorted(glob.glob('{}/*'.format(IMG_DIR)))[IMG_IDX]\nimg = np.array(Image.open(img_filepath)).astype(np.uint8)\nplt.imshow(img)", "_____no_output_____" ] ], [ [ "# Affine augmentations", "_____no_output_____" ] ], [ [ "affine_seq = iaa.Sequential([\n# General\n iaa.SomeOf((1, 2),\n [iaa.Fliplr(0.5),\n iaa.Affine(rotate=(-10, 10), \n translate_percent={\"x\": (-0.05, 0.05)},\n mode='edge'), \n ]),\n# Deformations\n iaa.Sometimes(0.3, iaa.PiecewiseAffine(scale=(0.04, 0.08))),\n iaa.Sometimes(0.3, iaa.PerspectiveTransform(scale=(0.05, 0.1))),\n], random_order=True)", "_____no_output_____" ], [ "AUG_NR = 6\naug_imgs = []\nfor _ in range(AUG_NR):\n aug_img = affine_seq.augment_image(img)\n aug_imgs.append(aug_img)\nplot_list(images=aug_imgs)", "_____no_output_____" ] ], [ [ "# Intensity Augmentations", "_____no_output_____" ] ], [ [ "intensity_seq = iaa.Sequential([\n iaa.Invert(0.3),\n iaa.Sometimes(0.3, iaa.ContrastNormalization((0.5, 1.5))),\n iaa.OneOf([\n iaa.Noop(),\n iaa.Sequential([\n iaa.OneOf([\n iaa.Add((-10, 10)),\n iaa.AddElementwise((-10, 10)),\n iaa.Multiply((0.95, 1.05)),\n iaa.MultiplyElementwise((0.95, 1.05)),\n ]),\n ]),\n iaa.OneOf([\n iaa.GaussianBlur(sigma=(0.0, 1.0)),\n iaa.AverageBlur(k=(2, 5)),\n iaa.MedianBlur(k=(3, 5))\n ])\n ])\n], random_order=False)", "_____no_output_____" ], [ "AUG_NR = 6\naug_imgs = []\nfor _ in range(AUG_NR):\n aug_img = intensity_seq.augment_image(img)\n aug_imgs.append(aug_img)\nplot_list(images=aug_imgs)", "_____no_output_____" ] ], [ [ "# Resize+Pad+Augmentations\nproposed by Heng CherKeng", "_____no_output_____" ] ], [ [ "def resize_pad_seq(resize_target_size, pad_method, pad_size):\n seq = iaa.Sequential([\n affine_seq,\n iaa.Scale({'height': resize_target_size, 'width': resize_target_size}),\n PadFixed(pad=(pad_size, pad_size), pad_method=pad_method),\n ], random_order=False)\n return seq\n\nheng_seq = resize_pad_seq(101,'edge',13)", "_____no_output_____" ], [ "AUG_NR = 6\naug_imgs = []\nfor _ in range(AUG_NR):\n aug_img = heng_seq.augment_image(img)\n aug_imgs.append(aug_img)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ecaf89ec6cc04bfe8a0c6fe98da3da5593da696f
11,023
ipynb
Jupyter Notebook
class-2020-04-17/Class Notes.ipynb
spu-bigdataanalytics-201/class-materials
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
[ "MIT" ]
null
null
null
class-2020-04-17/Class Notes.ipynb
spu-bigdataanalytics-201/class-materials
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
[ "MIT" ]
3
2021-06-08T21:12:36.000Z
2022-03-12T00:22:27.000Z
class-2020-04-17/Class Notes.ipynb
spu-bigdataanalytics-201/class-materials
3a7c84d851c4aadd702420e7d4a5d0e80ada0387
[ "MIT" ]
1
2020-04-24T23:48:50.000Z
2020-04-24T23:48:50.000Z
22.821946
101
0.442529
[ [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "### Pandas Dtypes", "_____no_output_____" ], [ "#### Dataframe Simple", "_____no_output_____" ] ], [ [ "df = pd.DataFrame([{'id': 1.0, 'name': 'Metin'}])", "_____no_output_____" ], [ "df.id", "_____no_output_____" ], [ "df.name", "_____no_output_____" ], [ "df.info(verbose=False)", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1 entries, 0 to 0\nColumns: 2 entries, id to name\ndtypes: float64(1), object(1)\nmemory usage: 144.0+ bytes\n" ] ], [ [ "#### Dataframe - Using Dtypes", "_____no_output_____" ] ], [ [ "df = pd.DataFrame([{'id': 1.0, 'name': 'Metin'}])", "_____no_output_____" ], [ "df.id = df.id.astype(int)\ndf.id", "_____no_output_____" ], [ "df.name = df.name.astype(str)\ndf.name", "_____no_output_____" ], [ "df.info(verbose=False)", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1 entries, 0 to 0\nColumns: 2 entries, id to name\ndtypes: int32(1), object(1)\nmemory usage: 140.0+ bytes\n" ] ], [ [ "#### When you Increase the Numbers", "_____no_output_____" ], [ "1 row -> 4 bytes\n100M row -> 40 MB", "_____no_output_____" ], [ "### Pandas Chunking", "_____no_output_____" ] ], [ [ "df = pd.read_csv('sample.csv', encoding='ANSI')", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "1e6", "_____no_output_____" ], [ "# byte divided by 1000000\nsum(df.memory_usage()) / 1e6", "_____no_output_____" ], [ "df_chunks = pd.read_csv('sample.csv', encoding='ANSI', chunksize=1000)\ndf_chunks", "_____no_output_____" ], [ "for df in df_chunks:\n print(df.shape, \"row index start: \", df.index.min(), \"row index end: \", df.index.max())", "(1000, 6) row index start: 0 row index end: 999\n(1000, 6) row index start: 1000 row index end: 1999\n(1000, 6) row index start: 2000 row index end: 2999\n(1000, 6) row index start: 3000 row index end: 3999\n(1000, 6) row index start: 4000 row index end: 4999\n(1000, 6) row index start: 5000 row index end: 5999\n(1000, 6) row index start: 6000 row index end: 6999\n(1000, 6) row index start: 7000 row index end: 7999\n(1000, 6) row index start: 8000 row index end: 8999\n(1000, 6) row index start: 9000 row index end: 9999\n(1000, 6) row index start: 10000 row index end: 10999\n(1000, 6) row index start: 11000 row index end: 11999\n(1000, 6) row index start: 12000 row index end: 12999\n(1000, 6) row index start: 13000 row index end: 13999\n(617, 6) row index start: 14000 row index end: 14616\n" ] ], [ [ "### Definition of Big Data", "_____no_output_____" ], [ "Relational vs NoSQL Database\n\ncheckout mongodb.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
ecaf8f3ea7e9cb0d7df70eaa9b4732f0d73c0bcb
44,470
ipynb
Jupyter Notebook
examples/noisy_linear/noisy_linear.ipynb
ptigwe/treex
c46687376ccc50c8fea6cb8617e22e4b4dd1924a
[ "MIT" ]
null
null
null
examples/noisy_linear/noisy_linear.ipynb
ptigwe/treex
c46687376ccc50c8fea6cb8617e22e4b4dd1924a
[ "MIT" ]
null
null
null
examples/noisy_linear/noisy_linear.ipynb
ptigwe/treex
c46687376ccc50c8fea6cb8617e22e4b4dd1924a
[ "MIT" ]
null
null
null
80.270758
19,666
0.815561
[ [ [ "# isort:skip_file", "_____no_output_____" ] ], [ [ "# Treex\n\n**Main features**:\n* Modules contain their parameters\n* Easy transfer learning\n* Simple initialization\n* No metaclass magic\n* No apply method\n* No need special versions of `vmap`, `jit`, and friends.\n\nWe will showcase each of the above features by creating a very contrived but complete module that will use everything from parameters, states, and random states:", "_____no_output_____" ] ], [ [ "from typing import Tuple\n\nimport jax.numpy as jnp\nimport numpy as np\n\nimport treex as tx\n\n\nclass NoisyLinear(tx.Module):\n # tree parts are defined by treex annotations\n w: tx.Parameter\n b: tx.Parameter\n rng: tx.Rng # tx.Rng inherits from tx.State\n\n # other annotations are possible but ignored by type\n name: str\n\n def __init__(self, din, dout):\n \n # Initializers only expect RNG key\n self.w = tx.Initializer(lambda k: jax.random.uniform(k, shape=(din, dout)))\n self.b = tx.Initializer(lambda k: jax.random.uniform(k, shape=(dout,)))\n\n # random state is JUST state, we can keep it locally\n self.rng = tx.Initializer(lambda k: k)\n\n def __call__(self, x: np.ndarray) -> np.ndarray:\n assert isinstance(self.rng, jnp.ndarray)\n\n # update state in place\n key, self.rng = jax.random.split(self.rng, 2)\n\n # your typical linear operation\n y = jnp.dot(x, self.w) + self.b\n\n # add noise for fun\n return y + 0.8 * jax.random.normal(key, shape=y.shape)\n\n\nmodel = NoisyLinear(1, 1)\n\nprint(model)", "_____no_output_____" ] ], [ [ "Initialization is straightforward. The only thing you need to do is to call `init` on your module with a random key:", "_____no_output_____" ] ], [ [ "import jax\n\nmodel = model.init(key=42)\nprint(model)", "WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n" ] ], [ [ "Now we will be reusing the previous NoisyLinear model, and we will create an optax optimizer that is used to train the model:", "_____no_output_____" ] ], [ [ "import optax\n\noptimizer = tx.Optimizer(optax.adam(1e-2))\n\nparams = model.filter(tx.Parameter)\noptimizer = optimizer.init(params)\n\nprint(f\"{params=}\")", "_____no_output_____" ] ], [ [ "Notice that we are already splitting the model into `params` and `states` since we only need to pass the `params` to the optimizer. Next, we will create the loss function, it will take the model parts and the data parts and return the loss plus the new states:", "_____no_output_____" ] ], [ [ "from functools import partial\n\n\n@partial(jax.value_and_grad, has_aux=True)\ndef loss_fn(params: NoisyLinear, model: NoisyLinear, x, y):\n # update params into model\n model = model.update(params)\n # apply model\n pred_y = model(x)\n # MSE loss\n loss = jnp.mean((y - pred_y) ** 2)\n # return model with state updates\n return loss, model", "_____no_output_____" ] ], [ [ "Notice that we are merging the `params` and `states` into the complete model since we need everything in place to perform the forward pass. Also, we return the updated states from the model. The above steps are required because JAX functional API requires us to be explicit about state management.\n\n**Note**: inside `loss_fn` (wrapped by `value_and_grad`) module can behave like a regular mutable Python object. However, every time it is treated as a pytree a new reference will be created in `jit`, `grad`, `vmap`, etc. 
It is essential to consider this when using functions like `vmap` inside a module, as JAX will need specific bookkeeping to manage the state correctly.\n\nNext, we will implement the `update` function, it will look indistinguishable from your standard Haiku update, which also separates weights into `params` and `states`: ", "_____no_output_____" ] ], [ [ "@jax.jit\ndef train_step(model: NoisyLinear, optimizer: tx.Optimizer, x, y):\n # select Parameters\n params = model.filter(tx.Parameter)\n\n # call loss_fn to get loss, model state, and gradients\n (loss, model), grads = loss_fn(params, model, x, y)\n\n # apply optax update\n new_params = optimizer.update(grads, params)\n\n # update new_params\n model = model.update(new_params)\n\n return model, optimizer, loss", "_____no_output_____" ] ], [ [ "Before we start training lets get some data:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n\nnp.random.seed(0)\n\n\ndef get_data(dataset_size: int) -> Tuple[np.ndarray, np.ndarray]:\n x = np.random.normal(size=(dataset_size, 1))\n y = 5 * x - 2 + 0.4 * np.random.normal(size=(dataset_size, 1))\n return x, y\n\n\ndef get_batch(\n data: Tuple[np.ndarray, np.ndarray], batch_size: int\n) -> Tuple[np.ndarray, np.ndarray]:\n idx = np.random.choice(len(data[0]), batch_size)\n return jax.tree_map(lambda x: x[idx], data)\n\n\ndata = get_data(1000)\n\nplt.scatter(data[0], data[1])\nplt.show()", "_____no_output_____" ] ], [ [ "Finally, we create a simple training loop that performs a few thousand updates and update `params` and `states` back into a single `model` at the end:", "_____no_output_____" ] ], [ [ "steps = 10_000\n\nfor step in range(steps):\n x, y = get_batch(data, batch_size=32)\n\n model, optimizer, loss = train_step(model, optimizer, x, y)\n\n if step % 1000 == 0:\n print(f\"[{step}] loss = {loss}\")", "[0] loss = 26.22455596923828\n" ] ], [ [ "Now lets generate some test data and see how our model performed:", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nX_test = np.linspace(data[0].min(), data[0].max(), 100)[:, None]\npreds = model(X_test)\n\nplt.scatter(data[0], data[1], label=\"data\", color=\"k\")\nplt.plot(X_test, preds, label=\"prediction\")\nplt.legend()\nplt.show()", "_____no_output_____" ] ], [ [ "We can see that the model has learned the general trend, but because of the `NoisyLinear` modules we have a bit of noise in the predictions.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ecaf946c22cb4174909794dec5874c9cf9bb7954
11,052
ipynb
Jupyter Notebook
notebooks/testing.ipynb
albertorizzardini/unicatt
6aaf8374c59d9a4e06f0e07f61f43b2c740d733b
[ "MIT" ]
null
null
null
notebooks/testing.ipynb
albertorizzardini/unicatt
6aaf8374c59d9a4e06f0e07f61f43b2c740d733b
[ "MIT" ]
null
null
null
notebooks/testing.ipynb
albertorizzardini/unicatt
6aaf8374c59d9a4e06f0e07f61f43b2c740d733b
[ "MIT" ]
null
null
null
21.131931
139
0.513029
[ [ [ "import numpy as np\nimport pandas as pd\nimport string", "_____no_output_____" ], [ "x = { 'tag':[ string.ascii_lowercase[np.random.randint(0,5)] for i in range(10) ], \\\n 'col1':np.arange(10), 'col2':np.random.randint(0,100,10)}\nx", "_____no_output_____" ], [ "df = pd.DataFrame.from_dict(x);\ndf", "_____no_output_____" ], [ "df.set_index('tag', inplace=True)\n", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.col1.groupby('tag').count()", "_____no_output_____" ], [ "df.groupby('tag').count()", "_____no_output_____" ], [ "df.iloc[0,1]=np.nan\ndf.iloc[0,1]", "_____no_output_____" ], [ "df.groupby('tag').count()", "_____no_output_____" ], [ "df.to_numpy()\n", "_____no_output_____" ], [ "class Dice:\n def __init__(self, nfaces=6):\n self.number_of_faces = nfaces\n self.number_of_rolls = 0\n self.roll()\n\n def roll(self):\n self.number_of_rolls += 1\n self.myvalue = np.random.randint(1,self.number_of_faces+1)\n return self.myvalue\n\n def value(self):\n return self.myvalue\n\n def numberOfRolls(self):\n return self.number_of_rolls\n\n def numberOfFaces(self):\n return number_of_faces\n\n def __str__(self):\n return str(self.myvalue)\n\n def __add__(self, other):\n return self.myvalue+other.myvalue\n\n def __repr__(self):\n return self.__str__()\n\nd1 = Dice()\nd2 = Dice()\nx=d1+d2\nprint(d1, d2, d1.numberOfRolls())\nd1\n", "_____no_output_____" ], [ "x = pd.Series([ d1.roll() for i in range(100)])\nx.value_counts()", "_____no_output_____" ], [ "x = { 'a':1, 'b':2, 'c':3}\nx['d']=4\nx", "_____no_output_____" ], [ "x = {'a':[1,2,3,4], 'b':[4,5,6,7]}", "_____no_output_____" ], [ "df = pd.DataFrame(x)\ndf", "_____no_output_____" ], [ "df.iloc[2]", "_____no_output_____" ], [ "df = pd.read_excel(\"http://www.dmf.unicatt.it/~tessera/sales-funnel.xlsx\")\ndf.head()", "_____no_output_____" ], [ "df[\"Status\"] = df[\"Status\"].astype(\"category\")", "_____no_output_____" ], [ "df2=df.set_index('Name')\ndf2.loc['Trantow-Barrows']", "_____no_output_____" ], [ "del(df2)\npd.pivot_table(df,index=[\"Name\"],values=[\"Quantity\",\"Price\"],aggfunc=[np.sum, np.mean])", "_____no_output_____" ], [ "pd.pivot_table(df,index=[\"Manager\", \"Rep\"],values=[\"Quantity\",\"Price\"],aggfunc=np.sum)", "_____no_output_____" ], [ "pd.pivot_table(df,index=[\"Manager\", \"Rep\"],values=[\"Quantity\",\"Price\"],columns=[\"Product\"] , aggfunc=np.sum , fill_value=0)", "_____no_output_____" ], [ "df.query('Manager == [\"Debra Henley\"]')", "_____no_output_____" ], [ "df['Price'].plot()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Load the diabetes dataset\ndiabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecaf99346fa1a39b3c1b58f963f0f1a7f3bdb72e
6,554
ipynb
Jupyter Notebook
JupyterProcessing/.ipynb_checkpoints/DataPrep-checkpoint.ipynb
PaulLefeuvre/HousePrices
0a903ffb56b502a8c3589df308bd7ba0c23c59aa
[ "MIT" ]
null
null
null
JupyterProcessing/.ipynb_checkpoints/DataPrep-checkpoint.ipynb
PaulLefeuvre/HousePrices
0a903ffb56b502a8c3589df308bd7ba0c23c59aa
[ "MIT" ]
null
null
null
JupyterProcessing/.ipynb_checkpoints/DataPrep-checkpoint.ipynb
PaulLefeuvre/HousePrices
0a903ffb56b502a8c3589df308bd7ba0c23c59aa
[ "MIT" ]
null
null
null
34.314136
120
0.574153
[ [ [ "# Get all the necessary libraries and data\nfrom __future__ import print_function\nimport os\n\nimport pandas as pd\nimport seaborn as sb\nimport numpy as np\n\n# The labelled training data\ndata = pd.read_csv(\"csv-download/train.csv\")\ndata = data.drop('Id', axis=1) # Drop the id axis, which brings nothing to the machine learning system\n\n# The unlabelled test data for competition submission\nsub_data = pd.read_csv(\"csv-download/test.csv\")\nsub_data = sub_data.drop('Id', axis=1)", "_____no_output_____" ], [ "# Fill all unknown values with 0 or \"Unknown\" so it can be properly one-hot encoded\nfor col in data.columns:\n if data[col].dtype == np.object:\n data[col] = data[col].fillna(\"Unknown\")\n else:\n data[col] = data[col].fillna(0)\n\n# Same for submission data\nfor col in sub_data.columns:\n if sub_data[col].dtype == np.object:\n sub_data[col] = sub_data[col].fillna(\"Unknown\")\n else:\n sub_data[col] = sub_data[col].fillna(0)", "_____no_output_____" ], [ "# Find all data columns that need to be one-hot encoded\nmask = data.dtypes == np.object\nmask['MSSubClass'] = True # This one is all numbers but still uses types - needs to be one-hot encoded as well\ncategorical_cols = data.columns[mask]\n\nnum_ohc_cols = (data[categorical_cols].apply(lambda x: x.nunique()).sort_values(ascending=False))\n\nsmall_num_ohc_cols = num_ohc_cols.loc[num_ohc_cols>1] # Don't one-hot encode if there's only one type\n\nsmall_num_ohc_cols -= 1\n\nsmall_num_ohc_cols.sum()\n\n# This mask will be used for both data and sub_data to ensure they are both similarly one-hot encoded", "_____no_output_____" ], [ "from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n\ndata_ohc = data.copy()\nsub_data_ohc = sub_data.copy()\n\nle = LabelEncoder()\nohc = OneHotEncoder()\n\nfor col in num_ohc_cols.index:\n # Integer encode the string categories\n le.fit(np.concatenate((data_ohc[col], sub_data_ohc[col]), axis=None))\n dat = le.transform(data_ohc[col]).astype(np.int)\n sub_dat = le.transform(sub_data_ohc[col]).astype(np.int)\n \n # Remove the original column from the dataframe\n data_ohc = data_ohc.drop(col, axis=1)\n sub_data_ohc = sub_data_ohc.drop(col, axis=1)\n\n # One hot encode the data--this returns a sparse array\n ohc.fit(np.concatenate((dat, sub_dat),axis=None).reshape(-1,1))\n new_dat = ohc.transform(dat.reshape(-1,1))\n new_sub_dat = ohc.transform(sub_dat.reshape(-1,1))\n \n # Create unique column names\n n_cols = new_dat.shape[1]\n col_names = ['_'.join([col, str(le.inverse_transform([x])[0])]) for x in range(n_cols)]\n n_sub_cols = new_sub_dat.shape[1]\n sub_col_names = ['_'.join([col, str(le.inverse_transform([x])[0])]) for x in range(n_sub_cols)]\n\n # Create the new dataframe\n new_df = pd.DataFrame(new_dat.toarray(), \n index=data_ohc.index, \n columns=col_names)\n new_sub_df = pd.DataFrame(new_sub_dat.toarray(), \n index=sub_data_ohc.index, \n columns=sub_col_names)\n\n # Append the new data to the dataframe\n data_ohc = pd.concat([data_ohc, new_df], axis=1)\n sub_data_ohc = pd.concat([sub_data_ohc, new_sub_df], axis=1)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n\n# Put the SalePrice column at the end for easy finding in Octave\nsalePrice = data_ohc.pop('SalePrice')\n\nX_train, X_test, y_train, y_test = train_test_split(data_ohc, salePrice, train_size=0.8, random_state=42)\nX_train, X_cv, y_train, y_cv = train_test_split(X_train, y_train, train_size=0.75, random_state=42)", "_____no_output_____" ], [ "# Export it all to .txt 
files\nnp.savetxt('txt-data/X.txt', X_train.values, fmt='%d')\nnp.savetxt('txt-data/Xtest.txt', X_test.values, fmt='%d')\nnp.savetxt('txt-data/Xcv.txt', X_cv.values, fmt='%d')\nnp.savetxt('txt-data/y.txt', y_train.values, fmt='%d')\nnp.savetxt('txt-data/ytest.txt', y_test.values, fmt='%d')\nnp.savetxt('txt-data/ycv.txt', y_cv.values, fmt='%d')\nnp.savetxt('txt-data/yfinal.txt', sub_data_ohc.values, fmt='%d')\n\n# In order to export to csv files\n# X_train.to_csv('processed_Xtrain.csv', index=False)\n# X_test.to_csv('processed_Xtest.csv', index=False)\n# X_cv.to_csv('processed_Xcv.csv', index=False)\n# y_train.to_csv('processed_ytrain.csv', index=False)\n# y_test.to_csv('processed_ytest.csv', index=False)\n# y_cv.to_csv('processed_ycv.csv', index=False)\n# sub_data_ohc.to_csv('processed_sub_test.csv', index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ecaf9f9a8756ecbcccf269462fd2fa94e2f0ff48
1,752
ipynb
Jupyter Notebook
python_materials/client.ipynb
marcustylerwong/CS488S21
b9d69a5d426d69d9f4e1c43f96e7ee6bedf469ed
[ "MIT" ]
2
2022-01-25T13:45:30.000Z
2022-02-07T18:21:20.000Z
python_materials/client.ipynb
marcustylerwong/CS488S21
b9d69a5d426d69d9f4e1c43f96e7ee6bedf469ed
[ "MIT" ]
null
null
null
python_materials/client.ipynb
marcustylerwong/CS488S21
b9d69a5d426d69d9f4e1c43f96e7ee6bedf469ed
[ "MIT" ]
2
2022-02-03T22:33:45.000Z
2022-03-03T05:18:50.000Z
23.36
92
0.540525
[ [ [ "# Import libraries\nimport socket\n\n# Create server\nServerName = 'localhost'\nServerPort = 1200\nServerAddress = (ServerName, ServerPort)\n\n# Create client socket\nclientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # IPv4, TCP socket\n\n# Ask for connection to server\nclientSocket.connect(ServerAddress)\n\n# three-way handshake is performed\n# a TCP connection is established between the client and server.\nwhile 1:\n \n # Create message\n message = input('Enter the lower case message: ')\n # Send message\n clientSocket.send(message.encode('ascii'))\n\n # Receive from server\n modified_sent = clientSocket.recvfrom(2048)\n\n # Print received message\n print(\"From server:\", modified_sent)\n \n continue\n\n# Close connection\nclientSocket.close()\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ecafb6f155ab7e54a652dde2e3f56d9928910ecf
82,314
ipynb
Jupyter Notebook
notebooks/test/interp_few_iter.ipynb
Yu-Group/adaptive-wavelets
e67f726e741d83c94c3aee3ed97a772db4ce0bb3
[ "MIT" ]
22
2021-02-13T05:22:13.000Z
2022-03-07T09:55:55.000Z
notebooks/test/interp_few_iter.ipynb
Yu-Group/adaptive-wavelets
e67f726e741d83c94c3aee3ed97a772db4ce0bb3
[ "MIT" ]
null
null
null
notebooks/test/interp_few_iter.ipynb
Yu-Group/adaptive-wavelets
e67f726e741d83c94c3aee3ed97a772db4ce0bb3
[ "MIT" ]
5
2021-12-11T13:43:19.000Z
2022-03-19T07:07:37.000Z
242.1
30,964
0.902544
[ [ [ "%load_ext autoreload\n%autoreload 2\n%matplotlib inline\nimport numpy as np\nimport torch\nimport random\nimport sys\nfrom copy import deepcopy\nimport matplotlib.pyplot as plt\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nfrom ex_biology import p\nsys.path.append('preprocessing')\nimport data\nimport neural_networks\n\n# adaptive-wavelets modules\nsys.path.append('../..')\nfrom src import adaptive_wavelets\nfrom src.dsets.biology import dset\nfrom src.visualize import cshow, plot_1dfilts, plot_1dreconstruct, plot_wavefun\n\nsys.path.append('../../lib/trim')\nfrom trim import TrimModel", "_____no_output_____" ], [ "# parameters for initialization\np.wave = 'db5'\np.J = 4\np.mode = 'zero'\np.init_factor = 1\np.noise_factor = 0 \np.const_factor = 0\np.num_epochs = 200\np.lamL1wave = 0.0001\np.lamL1attr = 0.2", "_____no_output_____" ], [ "# load data and model\ntrain_loader, test_loader = dset.get_dataloader(p.data_path, \n batch_size=p.batch_size,\n is_continuous=p.is_continuous) \n\nmodel = dset.load_pretrained_model(p.model_path, device=device) \n\n# prepare model\nrandom.seed(p.seed)\nnp.random.seed(p.seed)\ntorch.manual_seed(p.seed) \n\nwt = adaptive_wavelets.DWT1d(wave=p.wave, mode=p.mode, J=p.J, \n init_factor=p.init_factor, \n noise_factor=p.noise_factor,\n const_factor=p.const_factor).to(device)\nwt.train()\n\n# train\nparams = list(wt.parameters())\noptimizer = torch.optim.Adam(params, lr=p.lr)\nloss_f = adaptive_wavelets.get_loss_f(lamlSum=p.lamlSum, lamhSum=p.lamhSum, lamL2norm=p.lamL2norm, \n lamCMF=p.lamCMF, lamConv=p.lamConv, lamL1wave=p.lamL1wave, lamL1attr=p.lamL1attr)\n\nmodel = model.to(device)\nwt_inverse = wt.inverse \nmt = TrimModel(model, wt_inverse, use_residuals=True) \nattributer = adaptive_wavelets.Attributer(mt, attr_methods=p.attr_methods, device=device)\ntarget = p.target\nn_print = 5", "_____no_output_____" ], [ "# store training losses\ntrain_losses = []\n\nfor epoch in range(p.num_epochs):\n epoch_loss = 0.\n for batch_idx, (data, y) in enumerate(train_loader):\n data = data.to(device)\n y = y.to(device)\n \n # zero grad\n optimizer.zero_grad()\n \n # transform\n data_t = wt(data)\n\n # reconstruction\n recon_data = wt_inverse(data_t) \n \n # TRIM score\n with torch.backends.cudnn.flags(enabled=False):\n attributions = attributer(data_t, target=p.target, additional_forward_args=deepcopy(data)) if epoch % 10 == 0 else None \n \n # loss\n loss = loss_f(wt, data, recon_data, data_t, attributions) \n \n # backward\n loss.backward()\n \n # update step\n optimizer.step() \n \n iter_loss = loss.item() \n epoch_loss += iter_loss\n \n if epoch % n_print == 0: \n print('\\rTrain Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), iter_loss), end='') \n\n mean_epoch_loss = epoch_loss / (batch_idx + 1)\n \n if epoch % n_print == 0:\n print('\\n====> Epoch: {} Average train loss: {:.4f}'.format(epoch, mean_epoch_loss)) \n \n train_losses.append(mean_epoch_loss)\n ", "Train Epoch: 0 [1044/2936 (97%)]\tLoss: 0.094658\n====> Epoch: 0 Average train loss: 0.0929\nTrain Epoch: 5 [1044/2936 (97%)]\tLoss: 0.000460\n====> Epoch: 5 Average train loss: 0.0004\nTrain Epoch: 10 [1044/2936 (97%)]\tLoss: 0.080808\n====> Epoch: 10 Average train loss: 0.0870\nTrain Epoch: 15 [1044/2936 (97%)]\tLoss: 0.000445\n====> Epoch: 15 Average train loss: 0.0004\nTrain Epoch: 20 [1044/2936 (97%)]\tLoss: 0.094864\n====> Epoch: 20 Average train loss: 0.0872\nTrain Epoch: 25 [1044/2936 (97%)]\tLoss: 0.000425\n====> Epoch: 25 Average train loss: 0.0004\nTrain Epoch: 30 [1044/2936 (97%)]\tLoss: 0.081746\n====> Epoch: 30 Average train loss: 0.0867\nTrain Epoch: 35 [1044/2936 (97%)]\tLoss: 0.000430\n====> Epoch: 35 Average train loss: 0.0004\nTrain Epoch: 40 [1044/2936 (97%)]\tLoss: 0.088773\n====> Epoch: 40 Average train loss: 0.0867\nTrain Epoch: 45 [1044/2936 (97%)]\tLoss: 0.000417\n====> Epoch: 45 Average train loss: 0.0004\nTrain Epoch: 50 [1044/2936 (97%)]\tLoss: 0.076556\n====> Epoch: 50 Average train loss: 0.0862\nTrain Epoch: 55 [1044/2936 (97%)]\tLoss: 0.000438\n====> Epoch: 55 Average train loss: 0.0004\nTrain Epoch: 60 [1044/2936 (97%)]\tLoss: 0.089264\n====> Epoch: 60 Average train loss: 0.0863\nTrain Epoch: 65 [1044/2936 (97%)]\tLoss: 0.000444\n====> Epoch: 65 Average train loss: 0.0004\nTrain Epoch: 70 [1044/2936 (97%)]\tLoss: 0.076758\n====> Epoch: 70 Average train loss: 0.0858\nTrain Epoch: 75 [1044/2936 (97%)]\tLoss: 0.000470\n====> Epoch: 75 Average train loss: 0.0004\nTrain Epoch: 80 [1044/2936 (97%)]\tLoss: 0.091963\n====> Epoch: 80 Average train loss: 0.0858\nTrain Epoch: 85 [1044/2936 (97%)]\tLoss: 0.000439\n====> Epoch: 85 Average train loss: 0.0004\nTrain Epoch: 90 [1044/2936 (97%)]\tLoss: 0.080330\n====> Epoch: 90 Average train loss: 0.0852\nTrain Epoch: 95 [1044/2936 (97%)]\tLoss: 0.000411\n====> Epoch: 95 Average train loss: 0.0004\nTrain Epoch: 100 [1044/2936 (97%)]\tLoss: 0.080242\n====> Epoch: 100 Average train loss: 0.0851\nTrain Epoch: 105 [1044/2936 (97%)]\tLoss: 0.000420\n====> Epoch: 105 Average train loss: 0.0004\nTrain Epoch: 110 [1044/2936 (97%)]\tLoss: 0.092203\n====> Epoch: 110 Average train loss: 0.0855\nTrain Epoch: 115 [1044/2936 (97%)]\tLoss: 0.000461\n====> Epoch: 115 Average train loss: 0.0004\nTrain Epoch: 120 [1044/2936 (97%)]\tLoss: 0.080716\n====> Epoch: 120 Average train loss: 0.0846\nTrain Epoch: 125 [1044/2936 (97%)]\tLoss: 0.000445\n====> Epoch: 125 Average train loss: 0.0004\nTrain Epoch: 130 [1044/2936 (97%)]\tLoss: 0.088356\n====> Epoch: 130 Average train loss: 0.0846\nTrain Epoch: 135 [1044/2936 (97%)]\tLoss: 0.000436\n====> Epoch: 135 Average train loss: 0.0004\nTrain Epoch: 140 [1044/2936 (97%)]\tLoss: 0.091272\n====> Epoch: 140 Average train loss: 0.0845\nTrain Epoch: 145 [1044/2936 (97%)]\tLoss: 0.000436\n====> Epoch: 145 Average train loss: 0.0004\nTrain Epoch: 150 [1044/2936 (97%)]\tLoss: 0.078470\n====> Epoch: 150 Average train loss: 0.0846\nTrain Epoch: 155 [1044/2936 (97%)]\tLoss: 0.000408\n====> Epoch: 155 Average train loss: 0.0004\nTrain Epoch: 160 [1044/2936 (97%)]\tLoss: 0.088480\n====> Epoch: 160 Average train loss: 0.0841\nTrain Epoch: 165 [1044/2936 (97%)]\tLoss: 0.000451\n====> Epoch: 165 Average train loss: 0.0004\nTrain Epoch: 170 [1044/2936 
(97%)]\tLoss: 0.078403\n====> Epoch: 170 Average train loss: 0.0836\nTrain Epoch: 175 [1044/2936 (97%)]\tLoss: 0.000402\n====> Epoch: 175 Average train loss: 0.0004\nTrain Epoch: 180 [1044/2936 (97%)]\tLoss: 0.083068\n====> Epoch: 180 Average train loss: 0.0835\nTrain Epoch: 185 [1044/2936 (97%)]\tLoss: 0.000404\n====> Epoch: 185 Average train loss: 0.0004\nTrain Epoch: 190 [1044/2936 (97%)]\tLoss: 0.080239\n====> Epoch: 190 Average train loss: 0.0836\nTrain Epoch: 195 [1044/2936 (97%)]\tLoss: 0.000442\n====> Epoch: 195 Average train loss: 0.0004\n" ], [ "plt.plot(np.log(train_losses))\nplt.xlabel(\"epochs\")\nplt.ylabel(\"log train loss\")\nplt.title('Log-train loss vs epochs')\nplt.show()", "_____no_output_____" ], [ "data = iter(test_loader).next()[0].to(device)\ndata_t = wt(data)\nrecon = wt.inverse(data_t)\n\nprint(\"Reconstruction error={:.5f}\".format(torch.norm(recon - data)**2/data.size(0)))\n\n# get 2d wavelet filters\nfilt = adaptive_wavelets.get_1dfilts(wt)\nphi, psi, x = adaptive_wavelets.get_wavefun(wt)\n\nplot_1dfilts(filt, is_title=True, figsize=(2,2))\nplot_wavefun((phi, psi, x), is_title=True, figsize=(3,1))", "Reconstruction error=0.00000\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ecafcac58db462c1d396af81d7dd03f519e1c666
395,662
ipynb
Jupyter Notebook
Assignment_Day_29.ipynb
Nishadansh47/AIML
5b1f0a3b8768c5596197d8058472d5c971ed0812
[ "MIT" ]
null
null
null
Assignment_Day_29.ipynb
Nishadansh47/AIML
5b1f0a3b8768c5596197d8058472d5c971ed0812
[ "MIT" ]
null
null
null
Assignment_Day_29.ipynb
Nishadansh47/AIML
5b1f0a3b8768c5596197d8058472d5c971ed0812
[ "MIT" ]
null
null
null
21.721768
93
0.336459
[ [ [ "# Question 1:\nTry to create the backpropagation and feedforward in python by creating function", "_____no_output_____" ] ], [ [ "import numpy as np\n\nxAll = np.array(([2, 9], [1, 5], [3, 6], [5, 10]), dtype=float) # input data\ny = np.array(([92], [86], [89]), dtype=float) # output\n\n# scale units\nxAll = xAll / np.amax(xAll, axis=0) # scaling input data\ny = y / 100 # scaling output data (max test score is 100)\n\n# split data\nX = np.split(xAll, [3])[0] # training data\nxPredicted = np.split(xAll, [3])[1] # testing data\n\ny = np.array(([92], [86], [89]), dtype=float)\ny = y / 100 # max test score is 100\n\n\nclass Neural_Network(object):\n def __init__(self):\n #parameters\n self.inputSize = 2\n self.outputSize = 1\n self.hiddenSize = 3\n\n #weights\n self.W1 = np.random.randn(\n self.inputSize,\n self.hiddenSize) # (3x2) weight matrix from input to hidden layer\n self.W2 = np.random.randn(\n self.hiddenSize,\n self.outputSize) # (3x1) weight matrix from hidden to output layer\n\n def forward(self, X):\n #forward propagation through our network\n self.z = np.dot(\n X,\n self.W1) # dot product of X (input) and first set of 3x2 weights\n self.z2 = self.sigmoid(self.z) # activation function\n self.z3 = np.dot(self.z2, self.W2\n ) # dot product of hidden layer (z2) and second set of 3x1 weights\n o = self.sigmoid(self.z3) # final activation function\n return o\n\n def sigmoid(self, s):\n # activation function\n return 1 / (1 + np.exp(-s))\n\n def sigmoidPrime(self, s):\n #derivative of sigmoid\n return s * (1 - s)\n\n def backward(self, X, y, o):\n # backward propagate through the network\n self.o_error = y - o # error in output\n self.o_delta = self.o_error * self.sigmoidPrime(\n o) # applying derivative of sigmoid to error\n\n self.z2_error = self.o_delta.dot(\n self.W2.T\n ) # z2 error: how much our hidden layer weights contributed to output error\n self.z2_delta = self.z2_error * self.sigmoidPrime(\n self.z2) # applying derivative of sigmoid to z2 error\n self.W1 += X.T.dot(\n self.z2_delta) # adjusting first set (input --> hidden) weights\n self.W2 += self.z2.T.dot(\n self.o_delta) # adjusting second set (hidden --> output) weights\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def saveWeights(self):\n np.savetxt(\"w1.txt\", self.W1, fmt=\"%s\")\n np.savetxt(\"w2.txt\", self.W2, fmt=\"%s\")\n\n def predict(self):\n print(\"Predicted data based on trained weights: \")\n print(\"Input (scaled): \\n\" + str(xPredicted))\n print(\"Output: \\n\" + str(self.forward(xPredicted)))\n\n\nNN = Neural_Network()\nfor i in range(1000): # trains the NN 1,000 times\n print(\"# \" + str(i) + \"\\n\")\n print(\"Input (scaled): \\n\" + str(X))\n print(\"Actual Output: \\n\" + str(y))\n print(\"Predicted Output: \\n\" + str(NN.forward(X)))\n print(\"Loss: \\n\" +\n str(np.mean(np.square(y - NN.forward(X))))) # mean sum squared loss\n print(\"\\n\")\n NN.train(X, y)\n\nNN.saveWeights()\nNN.predict()", "# 0\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.09443174]\n [0.12480145]\n [0.13866122]]\nLoss: \n0.5955299395644976\n\n\n# 1\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.11352662]\n [0.14605729]\n [0.16271039]]\nLoss: \n0.5630212302559169\n\n\n# 2\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.13763666]\n [0.17206389]\n 
[0.19164385]]\nLoss: \n0.5243499324481039\n\n\n# 3\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.16751648]\n [0.20336313]\n [0.22560279]]\nLoss: \n0.4796090239257634\n\n\n# 4\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.20333251]\n [0.23998744]\n [0.26400406]]\nLoss: \n0.4299662620453679\n\n\n# 5\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.24424684]\n [0.28117017]\n [0.3053851 ]]\nLoss: \n0.3778202958822526\n\n\n# 6\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.28833922]\n [0.32530478]\n [0.34764504]]\nLoss: \n0.3263477399040891\n\n\n# 7\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.33312054]\n [0.37031185]\n [0.38866778]]\nLoss: \n0.2785186604463027\n\n\n# 8\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.37636771]\n [0.41424067]\n [0.42693691]]\nLoss: \n0.23622162411670286\n\n\n# 9\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.41668236]\n [0.45571253]\n [0.4617542 ]]\nLoss: \n0.20005715591035297\n\n\n# 10\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.45351833]\n [0.49400752]\n [0.4930676 ]]\nLoss: \n0.1697036599618753\n\n\n# 11\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.48689892]\n [0.5289084 ]\n [0.52117147]]\nLoss: \n0.14441089471696442\n\n\n# 12\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.51711638]\n [0.56049432]\n [0.54647342]]\nLoss: \n0.12334312412988464\n\n\n# 13\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.54453681]\n [0.58898514]\n [0.56936974]]\nLoss: \n0.10574180931312771\n\n\n# 14\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.56950627]\n [0.61465028]\n [0.59019823]]\nLoss: \n0.09097448011018017\n\n\n# 15\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.59231902]\n [0.63776347]\n [0.60923112]]\nLoss: \n0.07853168858416393\n\n\n# 16\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.61321532]\n [0.65858358]\n [0.62668384]]\nLoss: \n0.06800693734276475\n\n\n# 17\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.63239052]\n [0.67734817]\n [0.64272781]]\nLoss: \n0.0590748138964852\n\n\n# 18\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.65000607]\n [0.69427264]\n [0.6575023 ]]\nLoss: \n0.051472488600327375\n\n\n# 19\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.66619927]\n [0.70955131]\n [0.67112375]]\nLoss: 
\n0.044985476491755855\n\n\n# 20\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.68109046]\n [0.72335919]\n [0.68369251]]\nLoss: \n0.0394370871252625\n\n\n# 21\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.69478778]\n [0.73585354]\n [0.69529737]]\nLoss: \n0.034680666431667194\n\n\n# 22\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.70739028]\n [0.74717547]\n [0.70601845]]\nLoss: \n0.03059382731641802\n\n\n# 23\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.71898951]\n [0.75745126]\n [0.71592897]]\nLoss: \n0.027074062291168068\n\n\n# 24\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.72967052]\n [0.76679365]\n [0.72509634]]\nLoss: \n0.02403531727789511\n\n\n# 25\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.73951227]\n [0.77530306]\n [0.73358276]]\nLoss: \n0.021405248070710706\n\n\n# 26\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.74858789]\n [0.78306869]\n [0.74144563]]\nLoss: \n0.019122978929561923\n\n\n# 27\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.75696487]\n [0.79016967]\n [0.74873781]]\nLoss: \n0.017137245795185985\n\n\n# 28\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.76470519]\n [0.79667607]\n [0.75550785]]\nLoss: \n0.015404845333453774\n\n\n# 29\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.77186561]\n [0.80264988]\n [0.76180027]]\nLoss: \n0.01388933422161527\n\n\n# 30\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.77849789]\n [0.80814592]\n [0.76765574]]\nLoss: \n0.01255993700842313\n\n\n# 31\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.78464908]\n [0.81321266]\n [0.77311132]]\nLoss: \n0.011390629525396668\n\n\n# 32\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.79036186]\n [0.81789298]\n [0.77820079]]\nLoss: \n0.010359370589144067\n\n\n# 33\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.79567486]\n [0.82222481]\n [0.78295481]]\nLoss: \n0.009447458946148528\n\n\n# 34\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.80062302]\n [0.82624176]\n [0.78740124]]\nLoss: \n0.008638995765029007\n\n\n# 35\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.80523789]\n [0.82997362]\n [0.79156534]]\nLoss: \n0.00792043581411342\n\n\n# 36\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.80954796]\n [0.83344687]\n [0.79547002]]\nLoss: 
\n0.007280212927524764\n\n\n# 37\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.81357894]\n [0.83668504]\n [0.79913607]]\nLoss: \n0.006708427527915448\n\n\n# 38\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.81735405]\n [0.83970909]\n [0.80258235]]\nLoss: \n0.006196585870162735\n\n\n# 39\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.82089426]\n [0.84253774]\n [0.80582596]]\nLoss: \n0.00573738231821208\n\n\n# 40\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.82421853]\n [0.84518771]\n [0.80888244]]\nLoss: \n0.005324517385917368\n\n\n# 41\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.827344 ]\n [0.84767397]\n [0.81176592]]\nLoss: \n0.0049525454828375426\n\n\n# 42\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.83028621]\n [0.85000996]\n [0.81448923]]\nLoss: \n0.004616747329672384\n\n\n# 43\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.83305921]\n [0.85220777]\n [0.81706407]]\nLoss: \n0.0043130228680174874\n\n\n# 44\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.83567579]\n [0.85427828]\n [0.81950111]]\nLoss: \n0.004037801207444598\n\n\n# 45\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.83814755]\n [0.8562313 ]\n [0.82181006]]\nLoss: \n0.0037879647501902793\n\n\n# 46\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.84048504]\n [0.85807571]\n [0.82399982]]\nLoss: \n0.003560785128639597\n\n\n# 47\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.84269787]\n [0.85981956]\n [0.82607853]]\nLoss: \n0.003353868999824853\n\n\n# 48\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.84479482]\n [0.86147014]\n [0.82805361]]\nLoss: \n0.0031651120786310733\n\n\n# 49\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.84678389]\n [0.86303407]\n [0.82993191]]\nLoss: \n0.0029926600695271994\n\n\n# 50\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.84867242]\n [0.86451741]\n [0.83171969]]\nLoss: \n0.002834875385746447\n\n\n# 51\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.8504671 ]\n [0.86592565]\n [0.83342272]]\nLoss: \n0.0026903087335526216\n\n\n# 52\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.85217411]\n [0.86726384]\n [0.8350463 ]]\nLoss: \n0.0025576747947321913\n\n\n# 53\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.85379908]\n [0.86853658]\n [0.83659533]]\nLoss: 
\n0.00243583136867615\n\n\n# 54\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.85534721]\n [0.8697481 ]\n [0.83807432]]\nLoss: \n0.0023237614412514236\n\n\n# 55\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.8568233 ]\n [0.87090226]\n [0.83948744]]\nLoss: \n0.002220557735121893\n\n\n# 56\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.85823175]\n [0.87200265]\n [0.84083856]]\nLoss: \n0.0021254093685551373\n\n\n# 57\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.85957663]\n [0.87305254]\n [0.84213124]]\nLoss: \n0.002037590309737256\n\n\n# 58\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86086171]\n [0.87405497]\n [0.8433688 ]]\nLoss: \n0.0019564493634172493\n\n\n# 59\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86209048]\n [0.87501274]\n [0.84455432]]\nLoss: \n0.0018814014681186552\n\n\n# 60\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86326617]\n [0.87592842]\n [0.84569066]]\nLoss: \n0.0018119201166632527\n\n\n# 61\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86439176]\n [0.87680442]\n [0.84678048]]\nLoss: \n0.0017475307415568565\n\n\n# 62\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86547005]\n [0.87764297]\n [0.84782626]]\nLoss: \n0.0016878049308803836\n\n\n# 63\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86650361]\n [0.87844612]\n [0.84883031]]\nLoss: \n0.0016323553605199206\n\n\n# 64\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86749487]\n [0.87921579]\n [0.84979479]]\nLoss: \n0.0015808313455246154\n\n\n# 65\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86844606]\n [0.87995378]\n [0.85072173]]\nLoss: \n0.0015329149276471137\n\n\n# 66\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.86935927]\n [0.88066174]\n [0.851613 ]]\nLoss: \n0.0014883174281493366\n\n\n# 67\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87023648]\n [0.88134123]\n [0.85247038]]\nLoss: \n0.00144677640511759\n\n\n# 68\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87107949]\n [0.8819937 ]\n [0.85329551]]\nLoss: \n0.00140805296313278\n\n\n# 69\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87189002]\n [0.88262051]\n [0.85408996]]\nLoss: \n0.0013719293704372345\n\n\n# 70\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87266969]\n [0.88322292]\n [0.85485518]]\nLoss: 
\n0.0013382069449404544\n\n\n# 71\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87341998]\n [0.88380213]\n [0.85559255]]\nLoss: \n0.0013067041756862291\n\n\n# 72\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87414231]\n [0.88435925]\n [0.85630335]]\nLoss: \n0.0012772550509082394\n\n\n# 73\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.874838 ]\n [0.88489532]\n [0.8569888 ]]\nLoss: \n0.0012497075676527703\n\n\n# 74\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.8755083 ]\n [0.88541133]\n [0.85765003]]\nLoss: \n0.001223922401244853\n\n\n# 75\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87615439]\n [0.88590821]\n [0.85828815]]\nLoss: \n0.001199771715704556\n\n\n# 76\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87677735]\n [0.88638683]\n [0.85890414]]\nLoss: \n0.0011771380986525571\n\n\n# 77\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87737825]\n [0.88684801]\n [0.859499 ]]\nLoss: \n0.0011559136063394324\n\n\n# 78\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87795804]\n [0.88729252]\n [0.86007361]]\nLoss: \n0.0011359989062402673\n\n\n# 79\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87851767]\n [0.88772109]\n [0.86062884]]\nLoss: \n0.0011173025062183241\n\n\n# 80\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87905801]\n [0.88813441]\n [0.8611655 ]]\nLoss: \n0.001099740060613587\n\n\n# 81\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.87957988]\n [0.88853314]\n [0.86168437]]\nLoss: \n0.0010832337447848294\n\n\n# 82\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88008407]\n [0.88891789]\n [0.86218617]]\nLoss: \n0.0010677116906520508\n\n\n# 83\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88057131]\n [0.88928924]\n [0.86267161]]\nLoss: \n0.0010531074766726872\n\n\n# 84\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88104232]\n [0.88964776]\n [0.86314132]]\nLoss: \n0.0010393596664568176\n\n\n# 85\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88149775]\n [0.88999395]\n [0.86359595]]\nLoss: \n0.0010264113909008565\n\n\n# 86\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88193824]\n [0.89032833]\n [0.86403609]]\nLoss: \n0.0010142099693082043\n\n\n# 87\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88236438]\n [0.89065135]\n [0.86446229]]\nLoss: 
\n0.0010027065654811763\n\n\n# 88\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88277675]\n [0.89096348]\n [0.86487511]]\nLoss: \n0.0009918558752206924\n\n\n# 89\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88317589]\n [0.89126513]\n [0.86527504]]\nLoss: \n0.000981615842067268\n\n\n# 90\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88356231]\n [0.89155672]\n [0.86566258]]\nLoss: \n0.0009719473984660332\n\n\n# 91\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.8839365 ]\n [0.89183863]\n [0.86603818]]\nLoss: \n0.0009628142298457747\n\n\n# 92\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88429894]\n [0.89211122]\n [0.8664023 ]]\nLoss: \n0.0009541825593732649\n\n\n# 93\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88465005]\n [0.89237485]\n [0.86675536]]\nLoss: \n0.0009460209513832016\n\n\n# 94\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88499027]\n [0.89262985]\n [0.86709776]]\nLoss: \n0.0009383001316959067\n\n\n# 95\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88532001]\n [0.89287653]\n [0.86742988]]\nLoss: \n0.0009309928232221085\n\n\n# 96\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88563964]\n [0.89311521]\n [0.86775209]]\nLoss: \n0.000924073595420098\n\n\n# 97\n\nInput (scaled): \n[[0.4 0.9]\n [0.2 0.5]\n [0.6 0.6]]\nActual Output: \n[[0.92]\n [0.86]\n [0.89]]\nPredicted Output: \n[[0.88594954]\n [0.89334617]\n [0.86806475]]\nLoss: \n0.0009175187263180724\n\n\n# 98\n\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
ecafcd27ee5b862d4ca4f27b54d0142ac7d17f31
162,707
ipynb
Jupyter Notebook
molmap/feature/pdb/pdb_represent.ipynb
riversdark/bidd-molmap
7e3325433e2f29c189161859c63398574af6572b
[ "MIT" ]
75
2020-07-07T01:18:30.000Z
2022-03-25T13:40:19.000Z
molmap/feature/pdb/pdb_represent.ipynb
riversdark/bidd-molmap
7e3325433e2f29c189161859c63398574af6572b
[ "MIT" ]
12
2020-09-28T14:11:17.000Z
2022-02-10T04:33:25.000Z
molmap/feature/pdb/pdb_represent.ipynb
riversdark/bidd-molmap
7e3325433e2f29c189161859c63398574af6572b
[ "MIT" ]
24
2020-07-22T08:52:59.000Z
2022-03-14T09:59:44.000Z
485.692537
14,344
0.945061
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom molmap.feature.pdb.pdb2fmap import PDB2Fmap", "RDKit WARNING: [12:43:17] Enabling RDKit 2019.09.2 jupyter extensions\n" ] ], [ [ "## transofrm self properties", "_____no_output_____" ] ], [ [ "pm = PDB2Fmap(embd_grain='all', fmap_shape=None)\npm.fit(pdb_file='./1a1e/1a1e_protein.pdb', embd_chain='B')\nX = pm.transform_xyz(scale=True, feature_range=(0.1,1))\nX = pm.transofrm_bf(scale = True, feature_range=(0.2,1))\nX = pm.transofrm_pkt('./1a1e/1a1e_pocket.pdb')\nX = pm.transform_intrinsic()\nsns.heatmap(X[2].reshape(*pm.fmap_shape), cmap = 'jet')", "2021-07-21 12:43:19,682 - INFO - [bidd-molmap] - Calculating distance ...\n2021-07-21 12:43:19,805 - INFO - [bidd-molmap] - the number of process is 16\n" ] ], [ [ "## transofrm custom indexes", "_____no_output_____" ] ], [ [ "from molmap.feature.sequence.aas.local_feature.aai import load_index\naaidx = load_index()\ndfindex = aaidx.data\nX = pm.transform_custom(dfindex.T)", "100%|##########| 566/566 [00:00<00:00, 677.79it/s]\n" ], [ "X.shape", "_____no_output_____" ], [ "for i in range(0, 500, 50):\n fig, ax = plt.subplots()\n sns.heatmap(X[i].reshape(*pm.fmap_shape), ax=ax)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ecafd3dc916b1c90d5a3ccc7f678c586c3c424f8
26,067
ipynb
Jupyter Notebook
Programmeerelementen/Datatypes/0200_List.ipynb
dwengovzw/PythonNotebooks
633bea4b07efbd920349d6f1dc346522ce118b70
[ "CC0-1.0" ]
null
null
null
Programmeerelementen/Datatypes/0200_List.ipynb
dwengovzw/PythonNotebooks
633bea4b07efbd920349d6f1dc346522ce118b70
[ "CC0-1.0" ]
3
2021-09-30T11:38:24.000Z
2021-10-04T09:25:39.000Z
Programmeerelementen/Datatypes/0200_List.ipynb
dwengovzw/PythonNotebooks
633bea4b07efbd920349d6f1dc346522ce118b70
[ "CC0-1.0" ]
null
null
null
33.038023
2,725
0.580888
[ [ [ "<img src=\"images/logodwengo.png\" alt=\"Dwengo\" width=\"150\"/>", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h1>LISTS</h1> \n </font>\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-box alert-success\">\n Taaltechnologie wordt steeds meer aangewend in allerlei toepassingen. De kans is groot dat je er reeds mee in aanraking kwam. Aan de basis van taaltechnologie liggen de vorm waarin tekst aan een computer wordt gegeven en hoe je met een computer tekst kan verwerken. <br>\n In deze notebook leer je de basisfunctionaliteiten om te werken met lijsten (<em>lists</em>). \n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\"> \n Deze notebook bereidt voor op de notebooks van het project Chatbot van AI Op School. In dat project maak je kennis met de kunstmatige intelligente systemen uit het domein van de taaltechnologie.<br>\n Taaltechnologen doen bijvoorbeeld een beroep op machine learning-modellen om bij gegeven teksten onderzoek te doen naar sentimentwoorden. De huidige chatbots worden vaak regelgebaseerd geprogrammeerd, maar met machine learning-technieken kunnen chatbots beter gemaakt worden. <br><br>\n Voor het 'Chatbot'-project doorloop je naast de notebook 'Strings' en deze notebook ook best de notebooks 'Dictionaries' en 'Structuren (toepassingen bij strings, lijsten en dictionaries)'. \n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\"> \n Het gebruik van technologie wordt steeds toegankelijker voor werknemers in een niet-technologische sector, zoals taalkundigen, communicatiewetenschappers, historici en juristen. <br>\n Dankzij de zeer toegankelijke programmeertaal Python zal ook jij enkele mogelijkheden van technologie ontdekken. <br>\n Python is vaak zeer intuïtief in gebruik en bovendien zo populair dat er heel wat modules voorhanden zijn die men vrij kan gebruiken. In een module zitten heel wat functies vervat die ervaren informatici reeds voor jou hebben geprogrammeerd. \n</div>", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>1. Lists</h2> \n </font>\n</div>", "_____no_output_____" ], [ "Herneem enkele zaken uit de notebook 'Strings':", "_____no_output_____" ] ], [ [ "zin = \"Hello, world!\"\nprint(zin[7])\nprint(zin[7:12]) ", "_____no_output_____" ], [ "zin.split()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\"> \n <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">zin</span> is een <b>variabele</b> die verwijst naar de string <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">\"Hello, world!\"</span>. <br>\n De naam die je aan de variabele geeft, kies je zelf, maar kies best een betekenisvolle naam.<br>\n Elk karakter in een string heeft een <b>index</b> die vertelt het hoeveelste karakter dat karakter is in die string. <br><br>\n De methode <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">split()</span> zet een string om naar een list (een lijst). 
De methode baseert zich op de spaties om de opsplitsing te maken.<br>\n In het voorbeeld werd de string <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">\"Hello, world\"</span> omgezet naar een list met twee strings in dezelfde volgorde als dat ze in de string voorkomen.\n</div>", "_____no_output_____" ], [ "#### Voorbeeld 1.1", "_____no_output_____" ], [ "Voer de volgende code-cellen uit.", "_____no_output_____" ] ], [ [ "getallen = [-2, 510, -7, 28.4, -2, 0, 3.14] # list met zeven reële getallen \ngemengdelijst = [\"koekoek\", 2.5, -1, [5, \"ren\"], \"?!@\", \" \"] # list met zes elementen, waarvan een zelf een list", "_____no_output_____" ], [ "type(getallen), type(gemengdelijst)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n Een <b>list</b> is een lijst van elementen, zoals getallen en strings, waarbij eenzelfde element meerdere keren kan voorkomen en waarbij de volgorde van de elementen van belang is. Je kan een list herkennen aan <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">[ ]</span>.\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\"> \n <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">[-2, 510, -7, 28.4, -2, 0, 3.14]</span> is een object met het type <b>list</b>.<br>\n <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">[\"koekoek\", 2.5, -1, [5, \"ren\"], \"?!@\", \" \"]</span> is een object met het type <b>list</b>. \n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n Net als bij strings kan je lijsten samenvoegen m.b.v. de <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">+-operator</span>.<br>\n Net als bij een string wordt de plaats van een element in een list gegeven door de <b>index</b>.\n</div>", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>2. Concateneren</h2> \n </font>\n</div>", "_____no_output_____" ], [ "#### Voorbeeld 2.1", "_____no_output_____" ] ], [ [ "# lijsten samenvoegen\ngetallen + gemengdelijst", "_____no_output_____" ] ], [ [ "#### Oefening 2.1\nVerzin zelf drie lijsten en concateneer ze tot één lijst. Gebruik de onderstaande code-cel voor deze opdracht. ", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>3. Index</h2> \n </font>\n</div>", "_____no_output_____" ], [ "#### Oefening 3.1\nGebruik de onderstaande code-cel voor deze opdracht. \n- Toon het eerste deel van de list `getallen`, nl. een list met de eerste vier elementen, op het scherm. In de instructie maak je gebruik van `index`. \n- Voer de code-cel uit en controleer.", "_____no_output_____" ], [ "Naast de +-operator zijn er nog andere operatoren. Deze komen verder in deze notebook aan bod.", "_____no_output_____" ], [ "#### Oefening 3.2\n- Beschouw de list `[\"boter\", \"koffie\", \"ochtend\", \"choco\"]`. \n- Hoeveel elementen bevat deze lijst?\n- Wat is het element in deze lijst met index 0? \n- Wat is het element in deze lijst met index 3?", "_____no_output_____" ], [ "Antwoord:", "_____no_output_____" ], [ "#### Oefening 3.3\nGebruik voor iedere opdracht in deze oefening telkens een van onderstaande code-cellen. \n- Geef de lijst `[\"boter\", \"koffie\", \"ochtend\", \"choco\"]` in in Python en verwijs ernaar met een variabele met de naam `ontbijt`.\n- Vraag met een instructie het aantal elementen in deze lijst op.\n- Vraag met een instructie het element met index 0 op. 
\n- Vraag met een instructie het element met index 3 op. \n- Vraag met een instructie de middelste twee elementen op. ", "_____no_output_____" ] ], [ [ "# lijst ingeven", "_____no_output_____" ], [ "# aantal elementen", "_____no_output_____" ], [ "# element met index 0", "_____no_output_____" ], [ "# element met index 3", "_____no_output_____" ], [ "# middelste twee elementen", "_____no_output_____" ] ], [ [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>4. De functies len() en sum()</h2> \n </font>\n</div>", "_____no_output_____" ], [ "#### Voorbeeld 4.1", "_____no_output_____" ] ], [ [ "len(getallen)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n Het <b>aantal elementen</b> in een list vraag je op met de functie <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">len()</span>. \n</div>", "_____no_output_____" ], [ "#### Oefening 4.1", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-info\">\n Als een list enkel getallen bevat, dan kan je de <b>som</b> van al die getallen opvragen met de functie <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">sum()</span>. \n</div>", "_____no_output_____" ], [ "Test dit uit op de list getallen.", "_____no_output_____" ], [ "#### Samengevat\nJe begrijpt nu wat de volgende begrippen inhouden:\n- een lijst;\n- een index van een element van een lijst;\n- het samenvoegen van lijsten tot één nieuwe lijst (concateneren);\n- het aantal elementen van een lijst (`len()`);\n- de som van een lijst getallen (`sum()`).", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h2>5. Vergelijkingsoperatoren en logische operatoren</h2> \n </font>\n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\"> \n De +-operator is een wiskundige operator. Er zijn nog andere wiskundige operatoren. Deze worden uitvoerig uit de doeken gedaan in de notebook 'Rekenen' van de 'Python in wiskunde'-reeks. 
\n</div> ", "_____no_output_____" ], [ "Nu verdiep je je in een andere soort operatoren: de **vergelijkingsoperatoren**, waarmee je twee objecten met elkaar kunt laten vergelijken.", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h3>Vergelijkingsoperatoren</h3> \n </font>\n</div>\n\n<table>\n <thead>\n <tr>\n <th>&nbsp;</th> \n <th><p align=\"center\">Wiskunde</th>\n <th>&nbsp;</th>\n <th><p align=\"center\">Python</th> \n </thead>\n <tr> <td> <p align=\"left\">groter dan <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; > <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; > \n <tr> <td> <p align=\"left\">kleiner dan <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; < <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; < \n <tr> <td> <p align=\"left\">gelijk aan <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; = <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; == \n <tr> <td> <p align=\"left\">verschillend van <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; &#8800; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; != \n <tr> <td> <p align=\"left\">groter dan of gelijk aan <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; &#8805; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; >= \n <tr> <td> <p align=\"left\">kleiner dan of gelijk aan <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; &#8804; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; <= \n <tr> <td> <p align=\"left\">behoort tot <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; in \n <tr> <td> <p align=\"left\">behoort niet tot <td> <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; &nbsp; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; not in \n</table>", "_____no_output_____" ], [ "#### Voorbeeld 5.1", "_____no_output_____" ], [ "Gebruik de bovenstaande tabel om te achterhalen wat de code in de volgende code-cellen doet.", "_____no_output_____" ] ], [ [ "ontbijt[2] == \"ochtend\" ", "_____no_output_____" ], [ "ontbijt[0] == \"boten\"", "_____no_output_____" ], [ "ontbijt[0] != \"boten\"", "_____no_output_____" ], [ "5 <= 2", "_____no_output_____" ], [ "\"boten\" in ontbijt", "_____no_output_____" ], [ "\"boter\" in ontbijt", "_____no_output_____" ], [ "\"boten\" not in ontbijt", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n Bij het uitvoeren van de code-cellen wordt telkens nagegaan of de gegeven (logische) uitdrukking waar is. Indien deze waar is, is de uitvoer <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">True</span>, indien niet dan is de uitvoer <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">False</span>.<br>\n <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">True</span> en <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">False</span> zijn <b>Booleaanse waarden</b>. \n</div>", "_____no_output_____" ], [ "<div class=\"alert alert-box alert-info\">\n Een logische uitdrukking kan slechts de waarden <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">False</span> en <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">True</span> aannemen.<br>\n Zo'n logische uitdrukking is een object met het type <b>bool</b>. Men spreekt ook van een Booleaanse uitdrukking. 
\n</div>", "_____no_output_____" ], [ "#### Voorbeeld 5.2", "_____no_output_____" ] ], [ [ "type(5>2)", "_____no_output_____" ], [ "type(False)", "_____no_output_____" ] ], [ [ "Met behulp van de **logische** of **Booleaanse** operatoren kan je (logische) uitdrukkingen combineren tot een nieuwe (logische) uitdrukking en controleren of de combinatie waar is.", "_____no_output_____" ], [ "<div>\n <font color=#690027 markdown=\"1\">\n <h3>Logische of Booleaanse operatoren</h3> \n </font>\n</div>\n\n<table>\n <thead>\n <tr>\n <th>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</th> \n <th><p align=\"center\">Logica</th>\n <th>&nbsp;</th>\n <th><p align=\"center\">Python</th> \n </thead>\n <tr> <td> <p align=\"left\">en <td> <p align=\"center\">&nbsp;&nbsp;&nbsp; &#8743; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; and \n <tr> <td> <p align=\"left\">of <td> <p align=\"center\">&nbsp;&nbsp;&nbsp; &#8744; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; or \n <tr> <td> <p align=\"left\">niet <td> <p align=\"center\">&nbsp;&nbsp;&nbsp; &#172; <td> <td> \n <p align=\"center\">&nbsp;&nbsp;&nbsp;&nbsp; not \n</table>", "_____no_output_____" ], [ "#### Voorbeeld 5.3", "_____no_output_____" ], [ "Gebruik de bovenstaande tabel om de uitvoer van de volgende code-cellen te begrijpen.", "_____no_output_____" ] ], [ [ "ontbijt[0] != \"boten\" and 5 <= 2", "_____no_output_____" ], [ "ontbijt[0] != \"boten\" and 5 > 2", "_____no_output_____" ], [ "ontbijt[0] != \"boten\" or 5 <= 2", "_____no_output_____" ], [ "not(5>2)", "_____no_output_____" ] ], [ [ "#### Oefening 5.1\nDeze techniek kan je ook toepassen op strings.\nVul het volgende script aan om na te gaan of het woord \"aanbod\" in de gegeven string `Netflix` voorkomt [1]. Doe hetzelfde voor het woord \"vers\".", "_____no_output_____" ] ], [ [ "netflix = \"Geschiedenis. Netflix is in 1997 opgericht in Scotts Valley, Californië door Marc Randolph en Reed Hastings. Hastings kwam op het idee om het bedrijf te beginnen, nadat hij een boete van 40 dollar moest betalen na het te laat inleveren van de gehuurde film Apollo 13. Op 14 april 1998 is de website van Netflix online gegaan met 30 medewerkers en 925 te huren video's. Vier jaar later kreeg het bedrijf een notering aan de technologiebeurs NASDAQ. Aanvankelijk was het bedrijfsmodel van Netflix gebaseerd op het voor onbepaalde tijd verhuren van dvd's die per post verstuurd werden in markante rode enveloppen. Zodra men de dvd terugstuurde zond Netflix de volgende dvd uit de lijst van gereserveerde films. Het aantal dvd's dat iemand tegelijkertijd in bezit kon hebben hing af van het abonnementstype. Met de toenemende breedband-penetratie heeft Netflix de overstap naar streaming gemaakt. In januari 2019 trad het bedrijf ook toe tot de Motion Picture Association (MPA). Internationaal. Netflix was tot 22 september 2010 alleen actief in de Verenigde Staten, maar daarna gingen ook servers in Canada online en werd het een internationale onderneming. In september 2011 werd het ook actief in Latijns-Amerika. Op 9 januari 2012 begon het bedrijf in Europa, in Ierland en het Verenigd Koninkrijk. In oktober 2012 volgden Noorwegen, Zweden, Denemarken en Finland. Op 11 september 2013 lanceerde Netflix zijn SVOD (subscription video on demand) in Nederland. Het aanbod bestaat vanaf de start uit zowel Nederlandse als buitenlandse films, series, documentaires, concerten en cabaret. 
Bij alle buitenlandse inhoud zal in principe ondertiteling aanwezig zijn met uitzondering van programma's voor jonge kinderen op het kanaal Netflix Kids, die zijn nagesynchroniseerd. Inhoud voor oudere kinderen en tieners zal meestal ondertiteld zijn, waarbij soms ook de keuze voor nasynchronisatie mogelijk is. Op 19 september 2014 werd Netflix gelanceerd in België, met voor Vlaanderen een gelijkaardig aanbod als in Nederland. Op 24 maart 2015 werd Netflix gelanceerd in Australië en Nieuw-Zeeland en op 2 september werd Japan toegevoegd. In Spanje, Portugal en Italië kwam Netflix beschikbaar op respectievelijk 20, 21 en 22 oktober 2015. Op 6 januari 2016 kondigde Reed Hastings op de CES-technologiebeurs in Las Vegas aan dat vanaf deze dag Netflix in 130 bijkomende landen beschikbaar wordt. Daardoor is Netflix wereldwijd beschikbaar behalve in de Volksrepubliek China, waar Netflix nog aan werkt, en landen en regio's die onder Amerikaanse sancties staan zoals Noord-Korea, het schiereiland de Krim en Syrië. In het grootste deel van de nieuwe landen wordt echter alleen de eigen content van Netflix aangeboden.\"", "_____no_output_____" ] ], [ [ "#### Oefening 5.2\nLaat de gegeven string `Netflix` zien op het scherm. Kloppen de resultaten die je verkreeg?", "_____no_output_____" ], [ "Antwoord:", "_____no_output_____" ], [ "<div class=\"alert alert-box alert-info\">\n Als je met de operator <span style=\"background-color:whitesmoke; font-family:consolas; font-size:1em;\">in</span> op zoek gaat naar een bepaald woord in een gegeven string, dan kan het zijn dat het 'woord' gedetecteerd wordt als deel van een ander woord, en dat is niet wat je wil. Je kan dit echter oplossen door eerst de woorden uit de string op te lijsten en erna in de lijst op zoek te gaan naar het gewenste woord.\n</div>", "_____no_output_____" ], [ "#### Oefening 5.3\nPas de code aan zodat je wel het juiste antwoord krijgt.", "_____no_output_____" ], [ "<div>\n <h2>Referentielijst</h2> \n</div>", "_____no_output_____" ], [ "[1] Netflix. (2021, 10 augustus 8). In Wikipedia. https://nl.wikipedia.org/wiki/Netflix", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\nIn een volgende notebook maak je kennis met <em>dictionaries</em>. Daarna kan je nog extra oefenen met strings, lijsten en dictionaries in een notebook over structuren.\n</div>", "_____no_output_____" ], [ "<img src=\"images/cclic.png\" alt=\"Banner\" align=\"left\" width=\"100\"/><br><br>\nNotebook Programmeerelementen met Python, zie Computationeel denken - Programmeren in Python van <a href=\"http://www.aiopschool.be\">AI Op School</a>, van F. wyffels & N. Gesquière is in licentie gegeven volgens een <a href=\"http://creativecommons.org/licenses/by-nc-sa/4.0/\">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ecafd929e467415a187dc00ae326b3de704fd5a5
11,686
ipynb
Jupyter Notebook
Enrollment_Form_Notebook.ipynb
James11222/Auto_Form_to_Email_Pipeline
a27b4b3a6956c17dc641a8cd76ef2226ced8abd5
[ "MIT" ]
null
null
null
Enrollment_Form_Notebook.ipynb
James11222/Auto_Form_to_Email_Pipeline
a27b4b3a6956c17dc641a8cd76ef2226ced8abd5
[ "MIT" ]
null
null
null
Enrollment_Form_Notebook.ipynb
James11222/Auto_Form_to_Email_Pipeline
a27b4b3a6956c17dc641a8cd76ef2226ced8abd5
[ "MIT" ]
null
null
null
11,686
11,686
0.6809
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "pip install pygsheets", "_____no_output_____" ] ], [ [ "# Read in Google Sheets Data with Python!", "_____no_output_____" ] ], [ [ "import pygsheets\nimport pandas as pd\n#authorization\nCLIENT_SECRET_FILE = '/content/drive/My Drive/Testing_Stuff/client_secrets.json'\ngc = pygsheets.authorize(client_secret=CLIENT_SECRET_FILE)\n", "Please go to this URL and finish the authentication flow: https://accounts.google.com/o/oauth2/auth?response_type=code&client_id=571222025068-m9ovg7f56g0ldejvv4j7it2j8k3i4ulo.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fspreadsheets+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=KY7seQmaw7ClkW1Tm3K5o4eDyO3PAn&prompt=consent&access_type=offline\nEnter the authorization code: 4/1AX4XfWgUnpCnflS2u1yNy6Js7WKi79ra8Do7u5WSauCS786o0Lm20ZoRCQg\n" ] ], [ [ "Once we succesfully read in the client secret file, we can open any spreadsheet under my google drive account using the pygsheets `gc` object", "_____no_output_____" ] ], [ [ "#open the google spreadsheet (where 'Python DeCal Application Form 21 (Responses)' is the name of my sheet)\nresponse_sheet_title = 'Python DeCal Application Form Spring 2022 (Responses)'\nenrollment_codes_sheet = 'Enrollment Codes'\nsh_responses = gc.open(response_sheet_title)\nsh_codes = gc.open(enrollment_codes_sheet)\n", "_____no_output_____" ] ], [ [ "We then take the data from a spreadsheet and store it in a dataframe using pandas and numpy to clean it up.", "_____no_output_____" ] ], [ [ "########################\n# Responses #\n########################\n\n#select the first sheet \nwks = sh_responses[0]\n\n#format data into a nice dataframe using pandas\ndata = pd.DataFrame(wks)\ndata_np = data.to_numpy()\ncolumn_names = data_np[0,:]\n#cleanly format dataframe\ndata = pd.DataFrame(data_np[1:,:], columns = column_names)\n\n########################\n# Enrollment Codes #\n########################\n\n#select the first sheet \nwks = sh_codes[0]\n\n#format data into a nice dataframe using pandas\ndata_codes = pd.DataFrame(wks)\ndata_np = data_codes.to_numpy()\ncolumn_names = data_np[0,:]\n#cleanly format dataframe\ndata_codes = pd.DataFrame(data_np[1:,:], columns = column_names)\n\n", "_____no_output_____" ], [ "data['First and Last Name'][0]", "_____no_output_____" ], [ "data['Email Address (ending with berkeley.edu) ']", "_____no_output_____" ] ], [ [ "Examine the first few rows:", "_____no_output_____" ] ], [ [ "print(data.head())\n\nprint(data_codes.head())", " Timestamp First and Last Name Berkeley Student ID ... \n0 11/15/2021 0:15:02 James Sunseri 3033975793 ... \n\n[1 rows x 20 columns]\n Count Code email Status ... \n0 1 53363 [email protected] ... \n1 2 686508 [email protected] ... \n2 3 469739 [email protected] ... \n3 4 442742 [email protected] ... \n4 5 158596 [email protected] ... \n\n[5 rows x 27 columns]\n" ], [ "email_messages = []\nfor i in range(len(data['First and Last Name'])):\n email_string = \"\"\" Hello {0},\n\n We look forward to having you in our class this fall! 
Here is the permission code for the decal: {1}\n\n    Please let me know if you have any questions\n\n    Onwards,\n    Python Decal Staff\"\"\".format(data['First and Last Name'][i], data_codes['Code'][i])\n\n    email_messages.append(email_string)\n\n    ", "_____no_output_____" ], [ "\nemail_dict = {}  # initialize the dict before it is indexed in the loop\nfor i in range(len(data['First and Last Name'])):\n    address = data['Email Address (ending with berkeley.edu) '][i]\n    message = email_messages[i]\n    email_dict[address] = message\n\n\n# column-oriented dict actually used to build the dataframe below\nemail_dict = {'Emails':data['Email Address (ending with berkeley.edu) '], 'Messages':email_messages}", "_____no_output_____" ], [ "final_email_df = pd.DataFrame(email_dict, index=range(len(data['First and Last Name'])))\n\nfinal_email_df.to_csv(\"/content/drive/My Drive/Python DeCal Spring 2022/email_list.csv\")", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "final_email_df", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecafdfe3ab718d2bafd402fb2680d69a055f33ed
1,939
ipynb
Jupyter Notebook
notebooks/1.1-kj_format_iframes_for_article_tagging.ipynb
ebanalyse/label-studio
a7bc3a472e79322580cc1232c1d4cfe4aeb14217
[ "Apache-2.0" ]
null
null
null
notebooks/1.1-kj_format_iframes_for_article_tagging.ipynb
ebanalyse/label-studio
a7bc3a472e79322580cc1232c1d4cfe4aeb14217
[ "Apache-2.0" ]
null
null
null
notebooks/1.1-kj_format_iframes_for_article_tagging.ipynb
ebanalyse/label-studio
a7bc3a472e79322580cc1232c1d4cfe4aeb14217
[ "Apache-2.0" ]
null
null
null
20.849462
119
0.545126
[ [ [ "import pandas as pd\nfrom pathlib import Path", "_____no_output_____" ] ], [ [ "# load articles", "_____no_output_____" ] ], [ [ "path_articles = Path().absolute().parents[2].joinpath('Desktop', 'data', 'eb-articles', '10k_eb_articles.pkl')\ndf = pd.read_pickle(path_articles)", "_____no_output_____" ] ], [ [ "# format iframes", "_____no_output_____" ] ], [ [ "base_iframe = \"<iframe src='https://ekstrabladet.dk/a/{}' width='100%' height='1000px'/>\"\ndf['url'] = df['id'].apply(lambda x: base_iframe.format(x))", "_____no_output_____" ], [ "path_articles = Path().absolute().parent.joinpath('data_storage', '10k_articles.csv')\ndf[['id', 'url']].to_csv(path_articles)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ecafe02221410418f719c816b0d4e817289e3695
245,816
ipynb
Jupyter Notebook
notebooks/market index.ipynb
yuchen-robin-cao/Modifying-Shiller-CAPE-Ratio
458eafcee4fce3f0d42f885bf8c83eee10c10d3a
[ "MIT" ]
6
2021-12-25T09:13:07.000Z
2022-03-03T15:48:28.000Z
notebooks/market index.ipynb
samsonq/Modifying-Shiller-CAPE-Ratio
c905c8c0ac8089802a96ef870ca9485f81b396ee
[ "MIT" ]
null
null
null
notebooks/market index.ipynb
samsonq/Modifying-Shiller-CAPE-Ratio
c905c8c0ac8089802a96ef870ca9485f81b396ee
[ "MIT" ]
null
null
null
171.419805
86,612
0.814451
[ [ [ "import os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mlp\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nmlp.style.use(\"seaborn\")\nimport statsmodels\nimport statsmodels.api as sm\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "'''The Frist Proposal'''", "_____no_output_____" ], [ "df = pd.read_excel('../data/index_data.xlsx')\ndf = df.fillna(np.nan)\ndf", "_____no_output_____" ], [ "'''calculate new EPS and CAPE'''\nnew_eps = []\nweight = []\nfor i in range(10):\n weight.extend([i+1] * 12)\ntotal_weight = sum(weight)\nweight = np.array(weight)\nfor date in range(len(df)):\n if date < 120:\n new_eps.append(np.nan)\n else:\n hist_eps = np.array(df['Real Earnings'][date-120:date])\n new_eps.append(np.nansum(weight * hist_eps) / total_weight)\ndf['new_eps'] = pd.Series(new_eps)\ndf['new_CAPE'] = df['Real Price'] / df['new_eps']\ndf", "_____no_output_____" ], [ "df.index = pd.date_range('1871-01', '2021-10', freq = 'M')\nplt.figure(figsize = [12, 8])\nplt.plot(df[['CAPE', 'new_CAPE']], label = ['CAPE', 'CAPE adj I'])\nplt.legend(loc = 'upper left', fontsize = 14)\nplt.show()", "_____no_output_____" ], [ "# total period, old CAPE\nsm_df = df[['Fraction', 'Real Stock Return', 'CAPE', 'new_CAPE']]\nsm_df.dropna(inplace = True)\ny = sm_df['Real Stock Return']\nX = sm.add_constant(np.log(sm_df['CAPE']))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Real Stock Return R-squared: 0.311\nModel: OLS Adj. R-squared: 0.311\nMethod: Least Squares F-statistic: 707.9\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 5.29e-129\nTime: 05:04:04 Log-Likelihood: 2710.8\nNo. Observations: 1568 AIC: -5418.\nDf Residuals: 1566 BIC: -5407.\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.2650 0.008 35.031 0.000 0.250 0.280\nCAPE -0.0732 0.003 -26.607 0.000 -0.079 -0.068\n==============================================================================\nOmnibus: 25.884 Durbin-Watson: 0.012\nProb(Omnibus): 0.000 Jarque-Bera (JB): 21.009\nSkew: -0.206 Prob(JB): 2.74e-05\nKurtosis: 2.611 Cond. No. 21.7\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "# total period, new CAPE\ny = sm_df['Real Stock Return']\nX = sm.add_constant(np.log(sm_df['new_CAPE']))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Real Stock Return R-squared: 0.304\nModel: OLS Adj. R-squared: 0.304\nMethod: Least Squares F-statistic: 685.0\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 1.49e-125\nTime: 05:04:04 Log-Likelihood: 2702.9\nNo. 
Observations: 1568 AIC: -5402.\nDf Residuals: 1566 BIC: -5391.\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.2692 0.008 34.303 0.000 0.254 0.285\nnew_CAPE -0.0753 0.003 -26.173 0.000 -0.081 -0.070\n==============================================================================\nOmnibus: 24.088 Durbin-Watson: 0.012\nProb(Omnibus): 0.000 Jarque-Bera (JB): 19.811\nSkew: -0.201 Prob(JB): 4.99e-05\nKurtosis: 2.624 Cond. No. 22.2\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "# before 2006, old CAPE\nsm_df_before = sm_df[sm_df.Fraction <= 2007]\ny = sm_df_before['Real Stock Return']\nX = sm.add_constant(np.log(sm_df_before['CAPE']))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Real Stock Return R-squared: 0.347\nModel: OLS Adj. R-squared: 0.346\nMethod: Least Squares F-statistic: 801.3\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 9.09e-142\nTime: 05:04:04 Log-Likelihood: 2646.5\nNo. Observations: 1512 AIC: -5289.\nDf Residuals: 1510 BIC: -5278.\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.2746 0.008 36.618 0.000 0.260 0.289\nCAPE -0.0775 0.003 -28.307 0.000 -0.083 -0.072\n==============================================================================\nOmnibus: 21.712 Durbin-Watson: 0.012\nProb(Omnibus): 0.000 Jarque-Bera (JB): 18.971\nSkew: -0.215 Prob(JB): 7.59e-05\nKurtosis: 2.660 Cond. No. 21.5\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "# before 2006, new CAPE\ny = sm_df_before['Real Stock Return']\nX = sm.add_constant(np.log(sm_df_before['new_CAPE']))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Real Stock Return R-squared: 0.338\nModel: OLS Adj. R-squared: 0.338\nMethod: Least Squares F-statistic: 772.3\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 1.25e-137\nTime: 05:04:04 Log-Likelihood: 2636.9\nNo. Observations: 1512 AIC: -5270.\nDf Residuals: 1510 BIC: -5259.\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.2788 0.008 35.808 0.000 0.264 0.294\nnew_CAPE -0.0797 0.003 -27.791 0.000 -0.085 -0.074\n==============================================================================\nOmnibus: 19.375 Durbin-Watson: 0.012\nProb(Omnibus): 0.000 Jarque-Bera (JB): 17.039\nSkew: -0.203 Prob(JB): 0.000200\nKurtosis: 2.675 Cond. No. 
22.0\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "# after 2007, old CAPE\nsm_df_after = sm_df[sm_df.Fraction > 2007]\ny = sm_df_after['Real Stock Return']\nX = sm.add_constant(np.log(sm_df_after['CAPE']))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Real Stock Return R-squared: 0.622\nModel: OLS Adj. R-squared: 0.615\nMethod: Least Squares F-statistic: 88.74\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 5.43e-13\nTime: 05:04:04 Log-Likelihood: 148.66\nNo. Observations: 56 AIC: -293.3\nDf Residuals: 54 BIC: -289.3\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.4565 0.038 12.034 0.000 0.380 0.533\nCAPE -0.1169 0.012 -9.420 0.000 -0.142 -0.092\n==============================================================================\nOmnibus: 7.599 Durbin-Watson: 0.065\nProb(Omnibus): 0.022 Jarque-Bera (JB): 7.285\nSkew: 0.820 Prob(JB): 0.0262\nKurtosis: 2.342 Cond. No. 55.4\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "# after 2007, new CAPE\ny = sm_df_after['Real Stock Return']\nX = sm.add_constant(np.log(sm_df_after['new_CAPE']))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Real Stock Return R-squared: 0.527\nModel: OLS Adj. R-squared: 0.518\nMethod: Least Squares F-statistic: 60.19\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 2.42e-10\nTime: 05:04:04 Log-Likelihood: 142.42\nNo. Observations: 56 AIC: -280.8\nDf Residuals: 54 BIC: -276.8\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.4383 0.044 10.030 0.000 0.351 0.526\nnew_CAPE -0.1126 0.015 -7.759 0.000 -0.142 -0.083\n==============================================================================\nOmnibus: 16.166 Durbin-Watson: 0.052\nProb(Omnibus): 0.000 Jarque-Bera (JB): 5.787\nSkew: 0.516 Prob(JB): 0.0554\nKurtosis: 1.811 Cond. No. 
56.4\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "'''The Second Proposal'''", "_____no_output_____" ], [ "'''this section derives all SPX constituents, original data (.xlsx) are from Bloomberg'''\n\nqtrs = [' Mar', ' Jun', ' Sep', ' Dec']\ntic_qtr = {}\n\nnames_1 = []\nfor year in range(1990, 2001):\n for qtr in qtrs:\n names_1.append(str(year) + qtr)\n\nnames_2 = []\nfor year in range(2001, 2012):\n for qtr in qtrs:\n names_2.append(str(year) + qtr)\n\nfor name in names_1:\n df = pd.read_excel('../data/index_SPX 1990-2000.xlsx', sheet_name = name)\n tic_qtr[name] = np.array(df['Ticker'].apply(lambda x: x.split(' ')[0]))\nfor name in names_2:\n df = pd.read_excel('../data/index_SPX 2000-2011.xlsx', sheet_name = name)\n tic_qtr[name] = np.array(df['Ticker'].apply(lambda x: x.split(' ')[0]))", "_____no_output_____" ], [ "'''this section calculates modified CAPE ratio, data (.csv) is from WRDS'''\ndf = pd.read_csv('../data/index_individual_dat.csv')\n\nrolling_EPS = {}\nreal_P = {}\nqtr_str_to_num = {'Mar': 3, 'Jun': 6, 'Sep': 9, 'Dec': 12}\n\nfor qtr in tic_qtr:\n this_df = df[df['tic'].isin(tic_qtr[qtr])]\n comp_df = pd.DataFrame((this_df.groupby('datacqtr')['ibq'].sum() \\\n - this_df.groupby('datacqtr')['dvpy'].sum()).rename('EARN'))\n comp_df['SHOUT'] = this_df.groupby('datacqtr')['cshoq'].sum()\n year = int(qtr.split(' ')[0])\n fqtr = qtr_str_to_num[qtr.split(' ')[1]]\n comp_df['datadate'] = comp_df.index\n comp_df['datadate'] = comp_df['datadate'].apply(lambda x: int(x.split('Q')[0]) * 100 \\\n + int(x.split('Q')[1]) * 3)\n datadate = year * 100 + fqtr\n comp_df = comp_df[comp_df.datadate <= datadate]\n comp_df = comp_df[-40:]\n \n # CPI adjustment\n cpi = pd.read_excel('../data/index_CPI.xlsx')\n cpi['datadate'] = cpi['Year'] * 100 + cpi['Month']\n comp_df = pd.merge(comp_df, cpi, \n left_on = 'datadate', right_on = 'datadate', how = 'left')\n comp_df['real_EARN'] = comp_df['EARN'] * cpi['CPI'][len(cpi)-1] / comp_df['CPI']\n comp_df['real_P'] = comp_df['P'] * cpi['CPI'][len(cpi)-1] / comp_df['CPI']\n \n # add to output\n rolling_EPS[comp_df['datadate'][len(comp_df)-1]] = comp_df['real_EARN'].mean()\n real_P[comp_df['datadate'][len(comp_df)-1]] = comp_df['real_P'][len(comp_df)-1] * \\\n comp_df['SHOUT'][len(comp_df)-1]", "_____no_output_____" ], [ "'''this section calculates the new CAPE'''\nimport matplotlib.pyplot as plt\nCAPE = []\nfor year in real_P:\n CAPE.append(real_P[year] / rolling_EPS[year] / 100)\nCAPE = pd.DataFrame(CAPE, index = pd.date_range('1990-03', '2012-01', freq = 'Q'))\nCAPE.columns = ['new_CAPE']\nplt.plot(CAPE)", "_____no_output_____" ], [ "'''regression'''\nregression = pd.read_excel('../data/index_Regression.xlsx')\nregression['datadate'] = regression['Year'] * 100 + regression['Month']\nCAPE['datadate'] = CAPE.index.year * 100 + CAPE.index.month\nCAPE = pd.merge(CAPE, regression, left_on = 'datadate', right_on = 'datadate', how = 'left')\nCAPE.dropna(inplace = True)\n\n# old CAPE\ny = CAPE['Ret']\nX = sm.add_constant(np.log(CAPE[['CAPE']]))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Ret R-squared: 0.878\nModel: OLS Adj. R-squared: 0.877\nMethod: Least Squares F-statistic: 605.6\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 3.66e-40\nTime: 05:04:24 Log-Likelihood: 228.85\nNo. 
Observations: 86 AIC: -453.7\nDf Residuals: 84 BIC: -448.8\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.6073 0.022 27.232 0.000 0.563 0.652\nCAPE -0.1704 0.007 -24.610 0.000 -0.184 -0.157\n==============================================================================\nOmnibus: 1.818 Durbin-Watson: 0.306\nProb(Omnibus): 0.403 Jarque-Bera (JB): 1.195\nSkew: 0.211 Prob(JB): 0.550\nKurtosis: 3.393 Cond. No. 42.7\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "# new CAPE\ny = CAPE['Ret']\nX = sm.add_constant(np.log(CAPE[['new_CAPE']]))\nmodel = sm.OLS(y, X).fit()\nprint(model.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: Ret R-squared: 0.413\nModel: OLS Adj. R-squared: 0.406\nMethod: Least Squares F-statistic: 59.09\nDate: Thu, 02 Dec 2021 Prob (F-statistic): 2.55e-11\nTime: 05:04:24 Log-Likelihood: 161.23\nNo. Observations: 86 AIC: -318.5\nDf Residuals: 84 BIC: -313.5\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 0.1964 0.018 10.818 0.000 0.160 0.233\nnew_CAPE -0.0442 0.006 -7.687 0.000 -0.056 -0.033\n==============================================================================\nOmnibus: 0.566 Durbin-Watson: 0.067\nProb(Omnibus): 0.753 Jarque-Bera (JB): 0.676\nSkew: 0.176 Prob(JB): 0.713\nKurtosis: 2.746 Cond. No. 15.5\n==============================================================================\n\nNotes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "'''plot old CAPE, new CAPE, and forward 10-year return'''\nplt.figure(figsize = [12, 9])\nplt.plot(CAPE[['CAPE','new_CAPE']], label = ['CAPE', 'new_CAPE'])\nplt.legend(loc = 'upper left', fontsize = 14)\nplt.twinx()\nplt.plot(CAPE['Ret'], c = 'r', label = 'Forward Ret')\nplt.legend(fontsize = 14)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecafe334f386f378f3406eba02f9481e4d3e72e1
10,011
ipynb
Jupyter Notebook
Parcial 1/Tarea 3.1_BetsyTorres.ipynb
BetsyTorres/Tareas-Cuantitativas
34ac2140697447ae71184419f2d54f5b0867cceb
[ "MIT" ]
null
null
null
Parcial 1/Tarea 3.1_BetsyTorres.ipynb
BetsyTorres/Tareas-Cuantitativas
34ac2140697447ae71184419f2d54f5b0867cceb
[ "MIT" ]
null
null
null
Parcial 1/Tarea 3.1_BetsyTorres.ipynb
BetsyTorres/Tareas-Cuantitativas
34ac2140697447ae71184419f2d54f5b0867cceb
[ "MIT" ]
null
null
null
28.359773
196
0.473879
[ [ [ "# Esta tarea fue realizada en equipo por Betsy Torres y Mariana Briones\n\n# Tarea 3.1\n \nCompre una acción inicial que no pague dividendos y estime los resultados esperados usando la Teoría de las finanzas corporativas del factor X estocástico. Asuma una distribución normal. \n- Método analítico \n- Simule en python para probar la precisión.", "_____no_output_____" ], [ "### Importar librerias", "_____no_output_____" ] ], [ [ "import time\nimport scipy.stats as st\nimport pandas as pd\npd.core.common.is_list_like = pd.api.types.is_list_like\n#import warnings\nimport numpy as np", "_____no_output_____" ] ], [ [ "### Funciones para ajustar distribuciones", "_____no_output_____" ] ], [ [ "def best_fit_distribution(data, bins=200, ax=None):\n \"\"\"Model data by finding best fit distribution to data\"\"\"\n # Get histogram of original data\n y, x = np.histogram(data, bins=bins, density=True)\n x = (x + np.roll(x, -1))[:-1] / 2.0\n\n # Distributions to check\n DISTRIBUTIONS = [ \n st.gennorm,st.genexpon,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf,\n st.nct,st.norm,st.powerlognorm, st.uniform\n ]\n\n # Best holders\n best_distribution = st.norm\n best_params = (0.0, 1.0)\n best_sse = np.inf\n\n # Estimate distribution parameters from data\n for distribution in DISTRIBUTIONS:\n\n # Try to fit the distribution\n try:\n # Ignore warnings from data that can't be fit\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n\n # fit dist to data\n params = distribution.fit(data)\n\n # Separate parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Calculate fitted PDF and error with fit in distribution\n pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\n sse = np.sum(np.power(y - pdf, 2.0))\n\n # if axis pass in add to plot\n try:\n if ax:\n pd.Series(pdf, x).plot(ax=ax)\n \n except Exception:\n pass\n\n # identify if this distribution is better\n if best_sse > sse > 0:\n best_distribution = distribution\n best_params = params\n best_sse = sse\n\n except Exception:\n pass\n\n return (best_distribution.name, best_params)\n\ndef make_pdf(dist, params, size=10000):\n \"\"\"Generate distributions's Probability Distribution Function \"\"\"\n\n # Separate parts of parameters\n arg = params[:-2]\n loc = params[-2]\n scale = params[-1]\n\n # Get sane start and end points of distribution\n start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)\n end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)\n\n # Build PDF and turn into pandas Series\n x = np.linspace(start, end, size)\n y = dist.pdf(x, loc=loc, scale=scale, *arg)\n pdf = pd.Series(y, x)\n\n return pdf", "_____no_output_____" ] ], [ [ "### Obtención de Beta y parametros de distribución normal", "_____no_output_____" ] ], [ [ "Raw_nas = pd.read_csv('^IXIC.csv')\nnas = Raw_nas['Adj Close']\nnas = nas[1:].values/nas[:-1].values -1\nnas = pd.DataFrame(nas,columns=['Adj Close'])\n\nRaw_sp = pd.read_csv('SPOT.csv')\nsp = Raw_sp['Adj Close']\nsp = sp[1:].values/sp[:-1].values -1\nsp = pd.DataFrame(sp,columns=['Adj Close'])\n\nBeta = nas['Adj Close'].corr(sp['Adj Close'])\nprint('Beta: ' + str(Beta))\nbest_fit_sp, sp_params = best_fit_distribution(sp, 200)\nprint('SPOT ~ N(' + str(sp_params[0]*252) + ', ' + str(sp_params[1]*252**0.5) + ')')\nbest_fit_nas, nas_params = best_fit_distribution(nas, 200)\nprint('NASDAQ ~ N(' + str(nas_params[0]*252) + ', ' + str(nas_params[1]*252**0.5) + 
')')\n\nprint('\\n\\n')", "Beta: -0.044918330261616256\nSPOT ~ N(0.0, 15.874507866387544)\nNASDAQ ~ N(0.0, 15.874507866387544)\n\n\n\n" ], [ "best_fit_sp, sp_params", "_____no_output_____" ], [ "best_fit_nas, nas_params", "_____no_output_____" ] ], [ [ "### Resolver analíticamente", "_____no_output_____" ], [ "$$ r = \\alpha + \\beta (m - rf) + rf $$\n\n$$ E[r] = E[\\alpha + \\beta (m - rf) + rf] $$\n$$ E[r] = E[\\alpha + \\beta (m - rf) + rf] $$\n$$ E[r] = E[\\alpha] + E[\\beta (m - rf)] + E[rf] $$\n$$ E[r] = E[\\alpha] + \\beta E[m - rf] + E[rf] $$\n$$ E[r] = E[\\alpha] + \\beta E[m] - \\beta E[rf] + E[rf] $$\n$$ E[r] = E[\\alpha] + \\beta E[m] - \\beta * rf + rf $$", "_____no_output_____" ], [ "$$ r = \\alpha + \\beta (m - rf) + rf $$\n\n$$ Var[r] = Var [\\alpha + \\beta (m - rf) + rf] $$\n$$ Var[r] = Var[\\alpha] + Var[\\beta (m - rf)] + Var[rf] + 2 *Cov(\\alpha,m)$$\n$$ Var[r] = Var[\\alpha] + \\beta^2 Var[m - rf] + Var[rf] + 2 *Cov(\\alpha,m)$$\n$$ Var[r] = Var[\\alpha] + \\beta^2 Var[m] - \\beta^2 Var[rf] + Var[rf] + 2 *Cov(\\alpha,m)$$\n$$ Var[r] = Var[\\alpha] + \\beta^2 Var[m] - \\beta^2 * 0 + 0 + 2 *Cov(\\alpha,m)$$\n$$ Var[r] = Var[\\alpha] + \\beta^2 Var[m] + 2 *Cov(\\alpha,m)$$\n$$ Var[r] = Var[\\alpha] + \\beta^2 Var[m] + 2 * \\sqrt{Var[\\alpha]*Var[m]}*Corr[\\alpha,m]$$\n\n", "_____no_output_____" ], [ "### Resolver con simulación", "_____no_output_____" ] ], [ [ "t0 = time.time()\nn=10000\n\nrf = 0.03\nalfa = pd.DataFrame(np.random.normal(loc = sp_params[0]*252, scale = np.sqrt(sp_params[1]*np.sqrt(252)), size=n))\nalfa.columns = ['Alfa']\n\nm = pd.DataFrame(np.random.normal(loc = nas_params[0]*252, scale = np.sqrt(nas_params[1]*np.sqrt(252)), size=n))\nm.columns = ['Market']\n\nR=pd.DataFrame(np.zeros(n))\ni=0\nfor i in range(n):\n R.iloc[i,0]=alfa.iloc[i,0] + Beta*m.iloc[i,0]-Beta*rf+ rf\n\nprint(\"Alfa (spot)\")\nprint(alfa.mean().values)\nprint(alfa.var().values)\n\nprint(\"m (ixic)\")\nprint(m.mean().values)\nprint(m.var().values)\n\nprint(\"R\")\nprint(R.mean().values)\nprint(R.var().values)\n\n\nt1 = time.time()\nprint('Elapsed time: ' + str(np.round(t1-t0, 3)) + ' sec.')", "Alfa (spot)\n[-0.05059922]\n[16.18162989]\nm (ixic)\n[0.00481483]\n[16.03777769]\nR\n[-0.01946794]\n[16.1809358]\nElapsed time: 8.133 sec.\n" ], [ "SumZ = alfa.Alfa + m.Market\nz = pd.DataFrame(np.cov(alfa, m, rowvar=0)).iloc[0,1] #-0.8*.03+0.03\n#c = np.cov(z.T)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
ecafe44dfc11b6c6aca16e7f71a68242e54477a4
26,866
ipynb
Jupyter Notebook
P1.ipynb
OngWheeCheng/CarND-LaneLines-P1
09ac3475896970e69ea69c56911baf71bb7f5a0b
[ "MIT" ]
null
null
null
P1.ipynb
OngWheeCheng/CarND-LaneLines-P1
09ac3475896970e69ea69c56911baf71bb7f5a0b
[ "MIT" ]
null
null
null
P1.ipynb
OngWheeCheng/CarND-LaneLines-P1
09ac3475896970e69ea69c56911baf71bb7f5a0b
[ "MIT" ]
null
null
null
40.339339
647
0.601578
[ [ [ "# Self-Driving Car Engineer Nanodegree\n\n\n## Project: **Finding Lane Lines on the Road** \n***\nIn this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip \"raw-lines-example.mp4\" (also contained in this repository) to see what the output should look like after using the helper functions below. \n\nOnce you have a result that looks roughly like \"raw-lines-example.mp4\", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.\n\nIn addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.\n\n---\nLet's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the \"play\" button above) to display the image.\n\n**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the \"Kernel\" menu above and selecting \"Restart & Clear Output\".**\n\n---", "_____no_output_____" ], [ "**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**\n\n---\n\n<figure>\n <img src=\"examples/line-segments-example.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> \n </figcaption>\n</figure>\n <p></p> \n<figure>\n <img src=\"examples/laneLines_thirdPass.jpg\" width=\"380\" alt=\"Combined Image\" />\n <figcaption>\n <p></p> \n <p style=\"text-align: center;\"> Your goal is to connect/average/extrapolate line segments to get output like this</p> \n </figcaption>\n</figure>", "_____no_output_____" ], [ "**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, see [this forum post](https://carnd-forums.udacity.com/cq/viewquestion.action?spaceKey=CAR&id=29496372&questionTitle=finding-lanes---import-cv2-fails-even-though-python-in-the-terminal-window-has-no-problem-with-import-cv2) for more troubleshooting tips.** ", "_____no_output_____" ], [ "## Import Packages", "_____no_output_____" ] ], [ [ "#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport os\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Read in an Image", "_____no_output_____" ] ], [ [ "#reading in an image\n#image = mpimg.imread('test_images/solidWhiteRight.jpg')\n\n#printing out some stats and plotting\n#print('This image is:', type(image), 'with dimensions:', image.shape)\n#plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')", "_____no_output_____" ] ], [ [ "## Ideas for Lane Detection Pipeline", "_____no_output_____" ], [ "**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**\n\n`cv2.inRange()` for color selection \n`cv2.fillPoly()` for regions selection \n`cv2.line()` to draw lines on an image given endpoints \n`cv2.addWeighted()` to coadd / overlay two images\n`cv2.cvtColor()` to grayscale or change color\n`cv2.imwrite()` to output images to file \n`cv2.bitwise_and()` to apply a mask to an image\n\n**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**", "_____no_output_____" ], [ "## Helper Functions", "_____no_output_____" ], [ "Below are some helper functions to help get you started. They should look familiar from the lesson!", "_____no_output_____" ] ], [ [ "import math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n \n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img) \n \n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n #filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=8):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to \n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4). \n \n Think about things like separating line segments by their \n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of \n the lines and extrapolate to the top and bottom of the lane.\n \n This function draws `lines` with `color` and `thickness`. \n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n \"\"\"\n \n yMin = img.shape[0]\n yMax = img.shape[0]\n \n leftGradient = []\n leftXCoord = []\n leftYCoord = []\n \n rightGradient = []\n rightXCoord = []\n rightYCoord = []\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n m, C = np.polyfit((x1,x2), (y1,y2), 1)\n yMin = min(y1, y2, yMin)\n if m > 0: # +ve slope\n leftGradient += [m]\n leftXCoord += [x1, x2]\n leftYCoord += [y1, y2]\n else: # -ve slope\n rightGradient += [m]\n rightXCoord += [x1, x2]\n rightYCoord += [y1, y2]\n \n leftGradientMean = np.mean(leftGradient)\n leftXCoordMean = np.mean(leftXCoord)\n leftYCoordMean = np.mean(leftYCoord)\n leftCMean = leftYCoordMean - (leftGradientMean * leftXCoordMean) # C = y - mx\n\n rightGradientMean = np.mean(rightGradient)\n rightXCoordMean = np.mean(rightXCoord)\n rightYCoordMean = np.mean(rightYCoord)\n rightCMean = rightYCoordMean - (rightGradientMean * rightXCoordMean) # C = y - mx\n \n if ((len(leftGradient) > 0) and (len(rightGradient) > 0)):\n # x = (y - C) / m\n leftX1Coord = int((yMin - leftCMean) / leftGradientMean)\n leftX2Coord = int((yMax - leftCMean) / leftGradientMean)\n rightX1Coord = int((yMin - rightCMean) / rightGradientMean)\n rightX2Coord = int((yMax - rightCMean) / rightGradientMean)\n \n cv2.line(img, (leftX1Coord, yMin), (leftX2Coord, yMax), color, thickness)\n cv2.line(img, (rightX1Coord, yMin), (rightX2Coord, yMax), color, thickness)\n \ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n \n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img 
* β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, λ)", "_____no_output_____" ], [ "def convertYellowToWhite(img):\n # Convert BGR to HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # define range of yellow color in HSV\n # ref: http://www.workwithcolor.com/yellow-color-hue-range-01.htm\n lowerYellow = np.array([52, 100, 50]) #Golden Yellow\n upperYellow = np.array([60, 100, 91]) #Cream\n # Threshold the HSV image to get only yellow colors\n mask = cv2.inRange(hsv, lowerYellow, upperYellow) \n # Bitwise-AND mask and original image\n maskedYellow = cv2.addWeighted(img, 1, cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB), 1, 0) \n return maskedYellow\n\ndef edgeDetection(img, low_threshold):\n grayImage = grayscale(img)\n #plt.imshow(grayImage, cmap='Greys_r')\n kernelSize = 5\n blurGray = gaussian_blur(grayImage, kernelSize)\n #plt.figure()\n #plt.imshow(blurGray, cmap='Greys_r')\n lowThreshold = low_threshold\n highThreshold = low_threshold * 2\n edgeImage = canny(blurGray, lowThreshold, highThreshold)\n #plt.figure()\n #plt.imshow(edgeImage, cmap='Greys_r')\n return edgeImage \n\ndef regionMask(xSize, ySize, xOffset, yOffset):\n xDelta = xOffset/2\n # bottom left, top left, top right, bottom right\n vertices = np.array([[(xOffset, ySize), (xSize/2 - xDelta, ySize/2 + yOffset), \n (xSize/2 + xDelta, ySize/2 + yOffset), (xSize - xOffset/2, ySize)]], np.int32)\n return vertices\n\ndef laneLines(edgeimg, img): # img = edge-detected image\n # Define the Hough transform parameters\n # Make a blank the same size as our image to draw on\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi/180 # angular resolution in radians of the Hough grid\n threshold = 68 # minimum number of votes (intersections in Hough grid cell)\n minLineLen = 25 # minimum number of pixels making up a line\n maxLineGap = 35 # maximum gap in pixels between connectable line segments\n\n vertices = regionMask(edgeimg.shape[1], edgeimg.shape[0], 80, 60)\n maskImage = region_of_interest(edgeimg, vertices)\n #plt.figure()\n #plt.imshow(maskImage, cmap='Greys_r')\n \n # Run Hough transform on edge detected image\n houghImage = hough_lines(maskImage, rho, theta, threshold, minLineLen, maxLineGap)\n #plt.figure()\n #plt.imshow(houghImage)\n \n # Draw the lines on the original image\n comboImage = weighted_img(houghImage, img)\n return comboImage", "_____no_output_____" ] ], [ [ "## Test Images\n\nBuild your pipeline to work on the images in the directory \"test_images\" \n**You should make sure your pipeline works well on these images before you try the videos.**", "_____no_output_____" ], [ "## Build a Lane Finding Pipeline\n\n", "_____no_output_____" ], [ "Build the pipeline and run your solution on all test_images. 
Make copies into the `test_images_output` directory, and you can use the images in your writeup report.\n\nTry tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.", "_____no_output_____" ] ], [ [ "# create directory\noutPath = \"test_images_output\"\nif not os.path.exists(outPath):\n os.mkdir(outPath)\n \nfor file in os.listdir(\"test_images/\"):\n image = mpimg.imread(os.path.join('test_images/',file))\n if 'Yellow' in file:\n temp = np.copy(image)\n img = convertYellowToWhite(temp)\n else:\n img = np.copy(image)\n \n edgeImage = edgeDetection(img, 100)\n laneLineImage = laneLines(edgeImage, img)\n\n # Display the image \n #plt.figure()\n #plt.imshow(laneLineImage)\n \n # save the image\n filename = file.split('.')[0] + '.png' \n plt.imsave(os.path.join(outPath, filename), laneLineImage) \n", "_____no_output_____" ] ], [ [ "## Test on Videos\n\nYou know what's cooler than drawing lanes over images? Drawing lanes over video!\n\nWe can test our solution on two provided videos:\n\n`solidWhiteRight.mp4`\n\n`solidYellowLeft.mp4`\n\n**Note: if you get an `import error` when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, check out [this forum post](https://carnd-forums.udacity.com/questions/22677062/answers/22677109) for more troubleshooting tips.**\n\n**If you get an error that looks like this:**\n```\nNeedDownloadError: Need ffmpeg exe. \nYou can download it by calling: \nimageio.plugins.ffmpeg.download()\n```\n**Follow the instructions in the error message and check out [this forum post](https://carnd-forums.udacity.com/display/CAR/questions/26218840/import-videofileclip-error) for more troubleshooting tips across operating systems.**", "_____no_output_____" ] ], [ [ "# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML", "_____no_output_____" ], [ "def process_image(img):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image where lines are drawn on lanes)\n edgeImage = edgeDetection(img, 100)\n laneLineImage = laneLines(edgeImage, img)\n return laneLineImage\n\ndef process_yellow_image(img):\n yellow = convertYellowToWhite(img)\n return process_image(yellow)", "_____no_output_____" ] ], [ [ "Let's try the one with the solid white lane on the right first ...", "_____no_output_____" ] ], [ [ "# create directory\noutVideoPath = \"test_videos_output\"\nif not os.path.exists(outVideoPath):\n os.mkdir(outVideoPath)\n\nfilename = \"solidWhiteRight.mp4\"\nwhite_output = os.path.join(outVideoPath, filename)\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n#clip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\").subclip(0,5)\nclip1 = VideoFileClip(\"test_videos/solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\n%time white_clip.write_videofile(white_output, audio=False)", "_____no_output_____" ] ], [ [ "Play the video inline, 
or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.", "_____no_output_____" ] ], [ [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(white_output))", "_____no_output_____" ] ], [ [ "## Improve the draw_lines() function\n\n**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video \"P1_example.mp4\".**\n\n**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**", "_____no_output_____" ], [ "Now for the one with the solid yellow lane on the left. This one's more tricky!", "_____no_output_____" ] ], [ [ "filename = \"solidYellowLeft.mp4\"\nyellow_output = os.path.join(outVideoPath, filename)\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)\nclip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_yellow_image)\n%time yellow_clip.write_videofile(yellow_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(yellow_output))", "_____no_output_____" ] ], [ [ "## Writeup and Submission\n\nIf you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.\n", "_____no_output_____" ], [ "## Optional Challenge\n\nTry your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? 
If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!", "_____no_output_____" ] ], [ [ "challenge_output = 'test_videos_output/challenge.mp4'\n## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video\n## To do so add .subclip(start_second,end_second) to the end of the line below\n## Where start_second and end_second are integer values representing the start and end of the subclip\n## You may also uncomment the following line for a subclip of the first 5 seconds\n##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)\nclip3 = VideoFileClip('test_videos/challenge.mp4')\nchallenge_clip = clip3.fl_image(process_image)\n%time challenge_clip.write_videofile(challenge_output, audio=False)", "_____no_output_____" ], [ "HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(challenge_output))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
ecafee16c1f6346384af600c8b215bf2b9373b8c
70,537
ipynb
Jupyter Notebook
Starter_Code/.ipynb_checkpoints/lstm_stock_predictor_fng-checkpoint.ipynb
amejiali/deep_learning-homework
4b1e96dd56beff2d8a798d21def48884889d10c5
[ "ADSL" ]
null
null
null
Starter_Code/.ipynb_checkpoints/lstm_stock_predictor_fng-checkpoint.ipynb
amejiali/deep_learning-homework
4b1e96dd56beff2d8a798d21def48884889d10c5
[ "ADSL" ]
null
null
null
Starter_Code/.ipynb_checkpoints/lstm_stock_predictor_fng-checkpoint.ipynb
amejiali/deep_learning-homework
4b1e96dd56beff2d8a798d21def48884889d10c5
[ "ADSL" ]
null
null
null
68.085907
39,832
0.735642
[ [ [ "# LSTM Stock Predictor Using Fear and Greed Index\n\nIn this notebook, you will build and train a custom LSTM RNN that uses a 10 day window of Bitcoin fear and greed index values to predict the 11th day closing price. \n\nYou will need to:\n\n1. Prepare the data for training and testing\n2. Build and train a custom LSTM RNN\n3. Evaluate the performance of the model", "_____no_output_____" ], [ "## Data Preparation\n\nIn this section, you will need to prepare the training and testing data for the model. The model will use a rolling 10 day window to predict the 11th day closing price.\n\nYou will need to:\n1. Use the `window_data` function to generate the X and y values for the model.\n2. Split the data into 70% training and 30% testing\n3. Apply the MinMaxScaler to the X and y values\n4. Reshape the X_train and X_test data for the model. Note: The required input format for the LSTM is:\n\n```python\nreshape((X_train.shape[0], X_train.shape[1], 1))\n```", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport hvplot.pandas\n\n%matplotlib inline", "_____no_output_____" ], [ "# Set the random seed for reproducibility\n# Note: This is for the homework solution, but it is good practice to comment this out and run multiple experiments to evaluate your model\nfrom numpy.random import seed\nseed(1)\nfrom tensorflow import random\nrandom.set_seed(2)", "_____no_output_____" ], [ "# Load the fear and greed sentiment data for Bitcoin\ndf = pd.read_csv('btc_sentiment.csv', index_col=\"date\", infer_datetime_format=True, parse_dates=True)\ndf = df.drop(columns=\"fng_classification\")\ndf.head()", "_____no_output_____" ], [ "# Load the historical closing prices for Bitcoin\ndf2 = pd.read_csv('btc_historic.csv', index_col=\"Date\", infer_datetime_format=True, parse_dates=True)['Close']\ndf2 = df2.sort_index()\ndf2.tail()", "_____no_output_____" ], [ "# Join the data into a single DataFrame\ndf = df.join(df2, how=\"inner\")\ndf.tail()", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# This function accepts the column number for the features (X) and the target (y)\n# It chunks the data up with a rolling window of Xt-n to predict Xt\n# It returns a numpy array of X any y\ndef window_data(df, window, feature_col_number, target_col_number):\n X = []\n y = []\n for i in range(len(df) - window - 1):\n features = df.iloc[i:(i + window), feature_col_number]\n target = df.iloc[(i + window), target_col_number]\n X.append(features)\n y.append(target)\n return np.array(X), np.array(y).reshape(-1, 1)", "_____no_output_____" ], [ "# Predict Closing Prices using a 10 day window of fear and greed index values and a target of the 11th day closing price\n# Try a window size anywhere from 1 to 10 and see how the model performance changes\nwindow_size = 10\n\n# Column index 1 is the `Close` column\nfeature_column = 0\ntarget_column = 0\nX, y = window_data(df, window_size, feature_column, target_column)\n\nprint (f\"X sample values:\\n{X[:5]} \\n\")\nprint (f\"y sample values:\\n{y[:5]}\")", "X sample values:\n[[30 15 40 24 11 8 36 30 44 54]\n [15 40 24 11 8 36 30 44 54 31]\n [40 24 11 8 36 30 44 54 31 42]\n [24 11 8 36 30 44 54 31 42 35]\n [11 8 36 30 44 54 31 42 35 55]] \n\ny sample values:\n[[31.]\n [42.]\n [35.]\n [55.]\n [71.]]\n" ], [ "# Use 70% of the data for training and the remainder for testing\n# YOUR CODE HERE!\nsplit = int(0.7 * len(X))\nX_train = X[: split - 1]\nX_test = X[split:]\ny_train = y[: split - 1]\ny_test = y[split:]", "_____no_output_____" ], [ "# Use 
MinMaxScaler to scale the data between 0 and 1. \n# YOUR CODE HERE!\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\nscaler.fit(X)\nX_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)\nscaler.fit(y)\ny_train = scaler.transform(y_train)\ny_test = scaler.transform(y_test)", "_____no_output_____" ], [ "# Reshape the features for the model\n# YOUR CODE HERE!\nX_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))\nX_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))\n\nprint (f\"X_train sample values:\\n{X_train[:5]} \\n\")\nprint (f\"X_test sample values:\\n{X_test[:5]}\")", "X_train sample values:\n[[[0.25287356]\n [0.08045977]\n [0.36781609]\n [0.18390805]\n [0.03448276]\n [0. ]\n [0.31395349]\n [0.24418605]\n [0.40697674]\n [0.52325581]]\n\n [[0.08045977]\n [0.36781609]\n [0.18390805]\n [0.03448276]\n [0. ]\n [0.32183908]\n [0.24418605]\n [0.40697674]\n [0.52325581]\n [0.25581395]]\n\n [[0.36781609]\n [0.18390805]\n [0.03448276]\n [0. ]\n [0.32183908]\n [0.25287356]\n [0.40697674]\n [0.52325581]\n [0.25581395]\n [0.38372093]]\n\n [[0.18390805]\n [0.03448276]\n [0. ]\n [0.32183908]\n [0.25287356]\n [0.4137931 ]\n [0.52325581]\n [0.25581395]\n [0.38372093]\n [0.30232558]]\n\n [[0.03448276]\n [0. ]\n [0.32183908]\n [0.25287356]\n [0.4137931 ]\n [0.52873563]\n [0.25581395]\n [0.38372093]\n [0.30232558]\n [0.53488372]]] \n\nX_test sample values:\n[[[0.36781609]\n [0.43678161]\n [0.34482759]\n [0.45977011]\n [0.45977011]\n [0.40229885]\n [0.39534884]\n [0.37209302]\n [0.3372093 ]\n [0.62790698]]\n\n [[0.43678161]\n [0.34482759]\n [0.45977011]\n [0.45977011]\n [0.40229885]\n [0.40229885]\n [0.37209302]\n [0.3372093 ]\n [0.62790698]\n [0.65116279]]\n\n [[0.34482759]\n [0.45977011]\n [0.45977011]\n [0.40229885]\n [0.40229885]\n [0.37931034]\n [0.3372093 ]\n [0.62790698]\n [0.65116279]\n [0.58139535]]\n\n [[0.45977011]\n [0.45977011]\n [0.40229885]\n [0.40229885]\n [0.37931034]\n [0.34482759]\n [0.62790698]\n [0.65116279]\n [0.58139535]\n [0.58139535]]\n\n [[0.45977011]\n [0.40229885]\n [0.40229885]\n [0.37931034]\n [0.34482759]\n [0.63218391]\n [0.65116279]\n [0.58139535]\n [0.58139535]\n [0.60465116]]]\n" ] ], [ [ "---", "_____no_output_____" ], [ "## Build and Train the LSTM RNN\n\nIn this section, you will design a custom LSTM RNN and fit (train) it using the training data.\n\nYou will need to:\n1. Define the model architecture\n2. Compile the model\n3. Fit the model to the training data\n\n### Hints:\nYou will want to use the same model architecture and random seed for both notebooks. This is necessary to accurately compare the performance of the FNG model vs the closing price model. ", "_____no_output_____" ] ], [ [ "from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense, Dropout", "_____no_output_____" ], [ "# Build the LSTM model. \n# The return sequences need to be set to True if you are adding additional LSTM layers, but \n# You don't have to do this for the final layer. 
\n# YOUR CODE HERE!\n\nmodel = Sequential()\n\nnumber_units = 10\ndropout_fraction = 0.2\n\n# Layer 1\nmodel.add(LSTM(\n units=number_units,\n return_sequences=True,\n input_shape=(X_train.shape[1], 1))\n )\nmodel.add(Dropout(dropout_fraction))\n# Layer 2\nmodel.add(LSTM(units=number_units, return_sequences=True))\nmodel.add(Dropout(dropout_fraction))\n# Layer 3\nmodel.add(LSTM(units=number_units))\nmodel.add(Dropout(dropout_fraction))\n# Output layer\nmodel.add(Dense(1))\n", "_____no_output_____" ], [ "# Compile the model\n# YOUR CODE HERE!\nmodel.compile(optimizer=\"adam\", loss=\"mean_squared_error\")", "_____no_output_____" ], [ "# Summarize the model\n# YOUR CODE HERE!\nmodel.summary()", "Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nlstm_9 (LSTM) (None, 10, 10) 480 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 10, 10) 0 \n_________________________________________________________________\nlstm_10 (LSTM) (None, 10, 10) 840 \n_________________________________________________________________\ndropout_10 (Dropout) (None, 10, 10) 0 \n_________________________________________________________________\nlstm_11 (LSTM) (None, 10) 840 \n_________________________________________________________________\ndropout_11 (Dropout) (None, 10) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 11 \n=================================================================\nTotal params: 2,171\nTrainable params: 2,171\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# Train the model\n# Use at least 10 epochs\n# Do not shuffle the data\n# Experiement with the batch size, but a smaller batch size is recommended\n# YOUR CODE HERE!\nmodel.fit(X_train, y_train, epochs=50, shuffle=False, batch_size=1, verbose=1)\n", "Train on 371 samples\nEpoch 1/50\n371/371 [==============================] - 17s 47ms/sample - loss: 0.0316\nEpoch 2/50\n371/371 [==============================] - 10s 28ms/sample - loss: 0.0233\nEpoch 3/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0234\nEpoch 4/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0201\nEpoch 5/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0187\nEpoch 6/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0170\nEpoch 7/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0136\nEpoch 8/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0105\nEpoch 9/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0104\nEpoch 10/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0092\nEpoch 11/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0088\nEpoch 12/50\n371/371 [==============================] - 11s 29ms/sample - loss: 0.0096\nEpoch 13/50\n371/371 [==============================] - 9s 26ms/sample - loss: 0.0091\nEpoch 14/50\n371/371 [==============================] - 9s 25ms/sample - loss: 0.0089\nEpoch 15/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.00850s - lo\nEpoch 16/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0076\nEpoch 17/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0078\nEpoch 18/50\n371/371 
[==============================] - 9s 24ms/sample - loss: 0.0080\nEpoch 19/50\n371/371 [==============================] - 10s 26ms/sample - loss: 0.0077\nEpoch 20/50\n371/371 [==============================] - 8s 21ms/sample - loss: 0.0080\nEpoch 21/50\n371/371 [==============================] - 8s 21ms/sample - loss: 0.0081\nEpoch 22/50\n371/371 [==============================] - 8s 21ms/sample - loss: 0.0079\nEpoch 23/50\n371/371 [==============================] - 8s 22ms/sample - loss: 0.0074\nEpoch 24/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0067\nEpoch 25/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0068\nEpoch 26/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0074\nEpoch 27/50\n371/371 [==============================] - 9s 25ms/sample - loss: 0.0075\nEpoch 28/50\n371/371 [==============================] - 10s 28ms/sample - loss: 0.0068\nEpoch 29/50\n371/371 [==============================] - 10s 26ms/sample - loss: 0.0070\nEpoch 30/50\n371/371 [==============================] - 9s 25ms/sample - loss: 0.0070\nEpoch 31/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0072\nEpoch 32/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0068\nEpoch 33/50\n371/371 [==============================] - 10s 26ms/sample - loss: 0.0065\nEpoch 34/50\n371/371 [==============================] - 10s 27ms/sample - loss: 0.0067\nEpoch 35/50\n371/371 [==============================] - 9s 25ms/sample - loss: 0.0066\nEpoch 36/50\n371/371 [==============================] - 11s 29ms/sample - loss: 0.0073\nEpoch 37/50\n371/371 [==============================] - 11s 29ms/sample - loss: 0.0068\nEpoch 38/50\n371/371 [==============================] - 9s 25ms/sample - loss: 0.0072\nEpoch 39/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0068\nEpoch 40/50\n371/371 [==============================] - 10s 27ms/sample - loss: 0.0063\nEpoch 41/50\n371/371 [==============================] - 9s 23ms/sample - loss: 0.0067\nEpoch 42/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0068\nEpoch 43/50\n371/371 [==============================] - 9s 24ms/sample - loss: 0.0062\nEpoch 44/50\n371/371 [==============================] - 7s 19ms/sample - loss: 0.0066\nEpoch 45/50\n371/371 [==============================] - 7s 18ms/sample - loss: 0.0068\nEpoch 46/50\n371/371 [==============================] - 6s 17ms/sample - loss: 0.0061\nEpoch 47/50\n371/371 [==============================] - 7s 18ms/sample - loss: 0.0065\nEpoch 48/50\n371/371 [==============================] - 6s 17ms/sample - loss: 0.0065\nEpoch 49/50\n371/371 [==============================] - 6s 17ms/sample - loss: 0.0064\nEpoch 50/50\n371/371 [==============================] - 7s 18ms/sample - loss: 0.0068\n" ] ], [ [ "---", "_____no_output_____" ], [ "## Model Performance\n\nIn this section, you will evaluate the model using the test data. \n\nYou will need to:\n1. Evaluate the model using the `X_test` and `y_test` data.\n2. Use the X_test data to make predictions\n3. Create a DataFrame of Real (y_test) vs predicted values. \n4. 
Plot the Real vs predicted values as a line chart\n\n### Hints\nRemember to apply the `inverse_transform` function to the predicted and y_test values to recover the actual closing prices.", "_____no_output_____" ] ], [ [ "# Evaluate the model\n# YOUR CODE HERE!\nmodel.evaluate(X_test, y_test)", "160/160 [==============================] - 2s 9ms/sample - loss: 0.0237\n" ], [ "# Make some predictions\n# YOUR CODE HERE!\npredicted = model.predict(X_test)", "_____no_output_____" ], [ "# Recover the original prices instead of the scaled version\npredicted_prices = scaler.inverse_transform(predicted)\nreal_prices = scaler.inverse_transform(y_test.reshape(-1, 1))", "_____no_output_____" ], [ "# Create a DataFrame of Real and Predicted values\nstocks = pd.DataFrame({\n \"Real\": real_prices.ravel(),\n \"Predicted\": predicted_prices.ravel()\n})\nstocks.head()", "_____no_output_____" ], [ "# Plot the real vs predicted values as a line chart\n# YOUR CODE HERE!\nstocks.plot()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ecb00c343dba6760d83c7227c33c4eb3d664997b
11,511
ipynb
Jupyter Notebook
ITER1/data_create_to_tfrecords_iteration_final_basic_word_tokenized_500_test.ipynb
jpbarrett13/MAG-model-code
5eb204572312a25c69b09001a9025e737f911dbb
[ "MIT" ]
null
null
null
ITER1/data_create_to_tfrecords_iteration_final_basic_word_tokenized_500_test.ipynb
jpbarrett13/MAG-model-code
5eb204572312a25c69b09001a9025e737f911dbb
[ "MIT" ]
null
null
null
ITER1/data_create_to_tfrecords_iteration_final_basic_word_tokenized_500_test.ipynb
jpbarrett13/MAG-model-code
5eb204572312a25c69b09001a9025e737f911dbb
[ "MIT" ]
null
null
null
33.955752
260
0.57545
[ [ [ "# !pip install tensorflow==2.4.1\n# !pip install transformers\n# !pip install pyarrow", "_____no_output_____" ], [ "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\nfrom math import ceil", "_____no_output_____" ], [ "def check_targets(targs):\n if targs[0] == -1:\n return 1\n else:\n return 0", "_____no_output_____" ], [ "def create_tfrecords_dataset(data, iter_num, dataset_type='train'):\n# paper_title = tf.keras.preprocessing.sequence.pad_sequences(data['paper_title_tok'].to_list(), \n# maxlen=512, dtype='int64', \n# padding='post', truncating='post', value=0)\n data['no_target'] = data['target_tok'].apply(check_targets)\n data = data[data['no_target']==0].copy()\n \n paper_title = tf.ragged.constant(data['paper_title_tok'].to_list())\n \n targets = tf.keras.preprocessing.sequence.pad_sequences(data['target_tok'].to_list(), maxlen=20, \n dtype='int64', padding='post', \n truncating='post', value=0)\n\n ds = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(paper_title),\n tf.data.Dataset.from_tensor_slices(data['journal_tok'].to_list()),\n tf.data.Dataset.from_tensor_slices(data['doc_type_tok'].to_list()),\n tf.data.Dataset.from_tensor_slices(targets)))\n \n serialized_features_dataset = ds.map(tf_serialize_example)\n \n filename = f\"./iteration_final/basic_word_tokenized_500_test/tfrecords/{dataset_type}/{str(iter_num).zfill(4)}.tfrecord\"\n writer = tf.data.experimental.TFRecordWriter(filename)\n writer.write(serialized_features_dataset)", "_____no_output_____" ], [ "def tf_serialize_example(f0, f1, f2, f3):\n tf_string = tf.py_function(serialize_example, (f0, f1, f2, f3), tf.string)\n return tf.reshape(tf_string, ())", "_____no_output_____" ], [ "def serialize_example(paper_title, journal, doc_type, targets):\n paper_title_list = tf.train.Int64List(value=paper_title.numpy().tolist())\n journal_list = tf.train.Int64List(value=journal.numpy().tolist())\n doc_type_list = tf.train.Int64List(value=doc_type.numpy().tolist())\n targets_list = tf.train.Int64List(value=targets.numpy().tolist())\n \n paper_title_feature = tf.train.Feature(int64_list = paper_title_list)\n journal_feature = tf.train.Feature(int64_list = journal_list)\n doc_type_feature = tf.train.Feature(int64_list = doc_type_list)\n targets_feature = tf.train.Feature(int64_list = targets_list)\n \n features_for_example = {\n 'paper_title': paper_title_feature,\n 'journal': journal_feature,\n 'doc_type': doc_type_feature,\n 'targets': targets_feature\n }\n \n example_proto = tf.train.Example(features=tf.train.Features(feature=features_for_example))\n \n return example_proto.SerializeToString()", "_____no_output_____" ], [ "def turn_part_file_into_tfrecord(base_path, dataset_type='train'):\n file_list = [x for x in os.listdir(f\"{base_path}{dataset_type}\") if x.endswith('parquet')]\n file_list.sort()\n print(f\"There are {len(file_list)} files for {dataset_type}\")\n for i, file_name in enumerate(file_list):\n data = pd.read_parquet(f\"{base_path}{dataset_type}/{file_name}\")\n print(f\"_____File number: {i} ({data.shape[0]} samples)\")\n create_tfrecords_dataset(data, i, dataset_type)", "_____no_output_____" ], [ "base_file_path = f\"./iteration_final/basic_word_tokenized_500_test/tokenized_data/\"", "_____no_output_____" ] ], [ [ "#### Without padding", "_____no_output_____" ] ], [ [ "%%time\nturn_part_file_into_tfrecord(base_file_path, 'train')", "There are 50 files for train\n_____File number: 0 (2109134 samples)\n" ], [ "%%time\nturn_part_file_into_tfrecord(base_file_path, 'val')", 
"There are 10 files for val\n_____File number: 0 (61298 samples)\n_____File number: 1 (62022 samples)\n_____File number: 2 (30502 samples)\n_____File number: 3 (31457 samples)\n_____File number: 4 (26913 samples)\n_____File number: 5 (29725 samples)\n_____File number: 6 (60653 samples)\n_____File number: 7 (30778 samples)\n_____File number: 8 (55072 samples)\n_____File number: 9 (59489 samples)\nCPU times: user 2min 35s, sys: 11 s, total: 2min 46s\nWall time: 2min 20s\n" ], [ "%%time\nturn_part_file_into_tfrecord(base_file_path, 'test')", "There are 5 files for test\n_____File number: 0 (20395 samples)\n_____File number: 1 (24338 samples)\n_____File number: 2 (24756 samples)\n_____File number: 3 (17072 samples)\n_____File number: 4 (10677 samples)\nCPU times: user 34.3 s, sys: 2.5 s, total: 36.8 s\nWall time: 30.8 s\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ecb035d096d61d4cc39e9fe9b0694cc81a57533f
108,261
ipynb
Jupyter Notebook
Week 1/SLU02 - Data Structures/Learning notebook.ipynb
RicSegundo/ds-prep-course
75a9a8ff11628c07d37094d8f15026e318ac5834
[ "MIT" ]
1
2021-02-08T20:38:23.000Z
2021-02-08T20:38:23.000Z
Week 1/SLU02 - Data Structures/Learning notebook.ipynb
RicSegundo/ds-prep-course
75a9a8ff11628c07d37094d8f15026e318ac5834
[ "MIT" ]
null
null
null
Week 1/SLU02 - Data Structures/Learning notebook.ipynb
RicSegundo/ds-prep-course
75a9a8ff11628c07d37094d8f15026e318ac5834
[ "MIT" ]
null
null
null
19.817133
462
0.492467
[ [ [ "# SLU02 - Data Structures", "_____no_output_____" ], [ "In this notebook we will be covering the following: \n \n- Data Structures - Tuples, Lists and Dictionaries \n- Creation of Data Structure\n- Length of a Data Structure \n- Immutability\n- Checking if elements are in a Data Structure\n- Replace, Append and Delete Operations in Data Structures\n- Conversion between Data Structure ", "_____no_output_____" ], [ "A [Data Structure](https://en.wikipedia.org/wiki/Data_structure) is a collection of data that enables data organization, management, and storage. In this notebook, we will learn three different types of data structures in python. Tuples, lists, and dictionaries. ", "_____no_output_____" ], [ "## 1 Data Structure - Tuple <a name=\"1\"></a>", "_____no_output_____" ], [ "# <center> (🐙, 🐷, 🐬, 🐞, 🐈, 🙉, 🐸, 🐓) </center>", "_____no_output_____" ], [ "### 1.1 Definition and Tuple Creation <a name=\"1.1\"></a>", "_____no_output_____" ], [ "#### 1.1.1 Definition <a name=\"1.1.1\"></a>", "_____no_output_____" ], [ "A Tuple is a Python data structure type that has a collection of data that once created cannot be changed. It is ordered and accepts duplicated elements. Elements on a tuple should be between brackets `()` and separated by commas `,`. A tuple can have as many elements as we want, and it can have elements of different types.", "_____no_output_____" ], [ "Let's create our first tuple with 5 elements.", "_____no_output_____" ] ], [ [ "this_tuple = (0, 1, 2, 3, 4)\nthis_tuple", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 1.1.2 Function `type()` and `isinstance()` <a name=\"1.1.2\"></a>", "_____no_output_____" ], [ "We have two ways of confirming that a variable is of a certain type. We can use `type()` function or `isinstance()` function. \n- __`type()`__ function receives a variable as an input and returns its type.\n- __`isinstance()`__ function receives a variable and a type variable as inputs. It returns `True` if the input variable matches the input type and `False` otherwise.", "_____no_output_____" ], [ "Let's start to check if the variable `this_tuple` is a tuple. Let's use the function __`type()`__ for it. ", "_____no_output_____" ] ], [ [ "this_tuple", "_____no_output_____" ], [ "type(this_tuple) #This function returns the variable type of this_tuple", "_____no_output_____" ] ], [ [ "The output of the cell above is a tuple. We have our confirmation that our variable `this_tuple` is a tuple.", "_____no_output_____" ], [ "Let's check again if the variable type is a tuple using __`isinstance()`__ function. If the output is `True` that means that our input variable matches the input type.", "_____no_output_____" ] ], [ [ "isinstance(this_tuple, tuple) ", "_____no_output_____" ] ], [ [ "The output of the cell above is `True`. Because the input type was a tuple, we have our confirmation that the input variable matches the input type.", "_____no_output_____" ], [ "__Note__: We will use these two functions during all the notebook, also for lists and dicts, not just for tuples.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### 1.1.3 Tuples creation <a name=\"1.1.3\"></a>", "_____no_output_____" ], [ "It is possible to create a tuple with elements of multiple types.", "_____no_output_____" ] ], [ [ "#types (float, str and boolean)\nthis_tuple = (1.5, \"hello\", True, \"book\")\nthis_tuple", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Tuples can have duplicated elements. 
Note that elements do not get reordered.", "_____no_output_____" ] ], [ [ "#a tuple with duplicated values\nthis_tuple = (1, 1, 3, 7, 7, 7, 5)\nthis_tuple", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Is it possible to have an empty tuple?", "_____no_output_____" ] ], [ [ "this_tuple = ()\ntype(this_tuple)", "_____no_output_____" ] ], [ [ "The answer is yes.", "_____no_output_____" ], [ "We can also create an empty tuple using function __tuple__.", "_____no_output_____" ] ], [ [ "this_tuple = tuple()\ntype(this_tuple)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "It is possible to create a tuple with more than one element without writing brackets.", "_____no_output_____" ] ], [ [ "this_tuple = 1.5, \"hello\", True, \"book\"\nisinstance(this_tuple, tuple)", "_____no_output_____" ] ], [ [ "Let's see what happens if we try to create one tuple with one element without brackets.", "_____no_output_____" ] ], [ [ "this_tuple = \"book\"\nisinstance(this_tuple, tuple)", "_____no_output_____" ] ], [ [ "So, what is the type of the variable `this_tuple` in this case?", "_____no_output_____" ] ], [ [ "type(this_tuple)", "_____no_output_____" ] ], [ [ "As you might have guessed, the variable above is a string and not a tuple. Let's now check if it works with brackets. ", "_____no_output_____" ] ], [ [ "this_tuple = (\"book\")\nthis_tuple, type(this_tuple)", "_____no_output_____" ] ], [ [ "We can see that it doesn't work to create tuples without brackets if we have just one element inside the tuple.", "_____no_output_____" ], [ "But if we add a comma after the value, it is assumed as a tuple, even if we don't have brackets.", "_____no_output_____" ] ], [ [ "this_tuple = (\"book\",)\nisinstance(this_tuple, tuple)", "_____no_output_____" ], [ "this_tuple = \"book\", \nisinstance(this_tuple, tuple)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "As a last example, here is a tuple of tuples.", "_____no_output_____" ] ], [ [ "this_tuple = ((1,2), (3,4), (4,5))\nisinstance(this_tuple, tuple)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 1.2 Tuple length and accessing to specific elements <a name=\"1.2\"></a>", "_____no_output_____" ], [ "Now, let's see how we can know the length of a tuple and access certain elements of a tuple.", "_____no_output_____" ], [ "#### 1.2.1 Length <a name=\"1.2.1\"></a>", "_____no_output_____" ], [ "We can use the function __`len()`__ in order to check how many elements a tuple has.", "_____no_output_____" ] ], [ [ "this_tuple = (0, 1, 2, 3, 4)\nlen(this_tuple) ", "_____no_output_____" ] ], [ [ "On the output of the cell above, it is possible to confirm with function __`len()`__ that `this_tuple` has 5 elements inside.", "_____no_output_____" ] ], [ [ "this_tuple = 1,\ntype(this_tuple), len(this_tuple)", "_____no_output_____" ] ], [ [ "And one element on the tuple above.", "_____no_output_____" ] ], [ [ "this_tuple = ((1,2), (3,4), (4,5))\nlen(this_tuple)", "_____no_output_____" ] ], [ [ "Again, with function `len()`, we can confirm that the variable `this_tuple` on the cell above has 3 elements inside (each element is a tuple like we saw on <a href=\"#1.1.3\">section 1.1.3</a>). ", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### 1.2.2 Indexing <a name=\"1.2.2\"></a>", "_____no_output_____" ], [ "What we can do if we want to get a specific element of a tuple? 
", "_____no_output_____" ], [ "### <center>fruit = (\"banana\", \"apple\", \"tomato\", watermelon, \"pineapple\")</center> \n# <center> &downarrow; </center> \n### <center>\"tomato\"</center> ", "_____no_output_____" ], [ "Giving the tuple above, what we can do if we want to extract just the `\"tomato\"` element? \nWe must make use of __indexing__.", "_____no_output_____" ], [ "__Positive Indexing__ is done from left to right, starts with zero for the first position of the tuple and goes until the last position that is indexed with the value \n(size of the tuple - 1). ", "_____no_output_____" ], [ "__Negative Indexing__ is done from right to left, starts with -1 for the last element of the tuple and goes until -(size of the tuple) for the first element. ", "_____no_output_____" ], [ "<img src=\"data/tuples-in-python-with-examples.png\" width=\"600\" height=\"150\" >", "_____no_output_____" ], [ "This image and additional materials can be found [here](https://www.faceprep.in/python/tuples-in-python/).", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "Extracting `\"tomato\"` from the tuple `fruit` using __positive indexing__:", "_____no_output_____" ] ], [ [ "fruit = (\"banana\", \"apple\", \"tomato\", \"watermelon\", \"pineapple\")", "_____no_output_____" ], [ "fruit[2]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Extracting \"`tomato`\" from the tuple `fruit` using __negative indexing__:", "_____no_output_____" ] ], [ [ "fruit[-3]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Now we want to extract the last element from tuple `fruit`. ", "_____no_output_____" ], [ "Using positive indexing:", "_____no_output_____" ] ], [ [ "fruit[4]", "_____no_output_____" ] ], [ [ "Using positive indexing but making use of the function `len()`: \n__Note__: This might be useful when we don't know the size of the tuple.", "_____no_output_____" ] ], [ [ "fruit[len(fruit)-1] ", "_____no_output_____" ] ], [ [ "Using negative indexing:", "_____no_output_____" ] ], [ [ "fruit[-1] ", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "And if we want one element of a tuple that is inside another tuple?", "_____no_output_____" ] ], [ [ "fruit_by_color = ((\"strawberry\", \"cherry\"), (\"green apple\", \"kiwi\",\"pear\"), (\"mango\", \"papaya\"))", "_____no_output_____" ] ], [ [ "We want to extract just the element `\"pear\"` from the tuple `fruit_by_color`. What can we do? \nWe first need to know what is the position of the tuple where `\"pear\"` is. It is in position 1 (positive indexing). \nLet's extract position 1 to see if it matches the tuple that we want.", "_____no_output_____" ] ], [ [ "green_fruit = fruit_by_color[1]\ngreen_fruit", "_____no_output_____" ] ], [ [ "From the cell above we can see that position 1 corresponds to the green fruits in `fruit_by_color` tuple, the tuple that has element `\"pear\"` inside. \nIn this extracted tuple, `green_fruit`, we can see that `\"pear\"` is in position 2.", "_____no_output_____" ] ], [ [ "green_fruit[2]", "_____no_output_____" ] ], [ [ "Now let's go to our main tuple, `fruit_by_color`, and extract `\"pear\"` at once by indexing it twice. First time to extract `green_fruit` and second time to extract `\"pear\"`. 
", "_____no_output_____" ], [ "Successive indexing should be written from left to right.", "_____no_output_____" ] ], [ [ "fruit_by_color[1][2]", "_____no_output_____" ] ], [ [ "#### 1.2.3 Slicing <a name=\"1.2.3\"></a>", "_____no_output_____" ], [ "If we want to extract more than one element, what can we do? ", "_____no_output_____" ], [ "### <center>fruit = (\"banana\", \"apple\", \"tomato\", watermelon, \"pineapple\")</center> \n# <center> &downarrow; </center> \n### <center>(\"tomato\", \"watermelon\", \"pineapple\")</center> ", "_____no_output_____" ], [ "The answer is __slicing__.", "_____no_output_____" ] ], [ [ "fruit = (\"banana\", \"apple\", \"tomato\", \"watermelon\", \"pineapple\")", "_____no_output_____" ] ], [ [ "In order to slice a tuple, we need to know the starting position of the slice that we want and the stop position. ", "_____no_output_____" ], [ "### <center>Tuple[start:stop:step] <center/> ", "_____no_output_____" ], [ "To slice a tuple, start, stop and step positions should be between square brackets `[]` and separated by colon `:`.\n\n- __start=n__: the beginning index of the slice, it will include the element at this index unless it is the same as stop, defaults to 0, i.e. the first index. If it's negative, it means to start n items from the end.\n\n- __stop=n__: the ending index of the slice, it __excludes__ the element at this index, defaults to the length of the sequence being sliced, that is, up to and including the end. If it's negative, it means to stop -n + 1 items from the end.\n\n- __step=n__: the amount by which the index increases, defaults to 1. If it's negative, you're slicing over the iterable in reverse.\n\n", "_____no_output_____" ], [ "Let's see the following example:", "_____no_output_____" ], [ "<img src=\"data/slicing.jpeg\" width=\"600\" height=\"150\" >", "_____no_output_____" ] ], [ [ "monty_python = (\"M\", \"o\", \"n\", \"t\", \"y\",\" \", \"P\", \"y\", \"t\", \"h\", \"o\", \"n\")\nmonty_python", "_____no_output_____" ] ], [ [ "Let's extract `Monty` and `Pyth` from the tuple above.", "_____no_output_____" ] ], [ [ "#start=-12\n#stop=-7\n#step=1\nmonty_python[-12:-7]", "_____no_output_____" ], [ "#start=6\n#stop=10\n#step=1\nmonty_python[6:10]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "For the tuple `fruit`, in order to slice it and extract `tomato`, `watermelon` and `pineapple`, we know that the first position is 2 and the last 4. This means that `start=2` and `stop=4`. Don't forget that we need to add 1 to the stop position if we also want to extract the element with index 4.", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ], [ "fruit[2:5]", "_____no_output_____" ] ], [ [ "Because `pineapple` is the last element on our list, we can slice our tuple from the index 2 until the end. This can be done by writing no stop index.", "_____no_output_____" ] ], [ [ "fruit[2:]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "We can also add a __step__ to our slicing. ", "_____no_output_____" ], [ "If our step is 2, this means that we will start on starting index and each next element will be on the position of the previous index + 2 until we reach the stop index. 
When we write no step, python assumes that it is 1.", "_____no_output_____" ], [ "With `step=1`:", "_____no_output_____" ] ], [ [ "fruit[2:5]", "_____no_output_____" ], [ "fruit[2:5:1]", "_____no_output_____" ] ], [ [ "From the two cells above, we can confirm that when we write no step, it is assumed as 1.", "_____no_output_____" ], [ "With `step=2`:", "_____no_output_____" ] ], [ [ "fruit[0:5:2]", "_____no_output_____" ] ], [ [ "In the cell above we have every other element from the `tuple` fruit, starting on the first position and until the last.", "_____no_output_____" ], [ "Because we are starting on the first position and finishing in the last one, we can also write in the following way:", "_____no_output_____" ] ], [ [ "fruit[::2]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Let's now try slicing with __negative indexing__.", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ] ], [ [ "Let's slice the tuple, from the right to the left, starting on negative index -2 (`\"watermelon\"`) and finishing on negative index -4 (`\"apple\"`).", "_____no_output_____" ], [ "If we want to slice from right to left, we need to write our step as -1.", "_____no_output_____" ] ], [ [ "fruit[-2:-5:-1]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "We can also mix positive indexes with negative steps and vice-versa.", "_____no_output_____" ] ], [ [ "fruit[5:2:-1]", "_____no_output_____" ], [ "fruit[-5:-2:1]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 1.3 Element verification <a name=\"1.3\"></a>", "_____no_output_____" ], [ "Sometimes, you may want to check if a certain element is inside a tuple or if a specific element matches a value. \nIn order to do that, we can use the keywords __`in`__ or __`not in`__. The returned value is `True` if the condition is verified and `False` otherwise.\n", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ], [ "\"banana\" in fruit", "_____no_output_____" ], [ "\"banana\" not in fruit", "_____no_output_____" ], [ "\"onion\" not in fruit", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Or, we may want to verify if a specific element inside a tuple matches a certain value. We can make use of `==` and `!=` operators. We'll see these operators extensively on SLU03.", "_____no_output_____" ] ], [ [ "fruit[0] == \"banana\"", "_____no_output_____" ], [ "fruit[0] != \"tomato\"", "_____no_output_____" ] ], [ [ "### 1.4 Immutability <a name=\"1.4\"></a>", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ] ], [ [ "Tuples are __immutable__ which means that they are unchangeable, so if we try to assign a new value to a tuple that already exist it will generate an error.", "_____no_output_____" ] ], [ [ "fruit[0]", "_____no_output_____" ] ], [ [ "Let's try to replace `\"banana\"` with `\"lemon\"` and see what happens.", "_____no_output_____" ] ], [ [ "fruit[0] = \"lemon\"", "_____no_output_____" ] ], [ [ "As expected, the output of the last cell was an error. We were not able to replace bananas with lemons on our tuple. Tuples are like monkeys, don't try to steal their bananas. 
", "_____no_output_____" ], [ "<img src=\"data/monkey.gif\" width=\"600\" height=\"150\" >", "_____no_output_____" ], [ "### 1.5 Adding two tuples <a name=\"1.5\"></a>", "_____no_output_____" ], [ "We can create a third tuple by adding two distinct tuples.", "_____no_output_____" ], [ "Instead of trying to replace values, we can create a tuple where the first element is a tuple of one element, `\"lemons\"`, and the remaining elements are the same as the tuple `fruit` without the first element.", "_____no_output_____" ] ], [ [ "fruit = (\"banana\", \"apple\", \"tomato\", \"watermelon\", \"pineapple\")\nfruit", "_____no_output_____" ], [ "fruit_without_bananas = (\"lemons\",) + fruit[1:]\nfruit_without_bananas", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 1.5 Further Reading <a name=\"1.5\"></a>", "_____no_output_____" ], [ "[Programiz: Python Tuple](https://www.programiz.com/python-programming/tuple) \n[GeeksForGeeks: Tuples in Python](https://www.geeksforgeeks.org/tuples-in-python/) \n[W3schools: Python Tuples](https://www.w3schools.com/python/python_tuples.asp)", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## 2 Data Structure - List <a name=\"2\"></a>", "_____no_output_____" ], [ "# <center> [🐙, 🐷, 🐬, 🐞, 🐈, 🙉, 🐸, 🐓] </center>", "_____no_output_____" ], [ "### 2.1 Definition and List Creation <a name=\"2.1\"></a>", "_____no_output_____" ], [ "#### 2.1.1 Definition <a name=\"2.1.1\"></a>", "_____no_output_____" ], [ "A list is also a collection of data. Lists can be changed after being created. Lists accept duplicated values. After creation, the elements are kept on the same position until explicitly changed. The elements on a list are between square brackets `[]` and are separated by commas `,`. It can have multiple types of data inside the same list. ", "_____no_output_____" ], [ "Let's create our first list with 5 elements.", "_____no_output_____" ] ], [ [ "this_list = [0, 1, 2, 3, 4]\nthis_list", "_____no_output_____" ], [ "type(this_list)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.1.2 Tuples vs Lists <a name=\"2.1.2\"></a>", "_____no_output_____" ], [ "Besides the notation, the main difference between tuples and lists is that lists are changeable. This means that, contrary to tuples, we can change, append or delete elements on a list after creation. 
", "_____no_output_____" ], [ "Regarding length checking, indexing, slicing and element verification, they are done in the same way for lists as they were done for tuples.", "_____no_output_____" ], [ "Functions `len()`, `type()` and `isinstance()` can also be used with lists.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### 2.1.3 List creation <a name=\"2.1.3\"></a>", "_____no_output_____" ], [ "Let's see an example of a list with multiple types of variables inside.", "_____no_output_____" ] ], [ [ "this_list = [1.5, \"hello\", True, \"book\"]\nthis_list", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Let's verify if we can have duplicated values in a list and if the order is maintained.", "_____no_output_____" ] ], [ [ "this_list = [1, 1, 1, 10, 10, 5, 5, 1]\nthis_list", "_____no_output_____" ] ], [ [ "As we can see from the cell above, order and duplicated values are maintained.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "We can also have a list of lists.", "_____no_output_____" ] ], [ [ "this_list = [[1,2], [4,5,6], [4]]\ntype(this_list)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "A list with one element:", "_____no_output_____" ] ], [ [ "this_list = [1]\ntype(this_list)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "An empty list:", "_____no_output_____" ] ], [ [ "this_list = []\ntype(this_list), len(this_list)", "_____no_output_____" ] ], [ [ "An empty list can also be created with the function __`list()`__.", "_____no_output_____" ] ], [ [ "this_list = list()\ntype(this_list), len(this_list) ", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "We can create lists with __list comprehension__.", "_____no_output_____" ] ], [ [ "this_list = [i for i in range(0, 10)]\nthis_list", "_____no_output_____" ] ], [ [ "Compressed lists are a way of creating a list using a for loop under the hood. This will be clearer by the time that we learn about __for__ loops. ", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### 2.2 List length and accessing to specific elements <a name=\"2.2\"></a>", "_____no_output_____" ], [ "The rules for checking the size of a list, indexing and slicing are the same for tuples and lists.", "_____no_output_____" ], [ "#### 2.2.1 Length <a name=\"2.2.1\"></a>", "_____no_output_____" ] ], [ [ "this_list = [0,1,2,3,4]\nlen(this_list)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.2.2 Indexing <a name=\"2.2.2\"></a>", "_____no_output_____" ], [ "Using __positive indexing__ let's try to extract `\"tomato\"` from the following list:", "_____no_output_____" ] ], [ [ "fruit = [\"banana\", \"apple\", \"tomato\", \"watermelon\", \"pineapple\"]", "_____no_output_____" ], [ "#tomato is on the positive index 2\nfruit[2]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Let's now extract the last element of the list `fruit` using __positive indexing__.", "_____no_output_____" ] ], [ [ "fruit[len(fruit)-1]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Giving the following list of lists, let's try to extract `\"pear\"` using __positive indexing__.", "_____no_output_____" ] ], [ [ "fruit_by_color = [[\"strawberry\", \"cherry\"], [\"green apple\", \"kiwi\",\"pear\"], [\"mango\", \"papaya\"]]", "_____no_output_____" ] ], [ [ "`\"Pear\"` is inside the secondary list that is on positive index 1. On the secondary list, `\"Pear\"` is on positive index 2. 
", "_____no_output_____" ], [ "When we want to index a list inside a list, first we should index the main list and then the list inside.", "_____no_output_____" ] ], [ [ "fruit_by_color[1][2]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "With __negative indexing__, let's try to extract `\"mango\"` from `fruit_by_color` list.", "_____no_output_____" ], [ "`\"mango\"` is on position -1 of the main list and on the position -2 of the secondary list.", "_____no_output_____" ] ], [ [ "fruit_by_color[-1][-2]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Now mixing positive indexing with negative indexing in order to extract `\"cherry\"`.", "_____no_output_____" ], [ "`\"cherry\"` is on positive index 0 of the main list and -1 (last element of `[\"strawberry\", \"cherry\"]` ) of the secondary list. ", "_____no_output_____" ] ], [ [ "fruit_by_color[0][-1]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.2.3 Slicing <a name=\"2.2.3\"></a>", "_____no_output_____" ], [ "As already said, the rules for slicing are the same for list as the ones for tuples.", "_____no_output_____" ], [ "There are some good examples of slicing with lists, with images to illustrate that, of slicing with step, positive and negative indexing on this [link](https://www.learnbyexample.org/python-list-slicing/). __Look at it__!", "_____no_output_____" ], [ "Let's check a few examples of __positive index__ slicing.", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ] ], [ [ "Let's extract elements `\"tomato\"` and `\"watermelon\"`.", "_____no_output_____" ] ], [ [ "fruit[2:4:1]", "_____no_output_____" ] ], [ [ "We can also ignore the step in the cell above, because it is positive, and the value is one.", "_____no_output_____" ] ], [ [ "fruit[2:4]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Let's now extracting the same values but now slicing with __negative indexing__ but with a positive step of 1.", "_____no_output_____" ] ], [ [ "fruit[-3:-1:1]", "_____no_output_____" ] ], [ [ "It is the same of writing:", "_____no_output_____" ] ], [ [ "fruit[-3:-1]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Finally, let's slice all the list `fruit` backwards with negative indexing, excluding the `\"banana\"`. \nLike a reverse + excluding the element `\"banana\"`.", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ], [ "fruit[:-5:-1]", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "We are done with the slicing. Pizza for everyone.", "_____no_output_____" ], [ "<img src=\"data/pizza_2.gif\" width=\"300\" height=\"150\" >", "_____no_output_____" ], [ "Did I make you hungry? 
Sorry for that.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### 2.3 Element verification <a name=\"2.3\"></a>", "_____no_output_____" ], [ "The methodology to check if an element is in a list, is the same as the one we learned for tuples.", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ] ], [ [ "Let's confirm that `fruit` list has `\"pineapple\"`.", "_____no_output_____" ] ], [ [ "\"pineapple\" in fruit", "_____no_output_____" ] ], [ [ "Let's check if the element `\"pizza\"` is in the list `fruit`.", "_____no_output_____" ] ], [ [ "\"pizza\" in fruit", "_____no_output_____" ] ], [ [ "Yup, pizza is not a fruit, even if it has `\"pineapple\"` on it.", "_____no_output_____" ], [ "Finally, let's check that the element on the second index is a `\"tomato\"`.", "_____no_output_____" ] ], [ [ "fruit[2] == \"tomato\"", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 2.4 Replace, Append and Delete <a name=\"2.4\"></a>", "_____no_output_____" ], [ "Unlike tuples, lists are mutable. So, we are able to replace, delete and append values.", "_____no_output_____" ], [ "#### 2.4.1 Replace <a name=\"2.4.1\"></a>", "_____no_output_____" ] ], [ [ "fruit", "_____no_output_____" ] ], [ [ "We want to replace `\"watermelon\"` with `\"onions\"`. `\"watermelon\"` is on position index 3, we need to index the list on this position and assign it the new value, `\"onion\"`.", "_____no_output_____" ] ], [ [ "fruit[3] = \"onion\"\nfruit", "_____no_output_____" ] ], [ [ "From the cell above, you can see that `\"watermelon\"` was replaced by `\"onion\"`.", "_____no_output_____" ], [ "We will also replace `\"pineapple\"` with `\"bacon\"` using negative indexing.", "_____no_output_____" ] ], [ [ "fruit[-1] = \"bacon\"\nfruit", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.4.2 Append <a name=\"2.4.2\"></a>", "_____no_output_____" ], [ "Now, we will append the element `\"bread\"` to our list fruit. ", "_____no_output_____" ] ], [ [ "fruit.append(\"bread\")\nfruit", "_____no_output_____" ] ], [ [ "The method `append()` added the element `\"bread\"` to the end of the list `fruit`.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### 2.4.3 Delete <a name=\"2.4.3\"></a>", "_____no_output_____" ], [ "If we want to delete an element from the list, we can use `del`. Let's start by removing `\"apple\"` element on the index 1.", "_____no_output_____" ] ], [ [ "del fruit[1]\nfruit", "_____no_output_____" ] ], [ [ "We can also delete elements using remove method. Let's remove the element `\"banana\"` with this method.", "_____no_output_____" ] ], [ [ "fruit.remove(\"banana\")\nfruit", "_____no_output_____" ] ], [ [ "As you might have guessed, we have transformed fruit into pizza!!! 
🎉 🎉 🎉", "_____no_output_____" ], [ "<img src=\"data/happy.gif\" width=\"300\" height=\"150\" >", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### 2.5 Other Methods <a name=\"2.5\"></a>", "_____no_output_____" ], [ "#### 2.5.1 `count()`", "_____no_output_____" ], [ "Method `count()` can be used in order to count the number of times a specific element appears in a list.", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"napoletana\",\"gorgonzola\", \"calzone\", \"napoletana\", \"romana\"]\npizza", "_____no_output_____" ] ], [ [ "Let's check how many times `\"napoletana\"` appears in the `pizza` list.", "_____no_output_____" ] ], [ [ "pizza.count(\"napoletana\")", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.5.2 `index()`", "_____no_output_____" ], [ "This method returns the first index that matches the input element.", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"napoletana\",\"gorgonzola\", \"calzone\", \"napoletana\", \"romana\"]\npizza", "_____no_output_____" ], [ "pizza.index(\"napoletana\")", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.5.3 `sort()`", "_____no_output_____" ], [ "In order to sort the list, we can make use of the method `sort()`.", "_____no_output_____" ] ], [ [ "pizza", "_____no_output_____" ], [ "pizza.sort()\npizza", "_____no_output_____" ] ], [ [ "Elements in `pizza` are now in alphabetical order.", "_____no_output_____" ], [ "Let's apply the same function but to a list of numbers.", "_____no_output_____" ] ], [ [ "pizza_price_usa = [8.37, 8.59, 5.99, 7.75, 8.75, 8.95, 8.99, 6.99, 7.99, 8.0]", "_____no_output_____" ], [ "pizza_price_usa.sort()\npizza_price_usa", "_____no_output_____" ] ], [ [ "__Random Information__: Those are the prices for pizza, on average, in the cities that pizza is cheaper in the USA. [Forbes: The Pizza Price Index](https://www.forbes.com/sites/priceonomics/2017/09/26/the-pizza-price-index/#7b2826be6553)", "_____no_output_____" ], [ "### 2.6 Converting a tuple into a list and vice-versa <a name=\"2.6\"></a>", "_____no_output_____" ], [ "Sometimes we may need to convert a tuple into a list and vice-versa. ", "_____no_output_____" ], [ "Let's start by trying to convert a tuple into a list.", "_____no_output_____" ] ], [ [ "this_tuple =(\"i\", \"am\", \"a\", \"tuple\")\ntype(this_tuple)", "_____no_output_____" ] ], [ [ "The variable above is a tuple, as we can see from the output of the function __`type()`__. If we give this variable as an input of __`list()`__, our tuple will be converted to a list.", "_____no_output_____" ] ], [ [ "this_tuple = list(this_tuple)\nthis_tuple", "_____no_output_____" ], [ "type(this_tuple)", "_____no_output_____" ] ], [ [ "From the output of the last cell, we can confirm that `this_tuple` is now a list.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "Let's do the same, but now to convert a list into a tuple. For that we will use function __`tuple()`__. 
", "_____no_output_____" ] ], [ [ "this_list = [\"i\", \"am\", \"a\", \"list\"]", "_____no_output_____" ], [ "type(this_list)", "_____no_output_____" ] ], [ [ "In order to convert our list into a tuple, we need to use function __`tuple()`__ and pass it as input.", "_____no_output_____" ] ], [ [ "this_list = tuple(this_list)\nthis_list", "_____no_output_____" ], [ "type(this_list)", "_____no_output_____" ] ], [ [ "I have had just converted our list into a tuple.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### 2.7 List Operations<a name=\"2.7\"></a>", "_____no_output_____" ], [ "We can perform operations between lists.", "_____no_output_____" ], [ "Let's see what happens if we try to add two lists. ", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"gorgonzola\", \"calzone\"]\npizza", "_____no_output_____" ], [ "other_pizzas = [\"quattro stagioni\", \"Frutti di Mare\", \"quattro formaggi\"]\nother_pizzas", "_____no_output_____" ], [ "all_pizzas = pizza + other_pizzas\nall_pizzas", "_____no_output_____" ] ], [ [ "The results of adding two lists is a third list with all the elements of both lists. The first elements are the ones on list `pizza` followed by `other_pizzas`.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "We can also multiply a list by a number.", "_____no_output_____" ] ], [ [ "other_pizzas", "_____no_output_____" ], [ "other_pizzas_5_x = other_pizzas * 5\nother_pizzas_5_x", "_____no_output_____" ], [ "len(other_pizzas), len(other_pizzas_5_x)", "_____no_output_____" ] ], [ [ "Now each element on list `other_pizzas` appears 5 times on list `other_pizzas_5_x`.", "_____no_output_____" ], [ "### 2.8 Further Reading<a name=\"2.8\"></a>", "_____no_output_____" ], [ "[Python docs: Datastructures](https://docs.python.org/3/tutorial/datastructures.html) \n[Programiz: Python List](https://www.programiz.com/python-programming/list) \n[GeeksForGeeks: Python Lists](https://www.geeksforgeeks.org/python-list/) \n[TutorialsPoint: Python Lists ](https://www.tutorialspoint.com/python/python_lists.htm) \n[RealPython: Python Lists and Tuples](https://realpython.com/python-lists-tuples/) \n[W3schools: Python Lists](https://www.w3schools.com/python/python_lists.asp) \n[Datacamp: Python List questions](https://www.datacamp.com/community/tutorials/18-most-common-python-list-questions-learn-python)", "_____no_output_____" ], [ "## 3 Data Structure - Dictionary <a name=\"3\"></a>", "_____no_output_____" ], [ "# <center> {\"octopus\": 🐙, \"pork\": 🐷, \"cat\": 🐈, \"monkey\": 🙉} </center>", "_____no_output_____" ], [ "### 3.1 Definition and Dictionary Creation<a name=\"3.1\"></a>", "_____no_output_____" ], [ "#### 3.1.1 Definition", "_____no_output_____" ], [ "A dictionary is a collection of key-value pairs. It doesn't support duplicated keys. Each unique key is mapped to a value. Keys and values can be of any type. \n\nDictionaries are unordered. Being unordered implies that the elements don't have a specific position and, therefore, you cannot search for a element by position. \n\nThey are also mutable. It means that we can append, delete and update key-value pairs on a dictionary after its creation.", "_____no_output_____" ], [ "In a dictionary, key-value correspondence should be done with a colon `:`, and consecutive pairs should be separated by commas `,`. 
All pairs should be between curly brackets `{}`.", "_____no_output_____" ], [ "Let's create our first dictionary.", "_____no_output_____" ] ], [ [ "#keys are fruits and values are the correspondent color\nthis_dict = {\"strawberry\": \"red\", \"pear\":\"green\", \"mango\": \"yellow\", \"banana\": \"yellow\"}\nthis_dict", "_____no_output_____" ] ], [ [ "#### 2.1.1 Dictionary creation", "_____no_output_____" ], [ "Let's start by creating an empty dictionary.", "_____no_output_____" ] ], [ [ "this_dict = {}\nlen(this_dict), type(this_dict)", "_____no_output_____" ] ], [ [ "An empty dictionary can also be created by using the function __`dict()`__.", "_____no_output_____" ] ], [ [ "this_dict = dict()\nlen(this_dict), type(this_dict)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "What happens if we have duplicated keys? Let's create one dictionary with duplicated keys to see what happens.", "_____no_output_____" ] ], [ [ "this_dict = {\"strawberry\": \"red\", \"pear\":\"green\", \"mango\": \"red\", \"banana\": \"yellow\", \"mango\": \"yellow\"}\nthis_dict", "_____no_output_____" ] ], [ [ "It is __not possible__ to have __duplicated keys__ in a python dictionary. The key-value pair keeps the last value assigned to a key.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "Now we will create a dictionary with values of different types.", "_____no_output_____" ] ], [ [ "strawberry = {\"type\": \"fruit\", \"name\": \"strawberry\", \"color\": \"red\", \"price\": 2.5, \"in_stock\": True}\nstrawberry", "_____no_output_____" ], [ "type(strawberry)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Let's try to create a dictionary with keys that are not string to see what happens.", "_____no_output_____" ] ], [ [ "strawberry = {\"type\": \"fruit\", \"name\": \"strawberry\", True: \"in_stoks\", 2.5: \"in_stock\"}\nstrawberry", "_____no_output_____" ], [ "type(strawberry)", "_____no_output_____" ] ], [ [ "It works. But, keep in mind that keys should have a common meaning between them in order to be easy to search elements on a dict.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "Having dictionaries of dictionaries is very common when we want details about a key. ", "_____no_output_____" ], [ "Let's create a dictionary with the groceries that someone did on march 2020. Each key is the name of the product bought and value is the details about the product purchased, type of product, price per unit and quantity purchased.", "_____no_output_____" ] ], [ [ "groceries = {\n \"bread\": {\"type\": \"grains\", \"price_per_unit\": 2, \"quantity_purchased\": 1},\n \"onions\": {\"type\": \"vegetables\", \"price_per_unit\": 0.5, \"quantity_purchased\": 2},\n \"rice\": {\"type\": \"grains\" , \"price_per_unit\": 1, \"quantity_purchased\": 2},\n \"toilet paper\": {\"type\": \"others\", \"price_per_unit\": 50, \"quantity_purchased\":1000},\n \"spinages\": {\"type\": \"vegetables\" , \"price_per_unit\": 1.5, \"quantity_purchased\": 1}\n }\ngroceries", "_____no_output_____" ], [ "type(groceries)", "_____no_output_____" ] ], [ [ "Has we can see, it is possible to have a dictionary whose values are other dictionaries.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "### 3.2 Accessing keys and values on a dictionary<a name=\"3.2\"></a>", "_____no_output_____" ], [ "Sometimes we may need to check the value assigned to a certain key. We can do that in two ways. Using squared brackets `[]` with the key inside. 
Or with the __`get()`__ method also with the key inside.", "_____no_output_____" ], [ "- __Squared brackets:__", "_____no_output_____" ] ], [ [ "groceries", "_____no_output_____" ], [ "toilet_paper = groceries[\"toilet paper\"]\ntoilet_paper", "_____no_output_____" ] ], [ [ "Let's now try to search by index on the dictionary to see what happens.", "_____no_output_____" ] ], [ [ "toilet_paper = groceries[1]\ntoilet_paper", "_____no_output_____" ] ], [ [ "We have an error on the cell above. Because dictionaries are unordered, we are not able to search by position. ", "_____no_output_____" ], [ "- __`get()` method:__", "_____no_output_____" ] ], [ [ "toilet_paper = groceries.get(\"toilet paper\")\ntoilet_paper", "_____no_output_____" ] ], [ [ "From the cells above we can check that the output is the same for both cells. Yes, the person who did these groceries bought a lot of toilet paper and in a very expensive price. Toilet paper is the currency in town during the pandemic. ", "_____no_output_____" ], [ "<img src=\"data/toilet_paper.gif\" width=\"500\" height=\"150\" >", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "We can also use methods __`keys()`__ and __`values()`__ in order to get all the keys in a dictionary and all the values respectively.", "_____no_output_____" ] ], [ [ "toilet_paper", "_____no_output_____" ] ], [ [ "Let's extract keys from dict `toilet_paper`.", "_____no_output_____" ] ], [ [ "toilet_paper.keys()", "_____no_output_____" ] ], [ [ "And now all the values.", "_____no_output_____" ] ], [ [ "toilet_paper.values()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "Method __`items()`__ returns a list of tuples, each tuple has two elements. The element on the first position is the key, and the element on the second position is the correspondent value. This function is particularly useful when we want to iterate over a dictionary. 
", "_____no_output_____" ] ], [ [ "toilet_paper.items()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 3.3 Element verification<a name=\"3.3\"></a>", "_____no_output_____" ], [ "In order to check if a key is in a dictionary or not, we can use __in__ and __not in__ notation.", "_____no_output_____" ] ], [ [ "toilet_paper", "_____no_output_____" ] ], [ [ "Let's check if `\"quantity_purchased\"` key is in the dictionary `toilet_paper`.", "_____no_output_____" ] ], [ [ "\"quantity_purchased\" in toilet_paper", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "If we want to confirm if a value matches a key, we should start by indexing the value that matches our key, <a href=\"#3.2\">section 3.2</a>, followed by __`==`__ or __`!=`__ and the value we want to compare.", "_____no_output_____" ], [ "Let's see if `\"quantity_purchased\"`matches `1000` in the dictionary `toilet_paper`.", "_____no_output_____" ] ], [ [ "toilet_paper[\"quantity_purchased\"] == 1000", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "If we want to check if a value is in a dictionary, we can combine __in__ notation with values extraction that we learned on <a href=\"#3.2\">section 3.2</a>.", "_____no_output_____" ] ], [ [ "1000 in toilet_paper.values()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 3.4 Replace, Append and Delete<a name=\"3.4\"></a>", "_____no_output_____" ], [ "Let's start by learn how to replace values in a key-value pair.", "_____no_output_____" ], [ "The pandemic is now over, and toilet paper purchases come to its normal state.\n\nWe can assign a value to a key by writing the name of the dictionary, followed by the key inside square brackets `[]`, the equal sign `=` and the new value.", "_____no_output_____" ] ], [ [ "toilet_paper", "_____no_output_____" ], [ "toilet_paper[\"price_per_unit\"] = 2\ntoilet_paper[\"quantity_purchased\"] = 1\ntoilet_paper", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "We can also add new key-value pairs to our dictionary, using the `update()` method or with the notation `my_dict[new_key] = new_value`.", "_____no_output_____" ] ], [ [ "toilet_paper.update({\"characteristics\": [\"soft\", \"double\"]})\ntoilet_paper", "_____no_output_____" ], [ "toilet_paper[\"rating\"] = 4\ntoilet_paper", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "If we want to delete a key-value pair, we can use the `del` notation or the `pop()` method.", "_____no_output_____" ] ], [ [ "del toilet_paper[\"rating\"]\ntoilet_paper", "_____no_output_____" ], [ "toilet_paper.pop(\"type\")\ntoilet_paper", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### 3.5 Further Reading <a name=\"3.5\"></a>", "_____no_output_____" ], [ "[Python Docs: Datastructures-Dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) \n[Realpython: Python dicts](https://realpython.com/python-dicts/) \n[Programiz: Dictionary](https://www.programiz.com/python-programming/dictionary) \n[W3schools: Python Dictionaries](https://www.w3schools.com/python/python_dictionaries.asp) \n[Pythonlikeyoumeanit: DataStructures II Dictionaries](https://www.pythonlikeyoumeanit.com/Module2_EssentialsOfPython/DataStructures_II_Dictionaries.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", 
"markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
ecb05131538f65e0893cca99cca05b0164e3e65e
181,180
ipynb
Jupyter Notebook
experiments/qlearning/basic/qlearner_obs_seq/qlearner-obs-seq.ipynb
jackblandin/rlpomdp
31d97ded61ba231680a0f277e2c24ccb093210f1
[ "MIT" ]
null
null
null
experiments/qlearning/basic/qlearner_obs_seq/qlearner-obs-seq.ipynb
jackblandin/rlpomdp
31d97ded61ba231680a0f277e2c24ccb093210f1
[ "MIT" ]
1
2022-02-12T21:54:56.000Z
2022-02-12T21:54:56.000Z
experiments/qlearning/basic/qlearner_obs_seq/qlearner-obs-seq.ipynb
jackblandin/research
31d97ded61ba231680a0f277e2c24ccb093210f1
[ "MIT" ]
null
null
null
163.372408
38,696
0.757109
[ [ [ "# Setup", "_____no_output_____" ] ], [ [ "\"\"\"\nAdd parent directorys to current path\n\"\"\"\nimport os.path\nimport sys\n\n\nfor p in ['..', '../..', '../../..', '../../../..']:\n d = os.path.abspath(p)\n if d not in sys.path:\n sys.path.insert(0,d)\n\n \n\"\"\"\nAdd tiger-env directory to current path\nStill not sure why this is needed.\n\"\"\"\nd = [os.path.abspath('../../../../../custom_envs/gym-tiger'),\n os.path.abspath('../../../../../custom_envs/gym-dummy/')]\nfor _d in d:\n if _d not in sys.path:\n sys.path.insert(0, _d)\n\n\n\"\"\"\nEnable hot-reloading\n\"\"\" \nfrom notebook_utils import import_module_by_name, reload_module_by_name\n\n\ndef reload():\n \"\"\"Helper function for hot-reloading QLearnerObsSeq class from source\"\"\"\n reload_module_by_name(\n 'experiments.qlearning.basic.qlearner_obs_seq.qlearner_obs_seq',\n 'QLearnerObsSeq')\n global QLearnerObsSingle\n from experiments.qlearning.basic.qlearner_obs_seq.qlearner_obs_seq \\\n import QLearnerObsSeq", "_____no_output_____" ] ], [ [ "# Tiger-v0", "_____no_output_____" ], [ "## Setup ENV and Model", "_____no_output_____" ] ], [ [ "import gym\nimport gym_tiger\nimport matplotlib.pyplot as plt\nfrom experiments.qlearning.basic.utils import play_one, plot_running_avg\nfrom experiments.qlearning.basic.qlearner_obs_seq.qlearner_obs_seq \\\n import QLearnerObsSeq\nfrom experiments.qlearning.basic.qlearner_obs_seq.feature_transformer \\\n import SeqFeatureTransformer\n\nenv = gym.make('Tiger-v0')\nenv.__init__(reward_tiger=-100, reward_gold=10, reward_listen=-1,\n max_steps_per_episode=500)\n\nseq_len=2\n\nft = SeqFeatureTransformer()\n\nmodel = QLearnerObsSeq(env, ft, initial_alpha=.5, gamma=.9, alpha_decay=.4, seq_len=seq_len)\neps = 1\nn = 0\ndone = False\not = env.reset()", "/Users/jblandin/miniconda3/envs/research/lib/python3.6/site-packages/gym/envs/registration.py:14: PkgResourcesDeprecationWarning: Parameters to load are deprecated. 
Call .resolve and .require separately.\n result = entry_point.load(False)\n" ] ], [ [ "## Take one action and update Q", "_____no_output_____" ], [ "$$\nQ(s_{t-1}, a_{t-1}) = Q(s_{t-1}, a_{t-1}) + \\alpha \\big[ r_t + \\gamma \\cdot Q(s_t, a_t) - Q(s_{t-1}, a_{t-1}) \\big]\n$$", "_____no_output_____" ] ], [ [ "if done:\n ot = env.reset()\n# print(str(model))\notm1 = ot\natm1 = model.sample_action(otm1, eps)\not, r, done, info = env.step(atm1)\nat = model.best_action(ot)\nmodel.update(otm1, atm1, r, ot, at)\n_otm1 = env.translate_obs(otm1)\n_atm1 = env.translate_action(atm1)\nprint(_otm1, ',', _atm1, ',', r)\nprint(str(model))", "START , OPEN_LEFT , -100\n\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n----------------------- ----------- ------------ --------\n\n" ] ], [ [ "## Play 1 Episode (100 steps)", "_____no_output_____" ] ], [ [ "print(str(model))\nplay_one(env, model, eps, verbose=True)\nprint(str(model))", "\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n----------------------- ----------- ------------ --------\n\no_t-n,...,o_t-1 | o_t-1 | a_t-1 | r | o_t | a_t \n----------------------------------------------------------------------------------------------------\n['START', 'START'] | START | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_LEFT \n['START', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | OPEN_LEFT \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_LEFT \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | OPEN_LEFT \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | OPEN_LEFT \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | OPEN_LEFT \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | OPEN_LEFT \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | OPEN_LEFT \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | 
OPEN_LEFT \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | OPEN_LEFT \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 
'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | 
OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | 
OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 
-100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | OPEN_RIGHT\n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 
'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | 
GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN 
\n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | 
LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | 
OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | 
GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | 
GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | -100 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | 10 | GROWL_RIGHT | LISTEN \n['GROWL_RIGHT', 'GROWL_RIGHT'] | GROWL_RIGHT | OPEN_LEFT | -100 | GROWL_LEFT | LISTEN \n['GROWL_RIGHT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | OPEN_RIGHT | 10 | GROWL_LEFT | LISTEN \n['GROWL_LEFT', 'GROWL_LEFT'] | GROWL_LEFT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n['GROWL_LEFT', 'GROWL_RIGHT'] | GROWL_RIGHT | LISTEN | -1 | GROWL_RIGHT | LISTEN \n\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n------------------------------ ----------- ------------ ---------\n['GROWL_LEFT', 'GROWL_RIGHT'] -43 -52.43 -15.73 <<\n['GROWL_LEFT', 'GROWL_RIGHT'] -43 -52.43 -15.73 <<\n['GROWL_RIGHT', 'GROWL_LEFT'] -47.11 -52.12 -8.27 <<\n['GROWL_RIGHT', 'GROWL_RIGHT'] -27.11 -58.19 -4.13 <<\n['GROWL_LEFT', 'GROWL_RIGHT'] -43 -52.43 -15.73 <<\n['GROWL_LEFT', 'GROWL_LEFT'] -82.11 -50.9 -9.37 <<\n\n" ] ], [ [ "## Play 100 Episodes", "_____no_output_____" ] ], [ [ "import gym\nimport gym_tiger\nimport matplotlib.pyplot as plt\nfrom experiments.qlearning.basic.utils import play_one, plot_running_avg\nfrom experiments.qlearning.basic.qlearner_obs_seq.qlearner_obs_seq \\\n import QLearnerObsSeq\nfrom experiments.qlearning.basic.qlearner_obs_seq.feature_transformer \\\n import SeqFeatureTransformer\n\nenv = gym.make('Tiger-v0')\nenv.__init__(reward_tiger=-100, reward_gold=10, reward_listen=-1,\n max_steps_per_episode=500)\n\nseq_len=3\n\nft = SeqFeatureTransformer()\n\nmodel = QLearnerObsSeq(env, ft, initial_alpha=.5, gamma=.9, alpha_decay=.4,\n seq_len=seq_len)\n\neps = 1\nn = 0\ndone = False\not = env.reset()\n\nN = 100\ntotalrewards = np.empty(N)\nfor n in range(N):\n if n > 25:\n eps = 0\n else:\n eps = 1.0/np.sqrt(n+1)\n totalreward = play_one(env, model, eps)\n totalrewards[n] = totalreward\n if n % (N/5) == 0:\n print(str(model))\n\nprint(\"avg reward for last {} episodes:\".format(N/5),\n totalrewards[int(-1*(N/5)):].mean())\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 5))\nax.plot(totalrewards)\nax.set_title(\"Rewards\")\n\nplot_running_avg(totalrewards[25:], window=5)", "\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n--------------------------------------------- ----------- ------------ --------\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -23.84 -40.08 -7.04 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -45.65 -39.71 -1.42 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -40.75 -64.17 -1.13 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -35.89 -38.24 -3.97 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -50.36 -33.96 -2.08 <<\n['GROWL_LEFT', 
'GROWL_RIGHT', 'GROWL_LEFT'] -40.04 -32.87 -6.05 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -23.84 -40.08 -7.04 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_LEFT'] -47.77 -18.4 -2.39 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_RIGHT'] -40.94 -47.63 -4.2 <<\n\n\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n--------------------------------------------- ----------- ------------ --------\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -50.25 -46.04 -8.52 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -50.25 -46.04 -8.52 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -9.34 -94.22 -8.81 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.89 -61.81 -8.5 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.31 -24.93 -8.73 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_LEFT'] -68.81 -27.5 -8.56 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -15.73 -85.53 -8.66 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_LEFT'] -94.32 -10.28 -8.87 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_RIGHT'] -51.09 -49.17 -8.54 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -50.25 -46.04 -8.52 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -50.25 -46.04 -8.52 <<\n\n\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n--------------------------------------------- ----------- ------------ --------\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.49 -23.16 -9.63 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -48.93 -48.55 -9.48 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -10.16 -98.85 -9.62 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.42 -63.28 -9.49 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.49 -23.16 -9.63 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_LEFT'] -70.81 -27.23 -9.58 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -17 -82.09 -9.54 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_LEFT'] -94.06 -12.22 -9.7 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_RIGHT'] -54.28 -47.81 -9.54 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.49 -23.16 -9.63 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.49 -23.16 -9.63 <<\n\n\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n--------------------------------------------- ----------- ------------ --------\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.42 -63.28 -9.84 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -48.93 -48.55 -9.82 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -10.16 -98.85 -9.88 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.42 -63.28 -9.84 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.49 -23.16 -9.82 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_LEFT'] -70.81 -27.23 -9.81 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -17 -82.09 -9.86 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_LEFT'] -94.06 -12.22 -9.85 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_RIGHT'] -54.28 -47.81 -9.8 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.42 -63.28 -9.84 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.42 -63.28 -9.84 <<\n\n\nPrevious Observations OPEN_LEFT OPEN_RIGHT LISTEN\n--------------------------------------------- ----------- ------------ --------\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -10.16 -98.85 -9.95 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_LEFT'] -48.93 -48.55 -9.93 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -10.16 -98.85 -9.95 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_RIGHT'] -26.42 -63.28 -9.93 <<\n['GROWL_RIGHT', 'GROWL_LEFT', 'GROWL_LEFT'] -73.49 -23.16 -9.91 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_LEFT'] -70.81 -27.23 -9.9 <<\n['GROWL_LEFT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -17 -82.09 -9.94 <<\n['GROWL_LEFT', 
'GROWL_LEFT', 'GROWL_LEFT'] -94.06 -12.22 -9.93 <<\n['GROWL_LEFT', 'GROWL_LEFT', 'GROWL_RIGHT'] -54.28 -47.81 -9.91 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -10.16 -98.85 -9.95 <<\n['GROWL_RIGHT', 'GROWL_RIGHT', 'GROWL_RIGHT'] -10.16 -98.85 -9.95 <<\n\navg reward for last 20.0 episodes: -499.45\n" ] ], [ [ "# TwoInARow-v0", "_____no_output_____" ] ], [ [ "import gym\nimport gym_dummy\nimport matplotlib.pyplot as plt\nfrom experiments.qlearning.basic.utils import play_one, plot_running_avg\nfrom experiments.qlearning.basic.qlearner_obs_seq.qlearner_obs_seq \\\n import QLearnerObsSeq\nfrom experiments.qlearning.basic.qlearner_obs_seq.feature_transformer \\\n import SeqFeatureTransformer\n\nenv = gym.make('TwoInARow-v0')\nft = SeqFeatureTransformer()\nmodel = QLearnerObsSeq(env, ft, initial_alpha=.5, gamma=.9, alpha_decay=.4,\n seq_len=2, translate=False)\neps = 1\nn = 0\ndone = False\not = env.reset()\n\nN = 100\ntotalrewards = np.empty(N)\nfor n in range(N):\n if n >= N - (N/5):\n eps = 0\n else:\n eps = 1.0/np.sqrt(n+1)\n totalreward = play_one(env, model, eps)\n totalrewards[n] = totalreward\n\nprint(\"avg reward for last {} episodes:\".format(N/5),\n totalrewards[int(-1*(N/5)):].mean())\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 5))\nax.plot(totalrewards)\nax.set_title(\"Rewards\")\n\nplot_running_avg(totalrewards, window=5)", "avg reward for last 20.0 episodes: 99.2\n" ], [ "display(model.last_n_obs)\nmodel.predict([1])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ecb0528e19fc31a3f4f4f567517b26817eb9e89a
39,847
ipynb
Jupyter Notebook
codes/labs_lecture05/lab01_mlp/mlp_demo.ipynb
zzybluebell/CS4243
7c7d8e4a2b1397c291ad39ad0696881c8a2f4995
[ "MIT" ]
47
2022-01-10T02:49:02.000Z
2022-03-25T08:59:04.000Z
codes/labs_lecture05/lab01_mlp/mlp_demo.ipynb
zzybluebell/CS4243
7c7d8e4a2b1397c291ad39ad0696881c8a2f4995
[ "MIT" ]
null
null
null
codes/labs_lecture05/lab01_mlp/mlp_demo.ipynb
zzybluebell/CS4243
7c7d8e4a2b1397c291ad39ad0696881c8a2f4995
[ "MIT" ]
5
2022-02-12T11:12:41.000Z
2022-03-25T15:54:43.000Z
143.851986
28,376
0.887997
[ [ [ "# Lab 01 : MLP -- demo\n\n# Understanding the training loop ", "_____no_output_____" ] ], [ [ "# For Google Colaboratory\nimport sys, os\nif 'google.colab' in sys.modules:\n # mount google drive\n from google.colab import drive\n drive.mount('/content/gdrive')\n path_to_file = '/content/gdrive/My Drive/CS4243_codes/codes/labs_lecture05/lab01_mlp'\n print(path_to_file)\n # move to Google Drive directory\n os.chdir(path_to_file)\n !pwd", "_____no_output_____" ], [ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom random import randint\nimport utils", "_____no_output_____" ] ], [ [ "### Download the data", "_____no_output_____" ] ], [ [ "from utils import check_mnist_dataset_exists\ndata_path=check_mnist_dataset_exists()\n\ntrain_data=torch.load(data_path+'mnist/train_data.pt')\ntrain_label=torch.load(data_path+'mnist/train_label.pt')\ntest_data=torch.load(data_path+'mnist/test_data.pt')", "_____no_output_____" ] ], [ [ "### Make a three layer net class", "_____no_output_____" ] ], [ [ "class three_layer_net(nn.Module):\n\n def __init__(self, input_size, hidden_size1, hidden_size2, output_size):\n super(three_layer_net , self).__init__()\n \n self.layer1 = nn.Linear( input_size , hidden_size1 , bias=False )\n self.layer2 = nn.Linear( hidden_size1 , hidden_size2 , bias=False )\n self.layer3 = nn.Linear( hidden_size2 , output_size , bias=False )\n \n def forward(self, x):\n \n y = self.layer1(x)\n y_hat = torch.relu(y)\n z = self.layer2(y_hat)\n z_hat = torch.relu(z)\n scores = self.layer3(z_hat)\n \n return scores", "_____no_output_____" ] ], [ [ "### Build the net", "_____no_output_____" ] ], [ [ "net=three_layer_net(784, 50, 50, 10)\nprint(net)", "three_layer_net(\n (layer1): Linear(in_features=784, out_features=50, bias=False)\n (layer2): Linear(in_features=50, out_features=50, bias=False)\n (layer3): Linear(in_features=50, out_features=10, bias=False)\n)\n" ] ], [ [ "### Choose the criterion, optimizer, learning rate, and batch size", "_____no_output_____" ] ], [ [ "criterion = nn.CrossEntropyLoss()\n\noptimizer=torch.optim.SGD( net.parameters() , lr=0.01 )\n\nbs=200", "_____no_output_____" ] ], [ [ "### Train the network on the train set (process 5000 batches)", "_____no_output_____" ] ], [ [ "for iter in range(1,5000):\n \n # Set dL/dU, dL/dV, dL/dW to be filled with zeros\n optimizer.zero_grad()\n \n # create a minibatch\n indices=torch.LongTensor(bs).random_(0,60000)\n minibatch_data = train_data[indices]\n minibatch_label= train_label[indices]\n \n #reshape the minibatch\n inputs = minibatch_data.view(bs,784)\n \n # tell Pytorch to start tracking all operations that will be done on \"inputs\"\n inputs.requires_grad_()\n\n # forward the minibatch through the net \n scores=net( inputs ) \n \n # Compute the average of the losses of the data points in the minibatch\n loss = criterion( scores , minibatch_label) \n \n # backward pass to compute dL/dU, dL/dV and dL/dW \n loss.backward()\n \n # do one step of stochastic gradient descent: U=U-lr(dL/dU), V=V-lr(dL/dU), ...\n optimizer.step()\n ", "_____no_output_____" ] ], [ [ "### Choose image at random from the test set and see how good/bad are the predictions", "_____no_output_____" ] ], [ [ "# choose a picture at random\nidx=randint(0, 10000-1)\nim=test_data[idx]\n\n# diplay the picture\nutils.show(im)\n\n# feed it to the net and display the confidence scores\nscores = net( im.view(1,784)) \nprob=torch.softmax(scores, dim = 1)\n\nutils.show_prob_mnist(prob)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb05eb5a48ba55a354e5b38936786e5df9b530f
24,573
ipynb
Jupyter Notebook
Chapter07/Using deep belief networks for predictive analytics.ipynb
arifmudi/Predictive-Analytics-with-TensorFlow
23f24d07dd19c65febdbd44cb837c2dea9844f89
[ "MIT" ]
73
2017-10-27T22:44:32.000Z
2021-12-25T18:37:51.000Z
Chapter07/Using deep belief networks for predictive analytics.ipynb
arifmudi/Predictive-Analytics-with-TensorFlow
23f24d07dd19c65febdbd44cb837c2dea9844f89
[ "MIT" ]
3
2018-09-20T21:47:21.000Z
2021-03-14T05:32:41.000Z
Chapter07/Using deep belief networks for predictive analytics.ipynb
arifmudi/Predictive-Analytics-with-TensorFlow
23f24d07dd19c65febdbd44cb837c2dea9844f89
[ "MIT" ]
72
2017-11-06T07:08:01.000Z
2021-11-25T09:00:28.000Z
76.790625
11,960
0.529239
[ [ [ "import warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nnp.random.seed(123456789) \nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics.classification import accuracy_score\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nfrom tf_models import SupervisedDBNClassification\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "FILE_PATH = 'bank_normalized.csv'\nraw_data = pd.read_csv(FILE_PATH) ", "_____no_output_____" ], [ "Y_LABEL = 'y' \nKEYS = [i for i in raw_data.keys().tolist() if i != Y_LABEL]\nX = raw_data[KEYS].get_values() \nY = raw_data[Y_LABEL].get_values() \nclass_names = list(raw_data.columns.values)\nprint(class_names)", "['age', 'campaign', 'pdays', 'previous', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed', 'admin.', 'blue-collar', 'entrepreneur', 'housemaid', 'management', 'retired', 'self-employed', 'services', 'student', 'technician', 'unemployed', 'unknown', 'divorced', 'married', 'single', 'unknown.1', 'basic.4y', 'basic.6y', 'basic.9y', 'high.school', 'illiterate', 'professional.course', 'university.degree', 'unknown.2', 'no', 'unknown.3', 'yes', 'no.1', 'unknown.4', 'yes.1', 'no.2', 'unknown.5', 'yes.2', 'cellular', 'telephone', 'apr', 'aug', 'dec', 'jul', 'jun', 'mar', 'may', 'nov', 'oct', 'sep', 'fri', 'mon', 'thu', 'tue', 'wed', 'failure', 'nonexistent', 'success', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199', '200', '201', '202', '203', '204', '205', '206', '207', '208', '209', '210', '211', '212', '213', '214', '215', '216', '217', '218', '219', '220', '221', '222', '223', '224', '225', '226', '227', '228', '229', '230', '231', '232', '233', '234', '235', '236', '237', '238', '239', '240', '241', '242', '243', '244', '245', '246', '247', '248', '249', '250', '251', '252', '253', '254', '255', '256', '257', '258', '259', '260', '261', '262', '263', '264', '265', '266', '267', '268', '269', '270', '271', '272', '273', '274', '275', '276', '277', '278', '279', '280', '281', '282', '283', '284', '285', '286', '287', '288', '289', '290', '291', '292', '293', '294', '295', '296', '297', '298', 
'299', '300', '301', '302', '303', '304', '305', '306', '307', '308', '309', '310', '311', '312', '313', '314', '315', '316', '317', '318', '319', '320', '321', '322', '323', '324', '325', '326', '327', '328', '329', '330', '331', '332', '333', '334', '335', '336', '337', '338', '339', '340', '341', '342', '343', '344', '345', '346', '347', '348', '349', '350', '351', '352', '353', '354', '355', '356', '357', '358', '359', '360', '361', '362', '363', '364', '365', '366', '367', '368', '369', '370', '371', '372', '373', '374', '375', '376', '377', '378', '379', '380', '381', '382', '383', '384', '385', '386', '387', '388', '389', '390', '391', '392', '393', '394', '395', '396', '397', '398', '399', '400', '401', '402', '403', '404', '405', '406', '407', '408', '409', '410', '411', '412', '413', '414', '415', '416', '417', '418', '419', '420', '421', '422', '423', '424', '425', '426', '427', '428', '429', '430', '431', '432', '433', '434', '435', '436', '437', '438', '439', '440', '441', '442', '443', '444', '445', '446', '447', '448', '449', '450', '451', '452', '453', '454', '455', '456', '457', '458', '459', '460', '461', '462', '463', '464', '465', '466', '467', '468', '469', '470', '471', '472', '473', '474', '475', '476', '477', '478', '479', '480', '481', '482', '483', '484', '485', '486', '487', '488', '489', '490', '491', '492', '493', '494', '495', '496', '497', '498', '499', '500', '501', '502', '503', '504', '505', '506', '507', '508', '509', '510', '511', '512', '513', '514', '515', '516', '517', '518', '519', '520', '521', '522', '523', '524', '525', '526', '527', '528', '529', '530', '531', '532', '533', '534', '535', '536', '537', '538', '539', '540', '541', '542', '543', '544', '545', '546', '547', '548', '549', '550', '551', '552', '553', '554', '555', '556', '557', '558', '559', '560', '561', '562', '563', '564', '565', '566', '567', '568', '569', '570', '571', '572', '573', '574', '575', '576', '577', '578', '579', '580', '581', '582', '583', '584', '585', '586', '587', '588', '589', '590', '591', '592', '593', '594', '595', '596', '597', '598', '599', '600', '601', '602', '603', '604', '605', '606', '607', '608', '609', '610', '611', '612', '613', '614', '615', '616', '617', '618', '619', '620', '621', '622', '623', '624', '625', '626', '627', '628', '629', '630', '631', '632', '633', '634', '635', '636', '637', '638', '639', '640', '641', '642', '643', '644', '645', '646', '647', '648', '649', '650', '651', '652', '653', '654', '655', '656', '657', '658', '659', '660', '661', '662', '663', '664', '665', '666', '667', '668', '669', '670', '671', '672', '673', '674', '675', '676', '677', '678', '679', '680', '681', '682', '683', '684', '685', '686', '687', '688', '689', '690', '691', '692', '693', '694', '695', '696', '697', '698', '699', '700', '701', '702', '703', '704', '705', '706', '707', '708', '709', '710', '711', '712', '713', '714', '715', '716', '717', '718', '719', '720', '721', '722', '723', '724', '725', '726', '727', '728', '729', '730', '731', '732', '733', '734', '735', '736', '737', '738', '739', '740', '741', '742', '743', '744', '745', '746', '747', '748', '749', '750', '751', '752', '753', '754', '755', '756', '757', '758', '759', '760', '761', '762', '763', '764', '765', '766', '767', '768', '769', '770', '771', '772', '773', '774', '775', '776', '777', '778', '779', '780', '781', '782', '783', '784', '785', '786', '787', '788', '789', '790', '791', '792', '793', '794', '795', '796', '797', '798', '799', '800', '801', '802', '803', '804', '805', 
'806', '807', '808', '809', '810', '811', '812', '813', '814', '815', '816', '817', '818', '819', '820', '821', '822', '823', '824', '825', '826', '827', '828', '829', '830', '831', '832', '833', '834', '835', '836', '837', '838', '839', '840', '841', '843', '844', '845', '846', '847', '848', '849', '850', '851', '852', '853', '854', '855', '856', '857', '858', '859', '860', '861', '862', '863', '864', '865', '866', '867', '868', '869', '870', '871', '872', '873', '874', '875', '876', '877', '878', '879', '880', '881', '882', '883', '884', '885', '886', '888', '889', '890', '891', '892', '893', '894', '895', '896', '897', '898', '899', '900', '901', '902', '903', '904', '905', '906', '907', '908', '909', '910', '911', '912', '913', '915', '916', '917', '918', '919', '920', '921', '922', '923', '924', '925', '926', '927', '928', '929', '930', '931', '932', '933', '934', '935', '936', '937', '938', '939', '940', '941', '942', '943', '944', '945', '946', '947', '949', '950', '951', '952', '953', '954', '955', '956', '957', '958', '959', '960', '961', '962', '963', '964', '965', '966', '967', '968', '969', '970', '971', '972', '973', '974', '975', '976', '977', '978', '979', '980', '981', '982', '983', '984', '985', '986', '987', '988', '989', '990', '991', '992', '993', '994', '996', '997', '998', '999', '1000', '1001', '1002', '1003', '1005', '1007', '1008', '1009', '1010', '1011', '1012', '1013', '1014', '1015', '1017', '1018', '1019', '1020', '1021', '1022', '1023', '1024', '1025', '1026', '1027', '1028', '1029', '1030', '1031', '1032', '1033', '1034', '1035', '1036', '1037', '1038', '1039', '1040', '1041', '1042', '1043', '1044', '1045', '1046', '1047', '1048', '1049', '1051', '1052', '1053', '1054', '1055', '1056', '1057', '1058', '1059', '1060', '1061', '1062', '1063', '1064', '1065', '1066', '1067', '1068', '1070', '1071', '1072', '1073', '1074', '1075', '1076', '1077', '1078', '1079', '1080', '1081', '1082', '1083', '1084', '1085', '1087', '1088', '1089', '1090', '1091', '1092', '1093', '1094', '1095', '1096', '1097', '1098', '1099', '1100', '1101', '1102', '1103', '1104', '1105', '1106', '1108', '1109', '1110', '1111', '1112', '1114', '1117', '1118', '1119', '1120', '1121', '1122', '1123', '1124', '1125', '1126', '1127', '1128', '1129', '1130', '1131', '1132', '1133', '1134', '1135', '1136', '1137', '1138', '1139', '1140', '1141', '1142', '1143', '1144', '1145', '1147', '1148', '1149', '1150', '1151', '1152', '1153', '1154', '1156', '1161', '1162', '1164', '1165', '1166', '1167', '1168', '1169', '1170', '1171', '1173', '1174', '1175', '1176', '1178', '1180', '1181', '1182', '1183', '1184', '1185', '1186', '1187', '1190', '1191', '1192', '1193', '1195', '1196', '1197', '1199', '1200', '1201', '1202', '1203', '1204', '1205', '1206', '1207', '1208', '1210', '1211', '1212', '1214', '1217', '1218', '1220', '1221', '1222', '1223', '1224', '1225', '1226', '1227', '1228', '1230', '1231', '1232', '1233', '1234', '1236', '1237', '1238', '1239', '1240', '1241', '1242', '1243', '1244', '1245', '1246', '1248', '1250', '1252', '1254', '1255', '1256', '1257', '1258', '1259', '1260', '1262', '1263', '1265', '1266', '1267', '1268', '1269', '1271', '1272', '1273', '1275', '1276', '1277', '1279', '1281', '1282', '1283', '1285', '1286', '1287', '1288', '1290', '1291', '1293', '1294', '1297', '1298', '1300', '1302', '1303', '1306', '1307', '1309', '1310', '1311', '1313', '1317', '1318', '1319', '1321', '1323', '1326', '1327', '1328', '1329', '1330', '1331', '1332', '1333', '1334', '1336', '1337', '1339', 
'1340', '1341', '1342', '1344', '1345', '1346', '1347', '1348', '1349', '1352', '1353', '1356', '1357', '1359', '1360', '1361', '1363', '1364', '1365', '1366', '1368', '1369', '1370', '1372', '1373', '1374', '1376', '1380', '1386', '1388', '1389', '1390', '1391', '1392', '1394', '1395', '1397', '1398', '1399', '1405', '1407', '1408', '1410', '1411', '1412', '1416', '1422', '1423', '1424', '1425', '1426', '1432', '1434', '1435', '1437', '1438', '1439', '1440', '1441', '1446', '1447', '1448', '1449', '1452', '1456', '1460', '1461', '1462', '1463', '1464', '1467', '1468', '1469', '1471', '1472', '1473', '1476', '1478', '1479', '1480', '1487', '1488', '1489', '1490', '1491', '1492', '1495', '1499', '1500', '1502', '1503', '1504', '1505', '1508', '1512', '1514', '1516', '1521', '1528', '1529', '1530', '1531', '1532', '1534', '1540', '1543', '1545', '1548', '1550', '1551', '1552', '1554', '1555', '1556', '1559', '1563', '1567', '1569', '1571', '1573', '1574', '1575', '1576', '1579', '1580', '1581', '1584', '1590', '1594', '1597', '1598', '1602', '1603', '1606', '1608', '1611', '1613', '1615', '1616', '1617', '1618', '1622', '1623', '1624', '1628', '1640', '1642', '1649', '1662', '1663', '1665', '1666', '1669', '1673', '1677', '1681', '1689', '1692', '1697', '1707', '1710', '1713', '1720', '1721', '1723', '1730', '1735', '1739', '1740', '1745', '1756', '1767', '1776', '1777', '1788', '1804', '1805', '1806', '1809', '1816', '1817', '1820', '1833', '1834', '1842', '1848', '1850', '1855', '1867', '1868', '1869', '1871', '1877', '1880', '1882', '1906', '1925', '1934', '1946', '1954', '1957', '1958', '1960', '1962', '1966', '1970', '1973', '1975', '1978', '1980', '1992', '1994', '2015', '2016', '2025', '2028', '2029', '2033', '2035', '2053', '2055', '2062', '2078', '2087', '2089', '2093', '2122', '2129', '2139', '2184', '2187', '2191', '2203', '2219', '2231', '2260', '2299', '2301', '2316', '2372', '2420', '2429', '2453', '2456', '2462', '2486', '2516', '2621', '2635', '2653', '2680', '2692', '2769', '2870', '2926', '3076', '3078', '3094', '3183', '3253', '3284', '3322', '3366', '3422', '3509', '3631', '3643', '3785', '4199', '4918', 'y']\n" ], [ "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=100)", "_____no_output_____" ], [ "classifier = SupervisedDBNClassification(hidden_layers_structure=[64, 64],\n learning_rate_rbm=0.05,\n learning_rate=0.01,\n n_epochs_rbm=10,\n n_iter_backprop=100,\n batch_size=32,\n activation_function='relu',\n dropout_p=0.2)", "_____no_output_____" ], [ "classifier.fit(X_train, Y_train)", "[START] Pre-training step:\n>> Epoch 1 finished \tRBM Reconstruction error 1.898633\n>> Epoch 2 finished \tRBM Reconstruction error 1.823214\n>> Epoch 3 finished \tRBM Reconstruction error 2.134477\n>> Epoch 4 finished \tRBM Reconstruction error 2.380829\n>> Epoch 5 finished \tRBM Reconstruction error 2.713834\n>> Epoch 6 finished \tRBM Reconstruction error 3.186795\n>> Epoch 7 finished \tRBM Reconstruction error 3.155166\n>> Epoch 8 finished \tRBM Reconstruction error 3.446533\n>> Epoch 9 finished \tRBM Reconstruction error 3.467122\n>> Epoch 10 finished \tRBM Reconstruction error 4.328552\n>> Epoch 1 finished \tRBM Reconstruction error 2.240864\n>> Epoch 2 finished \tRBM Reconstruction error 4.949442\n>> Epoch 3 finished \tRBM Reconstruction error 8.683151\n>> Epoch 4 finished \tRBM Reconstruction error 11.209228\n>> Epoch 5 finished \tRBM Reconstruction error 13.634924\n>> Epoch 6 finished \tRBM Reconstruction error 15.392317\n>> Epoch 7 finished 
\tRBM Reconstruction error 19.078772\n>> Epoch 8 finished \tRBM Reconstruction error 23.279257\n>> Epoch 9 finished \tRBM Reconstruction error 24.736986\n>> Epoch 10 finished \tRBM Reconstruction error 26.009159\n[END] Pre-training step\nWARNING:tensorflow:From C:\\Users\\Test.PPMUMCPU0034\\Desktop\\Predictive Analytics\\Section 7\\tf_models.py:429: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n\n[START] Fine tuning step:\n>> Epoch 0 finished \tANN training loss 0.299661\n>> Epoch 1 finished \tANN training loss 0.295070\n>> Epoch 2 finished \tANN training loss 0.291765\n>> Epoch 3 finished \tANN training loss 0.290953\n>> Epoch 4 finished \tANN training loss 0.287843\n>> Epoch 5 finished \tANN training loss 0.287937\n>> Epoch 6 finished \tANN training loss 0.287697\n>> Epoch 7 finished \tANN training loss 0.284335\n>> Epoch 8 finished \tANN training loss 0.284241\n>> Epoch 9 finished \tANN training loss 0.282490\n>> Epoch 10 finished \tANN training loss 0.283384\n>> Epoch 11 finished \tANN training loss 0.282873\n>> Epoch 12 finished \tANN training loss 0.290396\n>> Epoch 13 finished \tANN training loss 0.281133\n>> Epoch 14 finished \tANN training loss 0.281815\n>> Epoch 15 finished \tANN training loss 0.284981\n>> Epoch 16 finished \tANN training loss 0.281288\n>> Epoch 17 finished \tANN training loss 0.280106\n>> Epoch 18 finished \tANN training loss 0.281995\n>> Epoch 19 finished \tANN training loss 0.280522\n>> Epoch 20 finished \tANN training loss 0.281755\n>> Epoch 21 finished \tANN training loss 0.280636\n>> Epoch 22 finished \tANN training loss 0.279230\n>> Epoch 23 finished \tANN training loss 0.282032\n>> Epoch 24 finished \tANN training loss 0.279606\n>> Epoch 25 finished \tANN training loss 0.279238\n>> Epoch 26 finished \tANN training loss 0.278925\n>> Epoch 27 finished \tANN training loss 0.279633\n>> Epoch 28 finished \tANN training loss 0.277767\n>> Epoch 29 finished \tANN training loss 0.281926\n>> Epoch 30 finished \tANN training loss 0.280427\n>> Epoch 31 finished \tANN training loss 0.278583\n>> Epoch 32 finished \tANN training loss 0.278570\n>> Epoch 33 finished \tANN training loss 0.277485\n>> Epoch 34 finished \tANN training loss 0.277426\n>> Epoch 35 finished \tANN training loss 0.277010\n>> Epoch 36 finished \tANN training loss 0.278092\n>> Epoch 37 finished \tANN training loss 0.277282\n>> Epoch 38 finished \tANN training loss 0.278445\n>> Epoch 39 finished \tANN training loss 0.277108\n>> Epoch 40 finished \tANN training loss 0.277080\n>> Epoch 41 finished \tANN training loss 0.277618\n>> Epoch 42 finished \tANN training loss 0.277189\n>> Epoch 43 finished \tANN training loss 0.277878\n>> Epoch 44 finished \tANN training loss 0.276542\n>> Epoch 45 finished \tANN training loss 0.276110\n>> Epoch 46 finished \tANN training loss 0.276780\n>> Epoch 47 finished \tANN training loss 0.277858\n>> Epoch 48 finished \tANN training loss 0.276102\n>> Epoch 49 finished \tANN training loss 0.275450\n>> Epoch 50 finished \tANN training loss 0.276393\n>> Epoch 51 finished \tANN training loss 0.276390\n>> Epoch 52 finished \tANN training loss 0.275516\n>> Epoch 53 finished \tANN training loss 0.277077\n>> Epoch 54 finished \tANN training loss 0.275404\n>> Epoch 55 finished \tANN training loss 0.275136\n>> 
Epoch 56 finished \tANN training loss 0.275833\n>> Epoch 57 finished \tANN training loss 0.276556\n>> Epoch 58 finished \tANN training loss 0.274757\n>> Epoch 59 finished \tANN training loss 0.274097\n>> Epoch 60 finished \tANN training loss 0.274187\n>> Epoch 61 finished \tANN training loss 0.274909\n>> Epoch 62 finished \tANN training loss 0.274679\n>> Epoch 63 finished \tANN training loss 0.273806\n>> Epoch 64 finished \tANN training loss 0.274362\n>> Epoch 65 finished \tANN training loss 0.273575\n>> Epoch 66 finished \tANN training loss 0.274095\n>> Epoch 67 finished \tANN training loss 0.273716\n>> Epoch 68 finished \tANN training loss 0.275413\n>> Epoch 69 finished \tANN training loss 0.273773\n>> Epoch 70 finished \tANN training loss 0.272854\n>> Epoch 71 finished \tANN training loss 0.272536\n>> Epoch 72 finished \tANN training loss 0.273622\n>> Epoch 73 finished \tANN training loss 0.272048\n>> Epoch 74 finished \tANN training loss 0.272974\n>> Epoch 75 finished \tANN training loss 0.273109\n>> Epoch 76 finished \tANN training loss 0.273178\n>> Epoch 77 finished \tANN training loss 0.272605\n>> Epoch 78 finished \tANN training loss 0.272742\n>> Epoch 79 finished \tANN training loss 0.271513\n>> Epoch 80 finished \tANN training loss 0.273859\n>> Epoch 81 finished \tANN training loss 0.272571\n>> Epoch 82 finished \tANN training loss 0.271164\n>> Epoch 83 finished \tANN training loss 0.271178\n>> Epoch 84 finished \tANN training loss 0.271566\n>> Epoch 85 finished \tANN training loss 0.271825\n>> Epoch 86 finished \tANN training loss 0.270660\n>> Epoch 87 finished \tANN training loss 0.271399\n>> Epoch 88 finished \tANN training loss 0.271381\n>> Epoch 89 finished \tANN training loss 0.270808\n>> Epoch 90 finished \tANN training loss 0.270150\n>> Epoch 91 finished \tANN training loss 0.270768\n>> Epoch 92 finished \tANN training loss 0.270213\n>> Epoch 93 finished \tANN training loss 0.269677\n>> Epoch 94 finished \tANN training loss 0.268543\n>> Epoch 95 finished \tANN training loss 0.268436\n>> Epoch 96 finished \tANN training loss 0.268470\n>> Epoch 97 finished \tANN training loss 0.268205\n>> Epoch 98 finished \tANN training loss 0.267602\n>> Epoch 99 finished \tANN training loss 0.267119\n[END] Fine tuning step\n" ], [ "Y_pred = classifier.predict(X_test)\nprint('Accuracy: %f' % accuracy_score(Y_test, Y_pred))", "Accuracy: 0.899777\n" ], [ "p, r, f, s = precision_recall_fscore_support(Y_test, Y_pred, average='weighted')\nprint('Precision:', p)\nprint('Recall:', r)\nprint('F1-score:', f)", "Precision: 0.8811782382563896\nRecall: 0.8997766339710596\nF1-score: 0.8746846933752881\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb0610b026903945fec1526d24720ed08eadab3
9,698
ipynb
Jupyter Notebook
examples/notebooks/example_gmle.ipynb
JanSchulz/statsmodels
a160bbc790ef447ec365651ad01da3cf11e75f7f
[ "BSD-3-Clause" ]
null
null
null
examples/notebooks/example_gmle.ipynb
JanSchulz/statsmodels
a160bbc790ef447ec365651ad01da3cf11e75f7f
[ "BSD-3-Clause" ]
null
null
null
examples/notebooks/example_gmle.ipynb
JanSchulz/statsmodels
a160bbc790ef447ec365651ad01da3cf11e75f7f
[ "BSD-3-Clause" ]
null
null
null
32.219269
628
0.528253
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecb065b1867a87a8edaef4a65c28540d8c551894
485,921
ipynb
Jupyter Notebook
05.noisy_net.ipynb
leeyaf/rainbow-is-all-you-need
692796a9515cc40154aab5672bf06e08ba9f8b8b
[ "MIT" ]
null
null
null
05.noisy_net.ipynb
leeyaf/rainbow-is-all-you-need
692796a9515cc40154aab5672bf06e08ba9f8b8b
[ "MIT" ]
null
null
null
05.noisy_net.ipynb
leeyaf/rainbow-is-all-you-need
692796a9515cc40154aab5672bf06e08ba9f8b8b
[ "MIT" ]
null
null
null
424.385153
62,192
0.94147
[ [ [ "## Configurations for Colab", "_____no_output_____" ] ], [ [ "import sys\nIN_COLAB = \"google.colab\" in sys.modules\n\nif IN_COLAB:\n !apt install python-opengl\n !apt install ffmpeg\n !apt install xvfb\n !pip install pyvirtualdisplay\n !pip install gym\n from pyvirtualdisplay import Display\n \n # Start virtual display\n dis = Display(visible=0, size=(400, 400))\n dis.start()", "_____no_output_____" ] ], [ [ "# 05. Noisy Networks for Exploration\n\n[M. Fortunato et al., \"Noisy Networks for Exploration.\" arXiv preprint arXiv:1706.10295, 2017.](https://arxiv.org/pdf/1706.10295.pdf)\n\n\nNoisyNet is an exploration method that learns perturbations of the network weights to drive exploration. The key insight is that a single change to the weight vector can induce a consistent, and potentially very complex, state-dependent change in policy over multiple time steps.\n\nFirstly, let's take a look into a linear layer of a neural network with $p$ inputs and $q$ outputs, represented by\n\n$$\ny = wx + b,\n$$\n\nwhere $x \\in \\mathbb{R}^p$ is the layer input, $w \\in \\mathbb{R}^{q \\times p}$, and $b \\in \\mathbb{R}$ the bias.\n\nThe corresponding noisy linear layer is defined as:\n\n$$\ny = (\\mu^w + \\sigma^w \\odot \\epsilon^w) x + \\mu^b + \\sigma^b \\odot \\epsilon^b,\n$$\n\nwhere $\\mu^w + \\sigma^w \\odot \\epsilon^w$ and $\\mu^b + \\sigma^b \\odot \\epsilon^b$ replace $w$ and $b$ in the first linear layer equation. The parameters $\\mu^w \\in \\mathbb{R}^{q \\times p}, \\mu^b \\in \\mathbb{R}^q, \\sigma^w \\in \\mathbb{R}^{q \\times p}$ and $\\sigma^b \\in \\mathbb{R}^q$ are learnable, whereas $\\epsilon^w \\in \\mathbb{R}^{q \\times p}$ and $\\epsilon^b \\in \\mathbb{R}^q$ are noise random variables which can be generated by one of the following two ways:\n\n1. **Independent Gaussian noise**: the noise applied to each weight and bias is independent, where each random noise entry is drawn from a unit Gaussian distribution. This means that for each noisy linear layer, there are $pq + q$ noise variables (for $p$ inputs to the layer and $q$ outputs).\n2. **Factorised Gaussian noise:** This is a more computationally efficient way. 
It produces 2 random Gaussian noise vectors ($p, q$) and makes $pq + q$ noise entries by outer product as follows:\n\n$$\n\\begin{align}\n\\epsilon_{i,j}^w &= f(\\epsilon_i) f(\\epsilon_j),\\\\\n\\epsilon_{j}^b &= f(\\epsilon_i),\\\\\n\\text{where } f(x) &= sgn(x) \\sqrt{|x|}.\n\\end{align}\n$$\n\nIn all experiements of the paper, the authors used Factorised Gaussian noise, so we will go for it as well.", "_____no_output_____" ] ], [ [ "import math\nimport os\nfrom typing import Dict, List, Tuple\n\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom IPython.display import clear_output", "_____no_output_____" ] ], [ [ "## Replay buffer\n\nPlease see *01.dqn.ipynb* for detailed description.", "_____no_output_____" ] ], [ [ "class ReplayBuffer:\n \"\"\"A simple numpy replay buffer.\"\"\"\n\n def __init__(self, obs_dim: int, size: int, batch_size: int = 32):\n self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size], dtype=np.float32)\n self.rews_buf = np.zeros([size], dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.max_size, self.batch_size = size, batch_size\n self.ptr, self.size, = 0, 0\n\n def store(\n self,\n obs: np.ndarray,\n act: np.ndarray, \n rew: float, \n next_obs: np.ndarray, \n done: bool,\n ):\n self.obs_buf[self.ptr] = obs\n self.next_obs_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self) -> Dict[str, np.ndarray]:\n idxs = np.random.choice(self.size, size=self.batch_size, replace=False)\n return dict(obs=self.obs_buf[idxs],\n next_obs=self.next_obs_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs])\n\n def __len__(self) -> int:\n return self.size", "_____no_output_____" ] ], [ [ "## Noisy Layer\n\n**References:**\n- https://github.com/higgsfield/RL-Adventure/blob/master/5.noisy%20dqn.ipynb\n- https://github.com/Kaixhin/Rainbow/blob/master/model.py", "_____no_output_____" ] ], [ [ "class NoisyLinear(nn.Module):\n \"\"\"Noisy linear module for NoisyNet.\n \n Attributes:\n in_features (int): input size of linear module\n out_features (int): output size of linear module\n std_init (float): initial std value\n weight_mu (nn.Parameter): mean value weight parameter\n weight_sigma (nn.Parameter): std value weight parameter\n bias_mu (nn.Parameter): mean value bias parameter\n bias_sigma (nn.Parameter): std value bias parameter\n \n \"\"\"\n\n def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):\n \"\"\"Initialization.\"\"\"\n super(NoisyLinear, self).__init__()\n \n self.in_features = in_features\n self.out_features = out_features\n self.std_init = std_init\n\n self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features))\n self.weight_sigma = nn.Parameter(\n torch.Tensor(out_features, in_features)\n )\n self.register_buffer(\n \"weight_epsilon\", torch.Tensor(out_features, in_features)\n )\n\n self.bias_mu = nn.Parameter(torch.Tensor(out_features))\n self.bias_sigma = nn.Parameter(torch.Tensor(out_features))\n self.register_buffer(\"bias_epsilon\", torch.Tensor(out_features))\n\n self.reset_parameters()\n self.reset_noise()\n\n def reset_parameters(self):\n \"\"\"Reset 
trainable network parameters (factorized gaussian noise).\"\"\"\n mu_range = 1 / math.sqrt(self.in_features)\n self.weight_mu.data.uniform_(-mu_range, mu_range)\n self.weight_sigma.data.fill_(\n self.std_init / math.sqrt(self.in_features)\n )\n self.bias_mu.data.uniform_(-mu_range, mu_range)\n self.bias_sigma.data.fill_(\n self.std_init / math.sqrt(self.out_features)\n )\n\n def reset_noise(self):\n \"\"\"Make new noise.\"\"\"\n epsilon_in = self.scale_noise(self.in_features)\n epsilon_out = self.scale_noise(self.out_features)\n\n # outer product\n self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))\n self.bias_epsilon.copy_(epsilon_out)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward method implementation.\n \n We don't use separate statements on train / eval mode.\n It doesn't show remarkable difference of performance.\n \"\"\"\n return F.linear(\n x,\n self.weight_mu + self.weight_sigma * self.weight_epsilon,\n self.bias_mu + self.bias_sigma * self.bias_epsilon,\n )\n \n @staticmethod\n def scale_noise(size: int) -> torch.Tensor:\n \"\"\"Set scale to make noise (factorized gaussian noise).\"\"\"\n x = torch.FloatTensor(np.random.normal(loc=0.0, scale=1.0, size=size))\n\n return x.sign().mul(x.abs().sqrt())", "_____no_output_____" ] ], [ [ "## Noisy Network\n\nWe use NoisyLinear for the last two FC layers, and there is a method to reset noise at every step.\nThese are the only differences from the example of *01.dqn.ipynb*.", "_____no_output_____" ] ], [ [ "class Network(nn.Module):\n def __init__(self, in_dim: int, out_dim: int):\n \"\"\"Initialization.\"\"\"\n super(Network, self).__init__()\n\n self.feature = nn.Linear(in_dim, 128)\n self.noisy_layer1 = NoisyLinear(128, 128)\n self.noisy_layer2 = NoisyLinear(128, out_dim)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward method implementation.\"\"\"\n feature = F.relu(self.feature(x))\n hidden = F.relu(self.noisy_layer1(feature))\n out = self.noisy_layer2(hidden)\n \n return out\n \n def reset_noise(self):\n \"\"\"Reset all noisy layers.\"\"\"\n self.noisy_layer1.reset_noise()\n self.noisy_layer2.reset_noise()", "_____no_output_____" ] ], [ [ "## DQN + NoisyNet Agent (w/o DuelingNet)\n\nHere is a summary of DQNAgent class.\n\n| Method | Note |\n| --- | --- |\n|select_action | select an action from the input state. |\n|step | take an action and return the response of the env. |\n|compute_dqn_loss | return dqn loss. |\n|update_model | update the model by gradient descent. |\n|target_hard_update| hard update from the local model to the target model.|\n|train | train the agent during num_frames. |\n|test | test the agent (1 episode). |\n|plot | plot the training progresses. |\n\nIn the paper, NoisyNet is used as a component of the Dueling Network Architecture, which includes Double-DQN and Prioritized Experience Replay. However, we don't implement them to simplify the tutorial. One thing to note is that NoisyNet is an alternertive to $\\epsilon$-greedy method, so all $\\epsilon$ related lines are removed. 
Please check all comments with *NoisyNet*.", "_____no_output_____" ] ], [ [ "class DQNAgent:\n \"\"\"DQN Agent interacting with environment.\n \n Attribute:\n env (gym.Env): openAI Gym environment\n memory (ReplayBuffer): replay memory to store transitions\n batch_size (int): batch size for sampling\n target_update (int): period for target model's hard update\n gamma (float): discount factor\n dqn (Network): model to train and select actions\n dqn_target (Network): target model to update\n optimizer (torch.optim): optimizer for training dqn\n transition (list): transition information including\n state, action, reward, next_state, done\n \"\"\"\n\n def __init__(\n self, \n env: gym.Env,\n memory_size: int,\n batch_size: int,\n target_update: int,\n gamma: float = 0.99,\n ):\n \"\"\"Initialization.\n \n Args:\n env (gym.Env): openAI Gym environment\n memory_size (int): length of memory\n batch_size (int): batch size for sampling\n target_update (int): period for target model's hard update\n gamma (float): discount factor\n \"\"\"\n # NoisyNet: All attributes related to epsilon are removed\n obs_dim = env.observation_space.shape[0]\n action_dim = env.action_space.n\n \n self.env = env\n self.memory = ReplayBuffer(obs_dim, memory_size, batch_size)\n self.batch_size = batch_size\n self.target_update = target_update\n self.gamma = gamma\n \n # device: cpu / gpu\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n print(self.device)\n\n # networks: dqn, dqn_target\n self.dqn = Network(obs_dim, action_dim).to(self.device)\n self.dqn_target = Network(obs_dim, action_dim).to(self.device)\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n self.dqn_target.eval()\n \n # optimizer\n self.optimizer = optim.Adam(self.dqn.parameters())\n\n # transition to store in memory\n self.transition = list()\n \n # mode: train / test\n self.is_test = False\n\n def select_action(self, state: np.ndarray) -> np.ndarray:\n \"\"\"Select an action from the input state.\"\"\"\n # NoisyNet: no epsilon greedy action selection\n selected_action = self.dqn(\n torch.FloatTensor(state).to(self.device)\n ).argmax()\n selected_action = selected_action.detach().cpu().numpy()\n \n if not self.is_test:\n self.transition = [state, selected_action]\n \n return selected_action\n\n def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n \"\"\"Take an action and return the response of the env.\"\"\"\n next_state, reward, done, _ = self.env.step(action)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n self.memory.store(*self.transition)\n \n return next_state, reward, done\n\n def update_model(self) -> torch.Tensor:\n \"\"\"Update the model by gradient descent.\"\"\"\n samples = self.memory.sample_batch()\n\n loss = self._compute_dqn_loss(samples)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # NoisyNet: reset noise\n self.dqn.reset_noise()\n self.dqn_target.reset_noise()\n\n return loss.item()\n \n def train(self, num_frames: int, plotting_interval: int = 200):\n \"\"\"Train the agent.\"\"\"\n self.is_test = False\n \n state = self.env.reset()\n update_cnt = 0\n losses = []\n scores = []\n score = 0\n\n for frame_idx in range(1, num_frames + 1):\n action = self.select_action(state)\n next_state, reward, done = self.step(action)\n\n state = next_state\n score += reward\n \n # NoisyNet: removed decrease of epsilon\n\n # if episode ends\n if done:\n state = self.env.reset()\n scores.append(score)\n score = 0\n\n # if 
training is ready\n if len(self.memory) >= self.batch_size:\n loss = self.update_model()\n losses.append(loss)\n update_cnt += 1\n \n # if hard update is needed\n if update_cnt % self.target_update == 0:\n self._target_hard_update()\n\n # plotting\n if frame_idx % plotting_interval == 0:\n self._plot(frame_idx, scores, losses)\n \n self.env.close()\n \n def test(self) -> List[np.ndarray]:\n \"\"\"Test the agent.\"\"\"\n self.is_test = True\n \n state = self.env.reset()\n done = False\n score = 0\n \n frames = []\n while not done:\n frames.append(self.env.render(mode=\"rgb_array\"))\n action = self.select_action(state)\n next_state, reward, done = self.step(action)\n\n state = next_state\n score += reward\n \n print(\"score: \", score)\n self.env.close()\n \n return frames\n\n def _compute_dqn_loss(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:\n \"\"\"Return dqn loss.\"\"\"\n device = self.device # for shortening the following lines\n state = torch.FloatTensor(samples[\"obs\"]).to(device)\n next_state = torch.FloatTensor(samples[\"next_obs\"]).to(device)\n action = torch.LongTensor(samples[\"acts\"].reshape(-1, 1)).to(device)\n reward = torch.FloatTensor(samples[\"rews\"].reshape(-1, 1)).to(device)\n done = torch.FloatTensor(samples[\"done\"].reshape(-1, 1)).to(device)\n \n # G_t = r + gamma * v(s_{t+1}) if state != Terminal\n # = r otherwise\n curr_q_value = self.dqn(state).gather(1, action)\n next_q_value = self.dqn_target(next_state).max(\n dim=1, keepdim=True\n )[0].detach()\n mask = 1 - done\n target = (reward + self.gamma * next_q_value * mask).to(self.device)\n\n # calculate dqn loss\n loss = F.smooth_l1_loss(curr_q_value, target)\n\n return loss\n\n def _target_hard_update(self):\n \"\"\"Hard update: target <- local.\"\"\"\n self.dqn_target.load_state_dict(self.dqn.state_dict())\n \n def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float], \n ):\n \"\"\"Plot the training progresses.\"\"\"\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. 
score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.show()", "_____no_output_____" ] ], [ [ "## Environment\n\nYou can see the [code](https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py) and [configurations](https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L53) of CartPole-v0 from OpenAI's repository.", "_____no_output_____" ] ], [ [ "# environment\nenv_id = \"CartPole-v0\"\nenv = gym.make(env_id)\nif IN_COLAB:\n env = gym.wrappers.Monitor(env, \"videos\", force=True)", "_____no_output_____" ] ], [ [ "## Set random seed", "_____no_output_____" ] ], [ [ "seed = 777\n\ndef seed_torch(seed):\n torch.manual_seed(seed)\n if torch.backends.cudnn.enabled:\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\nnp.random.seed(seed)\nseed_torch(seed)\nenv.seed(seed)", "_____no_output_____" ] ], [ [ "## Initialize", "_____no_output_____" ] ], [ [ "# parameters\nnum_frames = 20000\nmemory_size = 2000\nbatch_size = 64\ntarget_update = 150\n\n# train\nagent = DQNAgent(env, memory_size, batch_size, target_update)", "cpu\n" ] ], [ [ "## Train", "_____no_output_____" ] ], [ [ "agent.train(num_frames)", "_____no_output_____" ] ], [ [ "## Test\n\nRun the trained agent (1 episode).", "_____no_output_____" ] ], [ [ "frames = agent.test()", "score: 200.0\n" ] ], [ [ "## Render", "_____no_output_____" ] ], [ [ "if IN_COLAB: # for colab\n import base64\n import glob\n import io\n import os\n\n from IPython.display import HTML, display\n\n\n def ipython_show_video(path: str) -> None:\n \"\"\"Show a video at `path` within IPython Notebook.\"\"\"\n if not os.path.isfile(path):\n raise NameError(\"Cannot access: {}\".format(path))\n\n video = io.open(path, \"r+b\").read()\n encoded = base64.b64encode(video)\n\n display(HTML(\n data=\"\"\"\n <video alt=\"test\" controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\"/>\n </video>\n \"\"\".format(encoded.decode(\"ascii\"))\n ))\n\n list_of_files = glob.glob(\"videos/*.mp4\")\n latest_file = max(list_of_files, key=os.path.getctime)\n print(latest_file)\n ipython_show_video(latest_file)\n \nelse: # for jupyter\n from matplotlib import animation\n from JSAnimation.IPython_display import display_animation\n from IPython.display import display\n\n\n def display_frames_as_gif(frames: List[np.ndarray]) -> None:\n \"\"\"Displays a list of frames as a gif, with controls.\"\"\"\n patch = plt.imshow(frames[0])\n plt.axis('off')\n\n def animate(i):\n patch.set_data(frames[i])\n\n anim = animation.FuncAnimation(\n plt.gcf(), animate, frames = len(frames), interval=50\n )\n display(display_animation(anim, default_mode='loop'))\n\n\n # display \n display_frames_as_gif(frames)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb08eb8469181a84b1d5e71d224144726ac2cd9
118,927
ipynb
Jupyter Notebook
tutorial_3/ML_for_images.ipynb
BjornGudmund/qtim_Tutorials
732b123c623f6c6bedad3aab0a79634be9a0853d
[ "MIT" ]
26
2018-01-16T16:36:56.000Z
2021-02-23T03:47:29.000Z
tutorial_3/ML_for_images.ipynb
BjornGudmund/qtim_Tutorials
732b123c623f6c6bedad3aab0a79634be9a0853d
[ "MIT" ]
1
2018-08-30T15:51:21.000Z
2018-08-31T13:43:46.000Z
tutorial_3/ML_for_images.ipynb
BjornGudmund/qtim_Tutorials
732b123c623f6c6bedad3aab0a79634be9a0853d
[ "MIT" ]
16
2018-03-11T21:29:51.000Z
2021-04-28T12:07:45.000Z
105.80694
50,160
0.823001
[ [ [ "# Tutorial 3: Machine Learning and Image Data", "_____no_output_____" ], [ "Previously, we applied machine learning to stroke risk data using random forests. We got some pretty good results! \n\nYou may be wondering, however -- how do we apply these machine learning methods to the medical imaging data we know and love? At first glance, images seem more complex then the list of stroke predictors we looked at before. Pixels (and voxels) have scalar values, like the list of stroke predictors, but they also have spatial relationships that affect their interpretation. For example, a data point in a image may be significant because it is valued very highly (bright), or it may be significant because of its differences from neighboring voxels (contrast). How can we process this new relationship with the methods we've already encountered?\n\nThe answer, in short, is to reformat our image data so that it is more similar to the vectorized stroke data we dealt with previously. In this tutorial, we will show you how you can do that quite simply with small images by \"flattening\" an image into a vectorized state. We will also show that this basicmethod quickly becomes impractical with large images, and start to discuss how neural networks overcome this problem with the help of _convolutional filters_ over images.", "_____no_output_____" ], [ "To get started, we're first going to load a very small image of the number \"4\".", "_____no_output_____" ] ], [ [ "!wget -O mnist_sample.png https://github.com/QTIM-Lab/qtim_Tutorials/blob/master/tutorial_3/mnist_sample.png?raw=true\n!wget -O mnist_sample_2.png https://github.com/QTIM-Lab/qtim_Tutorials/blob/master/tutorial_3/mnist_sample_2.png?raw=true\n!wget -O mnist_glossary.png https://github.com/QTIM-Lab/qtim_Tutorials/blob/master/tutorial_3/mnist_glossary.png?raw=true", "'wget' is not recognized as an internal or external command,\noperable program or batch file.\n'wget' is not recognized as an internal or external command,\noperable program or batch file.\n'wget' is not recognized as an internal or external command,\noperable program or batch file.\n" ], [ "from PIL import Image\nimport matplotlib.pyplot as plt # another common abbreviation\n%matplotlib inline\nimport numpy as np # common abbreviation\n\nimg = Image.open('mnist_sample.png')\nimg_arr = np.asarray(img)\nplt.imshow(img_arr, cmap='gray')\nplt.axis('off')", "_____no_output_____" ] ], [ [ "Pretty small! Let's check the size of that image.", "_____no_output_____" ] ], [ [ "print(img_arr.shape)", "(28, 28)\n" ] ], [ [ "Only 28x28, or 784 pixels. Far smaller than most images we deal with in the real world, but a good starting point for today's tutorial. Let's bring up a second image, and use matplotlib's subplot feature to compare the two.", "_____no_output_____" ] ], [ [ "img_2 = Image.open('mnist_sample_2.png')\nimg_arr_2 = np.asarray(img_2)\n\nplt.subplot(1, 2, 1) # total rows, total columns, position (row*column)\nplt.imshow(img_arr, cmap='gray')\nplt.axis('off')\nplt.subplot(1, 2, 2)\nplt.imshow(img_arr_2, cmap='gray')\nplt.axis('off')", "_____no_output_____" ] ], [ [ "A 4 and a 5 -- quite interesting! Here is today's challenge: how can you use an algorithm to tell whether a picture is a picture of a \"4\" or a picture of a \"5\"? Or, maybe, for that matter, a picture of a \"3\"? 
This problem of differentiating between different pictures of handwritten digits is a common benchmarking problem in computer vision, and its trained on a datset known as the MNIST dataset.", "_____no_output_____" ], [ "![ML](mnist_glossary.png)", "_____no_output_____" ], [ "Telling the difference between the rows of handwritten digits above is easy for a human being, but creating an algorithm to do so in the context of a program is not necessarily obvious. We'll start with the simplest possible method, which is to _flatten_ an image into a vectorized state, and then run a machine learning algorithm we're familiar with on it. Let's get started.", "_____no_output_____" ] ], [ [ "# We flatten our image by using numpy's \"reshape\" function to make it a 1 x (height * width) array.\nprint(img_arr.shape)\nimg_flattened = np.reshape(img_arr, (1, 784)) # 28x28 = 784\nprint(img_flattened.shape)", "(28, 28)\n(1, 784)\n" ] ], [ [ "So what does that look like..", "_____no_output_____" ] ], [ [ "plt.figure(figsize = (20,1))\nplt.imshow(img_flattened)\nplt.axis('off')", "_____no_output_____" ] ], [ [ "Not very helpful -- let's try looking at just one segment.", "_____no_output_____" ] ], [ [ "shortened_img_flattened = img_flattened[:, 200:300]\nplt.figure(figsize = (20,1))\nplt.imshow(shortened_img_flattened)\nplt.axis('off')", "_____no_output_____" ] ], [ [ "Weird! Let's see how that compares to our old friend \"5\".", "_____no_output_____" ] ], [ [ "img_flattened_2 = np.reshape(img_arr_2, (1, 784)) # 28x28 = 784\nshortened_img_flattened_2 = img_flattened_2[:, 200:300]\n\nplt.figure(figsize = (20,1))\nplt.imshow(shortened_img_flattened, interpolation='none')\nplt.axis('off')\nplt.subplot(2, 1, 1) # total rows, total columns, position (row*column)\nplt.imshow(shortened_img_flattened)\nplt.axis('off')\nplt.subplot(2, 1, 2)\nplt.imshow(shortened_img_flattened_2)\nplt.axis('off')", "_____no_output_____" ] ], [ [ "Interesting -- it seems like there might be something there. There are high values on the \"5\" where there are low values on the \"4\", and vice-versa. Much like how the random forest identified risk factors in the stroke dataset, a random forest can also identify these informative pixels that may lead to a classification of an image as a \"4\" or a \"5\".\n\nTo train an algorithm to separate these 4s and 5s, we will need a lot of images. We _could_ write some code to download a massive dataset of images of 4s and 5s from the internet, flatten those images individually, and concatenate each flattened image into a new row of a pandas array. Lord knows that if you continue in data science, you will have to write code like that, but today, thankfully, someone else has done the hard work for you. In this case, you'll simply need to load a dataset we've attached to this lesson called \"mnist.csv\".", "_____no_output_____" ] ], [ [ "!wget -O mnist.csv https://www.dropbox.com/s/zzltdvylq6sj25j/mnist.csv?dl=1\nimport pandas as pd # common abbreviation\ndata = pd.read_csv('mnist.csv') # load the comma separated value (CSV) file\ndata.head(10) # show the first n rows of this file", "'wget' is not recognized as an internal or external command,\noperable program or batch file.\n" ] ], [ [ "It's kind of looking like the diabetes dataset now! 
For fun and knowledge, let's visualize some of the data like we did above.", "_____no_output_____" ] ], [ [ "for i in range(20): \n plt.figure(figsize = (20,20)) \n plt.subplot(20, 1, i+1) # total rows, total columns, position (row*column)\n plt.imshow(data.iloc[i, 200:300].reshape(1,100))\n plt.axis('off')\n plt.title(str(data.label[i]))", "C:\\Users\\jxb29\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:4: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead\n after removing the cwd from sys.path.\n" ] ], [ [ "Hmmm... Looks like there might be a pattern there. I wonder if we can use the visualization method we used for the stroke data? Let's give it a shot. ", "_____no_output_____" ] ], [ [ "plt.rcParams['figure.figsize'] = [20, 10]\n\nimport seaborn as sns\nsns.set_style('white')\n\nfor i, column in enumerate(data.columns[100:108]): \n \n plt.subplot(2, 4, i+1) # total rows, total columns, position (row*column)\n box = sns.boxplot(x='label', y=column, data=data)", "_____no_output_____" ] ], [ [ "Weird! And not very helpful. Most of the time, pixel values are zero, so a boxplot median and upper and lower quartiles will all be zero. I wonder.. could a random forest still learn? Let's recycle our code from last time.", "_____no_output_____" ] ], [ [ "pixel_variables = data.columns[1:] # Instead of clinical_variables, we have pixel_variables. They are all columns except the first row.\n\nfrom sklearn.cross_validation import train_test_split\n\n# Note: these are not great variable names, but very common in mathematical notation\n# Capital letter: matrix, lower-case letter: vector\nX_train, X_test, y_train, y_test = train_test_split(data[pixel_variables], data['label'], train_size=0.8, test_size=0.2)\n\n# How many cases do we have?\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)", "(6293, 784) (6293,)\n(1574, 784) (1574,)\n" ], [ "# Let's train our random forest!\nfrom sklearn import tree\n\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X_train, y_train)", "_____no_output_____" ], [ "# How does it do?\nfrom sklearn.metrics import accuracy_score, classification_report\n\n# Training data\nprint('Accuracy on training data:', accuracy_score(y_train, clf.predict(X_train)))\n\n# Testing...\nprint('Accuracy on testing data:', accuracy_score(y_test, clf.predict(X_test)))", "Accuracy on training data: 1.0\nAccuracy on testing data: 0.9714104193138501\n" ] ], [ [ "Looks like it does pretty well -- 98%! It turns out computer vision isn't that hard after all. However, we're only able to get this result because we are dealing with 28x28 images. Particularly, because these images have only 784 variables, we are able to relatively easily create a dataset where we have more samples than variables. Real images, however, can be much larger. A 1000x1000 pixel image will have 1,000,000 separate variables to regress over, which is often much larger than the number of samples we can acquire. 
An MRI image is often on the order of 150x150x225 at isotropic resolution, which gives about 5 million variables!", "_____no_output_____" ], [ "# Bonus: neural network!", "_____no_output_____" ] ], [ [ "from sklearn.neural_network import MLPClassifier\nclf = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(32, 32))\nclf.fit(X_train, y_train)", "_____no_output_____" ], [ "print('Accuracy on training data:', accuracy_score(y_train, clf.predict(X_train)))\nprint('Accuracy on testing data:', accuracy_score(y_test, clf.predict(X_test)))", "Accuracy on training data: 0.9984109327824567\nAccuracy on testing data: 0.9911054637865311\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
ecb0913735e685261cf086cb8ac8cff04968bbaf
45,208
ipynb
Jupyter Notebook
docs/tutorials/variational_algorithm.ipynb
rheaparekh/Cirq
50d9be8812ca5729344bdd58b33e21b7831f9105
[ "Apache-2.0" ]
null
null
null
docs/tutorials/variational_algorithm.ipynb
rheaparekh/Cirq
50d9be8812ca5729344bdd58b33e21b7831f9105
[ "Apache-2.0" ]
null
null
null
docs/tutorials/variational_algorithm.ipynb
rheaparekh/Cirq
50d9be8812ca5729344bdd58b33e21b7831f9105
[ "Apache-2.0" ]
null
null
null
51.256236
699
0.536586
[ [ [ "##### Copyright 2020 The Cirq Developers", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Quantum Variational Algorithm", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.example.org/cirq/tutorials/variational_algorithm\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on QuantumLib</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/variational_algorithm.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/variational_algorithm.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/variational_algorithm.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "In this tutorial, we will create a [quantum variational algorithm](https://arxiv.org/abs/1304.3061) using cirq and show how it can optimize a simple Ising model.\n\nTo begin, please follow the instructions for [installing Cirq](../install.md).", "_____no_output_____" ], [ "## Background: Variational Quantum Algorithm\n\nThe [variational method](https://en.wikipedia.org/wiki/Variational_method_(quantum_mechanics)) in quantum theory is a classical method for finding low energy states of a quantum system. The rough idea of this method is that one defines a trial wave function (sometimes called an *ansatz*) as a function of some parameters, and then one finds the values of these parameters that minimize the expectation value of the energy with respect to these parameters. This minimized ansatz is then an approximation to the lowest energy eigenstate, and the expectation value serves as an upper bound on the energy of the ground state.\n\nIn the last few years (see [arXiv:1304.3061](https://arxiv.org/abs/1304.3061) and [arXiv:1507.08969](https://arxiv.org/abs/1507.08969), for example), it has been realized that quantum computers can mimic the classical technique and that a quantum computer does so with certain advantages. In particular, when one applies the classical variational method to a system of $n$ qubits, an exponential number (in $n$) of complex numbers is necessary to generically represent the wave function of the system. However, with a quantum computer, one can directly produce this state using a parameterized quantum circuit, and then by repeated measurements estimate the expectation value of the energy.\n\nThis idea has led to a class of algorithms known as variational quantum algorithms. 
Indeed this approach is not just limited to finding low energy eigenstates, but minimizing any objective function that can be expressed as a quantum observable. It is an open question to identify under what conditions these quantum variational algorithms will succeed, and exploring this class of algorithms is a key part of the research for [noisy intermediate scale quantum computers](https://arxiv.org/abs/1801.00862).\n\nThe classical problem we will focus on is the 2D +/- Ising model with transverse field ([ISING](http://iopscience.iop.org/article/10.1088/0305-4470/15/10/028/meta)). This problem is NP-complete. So it is highly unlikely that quantum computers will be able to efficiently solve it across all instances. Yet this type of problem is illustrative of the general class of problems that Cirq is designed to tackle.\n\n\nConsider the energy function\n\n$E(s_1,\\dots,s_n) = \\sum_{<i,j>} J_{i,j}s_i s_j + \\sum_i h_i s_i$\n\nwhere here each $s_i, J_{i,j}$, and $h_i$ are either +1 or -1. Here each index i is associated with a bit on a square lattice, and the $<i,j>$ notation means sums over neighboring bits on this lattice. The problem we would like to solve is, given $J_{i,j}$, and $h_i$, find an assignment of $s_i$ values that minimize $E$.\n\nHow does a variational quantum algorithm work for this? One approach is to consider $n$ qubits and associate them with each of the bits in the classical problem. This maps the classical problem onto the quantum problem of minimizing the expectation value of the observable\n\n$H=\\sum_{<i,j>} J_{i,j} Z_i Z_j + \\sum_i h_iZ_i$\n\nThen one defines a set of parameterized quantum circuits, i.e., a quantum circuit where the gates (or more general quantum operations) are parameterized by some values. This produces an ansatz state\n\n$|\\psi(p_1, p_2, \\dots, p_k)\\rangle$\n\nwhere $p_i$ are the parameters that produce this state (here we assume a pure state, but mixed states are of course possible).\n\nThe variational algorithm then works by noting that one can obtain the value of the objective function for a given ansatz state by\n\n1. Prepare the ansatz state.\n2. Make a measurement which samples from some terms in H.\n3. Goto 1.\n\nNote that one cannot always measure $H$ directly (without the use of quantum phase estimation). So one often relies on the linearity of expectation values to measure parts of $H$ in step 2. One always needs to repeat the measurements to obtain an estimate of the expectation value. How many measurements needed to achieve a given accuracy is beyond the scope of this tutorial, but Cirq can help investigate this question.\n\nThe above shows that one can use a quantum computer to obtain estimates of the objective function for the ansatz. This can then be used in an outer loop to try to obtain parameters for the lowest value of the objective function. For these values, one can then use that best ansatz to produce samples of solutions to the problem, which obtain a hopefully good approximation for the lowest possible value of the objective function.\n", "_____no_output_____" ], [ "## Create a circuit on a Grid\n\nTo build the above variational quantum algorithm using Cirq, one begins by building the appropriate circuit. Because the problem we have defined has a natural structure on a grid, we will use Cirq’s built-in `GridQubits` as our qubits. We will demonstrate some of how this works in an interactive Python environment, the following code can be run in series in a Python environment where you have Cirq installed. 
For more about circuits and how to create them, see the [Tutorial](basics.ipynb) or the [Circuits](../circuits.ipynb) page.", "_____no_output_____" ] ], [ [ "import cirq\n\n# define the length of the grid.\nlength = 3\n\n# define qubits on the grid.\nqubits = cirq.GridQubit.square(length)\n\nprint(qubits)", "[cirq.GridQubit(0, 0), cirq.GridQubit(0, 1), cirq.GridQubit(0, 2), cirq.GridQubit(1, 0), cirq.GridQubit(1, 1), cirq.GridQubit(1, 2), cirq.GridQubit(2, 0), cirq.GridQubit(2, 1), cirq.GridQubit(2, 2)]\n" ] ], [ [ "Here we see that we've created a bunch of `GridQubits`, which have a row and column, indicating their position on a grid.\n\nNow that we have some qubits, let us construct a `Circuit` on these qubits. For example, suppose we want to apply the Hadamard gate `H` to every qubit whose row index plus column index is even, and an `X` gate to every qubit whose row index plus column index is odd. To do this, we write:", "_____no_output_____" ] ], [ [ "circuit = cirq.Circuit()\ncircuit.append(cirq.H(q) for q in qubits if (q.row + q.col) % 2 == 0)\ncircuit.append(cirq.X(q) for q in qubits if (q.row + q.col) % 2 == 1)\n\nprint(circuit)", "(0, 0): ───H───\n\n(0, 1): ───X───\n\n(0, 2): ───H───\n\n(1, 0): ───X───\n\n(1, 1): ───H───\n\n(1, 2): ───X───\n\n(2, 0): ───H───\n\n(2, 1): ───X───\n\n(2, 2): ───H───\n" ] ], [ [ "## Creating the Ansatz\n\nOne convenient pattern is to use a python [Generator](https://wiki.python.org/moin/Generators) for defining sub-circuits or layers in our algorithm. We will define a function that takes in the relevant parameters and then yields the operations for the sub-circuit, and then this can be appended to the `Circuit`:", "_____no_output_____" ] ], [ [ "def rot_x_layer(length, half_turns):\n \"\"\"Yields X rotations by half_turns on a square grid of given length.\"\"\"\n\n # Define the gate once and then re-use it for each Operation.\n rot = cirq.XPowGate(exponent=half_turns)\n\n # Create an X rotation Operation for each qubit in the grid.\n for i in range(length):\n for j in range(length):\n yield rot(cirq.GridQubit(i, j))\n\n# Create the circuit using the rot_x_layer generator\ncircuit = cirq.Circuit()\ncircuit.append(rot_x_layer(2, 0.1))\nprint(circuit)", "(0, 0): ───X^0.1───\n\n(0, 1): ───X^0.1───\n\n(1, 0): ───X^0.1───\n\n(1, 1): ───X^0.1───\n" ] ], [ [ "Another important concept here is that the rotation gate is specified in *half turns* ($ht$). For a rotation about `X`, the gate is:\n\n$\\cos(ht * \\pi) I + i \\sin(ht * \\pi) X$\n\nThere is a lot of freedom defining a variational ansatz. Here we will do a variation on a [QAOA strategy](https://arxiv.org/abs/1411.4028) and define an ansatz related to the problem we are trying to solve.\n\nFirst, we need to choose how the instances of the problem are represented. These are the values $J$ and $h$ in the Hamiltonian definition. We represent them as two-dimensional arrays (lists of lists). 
For $J$ we use two such lists, one for the row links and one for the column links.\n\nHere is a snippet that we can use to generate random problem instances:", "_____no_output_____" ] ], [ [ "import random\ndef rand2d(rows, cols):\n return [[random.choice([+1, -1]) for _ in range(cols)] for _ in range(rows)]\n\ndef random_instance(length):\n # transverse field terms\n h = rand2d(length, length)\n # links within a row\n jr = rand2d(length - 1, length)\n # links within a column\n jc = rand2d(length, length - 1)\n return (h, jr, jc)\n\nh, jr, jc = random_instance(3)\nprint('transverse fields: {}'.format(h))\nprint('row j fields: {}'.format(jr))\nprint('column j fields: {}'.format(jc))", "transverse fields: [[1, -1, 1], [1, -1, 1], [-1, 1, -1]]\nrow j fields: [[-1, 1, -1], [1, -1, -1]]\ncolumn j fields: [[-1, 1], [-1, -1], [1, -1]]\n" ] ], [ [ "In the code above, the actual values will be different for each individual run because they are using `random.choice`.\n\nGiven this definition of the problem instance, we can now introduce our ansatz. It will consist of one step of a circuit made up of:\n\n1. Apply an `XPowGate` for the same parameter for all qubits. This is the method we have written above.\n2. Apply a `ZPowGate` for the same parameter for all qubits where the transverse field term $h$ is $+1$.", "_____no_output_____" ] ], [ [ "def rot_z_layer(h, half_turns):\n \"\"\"Yields Z rotations by half_turns conditioned on the field h.\"\"\"\n gate = cirq.ZPowGate(exponent=half_turns)\n for i, h_row in enumerate(h):\n for j, h_ij in enumerate(h_row):\n if h_ij == 1:\n yield gate(cirq.GridQubit(i, j))", "_____no_output_____" ] ], [ [ "3. Apply a `CZPowGate` for the same parameter between all qubits where the coupling field term $J$ is $+1$. If the field is $-1$, apply `CZPowGate` conjugated by $X$ gates on all qubits.", "_____no_output_____" ] ], [ [ "def rot_11_layer(jr, jc, half_turns):\n \"\"\"Yields rotations about |11> conditioned on the jr and jc fields.\"\"\"\n cz_gate = cirq.CZPowGate(exponent=half_turns) \n for i, jr_row in enumerate(jr):\n for j, jr_ij in enumerate(jr_row):\n q = cirq.GridQubit(i, j)\n q_1 = cirq.GridQubit(i + 1, j)\n if jr_ij == -1:\n yield cirq.X(q)\n yield cirq.X(q_1)\n yield cz_gate(q, q_1)\n if jr_ij == -1:\n yield cirq.X(q)\n yield cirq.X(q_1)\n\n for i, jc_row in enumerate(jc):\n for j, jc_ij in enumerate(jc_row):\n q = cirq.GridQubit(i, j)\n q_1 = cirq.GridQubit(i, j + 1)\n if jc_ij == -1:\n yield cirq.X(q)\n yield cirq.X(q_1)\n yield cz_gate(q, q_1)\n if jc_ij == -1:\n yield cirq.X(q)\n yield cirq.X(q_1)", "_____no_output_____" ] ], [ [ "Putting all together, we can create a step that uses just three parameters. Below is the code, which uses the generator for each of the layers (note to advanced Python users: this code does not contain a bug in using `yield` due to the auto flattening of the `OP_TREE concept`. 
Typically, one would want to use `yield` from here, but this is not necessary):", "_____no_output_____" ] ], [ [ "def one_step(h, jr, jc, x_half_turns, h_half_turns, j_half_turns):\n length = len(h)\n yield rot_x_layer(length, x_half_turns)\n yield rot_z_layer(h, h_half_turns)\n yield rot_11_layer(jr, jc, j_half_turns)\n\nh, jr, jc = random_instance(3)\n\ncircuit = cirq.Circuit() \ncircuit.append(one_step(h, jr, jc, 0.1, 0.2, 0.3),\n strategy=cirq.InsertStrategy.EARLIEST)\nprint(circuit)", " ┌───────────────┐ ┌──────┐ ┌──────┐\n(0, 0): ───X^0.1───Z^0.2────@─────────────────────────────@──────────────────────────────────────────────────────\n │ │\n(0, 1): ───X^0.1───X────────┼────@──────────────X─────────@^0.3────X─────────@───────X───────────────────────────\n │ │ │\n(0, 2): ───X^0.1────────────┼────┼────@─────────X────────────────────────────@^0.3───X───────────────────────────\n │ │ │\n(1, 0): ───X^0.1────────────@^0.3┼────┼──────────@───────────────────────────────────@───────────────────────────\n │ │ │ │\n(1, 1): ───X^0.1───X─────────────@^0.3┼─────────X┼────────X────────@─────────X───────@^0.3───X───────@───────X───\n │ │ │ │\n(1, 2): ───X^0.1───Z^0.2──────────────@^0.3─────X┼────────@────────┼────X────X───────────────────────@^0.3───X───\n │ │ │\n(2, 0): ───X^0.1─────────────────────────────────@^0.3────┼────────┼─────────────────@───────────────────────────\n │ │ │\n(2, 1): ───X^0.1───Z^0.2────X─────────────────────────────┼────────@^0.3─────X───────@^0.3───@───────────────────\n │ │\n(2, 2): ───X^0.1───Z^0.2────X─────────────────────────────@^0.3────X─────────────────────────@^0.3───────────────\n └───────────────┘ └──────┘ └──────┘\n" ] ], [ [ "Here we see that we have chosen particular parameter values $(0.1, 0.2, 0.3)$.", "_____no_output_____" ], [ "## Simulation\n\nIn Cirq, the simulators make a distinction between a *run* and a *simulation*. A *run* only allows for a simulation that mimics the actual quantum hardware. For example, it does not allow for access to the amplitudes of the wave function of the system, since that is not experimentally accessible. *Simulate* commands, however, are broader and allow different forms of simulation. When prototyping small circuits, it is useful to execute *simulate* methods, but one should be wary of relying on them when running against actual hardware.\n\nCurrently, Cirq ships with a simulator tied strongly to the gate set of the **Google xmon architecture**. However, for convenience, the simulator attempts to automatically convert unknown operations into `XmonGates` (as long as the operation specifies a matrix or a decomposition into `XmonGates`). This, in principle, allows us to simulate any circuit that has gates that implement one and two qubit `KnownMatrix` gates. Future releases of Cirq will expand these simulators.\n\nBecause the simulator is tied to the **xmon gate set**, the simulator lives, in contrast to core Cirq, in the `cirq.google` module. 
To run a simulation of the full circuit, we create a simulator, and pass the circuit to the simulator.", "_____no_output_____" ] ], [ [ "simulator = cirq.Simulator()\ncircuit = cirq.Circuit() \ncircuit.append(one_step(h, jr, jc, 0.1, 0.2, 0.3))\ncircuit.append(cirq.measure(*qubits, key='x'))\nresults = simulator.run(circuit, repetitions=100)\nprint(results.histogram(key='x'))", "Counter({0: 74, 32: 6, 4: 4, 256: 4, 16: 2, 1: 2, 128: 2, 2: 2, 8: 1, 132: 1, 272: 1, 64: 1})\n" ] ], [ [ "Note that we have run the simulation 100 times and produced a histogram of the counts of the measurement results. What are the keys in the histogram counter? Note that we have passed in the order of the qubits. This ordering is then used to translate the order of the measurement results to a register using a [big endian representation](https://en.wikipedia.org/wiki/Endianness).\n\nFor our optimization problem, we want to calculate the value of the objective function for a given result run. One way to do this is using the raw measurement data from the result of `simulator.run`. Another way to do this is to provide to the histogram a method to calculate the objective: this will then be used as the key for the returned `Counter`.", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef energy_func(length, h, jr, jc):\n def energy(measurements):\n # Reshape measurement into array that matches grid shape.\n meas_list_of_lists = [measurements[i * length:(i + 1) * length]\n for i in range(length)]\n # Convert true/false to +1/-1.\n pm_meas = 1 - 2 * np.array(meas_list_of_lists).astype(np.int32)\n\n tot_energy = np.sum(pm_meas * h)\n for i, jr_row in enumerate(jr):\n for j, jr_ij in enumerate(jr_row):\n tot_energy += jr_ij * pm_meas[i, j] * pm_meas[i + 1, j]\n for i, jc_row in enumerate(jc):\n for j, jc_ij in enumerate(jc_row):\n tot_energy += jc_ij * pm_meas[i, j] * pm_meas[i, j + 1]\n return tot_energy\n return energy\nprint(results.histogram(key='x', fold_func=energy_func(3, h, jr, jc)))", "Counter({1: 76, -3: 8, -1: 6, -5: 4, 7: 2, 3: 2, 5: 2})\n" ] ], [ [ "One can then calculate the expectation value over all repetitions:", "_____no_output_____" ] ], [ [ "def obj_func(result):\n energy_hist = result.histogram(key='x', fold_func=energy_func(3, h, jr, jc))\n return np.sum([k * v for k,v in energy_hist.items()]) / result.repetitions\nprint('Value of the objective function {}'.format(obj_func(results)))", "Value of the objective function 0.56\n" ] ], [ [ "### Parameterizing the Ansatz\n\nNow that we have constructed a variational ansatz and shown how to simulate it using Cirq, we can think about optimizing the value. \n\nOn quantum hardware, one would most likely want to have the optimization code as close to the hardware as possible. As the classical hardware that is allowed to inter-operate with the quantum hardware becomes better specified, this language will be better defined. Without this specification, however, Cirq also provides a useful concept for optimizing the looping in many optimization algorithms. 
This is the fact that many of the value in the gate sets can, instead of being specified by a float, be specified by a `Symbol`, and this `Symbol` can be substituted for a value specified at execution time.\n\nLuckily for us, we have written our code so that using parameterized values is as simple as passing `Symbol` objects where we previously passed float values.", "_____no_output_____" ] ], [ [ "import sympy\ncircuit = cirq.Circuit()\nalpha = sympy.Symbol('alpha')\nbeta = sympy.Symbol('beta')\ngamma = sympy.Symbol('gamma')\ncircuit.append(one_step(h, jr, jc, alpha, beta, gamma))\ncircuit.append(cirq.measure(*qubits, key='x'))\nprint(circuit)", " ┌─────────────────────┐ ┌────────┐ ┌────────┐\n(0, 0): ───X^alpha───Z^beta────@─────────────────────────────────────@──────────────────────────────────────────────────────────────────M('x')───\n │ │ │\n(0, 1): ───X^alpha───X─────────┼──────@──────────────────X───────────@^gamma────X───────────@─────────X─────────────────────────────────M────────\n │ │ │ │\n(0, 2): ───X^alpha─────────────┼──────┼──────@───────────X──────────────────────────────────@^gamma───X─────────────────────────────────M────────\n │ │ │ │\n(1, 0): ───X^alpha─────────────@^gamma┼──────┼────────────@───────────────────────────────────────────@─────────────────────────────────M────────\n │ │ │ │ │\n(1, 1): ───X^alpha───X────────────────@^gamma┼───────────X┼──────────X──────────@───────────X─────────@^gamma───X─────────@─────────X───M────────\n │ │ │ │ │\n(1, 2): ───X^alpha───Z^beta──────────────────@^gamma─────X┼──────────@──────────┼──────X────X─────────────────────────────@^gamma───X───M────────\n │ │ │ │\n(2, 0): ───X^alpha────────────────────────────────────────@^gamma────┼──────────┼─────────────────────@─────────────────────────────────M────────\n │ │ │ │\n(2, 1): ───X^alpha───Z^beta────X─────────────────────────────────────┼──────────@^gamma─────X─────────@^gamma───@───────────────────────M────────\n │ │ │\n(2, 2): ───X^alpha───Z^beta────X─────────────────────────────────────@^gamma────X───────────────────────────────@^gamma─────────────────M────────\n └─────────────────────┘ └────────┘ └────────┘\n" ] ], [ [ "Note now that the circuit's gates are parameterized.\n\nParameters are specified at runtime using a `ParamResolver`, which is just a dictionary from `Symbol` keys to runtime values. \n\nFor instance:", "_____no_output_____" ] ], [ [ "resolver = cirq.ParamResolver({'alpha': 0.1, 'beta': 0.3, 'gamma': 0.7})\nresolved_circuit = cirq.resolve_parameters(circuit, resolver)", "_____no_output_____" ] ], [ [ "resolves the parameters to actual values in the circuit.\n\nCirq also has the concept of a *sweep*. A sweep is a collection of parameter resolvers. This runtime information is very useful when one wants to run many circuits for many different parameter values. Sweeps can be created to specify values directly (this is one way to get classical information into a circuit), or a variety of helper methods. For example suppose we want to evaluate our circuit over an equally spaced grid of parameter values. 
We can easily create this using `LinSpace`.", "_____no_output_____" ] ], [ [ "sweep = (cirq.Linspace(key='alpha', start=0.1, stop=0.9, length=5)\n * cirq.Linspace(key='beta', start=0.1, stop=0.9, length=5)\n * cirq.Linspace(key='gamma', start=0.1, stop=0.9, length=5))\nresults = simulator.run_sweep(circuit, params=sweep, repetitions=100)\nfor result in results:\n print(result.params.param_dict, obj_func(result))", "OrderedDict([('alpha', 0.1), ('beta', 0.1), ('gamma', 0.1)]) 0.74\nOrderedDict([('alpha', 0.1), ('beta', 0.1), ('gamma', 0.30000000000000004)]) 0.92\nOrderedDict([('alpha', 0.1), ('beta', 0.1), ('gamma', 0.5)]) 0.72\nOrderedDict([('alpha', 0.1), ('beta', 0.1), ('gamma', 0.7000000000000001)]) 0.88\nOrderedDict([('alpha', 0.1), ('beta', 0.1), ('gamma', 0.9)]) 0.92\nOrderedDict([('alpha', 0.1), ('beta', 0.30000000000000004), ('gamma', 0.1)]) 0.6\nOrderedDict([('alpha', 0.1), ('beta', 0.30000000000000004), ('gamma', 0.30000000000000004)]) 0.7\nOrderedDict([('alpha', 0.1), ('beta', 0.30000000000000004), ('gamma', 0.5)]) 1.12\nOrderedDict([('alpha', 0.1), ('beta', 0.30000000000000004), ('gamma', 0.7000000000000001)]) 1.18\nOrderedDict([('alpha', 0.1), ('beta', 0.30000000000000004), ('gamma', 0.9)]) 1.02\nOrderedDict([('alpha', 0.1), ('beta', 0.5), ('gamma', 0.1)]) 0.5\nOrderedDict([('alpha', 0.1), ('beta', 0.5), ('gamma', 0.30000000000000004)]) 1.14\nOrderedDict([('alpha', 0.1), ('beta', 0.5), ('gamma', 0.5)]) 1.0\nOrderedDict([('alpha', 0.1), ('beta', 0.5), ('gamma', 0.7000000000000001)]) 0.84\nOrderedDict([('alpha', 0.1), ('beta', 0.5), ('gamma', 0.9)]) 0.92\nOrderedDict([('alpha', 0.1), ('beta', 0.7000000000000001), ('gamma', 0.1)]) 1.16\nOrderedDict([('alpha', 0.1), ('beta', 0.7000000000000001), ('gamma', 0.30000000000000004)]) 0.8\nOrderedDict([('alpha', 0.1), ('beta', 0.7000000000000001), ('gamma', 0.5)]) 0.84\nOrderedDict([('alpha', 0.1), ('beta', 0.7000000000000001), ('gamma', 0.7000000000000001)]) 1.28\nOrderedDict([('alpha', 0.1), ('beta', 0.7000000000000001), ('gamma', 0.9)]) 1.04\nOrderedDict([('alpha', 0.1), ('beta', 0.9), ('gamma', 0.1)]) 0.9\nOrderedDict([('alpha', 0.1), ('beta', 0.9), ('gamma', 0.30000000000000004)]) 0.74\nOrderedDict([('alpha', 0.1), ('beta', 0.9), ('gamma', 0.5)]) 0.78\nOrderedDict([('alpha', 0.1), ('beta', 0.9), ('gamma', 0.7000000000000001)]) 1.0\nOrderedDict([('alpha', 0.1), ('beta', 0.9), ('gamma', 0.9)]) 0.56\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.1), ('gamma', 0.1)]) 0.34\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.1), ('gamma', 0.30000000000000004)]) -0.08\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.1), ('gamma', 0.5)]) -0.34\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.1), ('gamma', 0.7000000000000001)]) -0.08\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.1), ('gamma', 0.9)]) 0.18\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.30000000000000004), ('gamma', 0.1)]) -0.7\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.30000000000000004), ('gamma', 0.30000000000000004)]) 0.5\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.30000000000000004), ('gamma', 0.5)]) 0.5\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.30000000000000004), ('gamma', 0.7000000000000001)]) -0.08\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.30000000000000004), ('gamma', 0.9)]) 0.62\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.5), ('gamma', 0.1)]) 0.54\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.5), ('gamma', 0.30000000000000004)]) 
0.2\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.5), ('gamma', 0.5)]) -0.24\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.5), ('gamma', 0.7000000000000001)]) 0.46\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.5), ('gamma', 0.9)]) 0.22\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.7000000000000001), ('gamma', 0.1)]) 0.02\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.7000000000000001), ('gamma', 0.30000000000000004)]) -0.18\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.7000000000000001), ('gamma', 0.5)]) -0.16\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.7000000000000001), ('gamma', 0.7000000000000001)]) 0.14\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.7000000000000001), ('gamma', 0.9)]) -0.8\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.9), ('gamma', 0.1)]) 0.46\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.9), ('gamma', 0.30000000000000004)]) 0.48\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.9), ('gamma', 0.5)]) 0.72\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.9), ('gamma', 0.7000000000000001)]) 0.04\nOrderedDict([('alpha', 0.30000000000000004), ('beta', 0.9), ('gamma', 0.9)]) 0.32\nOrderedDict([('alpha', 0.5), ('beta', 0.1), ('gamma', 0.1)]) 0.3\nOrderedDict([('alpha', 0.5), ('beta', 0.1), ('gamma', 0.30000000000000004)]) -0.06\nOrderedDict([('alpha', 0.5), ('beta', 0.1), ('gamma', 0.5)]) 0.26\nOrderedDict([('alpha', 0.5), ('beta', 0.1), ('gamma', 0.7000000000000001)]) -0.4\nOrderedDict([('alpha', 0.5), ('beta', 0.1), ('gamma', 0.9)]) 0.48\nOrderedDict([('alpha', 0.5), ('beta', 0.30000000000000004), ('gamma', 0.1)]) 0.0\nOrderedDict([('alpha', 0.5), ('beta', 0.30000000000000004), ('gamma', 0.30000000000000004)]) -0.14\nOrderedDict([('alpha', 0.5), ('beta', 0.30000000000000004), ('gamma', 0.5)]) 0.22\nOrderedDict([('alpha', 0.5), ('beta', 0.30000000000000004), ('gamma', 0.7000000000000001)]) 0.84\nOrderedDict([('alpha', 0.5), ('beta', 0.30000000000000004), ('gamma', 0.9)]) 0.6\nOrderedDict([('alpha', 0.5), ('beta', 0.5), ('gamma', 0.1)]) 0.04\nOrderedDict([('alpha', 0.5), ('beta', 0.5), ('gamma', 0.30000000000000004)]) -0.44\nOrderedDict([('alpha', 0.5), ('beta', 0.5), ('gamma', 0.5)]) -0.52\nOrderedDict([('alpha', 0.5), ('beta', 0.5), ('gamma', 0.7000000000000001)]) -0.02\nOrderedDict([('alpha', 0.5), ('beta', 0.5), ('gamma', 0.9)]) -0.08\nOrderedDict([('alpha', 0.5), ('beta', 0.7000000000000001), ('gamma', 0.1)]) 1.02\nOrderedDict([('alpha', 0.5), ('beta', 0.7000000000000001), ('gamma', 0.30000000000000004)]) 0.58\nOrderedDict([('alpha', 0.5), ('beta', 0.7000000000000001), ('gamma', 0.5)]) -0.68\nOrderedDict([('alpha', 0.5), ('beta', 0.7000000000000001), ('gamma', 0.7000000000000001)]) 0.16\nOrderedDict([('alpha', 0.5), ('beta', 0.7000000000000001), ('gamma', 0.9)]) 0.98\nOrderedDict([('alpha', 0.5), ('beta', 0.9), ('gamma', 0.1)]) -0.48\nOrderedDict([('alpha', 0.5), ('beta', 0.9), ('gamma', 0.30000000000000004)]) -0.72\nOrderedDict([('alpha', 0.5), ('beta', 0.9), ('gamma', 0.5)]) 0.6\nOrderedDict([('alpha', 0.5), ('beta', 0.9), ('gamma', 0.7000000000000001)]) -0.04\nOrderedDict([('alpha', 0.5), ('beta', 0.9), ('gamma', 0.9)]) -0.36\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.1), ('gamma', 0.1)]) -0.08\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.1), ('gamma', 0.30000000000000004)]) 2.0\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.1), ('gamma', 0.5)]) 0.86\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.1), 
('gamma', 0.7000000000000001)]) 1.88\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.1), ('gamma', 0.9)]) 0.66\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.30000000000000004), ('gamma', 0.1)]) 1.32\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.30000000000000004), ('gamma', 0.30000000000000004)]) 1.16\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.30000000000000004), ('gamma', 0.5)]) 1.12\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.30000000000000004), ('gamma', 0.7000000000000001)]) 1.4\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.30000000000000004), ('gamma', 0.9)]) 0.82\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.5), ('gamma', 0.1)]) 1.0\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.5), ('gamma', 0.30000000000000004)]) 1.3\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.5), ('gamma', 0.5)]) 0.88\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.5), ('gamma', 0.7000000000000001)]) 1.34\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.5), ('gamma', 0.9)]) 1.3\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.7000000000000001), ('gamma', 0.1)]) 0.8\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.7000000000000001), ('gamma', 0.30000000000000004)]) 0.88\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.7000000000000001), ('gamma', 0.5)]) 0.82\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.7000000000000001), ('gamma', 0.7000000000000001)]) 1.8\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.7000000000000001), ('gamma', 0.9)]) 1.2\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.9), ('gamma', 0.1)]) 1.42\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.9), ('gamma', 0.30000000000000004)]) 0.96\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.9), ('gamma', 0.5)]) 1.12\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.9), ('gamma', 0.7000000000000001)]) 0.98\nOrderedDict([('alpha', 0.7000000000000001), ('beta', 0.9), ('gamma', 0.9)]) 1.4\nOrderedDict([('alpha', 0.9), ('beta', 0.1), ('gamma', 0.1)]) 2.5\nOrderedDict([('alpha', 0.9), ('beta', 0.1), ('gamma', 0.30000000000000004)]) 2.96\nOrderedDict([('alpha', 0.9), ('beta', 0.1), ('gamma', 0.5)]) 2.6\nOrderedDict([('alpha', 0.9), ('beta', 0.1), ('gamma', 0.7000000000000001)]) 2.76\nOrderedDict([('alpha', 0.9), ('beta', 0.1), ('gamma', 0.9)]) 2.94\nOrderedDict([('alpha', 0.9), ('beta', 0.30000000000000004), ('gamma', 0.1)]) 2.7\nOrderedDict([('alpha', 0.9), ('beta', 0.30000000000000004), ('gamma', 0.30000000000000004)]) 2.78\nOrderedDict([('alpha', 0.9), ('beta', 0.30000000000000004), ('gamma', 0.5)]) 2.44\nOrderedDict([('alpha', 0.9), ('beta', 0.30000000000000004), ('gamma', 0.7000000000000001)]) 2.62\nOrderedDict([('alpha', 0.9), ('beta', 0.30000000000000004), ('gamma', 0.9)]) 2.82\nOrderedDict([('alpha', 0.9), ('beta', 0.5), ('gamma', 0.1)]) 2.58\nOrderedDict([('alpha', 0.9), ('beta', 0.5), ('gamma', 0.30000000000000004)]) 2.9\nOrderedDict([('alpha', 0.9), ('beta', 0.5), ('gamma', 0.5)]) 2.72\nOrderedDict([('alpha', 0.9), ('beta', 0.5), ('gamma', 0.7000000000000001)]) 2.64\nOrderedDict([('alpha', 0.9), ('beta', 0.5), ('gamma', 0.9)]) 2.38\nOrderedDict([('alpha', 0.9), ('beta', 0.7000000000000001), ('gamma', 0.1)]) 2.76\nOrderedDict([('alpha', 0.9), ('beta', 0.7000000000000001), ('gamma', 0.30000000000000004)]) 2.74\nOrderedDict([('alpha', 0.9), ('beta', 0.7000000000000001), ('gamma', 0.5)]) 3.1\nOrderedDict([('alpha', 0.9), ('beta', 0.7000000000000001), ('gamma', 
0.7000000000000001)]) 2.4\nOrderedDict([('alpha', 0.9), ('beta', 0.7000000000000001), ('gamma', 0.9)]) 2.98\nOrderedDict([('alpha', 0.9), ('beta', 0.9), ('gamma', 0.1)]) 2.94\nOrderedDict([('alpha', 0.9), ('beta', 0.9), ('gamma', 0.30000000000000004)]) 2.94\nOrderedDict([('alpha', 0.9), ('beta', 0.9), ('gamma', 0.5)]) 3.02\nOrderedDict([('alpha', 0.9), ('beta', 0.9), ('gamma', 0.7000000000000001)]) 2.98\nOrderedDict([('alpha', 0.9), ('beta', 0.9), ('gamma', 0.9)]) 2.62\n" ] ], [ [ "### Finding the Minimum\n\nNow that we have all the code, we do a simple grid search over values to find a minimal value. Grid search is not the best optimization algorithm, but is here simply illustrative.", "_____no_output_____" ] ], [ [ "sweep_size = 10\nsweep = (cirq.Linspace(key='alpha', start=0.0, stop=1.0, length=10)\n         * cirq.Linspace(key='beta', start=0.0, stop=1.0, length=10)\n         * cirq.Linspace(key='gamma', start=0.0, stop=1.0, length=10))\nresults = simulator.run_sweep(circuit, params=sweep, repetitions=100)\n\nmin = None\nmin_params = None\nfor result in results:\n    value = obj_func(result)\n    if min is None or value < min:\n        min = value\n        min_params = result.params\nprint('Minimum objective value is {}.'.format(min))", "Minimum objective value is -1.16.\n" ] ], [ [ "We've created a simple variational quantum algorithm using Cirq. Where to go next? Perhaps you can play around with the above code and work on analyzing the algorithm's performance. Add new parameterized circuits and build an end-to-end program for analyzing these circuits.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
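The closing cell of the variational-algorithm notebook above points out that grid search is only illustrative. As a hedged sketch of an alternative outer loop, the fragment below feeds the same sampled objective into SciPy's Nelder-Mead optimizer. It assumes the parameterized `circuit` (with the `alpha`, `beta`, `gamma` symbols), the `simulator`, and `obj_func` from that notebook are already in scope; SciPy itself and the chosen iteration limits are extra assumptions not present in the original.

```python
import numpy as np
import cirq
from scipy.optimize import minimize  # extra dependency, not used in the notebook

def sampled_objective(params):
    """Estimate the Ising objective for one (alpha, beta, gamma) setting by sampling."""
    alpha, beta, gamma = params
    resolver = cirq.ParamResolver({'alpha': alpha, 'beta': beta, 'gamma': gamma})
    result = simulator.run(circuit, param_resolver=resolver, repetitions=100)
    return obj_func(result)

# Nelder-Mead only needs function values, so it tolerates the sampling noise
# in the estimated objective better than gradient-based methods would.
opt = minimize(sampled_objective, x0=np.array([0.3, 0.3, 0.3]), method='Nelder-Mead',
               options={'maxiter': 50, 'xatol': 0.05, 'fatol': 0.1})
print('Best parameters found:', opt.x)
print('Estimated objective at those parameters:', opt.fun)
```

Because each evaluation is a noisy estimate from 100 repetitions, tightening `fatol` below the sampling error would not help; increasing the number of repetitions is the more direct lever.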
ecb0955ab9ce777a0501a998101fe7bb6b7e74ae
3,517
ipynb
Jupyter Notebook
lessons/NLP Pipelines/tokenization_solution.ipynb
callezenwaka/DSND_Term2
6252cb75f9fbd61043b308a783b1d62cdd217001
[ "MIT" ]
1,030
2018-07-03T19:09:50.000Z
2022-03-25T05:48:57.000Z
lessons/NLP Pipelines/tokenization_solution.ipynb
callezenwaka/DSND_Term2
6252cb75f9fbd61043b308a783b1d62cdd217001
[ "MIT" ]
21
2018-09-20T14:36:04.000Z
2021-10-11T18:25:31.000Z
lessons/NLP Pipelines/tokenization_solution.ipynb
callezenwaka/DSND_Term2
6252cb75f9fbd61043b308a783b1d62cdd217001
[ "MIT" ]
1,736
2018-06-27T19:33:46.000Z
2022-03-28T17:52:33.000Z
25.485507
429
0.570372
[ [ [ "### Note on NLTK data download\nRun the cell below to download the necessary nltk data packages. Note, because we are working in classroom workspaces, we will be downloading specific packages in each notebook throughout the lesson. However, you can download all packages by entering `nltk.download()` on your computer. Keep in mind this does take up a bit more space. You can learn more about nltk data installation [here](https://www.nltk.org/data.html).", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download('punkt')", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n" ] ], [ [ "# Tokenization\nTry out the tokenization methods in nltk to split the following text into words and then sentences.", "_____no_output_____" ] ], [ [ "# import statements\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import sent_tokenize", "_____no_output_____" ], [ "text = \"Dr. Smith graduated from the University of Washington. He later started an analytics firm called Lux, which catered to enterprise customers.\"\nprint(text)", "Dr. Smith graduated from the University of Washington. He later started an analytics firm called Lux, which catered to enterprise customers.\n" ], [ "# Split text into words using NLTK\nwords = word_tokenize(text)\nprint(words)", "['Dr.', 'Smith', 'graduated', 'from', 'the', 'University', 'of', 'Washington', '.', 'He', 'later', 'started', 'an', 'analytics', 'firm', 'called', 'Lux', ',', 'which', 'catered', 'to', 'enterprise', 'customers', '.']\n" ], [ "# Split text into sentences\nsentences = sent_tokenize(text)\nprint(sentences)", "['Dr. Smith graduated from the University of Washington.', 'He later started an analytics firm called Lux, which catered to enterprise customers.']\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
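The tokenization notebook above splits the sample text into words and, separately, into sentences. A small illustrative follow-up (not part of the original lesson) is to combine the two steps, tokenizing each sentence into its own word list, which is the shape most downstream NLP steps expect; it reuses the notebook's example text and the same `punkt` data package.

```python
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize

nltk.download('punkt')  # same tokenizer models the notebook downloads

text = ("Dr. Smith graduated from the University of Washington. "
        "He later started an analytics firm called Lux, which catered "
        "to enterprise customers.")

# Sentence-split first, then word-tokenize each sentence.
tokens_per_sentence = [word_tokenize(sentence) for sentence in sent_tokenize(text)]
for tokens in tokens_per_sentence:
    print(tokens)
```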
ecb0a2a9226e3648f549984737c6363571bd6abf
14,402
ipynb
Jupyter Notebook
courses/udacity_deep_learning/[JPN]4_convolutions.ipynb
hironsuz/examples
190498a2081d64c7488901aeba9269d05b6fdf6f
[ "Apache-2.0" ]
null
null
null
courses/udacity_deep_learning/[JPN]4_convolutions.ipynb
hironsuz/examples
190498a2081d64c7488901aeba9269d05b6fdf6f
[ "Apache-2.0" ]
null
null
null
courses/udacity_deep_learning/[JPN]4_convolutions.ipynb
hironsuz/examples
190498a2081d64c7488901aeba9269d05b6fdf6f
[ "Apache-2.0" ]
null
null
null
29.331976
171
0.556589
[ [ [ "Deep Learning\n=============\n\nAssignment 4\n------------\n\n前回、 `2_fullyconnected.ipynb`と` 3_regularization.ipynb`で、[notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html)の文字を分類するために、完全に接続されたネットワークを訓練しました。\n\n今回のアサインメントの目的は、ニューラルネットワークを畳み込みにすることです。", "_____no_output_____" ] ], [ [ "# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range", "_____no_output_____" ], [ "pickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)", "Training set (200000, 28, 28) (200000,)\nValidation set (10000, 28, 28) (10000,)\nTest set (18724, 28, 28) (18724,)\n" ] ], [ [ "TensorFlowに適した形状に再フォーマットします。\n- 畳み込みには、キューブとしてフォーマットされた画像データが必要です(#channelsによる高さによる幅)\n- フロート1-hotエンコーディングとしてのラベル。", "_____no_output_____" ] ], [ [ "image_size = 28\nnum_labels = 10\nnum_channels = 1 # grayscale\n\nimport numpy as np\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape(\n (-1, image_size, image_size, num_channels)).astype(np.float32)\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)", "Training set (200000, 28, 28, 1) (200000, 10)\nValidation set (10000, 28, 28, 1) (10000, 10)\nTest set (18724, 28, 28, 1) (18724, 10)\n" ], [ "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])", "_____no_output_____" ] ], [ [ "2つの畳み込み層とそれに続く1つの完全に接続された層で小さなネットワークを構築しましょう。 畳み込みネットワークは計算コストが高いため、深さと完全に接続されたノードの数を制限します。", "_____no_output_____" ] ], [ [ "batch_size = 16\npatch_size = 5\ndepth = 16\nnum_hidden = 64\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n # Input data.\n tf_train_dataset = tf.placeholder(\n tf.float32, shape=(batch_size, image_size, image_size, num_channels))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n \n # Variables.\n layer1_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, num_channels, depth], stddev=0.1))\n layer1_biases = tf.Variable(tf.zeros([depth]))\n layer2_weights = tf.Variable(tf.truncated_normal(\n [patch_size, patch_size, depth, depth], stddev=0.1))\n layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n layer3_weights = tf.Variable(tf.truncated_normal(\n [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))\n layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n 
layer4_weights = tf.Variable(tf.truncated_normal(\n [num_hidden, num_labels], stddev=0.1))\n layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))\n \n # Model.\n def model(data):\n conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n shape = hidden.get_shape().as_list()\n reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)\n return tf.matmul(hidden, layer4_weights) + layer4_biases\n \n # Training computation.\n logits = model(tf_train_dataset)\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n \n # Optimizer.\n optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)\n \n # Predictions for the training, validation, and test data.\n train_prediction = tf.nn.softmax(logits)\n valid_prediction = tf.nn.softmax(model(tf_valid_dataset))\n test_prediction = tf.nn.softmax(model(tf_test_dataset))", "_____no_output_____" ], [ "num_steps = 1001\n\nwith tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n print('Initialized')\n for step in range(num_steps):\n offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n batch_data = train_dataset[offset:(offset + batch_size), :, :, :]\n batch_labels = train_labels[offset:(offset + batch_size), :]\n feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n _, l, predictions = session.run(\n [optimizer, loss, train_prediction], feed_dict=feed_dict)\n if (step % 50 == 0):\n print('Minibatch loss at step %d: %f' % (step, l))\n print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))\n print('Validation accuracy: %.1f%%' % accuracy(\n valid_prediction.eval(), valid_labels))\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))", "Initialized\nMinibatch loss at step 0 : 3.51275\nMinibatch accuracy: 6.2%\nValidation accuracy: 12.8%\nMinibatch loss at step 50 : 1.48703\nMinibatch accuracy: 43.8%\nValidation accuracy: 50.4%\nMinibatch loss at step 100 : 1.04377\nMinibatch accuracy: 68.8%\nValidation accuracy: 67.4%\nMinibatch loss at step 150 : 0.601682\nMinibatch accuracy: 68.8%\nValidation accuracy: 73.0%\nMinibatch loss at step 200 : 0.898649\nMinibatch accuracy: 75.0%\nValidation accuracy: 77.8%\nMinibatch loss at step 250 : 1.3637\nMinibatch accuracy: 56.2%\nValidation accuracy: 75.4%\nMinibatch loss at step 300 : 1.41968\nMinibatch accuracy: 62.5%\nValidation accuracy: 76.0%\nMinibatch loss at step 350 : 0.300648\nMinibatch accuracy: 81.2%\nValidation accuracy: 80.2%\nMinibatch loss at step 400 : 1.32092\nMinibatch accuracy: 56.2%\nValidation accuracy: 80.4%\nMinibatch loss at step 450 : 0.556701\nMinibatch accuracy: 81.2%\nValidation accuracy: 79.4%\nMinibatch loss at step 500 : 1.65595\nMinibatch accuracy: 43.8%\nValidation accuracy: 79.6%\nMinibatch loss at step 550 : 1.06995\nMinibatch accuracy: 75.0%\nValidation accuracy: 81.2%\nMinibatch loss at step 600 : 0.223684\nMinibatch accuracy: 100.0%\nValidation accuracy: 82.3%\nMinibatch loss at step 650 : 0.619602\nMinibatch accuracy: 87.5%\nValidation accuracy: 81.8%\nMinibatch loss at step 700 : 0.812091\nMinibatch accuracy: 75.0%\nValidation accuracy: 82.4%\nMinibatch loss at step 750 : 0.276302\nMinibatch accuracy: 87.5%\nValidation accuracy: 
82.3%\nMinibatch loss at step 800 : 0.450241\nMinibatch accuracy: 81.2%\nValidation accuracy: 82.3%\nMinibatch loss at step 850 : 0.137139\nMinibatch accuracy: 93.8%\nValidation accuracy: 82.3%\nMinibatch loss at step 900 : 0.52664\nMinibatch accuracy: 75.0%\nValidation accuracy: 82.2%\nMinibatch loss at step 950 : 0.623835\nMinibatch accuracy: 87.5%\nValidation accuracy: 82.1%\nMinibatch loss at step 1000 : 0.243114\nMinibatch accuracy: 93.8%\nValidation accuracy: 82.9%\nTest accuracy: 90.0%\n" ] ], [ [ "---\nProblem 1\n---------\n\n上記のたたみ込みモデルでは、ストライド2のたたみ込みを使用して次元を減らしています。 ストライドをストライド2とカーネルサイズ2の最大プーリング操作( `nn.max_pool()`)に置き換えます。\n\n---", "_____no_output_____" ], [ "---\nProblem 2\n---------\n\nたたみ込みネットを使用して、最高のパフォーマンスを得るようにしてください。 たとえば、古典的な[LeNet5](http://yann.lecun.com/exdb/lenet/)アーキテクチャを見て、ドロップアウトを追加し、および/または学習率の減衰を追加してください。\n\n---", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
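Problem 1 of the convolution notebook above asks for the stride-2 convolutions to be replaced by 2x2 max pooling. One possible sketch of such a model function is below; it assumes TensorFlow 1.x imported as `tf` and the weight and bias variables from the notebook's graph (`layer1_weights`, `layer1_biases`, and so on) are in scope, with the rest of the training setup unchanged. With stride-1 convolutions and two 2x2 poolings, the spatial size still ends up at `image_size // 4`, so `layer3_weights` keeps its original shape.

```python
def model_with_pooling(data):
    # Stride-1 convolution keeps the 28x28 spatial size; pooling does the downsampling.
    conv = tf.nn.conv2d(data, layer1_weights, [1, 1, 1, 1], padding='SAME')
    hidden = tf.nn.relu(conv + layer1_biases)
    pool = tf.nn.max_pool(hidden, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv = tf.nn.conv2d(pool, layer2_weights, [1, 1, 1, 1], padding='SAME')
    hidden = tf.nn.relu(conv + layer2_biases)
    pool = tf.nn.max_pool(hidden, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # After two 2x2 poolings the feature map is 7x7, matching image_size // 4.
    shape = pool.get_shape().as_list()
    reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])
    hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
    return tf.matmul(hidden, layer4_weights) + layer4_biases
```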
ecb0a51ca117346b9c5e822c8edf6cd492392afc
551,349
ipynb
Jupyter Notebook
notebooks/18_IntermediateSklearn.ipynb
luckyvamshi/2013_fall_ASTR599
c717b6e122940f83c3c9a5b579901e886084501f
[ "Apache-2.0" ]
56
2015-03-09T04:49:28.000Z
2022-01-12T08:06:56.000Z
notebooks/18_IntermediateSklearn.ipynb
luckyvamshi/2013_fall_ASTR599
c717b6e122940f83c3c9a5b579901e886084501f
[ "Apache-2.0" ]
null
null
null
notebooks/18_IntermediateSklearn.ipynb
luckyvamshi/2013_fall_ASTR599
c717b6e122940f83c3c9a5b579901e886084501f
[ "Apache-2.0" ]
42
2015-01-05T19:23:27.000Z
2021-08-29T04:59:20.000Z
335.983547
94,303
0.916001
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecb0b5b949cd5426d19de300913b9028a389d021
29,423
ipynb
Jupyter Notebook
MemeNote.ipynb
alpv95/MemeProject
8a60a3ed8d754a756d46d8d45e64ebb6ae927c76
[ "MIT" ]
481
2018-04-15T15:32:10.000Z
2021-11-12T13:47:47.000Z
MemeNote.ipynb
alpv95/MemeProject
8a60a3ed8d754a756d46d8d45e64ebb6ae927c76
[ "MIT" ]
7
2018-06-14T03:14:22.000Z
2021-11-11T18:46:52.000Z
MemeNote.ipynb
alpv95/MemeProject
8a60a3ed8d754a756d46d8d45e64ebb6ae927c76
[ "MIT" ]
81
2018-06-02T01:36:22.000Z
2021-05-21T02:03:43.000Z
29.931841
1,205
0.546409
[ [ [ "#some basic imports and setups\nimport os\nimport numpy as np\nfrom PIL import Image\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n\n#mean of imagenet dataset in BGR\nimagenet_mean = np.array([104., 117., 124.], dtype=np.float32)\n\ncurrent_dir = os.getcwd()\nimage_dir = os.path.join(current_dir, 'memes')\n#image_dir = current_dir\n\n%matplotlib inline", "_____no_output_____" ], [ "#get list of all images\nimg_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('ting.jpg')]\nprint(len(img_files))\n#load all images\nimgs = []\nfor f in img_files:\n img = Image.open(f)\n img.thumbnail((227, 227), Image.ANTIALIAS)\n #img = img.resize((227,227))\n assert np.shape(img) == (227, 227, 3)\n imgs.append(img)\n#use img.thumbnail for square images, img.resize for non square\n \n#plot images\nfig = plt.figure(figsize=(15,6))\nfor i, img in enumerate(imgs):\n fig.add_subplot(1,len(imgs),i+1)\n plt.imshow(img)", "_____no_output_____" ], [ "from alexnet import AlexNet\nfrom caffe_classes import class_names\n\n#placeholder for input and dropout rate\nx = tf.placeholder(tf.float32, [1, 227, 227, 3])\nkeep_prob = tf.placeholder(tf.float32)\n\n#create model with default config ( == no skip_layer and 1000 units in the last layer)\nmodel = AlexNet(x, keep_prob, 1000,[],['fc7','fc8'],512) #maybe need to put fc8 in skip_layers\n\n#define activation of last layer as score\nscore = model.fc6\n\n#create op to calculate softmax \n#softmax = tf.nn.softmax(score)", "_____no_output_____" ], [ "with tf.Session() as sess:\n \n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n \n # Load the pretrained weights into the model\n model.load_initial_weights(sess)\n \n# Create figure handle\n fig2 = plt.figure(figsize=(15,6))\n \n # Loop over all images\n for i, image in enumerate(imgs):\n \n # Convert image to float32 and resize to (227x227)\n #img = cv2.resize(image.astype(np.float32), (227,227))\n \n # Subtract the ImageNet mean\n img = image - imagenet_mean\n \n # Reshape as needed to feed into model\n img = img.reshape((1,227,227,3))\n \n # Run the session and calculate the class probability\n #probs = sess.run(softmax, feed_dict={x: img, keep_prob: 1})\n probs = sess.run(score, feed_dict={x: img, keep_prob: 1})\n \n # Get the class name of the class with the highest probability\n #class_name = class_names[np.argmax(probs)]\n \n \n # Plot image with class name and prob in the title\n fig2.add_subplot(len(imgs),1,i+1)\n plt.imshow(image)\n #plt.title(\"Class: \" + class_name + \", probability: %.4f\" %probs[0,np.argmax(probs)])\n plt.axis('off')\n plt.title(\"Vector: \" + \"%.4f\" %probs[0,0])\n ", "_____no_output_____" ], [ "probs[0,:500]", "_____no_output_____" ], [ "ind = np.argsort(probs)\nfor i in range(990,1000):\n print(class_names[int(ind[0][i])])\n print(probs[0,ind[0][i]])", "_____no_output_____" ], [ "print(probs[0,:50])", "_____no_output_____" ] ], [ [ "Converting captions and meme vector representations into single Tfrecord", "_____no_output_____" ], [ "Requires putting memes through alexnet to find their vector rep, shuffling the captions, changing captions into their word2idx, finally saving one caption together with one meme.", "_____no_output_____" ] ], [ [ "with open('captions.txt','r') as f:\n captions = f.readlines()\n ", "_____no_output_____" ], [ "current_dir", "_____no_output_____" ], [ "len(captions)", "_____no_output_____" ], [ "captions = list(set(captions))\n", "_____no_output_____" ], [ "len(captions)", 
"_____no_output_____" ], [ "captions = [s.lower() for s in captions]", "_____no_output_____" ], [ "img_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('jpg')]\nprint(len(img_files))\nimg_files = list(set(img_files))\nprint(len(img_files))", "_____no_output_____" ], [ "meme_name = img_files[2500].replace('/Users/ALP/Desktop/Stanford/CS224n/MemeProject/memes/','')\nmeme_name = meme_name.replace('.jpg','')\nmeme_name = meme_name.replace('-',' ').lower()\nmeme_name = 'dolan'\nprint(meme_name)\nmatch = [s for s in captions if meme_name in s]\n#print(match)\n#match[0].replace(meme_name + ' - ', '')", "_____no_output_____" ], [ "search_dir = '/Users/ALP/Desktop/Stanford/CS224n/MemeProject/memes'\nos.chdir(search_dir)\nimg_files = filter(os.path.isfile, os.listdir(search_dir))\nimg_files = [os.path.join(search_dir, f) for f in img_files] # add path to each file\nimg_files.sort(key=lambda x: os.path.getmtime(x))\nwith open('/Users/ALP/Desktop/Stanford/CS224n/MemeProject/Captions.txt','r') as f:\n captions = f.readlines()\n#captions = list(set(captions))\ncaptions = [s.lower() for s in captions]\ndata_memes = []\ndata_captions = []\ncounter = 0\n\n#Doing everything in one script: (the fc6 vectors are quite sparse)\nwith tf.Session() as sess:\n \n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n \n # Load the pretrained weights into the model\n model.load_initial_weights(sess)\n \n for i,meme in enumerate(img_files):\n #meme_name = meme.replace('/Users/ALP/Desktop/Stanford/CS224n/MemeProject/memes/','')\n #meme_name = meme_name.replace('.jpg','').lower()\n #meme_name = meme_name.replace('-',' ')\n img = Image.open(meme)\n try:\n img.thumbnail((227, 227), Image.ANTIALIAS)\n #img = img.resize((227,227))\n #use img.thumbnail for square images, img.resize for non square\n assert np.shape(img) == (227, 227, 3)\n except AssertionError:\n img = img.resize((227,227))\n print('sizing error')\n \n # Subtract the ImageNet mean\n img = img - imagenet_mean #should probably change this\n \n # Reshape as needed to feed into model\n img = img.reshape((1,227,227,3))\n\n meme_vector = sess.run(score, feed_dict={x: img, keep_prob: 1}) #[1,4096]\n meme_vector = np.reshape(meme_vector,[4096])\n assert np.shape(meme_vector) == (4096,)\n #match = [s.split('-',1)[-1].lstrip() for s in captions if meme_name in s]\n match = []\n meme_name = captions[counter].split('-')[0]\n while meme_name in captions[counter]:\n match.append(captions[counter].split('-')[-1])\n counter += 1\n \n #now save in tfrecords format, or prepare for that action\n meme_vectors = [meme_vector for cap in match]\n assert len(meme_vectors) == len(match)\n data_memes.extend(meme_vectors)\n data_captions.extend(match)\n\n if i % 100 == 0:\n print(i,len(data_memes),len(data_captions))\n \n ", "_____no_output_____" ], [ "search_dir = '/Users/ALP/Desktop/Stanford/CS224n/MemeProject/memes'\nos.chdir(search_dir)\nfiles = filter(os.path.isfile, os.listdir(search_dir))\nfiles = [os.path.join(search_dir, f) for f in files] # add path to each file\nfiles.sort(key=lambda x: os.path.getmtime(x))\nprint(files[:100])", "_____no_output_____" ], [ "with open('/Users/ALP/Desktop/Stanford/CS224n/MemeProject/Captions.txt','r') as f:\n captions = f.readlines()\n#captions = list(set(captions))\ncaptions = [s.lower() for s in captions]\nprint(len([s for s in captions if 'scared bekett' in s]))\ncaptions[112000:112100]", "_____no_output_____" ], [ "del img_files[0]", "_____no_output_____" ], [ "img_files[2]", 
"_____no_output_____" ], [ "for i,meme in enumerate(img_files):\n img_files[i] = meme.replace('/Users/ALP/Desktop/Stanford/CS224n/MemeProject/memes/','')", "_____no_output_____" ], [ "img_files[2503]", "_____no_output_____" ], [ "img_files[10]", "_____no_output_____" ], [ "f = open('ordered_memes.txt', 'w')\nfor item in img_files:\n f.write('%s\\n' % item)", "_____no_output_____" ], [ "deleters = []\nfor i,ting in enumerate(data_captions):\n if ting == '':\n deleters.append(i)\nfor i,ting in enumerate(deleters):\n del data_captions[ting-i]\n del data_memes[ting-i]", "_____no_output_____" ], [ "import re\nword_captions = []\nfor capt in data_captions:\n words = re.findall(r\"[\\w']+|[.,!?;'><(){}%$#£@-_+=|\\/~`^&*]\", capt)\n word_captions.append(words)\n#print(len(word_captions))\n#word_captions = list(set(word_captions))\n#print(len(word_captions))", "_____no_output_____" ], [ "from collections import Counter\nprint(\"Creating vocabulary.\")\ncounter = Counter()\nfor c in word_captions:\n counter.update(c)\nprint(\"Total words:\", len(counter))\n\n# Filter uncommon words and sort by descending count.\nword_counts = [x for x in counter.items() if x[1] >= 3]\nword_counts.sort(key=lambda x: x[1], reverse=True)\nprint(\"Words in vocabulary:\", len(word_counts))", "_____no_output_____" ], [ "# Create the vocabulary dictionary.\nreverse_vocab = [x[0] for x in word_counts]\n#unk_id = len(reverse_vocab)\nvocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])", "_____no_output_____" ], [ "reverse_vocab[1]", "_____no_output_____" ], [ "EMBEDDING_DIMENSION=300 # Available dimensions for 6B data is 50, 100, 200, 300\ndata_directory = '~/Desktop/Stanford/CS224n/MemeProject'\n\nPAD_TOKEN = 0\n\nword2idx = { 'PAD': PAD_TOKEN } # dict so we can lookup indices for tokenising our text later from string to sequence of integers\nweights = []\nindex_counter = 0\n\nwith open('glove.42B.300d.txt','r') as file:\n for index, line in enumerate(file):\n values = line.split() # Word and weights separated by space\n word = values[0] # Word is first symbol on each line\n if word in vocab_dict:\n index_counter += 1\n word_weights = np.asarray(values[1:], dtype=np.float32) # Remainder of line is weights for word\n word2idx[word] = index_counter # PAD is our zeroth index so shift by one\n weights.append(word_weights)\n if index % 20000 == 0:\n print(index)\n if index + 1 == 1500000:\n # Limit vocabulary to top 40k terms\n break\n\nEMBEDDING_DIMENSION = len(weights[0])\n# Insert the PAD weights at index 0 now we know the embedding dimension\nweights.insert(0, np.random.randn(EMBEDDING_DIMENSION))\n\n# Append unknown and pad to end of vocab and initialize as random #maybe include start and end token here\nUNKNOWN_TOKEN=len(weights)\nword2idx['UNK'] = UNKNOWN_TOKEN\nword2idx['<S>'] = UNKNOWN_TOKEN + 1\nword2idx['</S>'] = UNKNOWN_TOKEN + 2\nweights.append(np.random.randn(EMBEDDING_DIMENSION))\nweights.append(np.random.randn(EMBEDDING_DIMENSION))\nweights.append(np.random.randn(EMBEDDING_DIMENSION))\n\n# Construct our final vocab\nweights = np.asarray(weights, dtype=np.float32)\n\nVOCAB_SIZE=weights.shape[0]\n\n#Save Vocabulary\nwith tf.gfile.FastGFile('vocab.txt', \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word2idx.iteritems()]))\nprint(\"Wrote vocabulary file:\", 'vocab.txt')", "_____no_output_____" ], [ "with open('vocab.txt','r') as f:\n reverse_vocab = list(f.readlines())\nreverse_vocab = [(line.split()[0],line.split()[1]) for line in reverse_vocab]\n\nvocab = dict([(x, y) for (x, y) 
in reverse_vocab]) ", "_____no_output_____" ], [ "print(vocab['.'])\nx = sorted(vocab.iteritems(), key=lambda x: int(x[1]))\nreverse_vocab = [y[0] for y in x]\nprint(reverse_vocab[44430:])", "_____no_output_____" ], [ "filenames = [os.path.join(image_dir, f) for f in ['one_does_not_simply.jpg']]", "_____no_output_____" ], [ "filenames", "_____no_output_____" ], [ "weights[76984]", "_____no_output_____" ], [ "np.savetxt('embedding_matrix2',weights)", "_____no_output_____" ], [ "deleters = []\nfor i,ting in enumerate(data_captions):\n if len(ting) == 2:\n deleters.append(i)", "_____no_output_____" ], [ "for i,ting in enumerate(deleters):\n del data_captions[ting-i]\n del data_memes[ting-i]", "_____no_output_____" ], [ "deleters[0]", "_____no_output_____" ], [ "len(data_captions)", "_____no_output_____" ], [ "import re\ntoken_captions = []\nfor capt in data_captions:\n token_caption = []\n token_caption.append(word2idx['<S>'])\n words = re.findall(r\"[\\w']+|[.,!?;'><(){}%$#£@-_+=|\\/~`^&*]\", capt)\n for word in words:\n try:\n token = word2idx[word]\n except KeyError:\n token = word2idx['UNK']\n token_caption.append(token)\n token_caption.append(word2idx['</S>'])\n token_captions.append(token_caption)", "_____no_output_____" ], [ "from __future__ import division\ntotal_words = 0\ntotal_UNK = 0\nfor i,ting in enumerate(token_captions):\n for word in ting:\n total_words += 1\n if word == word2idx['UNK']:\n total_UNK += 1\nprint(total_words - 2*len(data_captions))\nprint(total_UNK)\nprint((total_UNK/(total_words - 2*len(data_captions))))", "_____no_output_____" ], [ "for i,ting in enumerate(deleters):\n del data_captions[ting-i]\n del data_memes[ting-i]\n del token_captions[ting-i]", "_____no_output_____" ], [ "from random import shuffle\nc = list(zip(data_memes, token_captions))\nshuffle(c)\nmemes_shuffled, captions_shuffled = zip(*c)", "_____no_output_____" ], [ "len(captions_shuffled)", "_____no_output_____" ], [ "\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting an int64 Feature into a SequenceExample proto.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting a bytes Feature into a SequenceExample proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature_list(values):\n \"\"\"Wrapper for inserting an int64 FeatureList into a SequenceExample proto.\"\"\"\n return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])\n\n\ndef _bytes_feature_list(values):\n \"\"\"Wrapper for inserting a bytes FeatureList into a SequenceExample proto.\"\"\"\n return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])\n\ndef _floats_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))", "_____no_output_____" ], [ "memes_shuffled_int = []\nfor i,meme in enumerate(memes_shuffled):\n memes_shuffled_int.append(np.int_(meme*1000000000))\nprint(memes_shuffled_int[0][:100])", "_____no_output_____" ], [ "(captions_shuffled[8])", "_____no_output_____" ], [ "def _to_sequence_example(image, decoder, vocab):\n \"\"\"Builds a SequenceExample proto for an image-caption pair.\n Args:\n image: An ImageMetadata object.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n Returns:\n A SequenceExample proto.\n \"\"\"\n with tf.gfile.FastGFile(image.filename, \"r\") as f:\n encoded_image = f.read()\n \n try:\n decoder.decode_jpeg(encoded_image)\n except (tf.errors.InvalidArgumentError, AssertionError):\n 
print(\"Skipping file with invalid JPEG data: %s\" % image.filename)\n return\n \n context = tf.train.Features(feature={\n \"image/image_id\": _int64_feature(image.image_id),\n \"image/data\": _bytes_feature(encoded_image),\n })\n \n assert len(image.captions) == 1\n caption = image.captions[0]\n caption_ids = [vocab.word_to_id(word) for word in caption]\n feature_lists = tf.train.FeatureLists(feature_list={\n \"image/caption\": _bytes_feature_list(caption),\n \"image/caption_ids\": _int64_feature_list(caption_ids)\n })\n sequence_example = tf.train.SequenceExample(\n context=context, feature_lists=feature_lists)\n \n return sequence_example", "_____no_output_____" ], [ "import sys\ntrain_filename = 'train.tfrecords4' # address to save the TFRecords file\n# open the TFRecords file\nwriter = tf.python_io.TFRecordWriter(train_filename)\nfor i in range(len(memes_shuffled_int)):\n if not i % 20000:\n print 'Train data: {}/{}'.format(i, len(memes_shuffled_int))\n sys.stdout.flush()\n context = tf.train.Features(feature={\n \"train/meme\": _bytes_feature(memes_shuffled_int[i].tostring()), #this is the part that needs to be a float save\n })\n feature_lists = tf.train.FeatureLists(feature_list={\n \"train/captions\": _int64_feature_list(captions_shuffled[i])\n })\n sequence_example = tf.train.SequenceExample(\n context=context, feature_lists=feature_lists)\n \n writer.write(sequence_example.SerializeToString())\n \nwriter.close()\nsys.stdout.flush()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
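The MemeNote notebook above serializes each training pair as a `tf.train.SequenceExample`, with the scaled AlexNet vector stored under `train/meme` and the caption token ids under `train/captions`. As a hedged sketch of the matching read path (assuming TensorFlow 1.x, as the notebook's `tf.python_io` usage suggests, and that `np.int_` was 64-bit on the machine that wrote the file), one record of `train.tfrecords4` could be parsed back like this:

```python
import tensorflow as tf

context_features = {
    "train/meme": tf.FixedLenFeature([], dtype=tf.string),
}
sequence_features = {
    "train/captions": tf.FixedLenSequenceFeature([], dtype=tf.int64),
}

# Grab one serialized SequenceExample from the file written by the notebook.
serialized = next(tf.python_io.tf_record_iterator(path='train.tfrecords4'))

context, sequence = tf.parse_single_sequence_example(
    serialized,
    context_features=context_features,
    sequence_features=sequence_features)

# The meme vector was stored as raw int64 bytes after scaling by 1e9,
# so decode the bytes and undo the scaling to recover approximate float features.
meme_vector = tf.cast(tf.decode_raw(context["train/meme"], tf.int64), tf.float32) / 1e9
caption_ids = sequence["train/captions"]

with tf.Session() as sess:
    vec, ids = sess.run([meme_vector, caption_ids])
    print(vec.shape, ids)
```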
ecb0b787e2867fd3e4cfd3384084ffebf0001679
111,598
ipynb
Jupyter Notebook
examples/models/metrics/general_metrics.ipynb
BearerPipelineTest/seldon-core
1c64f7fdfe79e48929cf37ab2938d1c82f84200e
[ "Apache-2.0" ]
null
null
null
examples/models/metrics/general_metrics.ipynb
BearerPipelineTest/seldon-core
1c64f7fdfe79e48929cf37ab2938d1c82f84200e
[ "Apache-2.0" ]
null
null
null
examples/models/metrics/general_metrics.ipynb
BearerPipelineTest/seldon-core
1c64f7fdfe79e48929cf37ab2938d1c82f84200e
[ "Apache-2.0" ]
null
null
null
30.982232
241
0.362318
[ [ [ "# Basic Examples with Different Protocols Showing Metrics\n\n## Prerequisites\n\n * A kubernetes cluster with kubectl configured\n * curl\n * grpcurl\n * pygmentize\n \n\n## Setup Seldon Core\n\nInstall Seldon Core as described in [docs](https://docs.seldon.io/projects/seldon-core/en/latest/workflow/install.html)\n\nThen port-forward to that ingress on localhost:8003 in a separate terminal either with:\n\n * Ambassador: \n \n ```bash\n kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080\n ```\n * Istio: \n \n ```bash\n kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80\n ```\n ", "_____no_output_____" ] ], [ [ "!kubectl create namespace seldon", "Error from server (AlreadyExists): namespaces \"seldon\" already exists\n" ], [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon", "Context \"kind-ansible\" modified.\n" ] ], [ [ "## Install Seldon Analytics", "_____no_output_____" ] ], [ [ "!helm install seldon-core-analytics ../../../helm-charts/seldon-core-analytics \\\n --set grafana_prom_admin_password=password \\\n --set persistence.enabled=false \\\n --namespace seldon-system \\\n --wait", "W0401 16:50:29.277945 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.278960 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.280031 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.280803 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.283179 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.284050 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.285098 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.286257 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.291413 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role\nW0401 16:50:29.293958 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 RoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 RoleBinding\nW0401 16:50:29.400127 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.400127 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; 
use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.400131 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.400127 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 16:50:29.405814 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.405814 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.405814 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.405820 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 16:50:29.413047 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role\nW0401 16:50:29.420663 549535 warnings.go:70] rbac.authorization.k8s.io/v1beta1 RoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 RoleBinding\nNAME: seldon-core-analytics\nLAST DEPLOYED: Fri Apr 1 16:50:29 2022\nNAMESPACE: seldon-system\nSTATUS: deployed\nREVISION: 1\n" ] ], [ [ "Port forward to the Grafana dashboard\n\n```bash\nkubectl port-forward $(kubectl get pods -n seldon-system -l app.kubernetes.io/name=grafana -o jsonpath='{.items[0].metadata.name}') 3000:3000 -n seldon-system\n```", "_____no_output_____" ] ], [ [ "%env RESOURCES=../../../notebooks/resources", "env: RESOURCES=../../../notebooks/resources\n" ] ], [ [ "## Seldon Protocol REST Model\n\n**Make sure your active namespace is seldon**", "_____no_output_____" ] ], [ [ "!pygmentize ${RESOURCES}/model_seldon_rest.yaml", "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1\n\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\n\u001b[94mmetadata\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: rest-seldon\n\u001b[94mspec\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: restseldon\n \u001b[94mprotocol\u001b[39;49;00m: seldon\n \u001b[94mtransport\u001b[39;49;00m: rest \n \u001b[94mpredictors\u001b[39;49;00m:\n - \u001b[94mcomponentSpecs\u001b[39;49;00m:\n - \u001b[94mspec\u001b[39;49;00m:\n \u001b[94mcontainers\u001b[39;49;00m:\n - \u001b[94mimage\u001b[39;49;00m: seldonio/mock_classifier:1.6.0-dev\n \u001b[94mname\u001b[39;49;00m: classifier\n \u001b[94mgraph\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: classifier\n \u001b[94mtype\u001b[39;49;00m: MODEL\n \u001b[94mname\u001b[39;49;00m: model\n \u001b[94mreplicas\u001b[39;49;00m: 1\n" ], [ "!kubectl apply -f ${RESOURCES}/model_seldon_rest.yaml -n seldon", "seldondeployment.machinelearning.seldon.io/rest-seldon unchanged\n" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=rest-seldon \\\n -o jsonpath='{.items[0].metadata.name}')", "deployment \"rest-seldon-model-0-classifier\" successfully rolled out\n" ], [ "!for i in `seq 1 60`; do \\\n sleep 1 && curl -d '{\"data\": {\"ndarray\":[[1.0, 2.0, 5.0]]}}' \\\n -X POST 
http://localhost:8003/seldon/seldon/rest-seldon/api/v1.0/predictions \\\n -H \"Content-Type: application/json\"; \\\ndone", "{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\"
:[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":
{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n{\"data\":{\"names\":[\"proba\"],\"ndarray\":[[0.43782349911420193]]},\"meta\":{\"requestPath\":{\"classifier\":\"seldonio/mock_classifier:1.6.0-dev\"}}}\n" ] ], [ [ "![seldon-rest-dashboard](seldon-rest-dashboard.png)", "_____no_output_____" ] ], [ [ "import time\n\nfor i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/rest-seldon/prometheus | grep seldon_api_executor_server_requests_seconds_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for rest-seldon\")\n time.sleep(2)\n\nfor i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/rest-seldon/prometheus | grep seldon_api_executor_server_requests_seconds_summary_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for rest-seldon\")\n time.sleep(2)", "seldon_api_executor_server_requests_seconds_count{code=\"200\",deployment_name=\"rest-seldon\",method=\"post\",predictor_name=\"model\",predictor_version=\"\",service=\"predictions\"} 60\nseldon_api_executor_server_requests_seconds_summary_count{code=\"200\",deployment_name=\"rest-seldon\",method=\"post\",predictor_name=\"model\",predictor_version=\"\",service=\"predictions\"} 60\n" ], [ "!kubectl delete -f ${RESOURCES}/model_seldon_rest.yaml -n seldon", 
"seldondeployment.machinelearning.seldon.io \"rest-seldon\" deleted\n" ] ], [ [ "## Seldon Protocol GRPC Model", "_____no_output_____" ] ], [ [ "!pygmentize ${RESOURCES}/model_seldon_grpc.yaml", "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1\n\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\n\u001b[94mmetadata\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: grpc-seldon\n\u001b[94mspec\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: grpcseldon\n \u001b[94mprotocol\u001b[39;49;00m: seldon\n \u001b[94mtransport\u001b[39;49;00m: grpc\n \u001b[94mpredictors\u001b[39;49;00m:\n - \u001b[94mcomponentSpecs\u001b[39;49;00m:\n - \u001b[94mspec\u001b[39;49;00m:\n \u001b[94mcontainers\u001b[39;49;00m:\n - \u001b[94mimage\u001b[39;49;00m: seldonio/mock_classifier:1.6.0-dev\n \u001b[94mname\u001b[39;49;00m: classifier\n \u001b[94mgraph\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: classifier\n \u001b[94mtype\u001b[39;49;00m: MODEL\n \u001b[94mendpoint\u001b[39;49;00m:\n \u001b[94mtype\u001b[39;49;00m: GRPC\n \u001b[94mname\u001b[39;49;00m: model\n \u001b[94mreplicas\u001b[39;49;00m: 1\n" ], [ "!kubectl apply -f ${RESOURCES}/model_seldon_grpc.yaml -n seldon", "seldondeployment.machinelearning.seldon.io/grpc-seldon created\n" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpc-seldon \\\n -o jsonpath='{.items[0].metadata.name}')", "Waiting for deployment \"grpc-seldon-model-0-classifier\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"grpc-seldon-model-0-classifier\" successfully rolled out\n" ], [ "!cd ../../../executor/proto && for i in `seq 1 60`; do \\\n sleep 1 && grpcurl -d '{\"data\":{\"ndarray\":[[1.0,2.0]]}}' \\\n -rpc-header seldon:grpc-seldon -rpc-header namespace:seldon \\\n -plaintext \\\n -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict; \\\ndone", "{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n 
\"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n 
\"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": 
\"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n{\n \"meta\": {\n \"requestPath\": {\n \"classifier\": 
\"seldonio/mock_classifier:1.6.0-dev\"\n }\n },\n \"data\": {\n \"names\": [\n \"proba\"\n ],\n \"ndarray\": [\n [\n 0.1951846770138402\n ]\n ]\n }\n}\n" ] ], [ [ "![seldon-grpc-dashboard](seldon-grpc-dashboard.png)", "_____no_output_____" ] ], [ [ "for i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/grpc-seldon/prometheus | grep seldon_api_executor_server_requests_seconds_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for grpc-seldon\")\n time.sleep(2)\n\nfor i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/grpc-seldon/prometheus | grep seldon_api_executor_server_requests_seconds_summary_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for grpc-seldon\")\n time.sleep(2)", "seldon_api_executor_server_requests_seconds_count{code=\"OK\",deployment_name=\"grpc-seldon\",method=\"unary\",predictor_name=\"model\",predictor_version=\"\",service=\"/seldon.protos.Seldon/Predict\"} 60\nseldon_api_executor_server_requests_seconds_summary_count{code=\"OK\",deployment_name=\"grpc-seldon\",method=\"unary\",predictor_name=\"model\",predictor_version=\"\",service=\"/seldon.protos.Seldon/Predict\"} 60\n" ], [ "!kubectl delete -f ${RESOURCES}/model_seldon_grpc.yaml -n seldon", "seldondeployment.machinelearning.seldon.io \"grpc-seldon\" deleted\n" ] ], [ [ "## Tensorflow Protocol REST Model", "_____no_output_____" ] ], [ [ "!pygmentize ${RESOURCES}/model_tfserving_rest.yaml", "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1\n\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\n\u001b[94mmetadata\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: example-tfserving\n\u001b[94mspec\u001b[39;49;00m:\n \u001b[94mprotocol\u001b[39;49;00m: tensorflow\n \u001b[94mpredictors\u001b[39;49;00m:\n - \u001b[94mcomponentSpecs\u001b[39;49;00m:\n - \u001b[94mspec\u001b[39;49;00m:\n \u001b[94mcontainers\u001b[39;49;00m:\n - \u001b[94margs\u001b[39;49;00m:\n - --port=8500\n - --rest_api_port=8501\n - --model_name=halfplustwo\n - --model_base_path=gs://seldon-models/tfserving/half_plus_two\n \u001b[94mimage\u001b[39;49;00m: tensorflow/serving\n \u001b[94mname\u001b[39;49;00m: halfplustwo\n \u001b[94mports\u001b[39;49;00m:\n - \u001b[94mcontainerPort\u001b[39;49;00m: 8501\n \u001b[94mname\u001b[39;49;00m: http\n \u001b[94mprotocol\u001b[39;49;00m: TCP\n - \u001b[94mcontainerPort\u001b[39;49;00m: 8500\n \u001b[94mname\u001b[39;49;00m: grpc\n \u001b[94mprotocol\u001b[39;49;00m: TCP\n \u001b[94mgraph\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: halfplustwo\n \u001b[94mtype\u001b[39;49;00m: MODEL\n \u001b[94mendpoint\u001b[39;49;00m:\n \u001b[94mhttpPort\u001b[39;49;00m: 8501\n \u001b[94mgrpcPort\u001b[39;49;00m: 8500\n \u001b[94mname\u001b[39;49;00m: model\n \u001b[94mreplicas\u001b[39;49;00m: 1\n" ], [ "!kubectl apply -f ${RESOURCES}/model_tfserving_rest.yaml -n seldon", "seldondeployment.machinelearning.seldon.io/example-tfserving created\n" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=example-tfserving \\\n -o jsonpath='{.items[0].metadata.name}')", "deployment \"example-tfserving-model-0-halfplustwo\" successfully rolled out\n" ], [ "!for i in `seq 1 60`; do \\\n sleep 1 && curl -d '{\"instances\": [1.0, 2.0, 5.0]}' \\\n -X POST http://localhost:8003/seldon/seldon/example-tfserving/v1/models/halfplustwo/:predict \\\n -H \"Content-Type: 
application/json\"; \\\ndone", "{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}{\n \"predictions\": [2.5, 3.0, 4.5\n ]\n}" ], [ "for i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/rest-tfserving/prometheus | grep seldon_api_executor_server_requests_seconds_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for rest-tfserving\")\n time.sleep(2)\n\nfor i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/rest-tfserving/prometheus | grep seldon_api_executor_server_requests_seconds_summary_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for rest-tfserving\")\n time.sleep(2)", "Failed to get metrics for rest-tfserving\nFailed to get metrics for rest-tfserving\nFailed to get metrics for rest-tfserving\nFailed to get metrics for rest-tfserving\nFailed to get metrics for rest-tfserving\nFailed to get metrics for rest-tfserving\n" ] ], [ [ "![tfserving-rest-dashboard](tfserving-rest-dashboard.png)", 
"_____no_output_____" ] ], [ [ "!kubectl delete -f ${RESOURCES}/model_tfserving_rest.yaml -n seldon", "seldondeployment.machinelearning.seldon.io \"example-tfserving\" deleted\n" ] ], [ [ "## Tensorflow Protocol GRPC Model", "_____no_output_____" ] ], [ [ "!pygmentize ${RESOURCES}/model_tfserving_grpc.yaml", "\u001b[94mapiVersion\u001b[39;49;00m: machinelearning.seldon.io/v1\n\u001b[94mkind\u001b[39;49;00m: SeldonDeployment\n\u001b[94mmetadata\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: grpc-tfserving\n\u001b[94mspec\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: grpctfserving\n \u001b[94mprotocol\u001b[39;49;00m: tensorflow\n \u001b[94mtransport\u001b[39;49;00m: grpc\n \u001b[94mpredictors\u001b[39;49;00m:\n - \u001b[94mcomponentSpecs\u001b[39;49;00m:\n - \u001b[94mspec\u001b[39;49;00m:\n \u001b[94mcontainers\u001b[39;49;00m:\n - \u001b[94margs\u001b[39;49;00m: \n - --port=8500\n - --rest_api_port=8501\n - --model_name=halfplustwo\n - --model_base_path=gs://seldon-models/tfserving/half_plus_two\n \u001b[94mimage\u001b[39;49;00m: tensorflow/serving\n \u001b[94mname\u001b[39;49;00m: halfplustwo\n \u001b[94mports\u001b[39;49;00m:\n - \u001b[94mcontainerPort\u001b[39;49;00m: 8500\n \u001b[94mname\u001b[39;49;00m: grpc\n \u001b[94mgraph\u001b[39;49;00m:\n \u001b[94mname\u001b[39;49;00m: halfplustwo\n \u001b[94mtype\u001b[39;49;00m: MODEL\n \u001b[94mendpoint\u001b[39;49;00m:\n \u001b[94mservice_port\u001b[39;49;00m: 8500\n \u001b[94mtype\u001b[39;49;00m: GRPC\n \u001b[94mname\u001b[39;49;00m: model\n \u001b[94mreplicas\u001b[39;49;00m: 1\n" ], [ "!kubectl apply -f ${RESOURCES}/model_tfserving_grpc.yaml -n seldon", "seldondeployment.machinelearning.seldon.io/grpc-tfserving created\n" ], [ "!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=grpc-tfserving \\\n -o jsonpath='{.items[0].metadata.name}')", "Waiting for deployment \"grpc-tfserving-model-0-halfplustwo\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"grpc-tfserving-model-0-halfplustwo\" successfully rolled out\n" ], [ "!cd ../../../executor/proto && for i in `seq 1 60`; do \\\n sleep 1 && grpcurl \\\n -d '{\"model_spec\":{\"name\":\"halfplustwo\"},\"inputs\":{\"x\":{\"dtype\": 1, \"tensor_shape\": {\"dim\":[{\"size\": 3}]}, \"floatVal\" : [1.0, 2.0, 3.0]}}}' \\\n -rpc-header seldon:grpc-tfserving -rpc-header namespace:seldon \\\n -plaintext -proto ./prediction_service.proto \\\n 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict; \\\ndone", "{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 
3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 
3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 
3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 
3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 
3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n{\n \"outputs\": {\n \"x\": {\n \"dtype\": \"DT_FLOAT\",\n \"tensorShape\": {\n \"dim\": [\n {\n \"size\": \"3\"\n }\n ]\n },\n \"floatVal\": [\n 2.5,\n 3,\n 3.5\n ]\n }\n },\n \"modelSpec\": {\n \"name\": \"halfplustwo\",\n \"version\": \"123\",\n \"signatureName\": \"serving_default\"\n }\n}\n" ] ], [ [ "![dashboard](tfserving-grpc-dashboard.png)", "_____no_output_____" ] ], [ [ "for i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/grpc-tfserving/prometheus | grep seldon_api_executor_server_requests_seconds_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for grpc-tfserving\")\n time.sleep(2)\n\nfor i in range(3):\n metric = !curl -s http://localhost:8003/seldon/seldon/grpc-tfserving/prometheus | grep seldon_api_executor_server_requests_seconds_summary_count\n if metric and len(metric) > 0:\n print(metric[0])\n assert not metric[0] == \"\"\n break\n else:\n print(\"Failed to get metrics for grpc-tfserving\")\n time.sleep(2)", "seldon_api_executor_server_requests_seconds_count{code=\"OK\",deployment_name=\"grpc-tfserving\",method=\"unary\",predictor_name=\"model\",predictor_version=\"\",service=\"/tensorflow.serving.PredictionService/Predict\"} 
60\nseldon_api_executor_server_requests_seconds_summary_count{code=\"OK\",deployment_name=\"grpc-tfserving\",method=\"unary\",predictor_name=\"model\",predictor_version=\"\",service=\"/tensorflow.serving.PredictionService/Predict\"} 60\n" ], [ "!kubectl delete -f ${RESOURCES}/model_tfserving_grpc.yaml -n seldon", "seldondeployment.machinelearning.seldon.io \"grpc-tfserving\" deleted\n" ], [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon-system", "Context \"kind-ansible\" modified.\n" ], [ "!helm delete seldon-core-analytics -n seldon-system", "W0401 17:13:44.115573 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 RoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 RoleBinding\nW0401 17:13:44.124540 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 Role is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 Role\nW0401 17:13:44.128226 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 17:13:44.128226 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 17:13:44.128226 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 17:13:44.128226 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding\nW0401 17:13:44.131656 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 17:13:44.131665 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 17:13:44.131671 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nW0401 17:13:44.131656 612502 warnings.go:70] rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole\nrelease \"seldon-core-analytics\" uninstalled\n" ], [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon", "Context \"kind-ansible\" modified.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ecb0bdca1cf1e0ea4c039edc26a2dfefa21a53d4
17,765
ipynb
Jupyter Notebook
ML_Models/Regression/Decision_Tree_Regression/decision_tree_regression.ipynb
lakshit2808/Machine-Learning-Notes
1b7760c2626c36a7f62c5a474e9fdadb76cb023b
[ "MIT" ]
2
2021-09-04T17:13:48.000Z
2021-09-04T17:13:50.000Z
ML_Models/Regression/Decision_Tree_Regression/decision_tree_regression.ipynb
lakshit2808/Machine-Learning-Notes
1b7760c2626c36a7f62c5a474e9fdadb76cb023b
[ "MIT" ]
null
null
null
ML_Models/Regression/Decision_Tree_Regression/decision_tree_regression.ipynb
lakshit2808/Machine-Learning-Notes
1b7760c2626c36a7f62c5a474e9fdadb76cb023b
[ "MIT" ]
1
2021-11-23T19:45:02.000Z
2021-11-23T19:45:02.000Z
83.403756
11,926
0.810526
[ [ [ "<a href=\"https://colab.research.google.com/github/lakshit2808/Machine-Learning-Notes/blob/master/ML_Models/Regression/Decision_Tree_Regression/decision_tree_regression.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Decision Tree Regression\nThe decision trees is used to fit a sine curve with addition noisy observation. As a result, it learns local linear regressions approximating the sine curve.\n\nWe can see that if the maximum depth of the tree (controlled by the max_depth parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit.\n\n<img src='https://scikit-learn.org/stable/_images/sphx_glr_plot_tree_regression_001.png'/>", "_____no_output_____" ], [ "## Importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Importing the dataset", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, -1].values", "_____no_output_____" ] ], [ [ "## Training the Decision Tree Regression model on the whole dataset", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X,y)", "_____no_output_____" ] ], [ [ "## Predicting a new result", "_____no_output_____" ] ], [ [ "regressor.predict([[5.5]])", "_____no_output_____" ] ], [ [ "## Visualising the Decision Tree Regression results (higher resolution)", "_____no_output_____" ] ], [ [ "X_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.title('Truth or Bluff (Decision Tree Regression)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb0bdd6a37e995512c055bf0dc447e8fced0116
89,028
ipynb
Jupyter Notebook
Notebooks/CreateResultHTML.ipynb
smenon8/AnimalPhotoBias
0b97a8a6d51ad749b4338febdee9d67b80dc3853
[ "BSD-3-Clause" ]
2
2017-02-12T02:33:12.000Z
2021-06-21T09:03:34.000Z
Notebooks/CreateResultHTML.ipynb
smenon8/AnimalWildlifeEstimator
0b97a8a6d51ad749b4338febdee9d67b80dc3853
[ "BSD-3-Clause" ]
14
2016-08-31T03:05:44.000Z
2017-06-02T17:37:29.000Z
Notebooks/CreateResultHTML.ipynb
smenon8/AnimalPhotoBias
0b97a8a6d51ad749b4338febdee9d67b80dc3853
[ "BSD-3-Clause" ]
1
2016-04-29T20:33:45.000Z
2016-04-29T20:33:45.000Z
26.63117
116
0.339837
[ [ [ "import csv", "_____no_output_____" ], [ "reader = csv.reader(open(\"../data/consolidatedHITResultsWithInfo.csv\",\"r\"))\nhead = reader.__next__()\n\ndata = []\nfor row in reader:\n data.append(row)", "_____no_output_____" ], [ "# logic for creating the hyper-links\nhyperLinks = []\nfor row in data:\n hyperLinks.append(\"http://pachy.cs.uic.edu:5000/api/image/src/\" + str(row[0]) + \"/?resize_pix_w=300\")", "_____no_output_____" ], [ "hyperIndex = 0\nfor row in data:\n print(\"<tr>\")\n print(\" <td>\")\n print(' <img src = \"' + hyperLinks[hyperIndex] + '\"alt=\"Unavailable\"/>')\n print(\" </td>\")\n print(\" <td><center>\")\n print(row[1])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[2])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[3])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[4])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[5])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[6])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[7])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[8])\n print(\" </center></td>\")\n print(\" <td><center>\")\n print(row[9])\n print(\" </center></td>\")\n print(\"</tr>\")\n\n hyperIndex += 1", "<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/926/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n20\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9473\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1712/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n20\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3131/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n20\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n8849\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/19/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n19\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9335\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/499/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n19\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9393\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nMale\n 
</center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/900/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n19\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9421\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1497/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n19\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14854\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3135/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n19\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n8974\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/16/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9326\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/542/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n16020\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/549/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/821/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15642\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = 
\"http://pachy.cs.uic.edu:5000/api/image/src/892/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9465\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1717/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3127/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n12555\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3153/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n18\n </center></td>\n <td><center>\n2\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n12535\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[6, 11]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/97/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/637/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n16090\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/825/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15591\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/878/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15654\n 
</center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/915/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15656\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1519/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14763\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[24, 35]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1522/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14752\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1594/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15191\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1692/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1715/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3138/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n17\n </center></td>\n <td><center>\n3\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n8822\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n 
</center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/8/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13390\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/483/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/537/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n16070\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[24, 35]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/586/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9347\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1508/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15000\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[24, 35]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1647/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15139\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[6, 11]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1711/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3110/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n 
<td><center>\n20\n </center></td>\n <td><center>\n12573\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3137/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n16\n </center></td>\n <td><center>\n4\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9050\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/34/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13429\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/50/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13427\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/85/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13430\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/597/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/636/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n16118\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/890/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9413\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n 
</center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1480/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14806\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3089/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9010\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3125/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n15\n </center></td>\n <td><center>\n5\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n12727\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/33/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13436\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/69/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13403\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\njunk\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/90/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/513/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9342\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = 
\"http://pachy.cs.uic.edu:5000/api/image/src/536/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15955\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/612/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15419\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/826/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15598\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/870/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15662\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1540/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13962\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[24, 35]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1631/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13964\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3068/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3084/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n 
<td><center>\n9003\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3085/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9083\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3115/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n12726\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3124/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9017\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3132/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n14\n </center></td>\n <td><center>\n6\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n8902\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/79/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n13\n </center></td>\n <td><center>\n7\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13454\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/824/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n13\n </center></td>\n <td><center>\n7\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15603\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1570/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n13\n </center></td>\n <td><center>\n7\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14780\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n 
</center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1649/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n13\n </center></td>\n <td><center>\n7\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14887\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[6, 11]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1652/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n13\n </center></td>\n <td><center>\n7\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n14055\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[6, 11]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3103/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n13\n </center></td>\n <td><center>\n7\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n12607\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[6, 11]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/815/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n12\n </center></td>\n <td><center>\n8\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15594\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/864/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n12\n </center></td>\n <td><center>\n8\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15868\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1554/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n12\n </center></td>\n <td><center>\n8\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15368\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[24, 35]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/12/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9330\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/37/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n 
</center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13405\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nexcellent\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/504/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/624/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15969\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/635/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n16165\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN SEX\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1634/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15084\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nMale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3107/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3114/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n11\n </center></td>\n <td><center>\n9\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n12716\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/474/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n10\n </center></td>\n <td><center>\n10\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n16058\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n 
<td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\npoor\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3074/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n10\n </center></td>\n <td><center>\n10\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/833/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n9\n </center></td>\n <td><center>\n11\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15649\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/10/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n8\n </center></td>\n <td><center>\n12\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n9336\n </center></td>\n <td><center>\ngiraffe_masai\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/80/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n8\n </center></td>\n <td><center>\n12\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13388\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nFemale\n </center></td>\n <td><center>\n[36, None]\n </center></td>\n <td><center>\n1\n </center></td>\n <td><center>\ngood\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/84/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n8\n </center></td>\n <td><center>\n12\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n13452\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\nok\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/101/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n8\n </center></td>\n <td><center>\n12\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/481/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n8\n </center></td>\n <td><center>\n12\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/592/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n 
<td><center>\n8\n </center></td>\n <td><center>\n12\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/99/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n7\n </center></td>\n <td><center>\n13\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/857/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n7\n </center></td>\n <td><center>\n13\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n15895\n </center></td>\n <td><center>\nzebra_plains\n </center></td>\n <td><center>\nUNKNOWN NAME\n </center></td>\n <td><center>\n[-1, -1]\n </center></td>\n <td><center>\n0\n </center></td>\n <td><center>\njunk\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/1509/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n7\n </center></td>\n <td><center>\n13\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/3112/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n7\n </center></td>\n <td><center>\n13\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/627/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n6\n </center></td>\n <td><center>\n14\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/904/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n6\n </center></td>\n <td><center>\n14\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/619/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n5\n </center></td>\n <td><center>\n15\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n 
<td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/845/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n5\n </center></td>\n <td><center>\n15\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/24/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n4\n </center></td>\n <td><center>\n16\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/91/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n4\n </center></td>\n <td><center>\n16\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/803/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n4\n </center></td>\n <td><center>\n16\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/905/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n1\n </center></td>\n <td><center>\n19\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n<tr>\n <td>\n <img src = \"http://pachy.cs.uic.edu:5000/api/image/src/910/?resize_pix_w=300\"alt=\"Unavailable\"/>\n </td>\n <td><center>\n1\n </center></td>\n <td><center>\n19\n </center></td>\n <td><center>\n20\n </center></td>\n <td><center>\n\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n <td><center>\nNULL\n </center></td>\n</tr>\n" ], [ "for h in hyperLinks:\n print(h)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ecb0be38642134501585d648d4af60e65da3acce
29,215
ipynb
Jupyter Notebook
06_machine_physique/Initiation_au_langage_d_assemblage/Partie1/03_jeu_de_Nim.ipynb
efloti/cours-nsi-premiere
5b05bc81e5f8d7df47bf5785068b4bf4d1e357bb
[ "CC0-1.0" ]
null
null
null
06_machine_physique/Initiation_au_langage_d_assemblage/Partie1/03_jeu_de_Nim.ipynb
efloti/cours-nsi-premiere
5b05bc81e5f8d7df47bf5785068b4bf4d1e357bb
[ "CC0-1.0" ]
null
null
null
06_machine_physique/Initiation_au_langage_d_assemblage/Partie1/03_jeu_de_Nim.ipynb
efloti/cours-nsi-premiere
5b05bc81e5f8d7df47bf5785068b4bf4d1e357bb
[ "CC0-1.0" ]
null
null
null
61.505263
4,884
0.716413
[ [ [ "# Jeu de Nim", "_____no_output_____" ], [ "Dans cette partie nous coderons un jeu très simple, le «jeu des allumettes». Ce jeu commence avec un tas de 15 allumettes (bien que cela puisse être un autre nombre). Les joueurs suppriment tour à tour de 1 à 3 allumettes du tas. Un joueur gagne s'il force l'autre à prendre la dernière allumette. Notre implémentation opposera un joueur à l'ordinateur.\n\nPour implémenter le jeu, nous aurons besoin d'apprendre à réaliser des itérations - boucles \\[ *looping* \\] - et des sélections \\[ *branching* \\] en assembleur. \n\nNous aurons aussi besoin d'apprendre à écrire du texte dans la console et à lire la saisie d'un utilisateur pendant le jeu. Dans le but de comprendre ce dernier point, nous aurons besoin en tout premier lieu de comprendre le chargement (lecture - \\[ *to load* \\]) et la sauvegarde (écriture - \\[ *to store* \\]) de valeurs *depuis ou vers* la mémoire en utilisant les instructions `LDR` - \\[ *LoaD Register (from memory)* \\], et `STR` - \\[ *STore Register (to memory)* \\].", "_____no_output_____" ], [ "## Utiliser les adresses mémoire", "_____no_output_____" ], [ "#### Exercice 15\n\nRemettre le mode de format par défaut **Hex** et vérifier que **Program** et **Memory** soit vide. (C'est le rôle du bouton **Clear** en bas à droite du simulateur)\n\nSaisir et Valider le code suivant:\n\n MOV R0,#255\n STR R0,68\n HALT\n LDR R1,72\n HALT\n \nVous observez que le code du programme a été traduit en code machine lequel a été chargé dans les cinq premier mots de la mémoire principale.\n\nFaites tourner le programme et observer que, lorsqu'il atteint le premier `HALT`, la valeur d'un des autres mots de la mémoire principale a changé. Copier une copie d'écran de la mémoire qui met en valeur ce qui a changé dans la mémoire.\n\nExpliquer pourquoi la valeur montrée est ce qu'elle est (*what*), et est où elle est (*where*).\n\nÀ présent, cliquer sur la position mémoire immédiatement à droite de celle qui a été modifiée, et y saisir une valeur. Relancer l'exécution (en appuyant sur Play) et montrer, avec une copie d'écran partielle, que la valeur saisie précédemment a été copiée ('loaded') dans `R1`.\n\nPourquoi la deuxième opérande pour l'instruction `LDR` est 72 et pas 69? Que se passe-t-il si vous mettez 69 et que vous relancer l'exécution?\n\n________", "_____no_output_____" ], [ "Un point très important à noter avec le jeu d'instruction de l'ARMlite:\n- L'instruction `MOV` ne peut pas travailler avec des adresses mémoires - le deuxième opérande est soit un registre soit une valeur immédiate (de même pour `MVN`),\n- L'instruction `LDR` ne peut pas être utilisée pour charger une valeur immédiate dans un registre - le deuxième opérande doit spécifier une adresse mémoire.", "_____no_output_____" ], [ "Pour la petite histoire, dans certain processeurs, `MOV` (ou son équivalent) peut gérer des registres, des valeurs immédiates et même des adresses mémoire. 
Un avantage du renforcement de la séparation des rôles, comme sur l'ARMlite, est d'aider le programmeur à se souvenir que les opérations qui impliquent la mémoire principale sont lentes (10x plus environ) par rapport à celles qui n'impliquent que des registres ou des valeurs immédiates.", "_____no_output_____" ], [ "### Point culture - RISC versus CISC\n\nDans les premiers temps de l'informatique, les processeurs étaient munis d'un large jeu d'instructions de manière à faciliter la tâche du programmeur dans l'expression des algorithmes et ainsi d'améliorer sa productivité.\n\nCependant, dans les années 1980, on fit le constat que, puisque la plupart des programmes étaient écrits dans des langages de haut niveau (par opposition aux langages d'assemblages) et qu'ils étaient traduit en code machine automatiquement, le besoin de faire des machines ayant un langage d'assemblage simple à lire ou à écrire pour le programmeur étaient de moins en moins pressant. En réduisant le jeu d'instructions, il est possible de rendre les processeurs plus performants. Cette évolution dans l'architecture des processeurs est connue comme la transition des architectures **CISC** (pour ***Complex*** *Instruction Set Computers*) vers les architectures **RISC** (pour ***Reduced*** *Instruction Set Computers*). La plupart des processeurs modernes ont une architecture RISC bien qu'il n'y ait pas de définition précise de la distinction entre ses deux sortes d'architectures.\n\nDu temps où les architectures CISC étaient prédominantes, on trouvait de nombreuses instructions pour gérer directement la mémoire. Avec le modèle RISC, la plupart des instructions sont de type *registre-vers-registre* (plus rapides) alors que quelques unes seulement sont spécialisées dans le chargement des valeurs depuis ou vers la mémoire principale (plus lentes).\n___", "_____no_output_____" ], [ "## Les étiquettes \\[ *Labels* \\]", "_____no_output_____" ], [ "Lorsqu'on écrit un programme en assembleur, il est déjà bien difficile de suivre la signification des valeurs courantes présentes dans les registres généraux alors si on ajoute à cela la gestion des adresses mémoires en «dur»... C'était pourtant nécessaire avec les premiers assembleurs - tout ce qu'il faisait, c'était traduire les codes «mnémoniques» des instructions, comme `MOV R0,#32` dans l'instruction machine correspondante en binaire; du genre: `0b11100011101000000000000000100000`.\n\nL'étape suivante a été l'introduction d'**assembleur symbolique** qui permettait au programmeur de définir des «symboles» (aujourd'hui on parle plutôt d'**étiquettes**) pour *désigner des adresses spécifiques en mémoire*. Aujourd'hui, tous les assembleurs modernes ont cette capacité.\n\nLe court programme qui suit défini deux étiquettes pour des adresses mémoires, `xCoord` et `yCoord`, et initialise les mots situés à ces adresses avec les valeurs 3 et 4 respectivement. Ces définitions d'étiquettes sont situées *après* toutes les instructions du programme - c'est la pratique recommandée. Une définition d'étiquette doit être suivie immédiatement par `:` \\[ deux points - *colon* \\] comme vous pouvez le voir ci-dessous. 
Les instructions du programme utilisent ou référencent ces étiquettes, mais une référence à une étiquette ne contient pas le caractère `:`.\n\n LDR R0, xCoord\n ADD R0, R0, #6\n STR R0, xCoord\n LDR R0, yCoord\n ADD R0, R0, #2\n STR R0, yCoord\n HALT\n xCoord: 3\n yCoord: 4\n\nLa plupart du temps, le programmeur ne sait pas (et ne se soucie pas de savoir) où sont situées les valeurs de `xCoord` et `yCoord` en mémoire - simplement parce qu'il peut toujours faire référence à ces positions avec les étiquettes.", "_____no_output_____" ], [ "#### Exercice 16\n\nAvec le simulateur ARMLite en mode par défaut (**Hex**), saisir et valider le code précédent.\n\n*Avant de le lancer*, passer la souris sur les définitions des étiquettes (dans les deux dernières lignes) dans le code. La tooltip vous montrera les adresses mémoires (en hexa) auquelles ces étiquettes font référence. Quelles sont les adresses respectives de `xCoord` et `yCoord`?\n\nColler deux copies d'écran partielles de la zone mémoire, l'une prise avant d'avoir fait tourner le programme, l'autre après, en mettant en évidence dans les deux cas les deux mots mémoire pour `xCoord` et `yCoord`.\n____", "_____no_output_____" ], [ "Cet exemple fait apparaître la raison pour laquelle nous avons besoin de l'instruction `HALT`. Si on supprimait cette instruction, l'ARMLite essaierait d'exécuter le prochain mot (lequel contient la valeur de `xCoord`) comme une instruction. Pour la valeur utilisée dans cet exemple, cela échouerai en produisant une erreur «bad instruction». Mais sur un processeur ARM réel, la valeur du mot pourrait très bien correspondre à une instruction réelle et cela produirait un comportement très probablement indésirable et imprévisible.", "_____no_output_____" ], [ "## Entrée/sortie basique", "_____no_output_____" ], [ "Une partie le l'ARMLite porte la mention *Input/Output*. Le champ le plus haut de cette zone est la «console» - qu'on peut utiliser pour envoyer du texte à l'utilisateur; le champ situé juste en dessous sert à permettre à l'utilisateur de saisir des données lorsque le programme en demande.\n\nL'ARMlite utilise les instruction `STR` et `LDR` en combinaison avec des étiquettes prédéfinies pour gérer l'interaction avec ces champs. Cette manière de faire est connue sous le nom **d'entrées/sorties mappés en mémoire** \\[ *memory mapped I/O* \\].", "_____no_output_____" ], [ "Nous introduirons ces idées en réalisant un premier pas vers le jeu de Nim. Nous utiliserons une approche incrémentale \\[ *iterative* \\] de développement: écrire un petit bout correspondant à l'une des fonctionnalité du jeu à chaque étape. Voici la première itération:", "_____no_output_____" ], [ "```\n//R0 - allumettes restantes\n//R1 - pour écrire des messages\n//R2 - nombre d'allumettes à enlever\n MOV R0, #15\n STR R0, .WriteUnsignedNum\n MOV R1, #msg1\n STR R1, .WriteString\n MOV R1, #msg2\n STR R1, .WriteString\n LDR R2, .InputNum\n SUB R0, R0, R2\n HALT\nmsg1: .ASCIZ \"restantes\\n\"\nmsg2: .ASCIZ \"Combien souhaitez-vous en enlever (1-3)?\\n\"\n```", "_____no_output_____" ], [ "Quelques explications:\n- Les trois premières lignes qui débuttent par `//` sont des **commentaires** sur le rôle des registres utilisés dans le code,\n\n- `msg1` et `msg2` sont des étiquettes mémoires comme nous en avons déjà vues. La différence est que `.ASCIZ` est une *directive* à l'assembleur qui lui demande de positionner chaque caractère de la chaîne en mémoire sur un octet et de terminer avec un octet nul (ASCIZ -> ASCII terminé par un Zéro). 
L'octet nul permet à l'ARMlite de détecter la fin de la chaîne.\n\n- L'instruction `MOV R1, #msg1` ne charge pas le contenu de msg1 dans R1: cela ne tiendrait pas car la chaîne fait plus de 32bits et de toute façon, il faudrait utiliser `LDR` pour obtenir un tel effet. Au lieu de cela, cette instruction copie la valeur immédiate de l'étiquette msg1 dans R1, c'est-à-dire, l'*adresse en mémoire où se situe le début de cette chaîne*.\n\n- `.WriteSignedNum` est semblable à une étiquette, mais le point du début indique que c'est une étiquette prédéfinie et reconnue par l'ARMLite - plutôt qu'une étiquette définie par l'utilisateur comme msg1. L'assembleur traduit cette étiquette en une adresse mémoire effective bien qu'elle n'apparaisse pas dans la vue de la mémoire principale du simulateur. Durant l'exécution, le fait d'écrire une donnée à l'adresse qui correspond à `.WriteSignedNum` est gérer par l'ARMlite par une écriture dans la console après traduction dans le format valeur décimale signée.\n\n- `.WriteString` est une autre étiquette prédéfinine par l'ARMlite qui permet d'écrire une chaîne complète plutôt qu'un caractère unique. Comme R1 ne peut contenir la chaîne, il contient l'adresse de son premier caractère (La fin est reconnue grâce à l'octet nul placé après le dernier caractère).\n Chaque utilisation de `.WriteString` est donc précédé par une instruction qui précise l'adresse où la chaîne débute comme `MOV R1, #msg1`.\n\n- `LDR R2, .InputNum` est un autre exemple d'entrée/sortie mappés en mémoire. À l'exécution, cette instruction aura pour effet de demander un nombre à l'utilisateur dans le champ prévu à cet effet lequel sera transféré dans le registre `R2` comme si il avait été chargé depuis la mémoire à l'adresse désignée par l'étiquette `.InputNum`.\n\n- Enfin l'écriture `\\n` est ce qu'on appelle un caractère d'échappement \\[ *escape character* \\]. Lorsqu'il est envoyé à la console, elle l'interprète comme une nouvelle ligne.", "_____no_output_____" ], [ "#### Exercice 17\n\nFaites tourner ce programme. Lorsque le processeur se bloque en attente d'une donnée, saisir 1, 2 ou 3. Lorsque le programme se supend, prendre une capture d'écran partielle montrant la console et montrant la valeur dans `R0` qui devrait être le nombre d'allumettes restantes.\n___", "_____no_output_____" ], [ "## Sélection ou Branchement \\[*Branching*\\]", "_____no_output_____" ], [ "Pour le moment nous imaginerons qu'il n'y a qu'un joueur (peu intéressant comme jeu!). Nous voudrions que le programme «recommence» en affichant à chaque fois le nombre d'allumettes restantes. Pour l'ARMlite, la manière la plus simple pour implémenter un **branchement** est d'utiliser l'instruction `B` (pour *Branch*) laquelle est suivie par les informations qui précisent où ce branchement doit avoir lieu. 
La façon la plus claire pour préciser où le branchement doit avoir lieu est d'utiliser une étiquette définie par nos soin comme `loop:` que vous voyez plus bas:", "_____no_output_____" ], [ "```\n//R0 - allumettes restantes\n//R1 - pour écrire des messages\n//R2 - nombre d'allumettes à enlever\n MOV R0, #15\nloop: STR R0, .WriteUnsignedNum\n MOV R1, #msg1\n STR R1, .WriteString\n MOV R1, #msg2\n STR R1, .WriteString\n LDR R2, .InputNum\n SUB R0, R0, R2\n B loop\n HALT\nmsg1: .ASCIZ \"restantes\\n\"\nmsg2: .ASCIZ \"Combien souhaitez-vous en enlever (1-3)?\\n\"\n```", "_____no_output_____" ], [ "Noter qu'en précisant le point de branchement avec une étiquette, nous n'avons pas besoin de nous soucier du changement d'adresse qui se produit lorsque nous ajoutons ou supprimons des instructions.", "_____no_output_____" ], [ "#### Exercice 18\n\nEffectuer le changement indiqué ci-avant et faites tourner le programme pour vérifier par vous-même ce qu'il fait à présent.\n\nPourquoi l'étiquette `loop:` n'a-t-elle pas été placée sur la première instruction (plutôt que la seconde)? Si vous n'êtes pas sûr, expérimenter ce changement.\n\nMême en version mono-utilisateur de ce jeu, il y a deux sérieuses limitations dans ce code. Pourriez-vous les préciser?\n_____", "_____no_output_____" ], [ "Les limitations mentionnées plus tôt nécessitent de pouvoir effectuer une sorte de *sélection* - connue en assembleur sous le vocable «**branchement conditionnel**» \\[ *conditional branching* \\]. Un branchement conditionnel fonctionne de manière similaire à `B`, mais le branchement effectif n'est effectué *que lorsque certaines conditions sont remplies*. Il y a quatre versions de branchement conditionnel pour l'ARMlite:\n\n1. `BEQ` - Branchement en cas d'égalité - *Branch if EQual*,\n2. `BNE` - Branchement en cas de différence - *Branch if Not Equal*,\n3. `BLT` - Branchement en cas d'infériorité stricte - *Branch if Less Than*,\n4. `BGT` - Branchement en cas de supériorité stricte - *Branch if Greater Than*.\n\nLa question que vous avez peut-être à l'esprit est probablement: Branchement si *quoi* est égal à *quoi*? Ces branchements conditionnels sont conçus pour être *précédés immédiatement* par l'instruction `CMP` - *CoMPare* - qui compare deux valeurs, par exemple:\n\n`CMP R0, R1` compare les valeurs des deux registres,\n\n`CMP R3, #16` compare la valeur du registre R3 avec la valeur immédiate 16.\n\n", "_____no_output_____" ], [ "`CMP` fonctionne de manière similaire à `SUB` - elle soustrait son second opérande au premier - mais elle n'assigne pas le résultat à un registre destination, le résultat est simplement ignoré. La seule trace du résultat se trouve dans un registre spécial - désigné par *Status bits* par l'ARMLite - et généralement désigné par PSW pour \\[ *Program Status Word* \\] (registre d'état en français).\n\n![mot_d_etat.png](attachment:mot_d_etat.png)", "_____no_output_____" ], [ "Le bit `N` indique si le résultat de la comparaison était Négatif (bit à 1) ou non, et le bit `Z` s'il était Nul (bit à 1) ou non.", "_____no_output_____" ], [ "(Les bits `C` pour *Carry* (retenue) et `V` pour *oVerflow* sont utilisés pour détecter une erreur dans le résultat d'une opération - dans le cas où celui-ci ne peut pas tenir dans un registre 32bits. 
Nous n'en aurons pas besoin pour l'instant.)", "_____no_output_____" ], [ "Nous modifions le code ci-dessous en introduisant une nouvelle étiquette `input:` ainsi qu'une comparaison suivie immédiatement d'un branchement conditionnel vers cet étiquette (et donc en arrière). L'effet est que, si l'utilisateur saisi une valeur supérieure à 3, elle est ignorée et on demande à nouveau au joueur de saisir le nombre d'allumette qu'il souhaite prendre:", "_____no_output_____" ], [ "```\n//R0 - allumettes restantes\n//R1 - pour écrire des messages\n//R2 - nombre d'allumettes à enlever\n MOV R0, #15\nloop: STR R0, .WriteUnsignedNum\n MOV R1, #msg1\n STR R1, .WriteString\n MOV R1, #msg2\n STR R1, .WriteString\ninput: LDR R2, .InputNum\n CMP R2, #3\n BGT input\n SUB R0, R0, R2\n B loop\n HALT\nmsg1: .ASCIZ \"restantes\\n\"\nmsg2: .ASCIZ \"Combien souhaitez-vous en enlever (1-3)?\\n\"\n```", "_____no_output_____" ], [ "#### Exercice 19", "_____no_output_____" ], [ "Tester ce nouveau programme.\n\nÀ présent, en utilisant l'une des quatres formes possibles de branchement conditionnel présentées plus tôt, ajouter quelques instructions de manière à renforcer la règle que le nombre d'allumettes récupérées est au moins une.\n\nTester à nouveau une fois cette modification faites.\n\nEssayer de saisir une valeur négative. Est-ce quel le code empêche cela?\n\nFinalement, jouer le jeu jusqu'à ce qu'il ne reste plus qu'une ou deux allumettes. Que se passe-t-il si le joueur tente d'enlever plus d'allumettes qu'il n'en reste? Pouvez-vous trouver un moyen d'empêcher cela?\n\nFaites une copie d'écran de la version finale de votre code en soulignant les instructions que vous avez ajoutées.\n____", "_____no_output_____" ], [ "Nous avons maintenant besoin d'introduire le joueur qui correspond à la machine. Pour commencer, nous ferons en sorte que l'ordinateur prenne 1, 2 ou 3 allumettes au hasard mais pas plus qu'il n'en reste. Bien que nous pourrions écrire notre propre générateur de nombre pseudo-aléatoire, ARMLite nous en offre un prêt à l'emploi. L'extrait de code qui suit vous montre comment cela se passe:", "_____no_output_____" ], [ "```\n...\nselect: LDR R2, .Random //demande à l'ARMlite de charger le registre R2\n //avec un motif binaire aléatoire de 32bits\n AND R2, R2, #3 //ne conserver que les deux bits les plus à gauche\n //de ce motif aléatoire (correspond à un nombre entre 0 et 3)\n CMP R2, #0 //si 0 a été choisi...\n BEQ select //...recommencer\n CMP R2, R0 //si on enlève (strictement) plus d'allumettes qu'il n'en reste (R0)...\n BGT select //...recommencer\n BEQ select //de même si le choix signifie prendre toutes les allumettes restantes.\n...\n```", "_____no_output_____" ], [ "Observer que le dernier `CMP` est suivi de deux instructions de branchement conditionnel à la suite. 
Cela fonctionne car ces instructions de branchement ne modifient pas le contenu du registre d'état.", "_____no_output_____" ], [ "Nous somme maintenant en mesure d'achever <a href=\"jeu_de_nim.txt\">le programme de ce jeu</a>.", "_____no_output_____" ], [ "#### Exercice 20\n\nSaisir et faire tourner le programme complet plus d'une fois.\n\nPrendre une capture d'écran partiel montrant la console à la fin du jeu dans chaque cas: celui où vous gagnez, celui où l'ordinateur gagne.\n\nIl y a en fait une stratégie très simple qui garantie la victoire si vous jouez en premier et qui vous l'assure probablement si vous jouez en second pourvu que l'autre joueur ne suive pas la même stratégie (comme dans ce cas où l'ordinateur joue au hasard).\n\nPouvez-vous trouver cette stratégie gagnante?\n____", "_____no_output_____" ], [ "## Exercices optionnels pour améliorer/étendre le jeu", "_____no_output_____" ], [ "Si vous avez un peu de temps disponible, essayez de modifier et/ou d'étendre le programme pour réaliser ces objectifs:\n\n1. Lorsque le jeu s'achève, revenir au début pour rejouer automatiquement,\n2. Faire en sorte qu'à chaque tour le joueur qui débute change ou (alternativement) sélectionner celui qui commence au hasard,\n3. Modifier le nombre d'allumettes de départ de façon à le choisir aléatoirement,\n4. Conserver le compte du nombre de fois où le joueur l'emporte ou bien l'ordinateur,\n5. Trouver et implémenter un meilleur algorithme pour la gestion du jeu côté ordinateur (Noter que la stratégie optimum lui permettra de gagner à tous les coups s'il commence le jeu en premier).", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ecb0c05fba6f924fe2ec45d9dbb9a548760e24c2
12,948
ipynb
Jupyter Notebook
Example_PyGARV_Sapateiro.ipynb
rvertulo/PyGARV
3db68bb3e8243a0e696aa356b0a8b927214337e5
[ "Apache-2.0" ]
1
2021-06-05T17:16:48.000Z
2021-06-05T17:16:48.000Z
Example_PyGARV_Sapateiro.ipynb
rvertulo/PyGARV
3db68bb3e8243a0e696aa356b0a8b927214337e5
[ "Apache-2.0" ]
null
null
null
Example_PyGARV_Sapateiro.ipynb
rvertulo/PyGARV
3db68bb3e8243a0e696aa356b0a8b927214337e5
[ "Apache-2.0" ]
null
null
null
81.949367
7,350
0.778962
[ [ [ "# Otimização do Lucro de um Sapateiro Utilizando um Algoritmo Genético\n\nDocumento criado por __Rodrigo Cesar Vertulo__ em 15 de Novembro de 2019.", "_____no_output_____" ], [ "O enunciado do estudo de caso apresentado nesse documento foi retirado do livro \"Pesquisa Operacional para os cursos de: Economia, Administração e Ciências Contábeis\" dos autores Ermes Medeiros da Silva, Elio Medeiros da Silva e Afrânio Carlos Murolo.\n\nO problema apresentado foi resolvido pelos autores utilizando programação linear e nesse documento apresentarei uma alternativa para a sua resolução utilizando um Algoritmo Genético implementado com a linguagem de programação Python e a biblioteca <a href=\"https://github.com/rvertulo/PyGARV\" taget=\"_new\">PyGARV</a>. O objetivo é verificar e comparar os resultados obtidos pelas duas abordagens.\n\nO enunciado do problema é o seguinte: \"Um sapateiro faz 6 sapatos por hora, se fizer somente sapatos, e 5 cintos por hora, se fizer somente cintos. Ele gasta 2 unidades de couro para fabricar 1 unidade de sapato e 1 unidade de couro para fabricar uma unidade de cinto. Sabendo-se que o total disponível de couro é de 6 unidades e que o lucro unitário por sapato é de 5 unidades monetárias e o do cinto é de 2 unidades monetárias, quantas unidades de sapatos e de cintos o sapateiro deverá produzir para maximizar o seu lucro por hora?\"\n\nA modelagem do sistema é a seguinte:\n\n* x1 = número de sapatos a serem produzidos\n* x2 = número de cintos a serem produzidos\n\n__max. Lucro = 5x1 + 2x2__\n\nSujeito a:\n\n* 10x1 + 12x2 <= 60 (restrição do tempo gasto em minutos para produzir cada item por hora)\n* 2x1 + 1x2 <= 6 (restrição da quantidade de couro disponível)\n* x1 >= 0 (não é possível produzir um número negativo de sapatos)\n* x2 >= 0 (não é possível produzir um número negativo de cintos)", "_____no_output_____" ], [ "O Algoritmo Genético utilizará cromossomos contendo dois valores inteiros e positivos variando de 0 a 99, tendo em vista que observando-se as restrições do problema nenhuma das variáveis poderá assumir valores menores do que zero ou superiores a 99 sem que alguma delas seja desrespeitada.", "_____no_output_____" ], [ "O código a seguir implementa a solução para o problema.", "_____no_output_____" ] ], [ [ "from PyGARV import *\nimport matplotlib.pyplot as plt\n\nclass Example_PO3(PyGARV):\n def __init__(self):\n super().__init__(popSize=30,\n values=2,\n mutationRate=0.2,\n fullMutation=True,\n symmetricCut=False,\n elitism=0.1,\n digits=2)\n \n self.valores = []\n \n def fitness(self, cromossomo):\n x1 = cromossomo[0]\n x2 = cromossomo[1]\n \n f = 5*x1 + 2*x2\n \n if(\n 10*x1 + 12*x2 <= 60 and\n 2*x1 + 1*x2 <= 6 and\n x1 >= 0 and\n x2 >= 0\n ):\n \n self.nota = f\n \n else:\n self.nota = 1/(f+1)\n \n return [cromossomo, self.nota]\n \n def finishedGeneration(self, melhorCromossomo):\n self.x1 = melhorCromossomo[0]\n self.x2 = melhorCromossomo[1]\n \n self.f = 5*self.x1 + 2*self.x2\n \n if self.nota > 1:\n self.valores.append(self.f)\n\n \npygarv = Example_PO3()\npygarv.runGA(150)\n\nprint(\"Lucro: %i x1: %i x2: %i\" % (pygarv.f, pygarv.x1, pygarv.x2))\nplt.plot(pygarv.valores)\nplt.show()", "Lucro: 15 x1: 3 x2: 0\n" ] ], [ [ "As soluções obtidas por Algoritmos Genéticos nem sempre são ótimas, porém, normalmente chegam muito próximas ao resultado ótimo. No caso deste estudo de caso o Algoritmo Genético chegou à solução ótima, obtendo o mesmo resultado apresentado pelos autores do livro utilizando 150 gerações.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ecb0cef1a22b3791d725541eff7ecd0c08bbc0e8
58,819
ipynb
Jupyter Notebook
Regression/Polynomial Regression/Polynomial_Regression.ipynb
techonair/Machine-Learing-A-Z
695516b42162152a332233b610e70a5cdbc120d0
[ "MIT" ]
null
null
null
Regression/Polynomial Regression/Polynomial_Regression.ipynb
techonair/Machine-Learing-A-Z
695516b42162152a332233b610e70a5cdbc120d0
[ "MIT" ]
null
null
null
Regression/Polynomial Regression/Polynomial_Regression.ipynb
techonair/Machine-Learing-A-Z
695516b42162152a332233b610e70a5cdbc120d0
[ "MIT" ]
null
null
null
128.425764
13,590
0.86433
[ [ [ "<a href=\"https://colab.research.google.com/github/techonair/Machine-Learing-A-Z/blob/main/Regression/Polynomial%20Regression/Polynomial_Regression.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# **Polynomial Regression**\n\nWe have a case study, we are hiring a person, the person was previously a Regional Manager for quite a long time, he says he had a salary of 160K in his previous company, we need to check if that is true or bluffing. We have the dataset, let's do it.", "_____no_output_____" ], [ "# Importing Libraries", "_____no_output_____" ] ], [ [ "import numpy as np \nimport matplotlib.pyplot as plt \nimport pandas as pd", "_____no_output_____" ] ], [ [ "# Uploading the dataset", "_____no_output_____" ] ], [ [ "from google.colab import files\nfiles.upload()", "_____no_output_____" ] ], [ [ "# Spliting The Dataset in X & Y", "_____no_output_____" ] ], [ [ "dataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[ : , 1:-1].values\nY = dataset.iloc[ : , -1].values", "_____no_output_____" ] ], [ [ "# Training The Dataset With Linear Regression Model", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nlin_regressor = LinearRegression()\nlin_regressor.fit(X, Y)", "_____no_output_____" ] ], [ [ "# Training The Dataset With Polynomial Regression\n\ny = a + bx + cx^2 + dx^3 + ....\n\nif I take [x, x^2, x^3, ...] as a matrix then it will be in the form of linear regression", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import PolynomialFeatures\n\n# degree = 2 -> y = a + bx + cx^2\n# for first attempt tried with degree 2, curve wasn't fitting very well, tried with degree 3 & 4\npoly_regressor = PolynomialFeatures(degree= 4)\nX_poly = poly_regressor.fit_transform(X)\n\nlin_regressor_2 = LinearRegression()\nlin_regressor_2.fit(X_poly, Y) ", "_____no_output_____" ] ], [ [ "# Visualizing Linear Regression Prediction", "_____no_output_____" ] ], [ [ "plt.scatter(X, Y, color= 'red')\nplt.plot(X, lin_regressor.predict(X), color = 'blue')\nplt.title('Salary Prediction')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\nplt.show()", "_____no_output_____" ] ], [ [ "# Visualizing Polynomial Regression Prediction", "_____no_output_____" ] ], [ [ "plt.scatter(X, Y, color= 'red')\nplt.plot(X, lin_regressor_2.predict(X_poly), color = 'blue')\nplt.title('Salary Prediction')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\nplt.show()", "_____no_output_____" ] ], [ [ "Model is overfitting", "_____no_output_____" ], [ "# Visualizing Polynomial Regression Prediction (for higher resolution and smoother curve)\n", "_____no_output_____" ] ], [ [ "x_grid = np.arange(min(X), max(X), 0.1)\n# 0.1 steps instead of 1 as in the above graph\nx_grid = x_grid.reshape((len(x_grid), 1))\nplt.scatter(X, Y, color= 'red')\nplt.plot(x_grid, lin_regressor_2.predict(poly_regressor.fit_transform(x_grid)), color = 'blue')\nplt.title('Salary Prediction')\nplt.xlabel('Position Level')\nplt.ylabel('Salary')\nplt.show()", "_____no_output_____" ] ], [ [ "# Predicting a New Result with Linear Regression", "_____no_output_____" ] ], [ [ "lin_regressor.predict([[6.5]])", "_____no_output_____" ] ], [ [ "# Predicting a New Result with Polynomial Regression", "_____no_output_____" ] ], [ [ "lin_regressor_2.predict(poly_regressor.fit_transform([[6.5]]))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb0d81f6f203db0daf31372651fa5ac70251043
122,786
ipynb
Jupyter Notebook
src/test_problem_SLDS.ipynb
kharris/tvart
ee8f99eea9ad1d8734707d9d2f6880847821d7b3
[ "BSD-3-Clause-Clear" ]
10
2019-05-22T21:03:40.000Z
2020-03-10T15:43:57.000Z
src/test_problem_SLDS.ipynb
kharris/tvart
ee8f99eea9ad1d8734707d9d2f6880847821d7b3
[ "BSD-3-Clause-Clear" ]
1
2019-07-15T15:28:00.000Z
2019-07-15T15:28:00.000Z
src/test_problem_SLDS.ipynb
kamdh/tvart
ee8f99eea9ad1d8734707d9d2f6880847821d7b3
[ "BSD-3-Clause-Clear" ]
4
2019-05-25T23:34:21.000Z
2020-04-06T06:05:17.000Z
620.131313
96,488
0.937216
[ [ [ "from pyslds.models import *\nimport numpy as np\nimport scipy.io\nfrom pybasicbayes.util.text import progprint_xrange\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "data = scipy.io.loadmat(\"test_data_N_20_M_221_sigma_0.200000.mat\")\nX = data['X']\nprint(X.shape)\nN = X.shape[0]\n\nplt.plot(X.T)\nplt.show()", "(20, 221)\n" ], [ "Kmax = 2 # number of hidden states\nr = N #X.shape[0]", "_____no_output_____" ], [ "model = DefaultSLDS(K=Kmax, D_obs=N, D_latent=r, Cs=[np.eye(N) for _ in range(Kmax)])\nmodel.add_data(X.T)\nmodel.resample_states()", "_____no_output_____" ], [ "# Init with Gibbs sampler\nN_samples = 200\ndef update(model):\n model.resample_model()\n return model.log_likelihood()\n\nlls = [update(model) for _ in progprint_xrange(N_samples)]\n\nplt.plot(lls)", "......................... [ 25/200, 0.02sec avg, ETA 4.01 ]\n......................... [ 50/200, 0.02sec avg, ETA 3.27 ]\n......................... [ 75/200, 0.02sec avg, ETA 2.65 ]\n......................... [ 100/200, 0.02sec avg, ETA 2.09 ]\n......................... [ 125/200, 0.02sec avg, ETA 1.57 ]\n......................... [ 150/200, 0.02sec avg, ETA 1.06 ]\n......................... [ 175/200, 0.02sec avg, ETA 0.53 ]\n......................... [ 200/200, 0.02sec avg, ETA 0.00 ]\n\n 0.02sec avg, 4.20 total\n\n" ], [ "model._init_mf_from_gibbs()\nvlbs = []\nfor _ in progprint_xrange(100):\n model.VBEM_step()\n vlbs.append(model.VBEM_ELBO())\n if len(vlbs) > 1:\n assert vlbs[-1] > vlbs[-2] - 1e-8\n \nmodel.VBEM_step\nplt.plot(vlbs)", "_____no_output_____" ], [ "model.states_list[0].stateseq", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecb0d9f8c490f5a6ed3a4db7b0f78557f9ce6926
57,549
ipynb
Jupyter Notebook
Task 1/Task 1 - Supervised.ipynb
IshaSah/Sparks-GRIP-Internship
4ddb726c4817162bdf379755596579b98cec1091
[ "MIT" ]
null
null
null
Task 1/Task 1 - Supervised.ipynb
IshaSah/Sparks-GRIP-Internship
4ddb726c4817162bdf379755596579b98cec1091
[ "MIT" ]
null
null
null
Task 1/Task 1 - Supervised.ipynb
IshaSah/Sparks-GRIP-Internship
4ddb726c4817162bdf379755596579b98cec1091
[ "MIT" ]
null
null
null
61.483974
9,416
0.784271
[ [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nimport seaborn as sns", "_____no_output_____" ], [ "db=pd.read_csv('http://bit.ly/w-data')", "_____no_output_____" ], [ "db.head(10)", "_____no_output_____" ], [ "db.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 25 entries, 0 to 24\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Hours 25 non-null float64\n 1 Scores 25 non-null int64 \ndtypes: float64(1), int64(1)\nmemory usage: 528.0 bytes\n" ], [ "db.describe()", "_____no_output_____" ], [ "db.isnull().sum()", "_____no_output_____" ] ], [ [ "This dataset is clean and does not contain any null values.", "_____no_output_____" ] ], [ [ "sns.scatterplot(data=db,x='Hours',y='Scores',)", "_____no_output_____" ], [ "print(db['Hours'].min())\nprint(db['Hours'].max())", "1.1\n9.2\n" ] ], [ [ "The minimum and maximum hours studied by the students are 1.1 and 9.2 respectrively.", "_____no_output_____" ] ], [ [ "print(db['Scores'].min())\nprint(db['Scores'].max())", "17\n95\n" ] ], [ [ "The minimum and maximum score scored by the students are 17 and 95 respectrively.", "_____no_output_____" ] ], [ [ "db.boxplot('Hours')", "_____no_output_____" ], [ "db.boxplot('Scores')", "_____no_output_____" ] ], [ [ "This infers that our data contains no outlier.", "_____no_output_____" ] ], [ [ "db['Hours'].value_counts(bins=5).sort_index()", "_____no_output_____" ], [ "db['Scores'].value_counts(bins=5).sort_index()", "_____no_output_____" ] ], [ [ "This shows that our dataset is normally distributed and we can easily perform liner regression on it", "_____no_output_____" ] ], [ [ "sns.heatmap(db.corr(),annot=True)", "_____no_output_____" ] ], [ [ "This is the correlation matrix which signifies that Hours and Scores are highly related to each other.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "x=db.iloc[:,:-1].values\ny=db.iloc[:,1].values\nx_train, x_test, y_train, y_test= train_test_split(x, y,train_size=0.60,test_size=0.40,random_state=0)", "_____no_output_____" ] ], [ [ "Here we split our dataset into testing and training data in the ratio of 60:40", "_____no_output_____" ] ], [ [ "print(x_train.shape)\nprint(x_test.shape)\nprint(y_train.shape)\nprint(x_test.shape)", "(15, 1)\n(10, 1)\n(15,)\n(10, 1)\n" ], [ "from sklearn.linear_model import LinearRegression\n", "_____no_output_____" ], [ "model= LinearRegression()\nmodel.fit(x_train, y_train)", "_____no_output_____" ], [ "line = model.coef_*x+model.intercept_\n# Plotting for the test data\nplt.scatter(x, y)\nplt.plot(x, line);\nplt.show()", "_____no_output_____" ], [ "y_pred = model.predict(x_test)\ny_pred", "_____no_output_____" ] ], [ [ "The above values are the Scores values predicted by our model against our Hours test values.", "_____no_output_____" ] ], [ [ "print('Test Score')\nprint(model.score(x_test, y_test))\nprint('Training Score')\nprint(model.score(x_train, y_train))", "Test Score\n0.956640847232559\nTraining Score\n0.9440108159733135\n" ] ], [ [ "This score gives the accuracy of our model which is 95.6%.", "_____no_output_____" ] ], [ [ "output = pd.DataFrame({'Actual Score': y_test,'Predicted Score': y_pred, 'Residual':y_test-y_pred })\nprint(output.head(5))", " Actual Score Predicted Score Residual\n0 20 15.947762 4.052238\n1 27 32.773947 -5.773947\n2 69 74.344523 -5.344523\n3 30 25.845518 4.154482\n4 62 59.497889 2.502111\n" ] ], [ [ "What will be 
the predicted score if a student studies for 9.25 hrs/day?", "_____no_output_____" ] ], [ [ "print('Score of student who studied for 9.25 hours a day:', model.predict([[9.25]]))", "Score of student who studied for 9.25 hours a day: [92.65537185]\n" ] ], [ [ "The final step is to evaluate the performance of the algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. For simplicity here, we have chosen the mean square error. There are many such metrics.", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn import metrics", "_____no_output_____" ] ], [ [ "Calculating mean absolute error:", "_____no_output_____" ] ], [ [ "print('Mean absolute error : ', metrics.mean_absolute_error(y_test, y_pred))", "Mean absolute error :  4.894510863410609\n" ] ], [ [ "Calculating RMS error:", "_____no_output_____" ] ], [ [ "print('Root mean square error : ', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))", "Root mean square error :  5.102408224327238\n" ] ], [ [ "Calculating the R2 score (coefficient of determination):", "_____no_output_____" ] ], [ [ "metrics.r2_score(y_test,y_pred)", "_____no_output_____" ] ], [ [ "Performing t-test:", "_____no_output_____" ] ], [ [ "from scipy import stats\nt_statistic, pvalues =stats.ttest_ind(y_test, y_pred)\nprint('t-statistic -->',t_statistic)\nprint('P-value -->',pvalues)", "t-statistic --> 0.16808369709885795\nP-value --> 0.8683916002725192\n" ] ], [ [ "Performing f-test:", "_____no_output_____" ] ], [ [ "f_statistic, pvalues =stats.f_oneway(y_test, y_pred)\nprint('f-statistic -->',f_statistic)\nprint('P-value -->',pvalues)", "f-statistic --> 0.028252129230420545\nP-value --> 0.8683916002725187\n" ] ], [ [ "The dataset given to us has two attributes, Hours and Scores. This dataset had zero null values.\nOur task was to perform linear regression on the dataset and predict the Score of a student against the no. of hours he studied.\nThis model was successfully developed with an accuracy of 95.6%. \nWe also performed evaluation tests on our model. The model has a good R2 value, which suggests goodness of fit.\nThus, the task was successful.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ecb0da8bf33014a299b27c17567df63621c452a5
14,154
ipynb
Jupyter Notebook
Lectures/Lecture5-MapReduce/MR_SORT.ipynb
soumya8888/big-data-python-class
ac8c55b472d7d9a4df2d7a840c15b71c6018f429
[ "MIT" ]
null
null
null
Lectures/Lecture5-MapReduce/MR_SORT.ipynb
soumya8888/big-data-python-class
ac8c55b472d7d9a4df2d7a840c15b71c6018f429
[ "MIT" ]
null
null
null
Lectures/Lecture5-MapReduce/MR_SORT.ipynb
soumya8888/big-data-python-class
ac8c55b472d7d9a4df2d7a840c15b71c6018f429
[ "MIT" ]
null
null
null
35.034653
254
0.573054
[ [ [ "# examine the data\nFirst examine the data by loading it in with the %Load command\nIt will fill in the block with the content of the file in the block and comment out the %Load command", "_____no_output_____" ] ], [ [ "%load data/numbers.txt\n", "_____no_output_____" ] ], [ [ "# examine the sorting code \nDo not run the block a second time", "_____no_output_____" ] ], [ [ "# %load code/MRSortByString.py\nfrom mrjob.job import MRJob\n\nclass MRSortByString(MRJob):\n def mapper(self, _, line):\n \"\"\"\n \"\"\"\n l = line.split(' ')\n print l\n yield l[1], l[0]\n\n def reducer(self, key, val):\n yield key, [v for v in val][0]\n\n\nif __name__ == '__main__':\n MRSortByString.run()", "_____no_output_____" ], [ "%run code/MRSortByString.py data/numbers.txt", "no configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\ncreating tmp directory c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\n\nPLEASE NOTE: Starting in mrjob v0.5.0, protocols will be strict by default. It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols\n\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\step-0-mapper_part-00000\nCounters from step 1:\n (no counters found)\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\step-0-mapper-sorted\n> sort 'c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\step-0-mapper_part-00000'\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\step-0-reducer_part-00000\nCounters from step 1:\n (no counters found)\nMoving c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\step-0-reducer_part-00000 -> c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\output\\part-00000\nStreaming final output from c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.015754.754000\\output\n" ] ], [ [ "\nHow were they sorted?", "_____no_output_____" ] ], [ [ "# %load code/MRSortByInt.py\nfrom mrjob.job import MRJob\n\nclass MRSortByInt(MRJob):\n def mapper(self, _, line):\n \"\"\"\n \"\"\"\n l = line.strip('\\n').split()\n yield '%01d'%int(l[1]), l[0]\n\n def reducer(self, key, val):\n yield int(key), int(list(val)[0])\n\n\nif __name__ == '__main__':\n MRSortByInt.run()", "_____no_output_____" ], [ "%run code/MRSortByInt.py data/numbers.txt", "no configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\ncreating tmp directory c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\ncreating tmp directory c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\n\n\nPLEASE NOTE: Starting in mrjob v0.5.0, protocols will be strict by default. It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols\nPLEASE NOTE: Starting in mrjob v0.5.0, protocols will be strict by default. 
It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols\n\n\nwriting to c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-mapper_part-00000\nwriting to c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-mapper_part-00000\nCounters from step 1:\nCounters from step 1:\n (no counters found)\n (no counters found)\nwriting to c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-mapper-sorted\nwriting to c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-mapper-sorted\n> sort 'c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-mapper_part-00000'\n> sort 'c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-mapper_part-00000'\nwriting to c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-reducer_part-00000\nwriting to c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-reducer_part-00000\nCounters from step 1:\nCounters from step 1:\n (no counters found)\n (no counters found)\nMoving c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-reducer_part-00000 -> c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\output\\part-00000\nMoving c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\step-0-reducer_part-00000 -> c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\output\\part-00000\nStreaming final output from c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\output\nStreaming final output from c:\\cygwin64\\tmp\\MRSortByInt.PS.20171002.020226.416000\\output\n" ], [ "%%writefile data/sortdata.txt\n1 1\n2 4\n3 8\n4 2\n4 7\n5 5\n6 10\n7 11", "Writing data/sortdata.txt\n" ], [ "# Running code inline example", "_____no_output_____" ], [ "# -*- coding: utf-8 -*-\n# Testing word frequency count\nimport os, sys\nsys.path.append(os.path.join(os.getcwd(),\"code\"))\nfrom MRSortByString import *\nfrom mrjob.job import MRJob\n'''\nThis is a simple wrapper that runs mrjob MapReduce jobs, the inputs are:\nMRJobClass - the class of the job to be run\nargsArr - an array of strings to be used when creating the MRJob.\n@author: Peter Harrington if you have any questions: [email protected]\n'''\ndef runJob(MRJobClass, argsArr, loc='local'):\n if loc == 'emr': \n argsArr.extend(['-r', 'emr'])\n print \"starting %s job on %s\" % (MRJobClass.__name__, loc)\n mrJob = MRJobClass(args=argsArr)\n runner = mrJob.make_runner()\n runner.run()\n print \"finished %s job\" % MRJobClass.__name__\n return mrJob, runner\n \ndef runParallelJob(MRJobClass, argsArr): #TO DO: add threading to allow jobs to run in \n pass #parallel \n #launch a new thread\n #call runJob(MRJobClass, argsArr) on the new thread\n\nif __name__ == '__main__':\n# pass in file from outside\n# MRWordFrequencyCount.run()\n#setup file here\n mr_job, runner = runJob(MRSortByString,[os.path.join(os.path.join(os.getcwd(),\"data\"),\"sortdata.txt\")],\"local\")\n print \"Sorting sortdata.txt\"\n for line in runner.stream_output(): \n key, value = mr_job.parse_output_line(line)\n print \"%s: %s \"%(key,value)", "no configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\nno configs found; falling back on auto-configuration\ncreating tmp directory c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\ncreating tmp directory c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\n\n\nPLEASE NOTE: 
Starting in mrjob v0.5.0, protocols will be strict by default. It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols\nPLEASE NOTE: Starting in mrjob v0.5.0, protocols will be strict by default. It's recommended you run your job with --strict-protocols or set up mrjob.conf as described at https://pythonhosted.org/mrjob/whats-new.html#ready-for-strict-protocols\n\n\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-mapper_part-00000\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-mapper_part-00000\nCounters from step 1:\nCounters from step 1:\n (no counters found)\n (no counters found)\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-mapper-sorted\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-mapper-sorted\n> sort 'c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-mapper_part-00000'\n> sort 'c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-mapper_part-00000'\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-reducer_part-00000\nwriting to c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-reducer_part-00000\nCounters from step 1:\nCounters from step 1:\n (no counters found)\n (no counters found)\nMoving c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-reducer_part-00000 -> c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\output\\part-00000\nMoving c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\step-0-reducer_part-00000 -> c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\output\\part-00000\nStreaming final output from c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\output\nStreaming final output from c:\\cygwin64\\tmp\\MRSortByString.PS.20171002.023747.748000\\output\n" ] ], [ [ "Note the second column is reported by their string values", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ecb0dc5b48e3fb635cea51e9bcaf448873d9219b
13,557
ipynb
Jupyter Notebook
GramSchmidtProcess.ipynb
hankeceli/mathematics-for-machine-learning-
1ec5a864d21d2983aece14e174f5ab58966d5d57
[ "MIT" ]
null
null
null
GramSchmidtProcess.ipynb
hankeceli/mathematics-for-machine-learning-
1ec5a864d21d2983aece14e174f5ab58966d5d57
[ "MIT" ]
null
null
null
GramSchmidtProcess.ipynb
hankeceli/mathematics-for-machine-learning-
1ec5a864d21d2983aece14e174f5ab58966d5d57
[ "MIT" ]
null
null
null
32.588942
285
0.508003
[ [ [ "# Gram-Schmidt process\n\n## Instructions\nIn this assignment you will write a function to perform the Gram-Schmidt procedure, which takes a list of vectors and forms an orthonormal basis from this set.\nAs a corollary, the procedure allows us to determine the dimension of the space spanned by the basis vectors, which is equal to or less than the space which the vectors sit.\n\nYou'll start by completing a function for 4 basis vectors, before generalising to when an arbitrary number of vectors are given.\n\nAgain, a framework for the function has already been written.\nLook through the code, and you'll be instructed where to make changes.\nWe'll do the first two rows, and you can use this as a guide to do the last two.\n\n### Matrices in Python\nRemember the structure for matrices in *numpy* is,\n```python\nA[0, 0] A[0, 1] A[0, 2] A[0, 3]\nA[1, 0] A[1, 1] A[1, 2] A[1, 3]\nA[2, 0] A[2, 1] A[2, 2] A[2, 3]\nA[3, 0] A[3, 1] A[3, 2] A[3, 3]\n```\nYou can access the value of each element individually using,\n```python\nA[n, m]\n```\nYou can also access a whole row at a time using,\n```python\nA[n]\n```\n\nBuilding on last assignment, in this exercise you will need to select whole columns at a time.\nThis can be done with,\n```python\nA[:, m]\n```\nwhich will select the m'th column (starting at zero).\n\nIn this exercise, you will need to take the dot product between vectors. This can be done using the @ operator.\nTo dot product vectors u and v, use the code,\n```python\nu @ v\n```\n\nAll the code you should complete will be at the same level of indentation as the instruction comment.\n\n### How to submit\nEdit the code in the cell below to complete the assignment.\nOnce you are finished and happy with it, press the *Submit Assignment* button at the top of this notebook.\n\nPlease don't change any of the function names, as these will be checked by the grading script.\n\nIf you have further questions about submissions or programming assignments, here is a [list](https://www.coursera.org/learn/linear-algebra-machine-learning/discussions/weeks/1/threads/jB4klkn5EeibtBIQyzFmQg) of Q&A. You can also raise an issue on the discussion forum. Good luck!", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION\nimport numpy as np\nimport numpy.linalg as la\n\nverySmallNumber = 1e-14 # That's 1×10⁻¹⁴ = 0.00000000000001\n\n# Our first function will perform the Gram-Schmidt procedure for 4 basis vectors.\n# We'll take this list of vectors as the columns of a matrix, A.\n# We'll then go through the vectors one at a time and set them to be orthogonal\n# to all the vectors that came before it. Before normalising.\n# Follow the instructions inside the function at each comment.\n# You will be told where to add code to complete the function.\ndef gsBasis4(A) :\n B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter it's values.\n # The zeroth column is easy, since it has no other vectors to make it normal to.\n # All that needs to be done is to normalise it. I.e. divide by its modulus, or norm.\n B[:, 0] = B[:, 0] / la.norm(B[:, 0])\n # For the first column, we need to subtract any overlap with our new zeroth vector.\n B[:, 1] = B[:, 1] - B[:, 1] @ B[:, 0] * B[:, 0]\n # If there's anything left after that subtraction, then B[:, 1] is linearly independant of B[:, 0]\n # If this is the case, we can normalise it. 
Otherwise we'll set that vector to zero.\n if la.norm(B[:, 1]) > verySmallNumber :\n B[:, 1] = B[:, 1] / la.norm(B[:, 1])\n else :\n B[:, 1] = np.zeros_like(B[:, 1])\n # Now we need to repeat the process for column 2.\n # Insert two lines of code, the first to subtract the overlap with the zeroth vector,\n # and the second to subtract the overlap with the first.\n B[:, 2] = B[:, 2] - B[:, 2] @ B[:, 0] * B[:, 0]\n B[:, 2] = B[:, 2] - B[:, 2] @ B[:, 1] * B[:, 1]\n # Again we'll need to normalise our new vector.\n # Copy and adapt the normalisation fragment from above to column 2.\n if la.norm(B[:, 2]) > verySmallNumber :\n B[:, 2] = B[:, 2] / la.norm(B[:, 2])\n else :\n B[:, 2] = np.zeros_like(B[:, 2])\n\n \n # Finally, column three:\n # Insert code to subtract the overlap with the first three vectors.\n B[:, 3] = B[:, 3] - B[:, 3] @ B[:, 0] * B[:, 0]\n B[:, 3] = B[:, 3] - B[:, 3] @ B[:, 1] * B[:, 1]\n B[:, 3] = B[:, 3] - B[:, 3] @ B[:, 2] * B[:, 2]\n \n # Now normalise if possible\n if la.norm(B[:, 3]) > verySmallNumber :\n B[:, 3] = B[:, 3] / la.norm(B[:, 3])\n else :\n B[:, 3] = np.zeros_like(B[:, 3])\n \n # Finally, we return the result:\n return B\n\n# The second part of this exercise will generalise the procedure.\n# Previously, we could only have four vectors, and there was a lot of repeating in the code.\n# We'll use a for-loop here to iterate the process for each vector.\ndef gsBasis(A) :\n B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter it's values.\n # Loop over all vectors, starting with zero, label them with i\n for i in range(B.shape[1]) :\n # Inside that loop, loop over all previous vectors, j, to subtract.\n for j in range(i) :\n # Complete the code to subtract the overlap with previous vectors.\n # you'll need the current vector B[:, i] and a previous vector B[:, j]\n B[:, i] = B[:, i] - B[:, i] @ B[:, j] * B[:, j]\n # Next insert code to do the normalisation test for B[:, i]\n if la.norm(B[:, i]) > verySmallNumber :\n B[:, i] = B[:, i] / la.norm(B[:, i])\n else :\n B[:, i] = np.zeros_like(B[:, i])\n \n \n # Finally, we return the result:\n return B\n\n# This function uses the Gram-schmidt process to calculate the dimension\n# spanned by a list of vectors.\n# Since each vector is normalised to one, or is zero,\n# the sum of all the norms will be the dimension.\ndef dimensions(A) :\n return np.sum(la.norm(gsBasis(A), axis=0))\n", "_____no_output_____" ] ], [ [ "## Test your code before submission\nTo test the code you've written above, run the cell (select the cell above, then press the play button [ ▶| ] or press shift-enter).\nYou can then use the code below to test out your function.\nYou don't need to submit this cell; you can edit and run it as much as you like.\n\nTry out your code on tricky test cases!", "_____no_output_____" ] ], [ [ "V = np.array([[1,0,2,6],\n [0,1,8,2],\n [2,8,3,1],\n [1,-6,2,3]], dtype=np.float_)\ngsBasis4(V)", "_____no_output_____" ], [ "# Once you've done Gram-Schmidt once,\n# doing it again should give you the same result. 
Test this:\nU = gsBasis4(V)\ngsBasis4(U)", "_____no_output_____" ], [ "# Try the general function too.\ngsBasis(V)", "_____no_output_____" ], [ "# See what happens for non-square matrices\nA = np.array([[3,2,3],\n [2,5,-1],\n [2,4,8],\n [12,2,1]], dtype=np.float_)\ngsBasis(A)", "_____no_output_____" ], [ "dimensions(A)", "_____no_output_____" ], [ "B = np.array([[6,2,1,7,5],\n [2,8,5,-4,1],\n [1,-6,3,2,8]], dtype=np.float_)\ngsBasis(B)", "_____no_output_____" ], [ "dimensions(B)", "_____no_output_____" ], [ "# Now let's see what happens when we have one vector that is a linear combination of the others.\nC = np.array([[1,0,2],\n [0,1,-3],\n [1,0,2]], dtype=np.float_)\ngsBasis(C)", "_____no_output_____" ], [ "dimensions(C)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb0ec1122d7474e3e1a9feeb7df7f0d1a7b39f3
6,183
ipynb
Jupyter Notebook
Kaggle Courses/2. Pandas/3. Summary Functions and Maps.ipynb
iVibudh/How-to-do-in-Python
9baf7d342cf64c5a6c8204c762450eb2e5a7007d
[ "MIT" ]
null
null
null
Kaggle Courses/2. Pandas/3. Summary Functions and Maps.ipynb
iVibudh/How-to-do-in-Python
9baf7d342cf64c5a6c8204c762450eb2e5a7007d
[ "MIT" ]
null
null
null
Kaggle Courses/2. Pandas/3. Summary Functions and Maps.ipynb
iVibudh/How-to-do-in-Python
9baf7d342cf64c5a6c8204c762450eb2e5a7007d
[ "MIT" ]
null
null
null
36.803571
133
0.631409
[ [ [ "# In the last section we learnt how to select relevant data out of a Pandas DataFrame or a Series\n# Now, we are going to learn the summary functions and maps ", "_____no_output_____" ], [ "# Summary Functions \n\n# 1. describe\n# The first function is the \"describe\" function - it generates a high-level summary of the attributes of the given column\n# For numerical variables - it summarizes the central tendency, dispersion and shape of a series, excluding NaN values.\nreviews.points.describe()\n\n# Now if we use the describe function for a series of strings, obviously central tendency and dispersion don't make sense \n# So, we can see here that pandas summarises it in a different way \nreviews.taster_name.describe()\n\n# 2. mean\n# The next function is mean - as the name suggests, it gives the mean value after excluding the NaNs\nreviews.points.mean()\n\n# 3. unique\n# The third function is unique - it returns the unique values in order of appearance. Note: this does NOT sort.\nreviews.taster_name.unique()\n\n# 4. value_counts \n# The next function is value_counts. This function gives the unique values with their frequencies. \nreviews.taster_name.value_counts()\n\n# So, this concludes some of the important summary functions covered in the course. \n# There are a lot of other functions that Pandas supports. I highly recommend that you have a look at some of the other functions \n# available in the official Pandas documentation. I will attach a link to the website at the bottom of the video. \n# https://pandas.pydata.org/docs/reference/general_functions.html", "_____no_output_____" ], [ "# Maps - map\n# The next topic is very interesting. In Data Science we often need to create new representations of the data \n# We take the data and then transform this data so that it makes sense for our ML model \n# map makes these transformations very simple. \n\n# Let's study the given example\n# Here we are trying to find the deviations from the mean values. \n# So, first we calculate the mean value of the series using the mean summary function we just studied\nreview_points_mean = reviews.points.mean()\n# Now we need to calculate the difference of each value in the series from this mean. \n# So in map, first we define the series on which we are going to do the transformation. Here it is reviews.points\n# then we begin writing the map function. Inside the function, we need to give a variable name and we do it using lambda\n# So now the function knows that the series reviews.points is represented by the variable p\n# ok. \n# then we simply define the transformation that we want to do. \n# Here we have to find the difference from the mean so we simply write p - mean \nreviews.points.map(lambda p: p - review_points_mean)\n# So, in summary, map returns a new Series where all the values have been transformed\n# ok. \n\n# A key thing to note here is that this statement returns a series. So, we can assign a new column in the existing \n# dataframe with these transformed values", "_____no_output_____" ], [ "# - apply\n# Now apply is the equivalent method if we want to transform a whole DataFrame by calling a custom method on each row.\n\n# So first we define the function where we are taking the complete row of a dataframe as an input to the function.\n# Then we do the transformation that we want to do in the dataframe\n# then we return the complete row \ndef remean_points(row):\n    row.points = row.points - review_points_mean\n    return row\n\n# Then we apply this function on the reviews dataframe. 
Here we use axis='columns' as the parameter, as we want to \n# apply the function to each row.\n# Similarly we could have used axis = ‘index’ to apply the function to each column.\n\n# So, here we can see that the apply function returns a DataFrame, whereas the map function returned a Series. \nreviews.apply(remean_points, axis='columns')", "_____no_output_____" ] ], [ [ "### Exercise", "_____no_output_____" ] ], [ [ "import pandas as pd\npd.set_option(\"display.max_rows\", 5)\nreviews = pd.read_csv(\"../input/wine-reviews/winemag-data-130k-v2.csv\", index_col=0)\n\nfrom learntools.core import binder; binder.bind(globals())\nfrom learntools.pandas.summary_functions_and_maps import *\nprint(\"Setup complete.\")\n\nreviews.head()", "_____no_output_____" ], [ "\n\nbargain_idx = (reviews.points / reviews.price).idxmax()\nbargain_wine = reviews.loc[bargain_idx, 'title']", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
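The map/apply distinction described in the cells above can be reproduced on a tiny synthetic frame, so the Kaggle wine CSV is not required; the points and price values below are made up purely for illustration:

import pandas as pd

reviews = pd.DataFrame({"points": [80, 85, 90, 95], "price": [10.0, 20.0, 15.0, 40.0]})
review_points_mean = reviews.points.mean()                 # 87.5

# Series.map transforms element-wise and returns a new Series.
centered = reviews.points.map(lambda p: p - review_points_mean)

# DataFrame.apply(axis='columns') calls the function once per row and returns a DataFrame.
def remean_points(row):
    row = row.copy()                                       # keep the original frame untouched
    row.points = row.points - review_points_mean
    return row

centered_df = reviews.apply(remean_points, axis="columns")

print(centered.tolist())                                   # [-7.5, -2.5, 2.5, 7.5]
print(centered_df.points.tolist())                         # same numbers, but from a DataFrame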
ecb0f20f2054518c83ac083b9b601355ffe0a4d8
1,427
ipynb
Jupyter Notebook
notebooks/book1/07/cholesky_demo.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
notebooks/book1/07/cholesky_demo.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
1
2022-03-27T04:59:50.000Z
2022-03-27T04:59:50.000Z
notebooks/book1/07/cholesky_demo.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
2
2022-03-26T11:52:36.000Z
2022-03-27T05:17:48.000Z
23.393443
59
0.461107
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecb11261ee40ef5d1c020624f4576aeb5e044f24
10,697
ipynb
Jupyter Notebook
python_datetime/Date_Time.ipynb
zhaokany/python_notes
ded4909548c21a4489a2e8a1c0a4cf38685bee6a
[ "MIT" ]
null
null
null
python_datetime/Date_Time.ipynb
zhaokany/python_notes
ded4909548c21a4489a2e8a1c0a4cf38685bee6a
[ "MIT" ]
null
null
null
python_datetime/Date_Time.ipynb
zhaokany/python_notes
ded4909548c21a4489a2e8a1c0a4cf38685bee6a
[ "MIT" ]
null
null
null
16.898894
115
0.466205
[ [ [ "import datetime", "_____no_output_____" ], [ "dt = datetime.datetime.now()\nprint(dt)", "2019-02-02 12:28:30.236000\n" ], [ "dt = dt.replace(hour=2)\nprint(dt)", "2019-02-02 02:28:30.236000\n" ] ], [ [ "# TimeDelta", "_____no_output_____" ] ], [ [ "td = dt - datetime.datetime.now()\nprint(type(td))\nprint(td)", "<class 'datetime.timedelta'>\n-1 day, 13:58:58.092000\n" ] ], [ [ "> datetime.datetime.today() vs. datetime.datetime.now()\n\n> now() accepts a timezone while today() doesn't", "_____no_output_____" ] ], [ [ "td.days", "_____no_output_____" ], [ "td.microseconds", "_____no_output_____" ], [ "dt + datetime.timedelta(days=3)", "_____no_output_____" ], [ "datetime.timedelta.total_seconds(td)", "_____no_output_____" ] ], [ [ "# Date and Time only", "_____no_output_____" ] ], [ [ "date = dt.date()\nprint(date)", "2019-02-02\n" ], [ "time = dt.time()\nprint(time)", "02:28:30.236000\n" ] ], [ [ "> datetime.datetime.today() vs. datetime.datetime.now()\n\n> now() accepts a timezone while today() doesn't", "_____no_output_____" ] ], [ [ "datetime.datetime.combine(date, time)", "_____no_output_____" ] ], [ [ "# Get Weekday", "_____no_output_____" ] ], [ [ "dt.weekday()", "_____no_output_____" ] ], [ [ "# Timestamp", "_____no_output_____" ] ], [ [ "ts = dt.timestamp()\nprint(ts)", "1549096110.236\n" ], [ "datetime.timedelta.total_seconds(td)", "_____no_output_____" ] ], [ [ "# Formatting and Parsing", "_____no_output_____" ] ], [ [ "dt.strftime('%B %d')", "_____no_output_____" ], [ "datetime.datetime.strptime('1984-04-13', '%Y-%m-%d')", "_____no_output_____" ], [ "datetime.datetime.strptime('1984-04-1 12:03', '%Y-%m-%d %H:%M')", "_____no_output_____" ], [ "datetime.datetime.fromtimestamp(ts)", "_____no_output_____" ] ], [ [ "# Timezone", "_____no_output_____" ] ], [ [ "td = datetime.timedelta(hours=5, minutes=30)\ntz = datetime.timezone(td)\nprint(tz)", "UTC+05:30\n" ], [ "dt.astimezone(tz)", "_____no_output_____" ] ], [ [ "# pytz package", "_____no_output_____" ] ], [ [ "import pytz", "_____no_output_____" ], [ "pytz.utc", "_____no_output_____" ], [ "eastern = pytz.timezone('US/Eastern')\neastern", "_____no_output_____" ], [ "naive = datetime.datetime.today()\nnaive", "_____no_output_____" ], [ "tz_aware = eastern.localize(naive)\ntz_aware", "_____no_output_____" ], [ "tz_aware.astimezone(tz)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
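The datetime cells above cover parsing, formatting and timezone handling; a compact, self-contained recap follows (the example date is arbitrary, and pytz is assumed to be installed as in the notebook):

import datetime
import pytz

# strptime parses a string into a naive datetime; strftime formats it back out.
dt = datetime.datetime.strptime("1984-04-13 12:03", "%Y-%m-%d %H:%M")
print(dt.strftime("%B %d, %Y %H:%M"))            # April 13, 1984 12:03

# localize attaches a zone to a naive datetime without shifting the clock;
# astimezone then converts between zones, adjusting the wall-clock time.
eastern = pytz.timezone("US/Eastern")
aware = eastern.localize(dt)
print(aware.astimezone(pytz.utc).isoformat())    # 1984-04-13T17:03:00+00:00 (EST is UTC-5)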
ecb12d58373ecb7293ca56e1c5ec39424ebd510e
86,390
ipynb
Jupyter Notebook
BakingPieCharts.ipynb
mossmatters/PhyPartsPieCharts
a523f434471b43fb699e06311dccfd968ded130b
[ "MIT" ]
10
2017-04-18T14:10:45.000Z
2022-03-23T16:51:50.000Z
BakingPieCharts.ipynb
mossmatters/PhyPartsPieCharts
a523f434471b43fb699e06311dccfd968ded130b
[ "MIT" ]
2
2017-06-23T22:11:49.000Z
2021-11-05T02:00:03.000Z
BakingPieCharts.ipynb
mossmatters/PhyPartsPieCharts
a523f434471b43fb699e06311dccfd968ded130b
[ "MIT" ]
3
2017-12-13T19:34:49.000Z
2020-12-23T15:23:28.000Z
170.059055
25,808
0.878956
[ [ [ "# Baking Pie Charts\n\n## *An introduction to plotting pie charts on trees using ETE3*\n\nIt's fairly common to use pie charts on phylogenetic trees to represent all manner of data-- for instance, the probability of different ancestral character states. In phylogenomics, one application of pie charts is to represent the amount of conflict among gene trees, for example from Smith et al. 2015:\n\n![smith_pies](https://static-content.springer.com/image/art%3A10.1186%2Fs12862-015-0423-0/MediaObjects/12862_2015_423_Fig2_HTML.gif)\n\nIn the figure above, the pie charts represent the proportion of gene trees that:\n\n* Blue: Support the shown topology\n* Green: Conflict with the shown topology (most common conflicting bipartion)\n* Red: Conflict with the shown topology (all other supported conflicting bipartitions)\n* Gray: Have no support for conflicting bipartion\n\nWhen evaluating gene tree support for a species topology, it is important to consider the ratio of blue to green on the pie chart. A high green proportion means there is a dominant alternative topology which should be considered. A very small sliver of blue is also indicative of low overall support for the shown topology. In many cases, bootstrap values may be near 100% for this node, despite the lack of support in any one gene tree.\n\nHow can we replicate a figure like this using the Python package ETE3? We will need to solve the following issues:\n\n1. How to plot pie charts on trees using the ETE3 \"Faces\" API.\n1. How to associate specific pie chart data with specific nodes.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom ete3 import Tree, TreeStyle, TextFace,NodeStyle,faces, COLOR_SCHEMES\nimport random\n", "_____no_output_____" ] ], [ [ "The first question is how to plot images on ETE3 trees. 
This example is from the ETE3 toolkit tutorial shows circles:", "_____no_output_____" ] ], [ [ "t = Tree( \"((a,b),c);\" )\n\n# Basic tree style\nts = TreeStyle()\nts.show_leaf_name = True\n\n# Creates an independent node style for each node, which is\n# initialized with a red foreground color.\nfor n in t.traverse():\n nstyle = NodeStyle()\n nstyle[\"fgcolor\"] = \"red\"\n nstyle[\"size\"] = 15\n n.set_style(nstyle)\n\n# Let's now modify the aspect of the root node\n#t.img_style[\"size\"] = 30\n#t.img_style[\"fgcolor\"] = \"blue\"\n\nt.render(\"%%inline\",tree_style=ts)", "_____no_output_____" ] ], [ [ "There is also a way to specify that each node have a \"face,\" which can be an image or some other object.\n\nThis can also be a pie chart:\n", "_____no_output_____" ] ], [ [ "#Modified from: https://github.com/etetoolkit/ete/blob/master/ete3/test/test_treeview/barchart_and_piechart_faces.py\n\ndef get_example_tree():\n t = Tree()\n ts = TreeStyle()\n ts.layout_fn = layout\n ts.mode=\"r\"\n ts.show_leaf_name = True\n t.populate(10)\n return t, ts\n\nschema_names = COLOR_SCHEMES.keys()\n\ndef layout(node):\n if node.is_leaf():\n pass\n else:\n F= faces.PieChartFace([10,20,5,5,60],\n colors=COLOR_SCHEMES[\"set1\"],\n width=50, height=50)\n F.border.width = None\n F.opacity = 0.5\n faces.add_face_to_node(F,node, 0, position=\"branch-right\")\n\nt,ts = get_example_tree()\n\nfor n in t.traverse():\n nstyle=NodeStyle()\n nstyle[\"size\"] = 0\n n.set_style(nstyle)\n\nt.render(\"%%inline\",tree_style=ts)\n", "_____no_output_____" ] ], [ [ "The next step is to figure out how to make a *specific* pie chart appear on a *specific* node.\n\nFirst, we have to name all of the nodes:", "_____no_output_____" ] ], [ [ "t = Tree( \"((H,I), A, (B,(C,(J, (F, D)))));\" )\n\n#Root the tree \nt.set_outgroup(\"A\")\n\n#Name the nodes\nedge_num = 0\nfor node in t.traverse():\n if not node.is_leaf():\n node.name = \"Node-{}\".format(edge_num)\n edge_num += 1\n\n#Define a \"Face Naming\" function \ndef node_name(node):\n if not node.is_leaf():\n F = TextFace(node.name)\n faces.add_face_to_node(F,node,0,\"branch-top\")\n \n#Make the tips sorta line up...\nt.convert_to_ultrametric()\n\n#Use TreeStyle to associate the TextFace with our tree\nts = TreeStyle()\nts.layout_fn = node_name\nts.mode=\"r\"\nts.show_leaf_name = True \n\n\nt.render(\"%%inline\",tree_style=ts)\n\n\n", "_____no_output_____" ] ], [ [ "Now associate a set of pie chart percentages with each named node:", "_____no_output_____" ] ], [ [ "#Some pie chart data (must add up to 100)\nnode_pies = {\"Node-0\":[10,20,70],\n \"Node-2\":[70,20,10],\n \"Node-3\":[4,16,80],\n \"Node-4\":[22,25,53],\n \"Node-8\":[90,5,5],\n \"Node-10\":[10,70,20],\n \"Node-12\":[10,20,70],\n \n }\n\n#Associate the PieChartFace only with internal nodes\ndef pie_nodes(node):\n if node.is_leaf():\n pass\n else: \n F= faces.PieChartFace(node_pies[node.name],\n colors=COLOR_SCHEMES[\"set1\"],\n width=50, height=50)\n F.border.width = None\n F.opacity = 0.5\n faces.add_face_to_node(F,node, 0, position=\"branch-right\")\n\nts.layout_fn = pie_nodes\nt.render(\"%%inline\",tree_style=ts)", "_____no_output_____" ] ], [ [ "Phyparts returns a \"node key\" file that associates the names of nodes with the subtree that descends from that node. 
For example:\n```\nNode0 ((H,I), A, (B,(C,(J, (F, D)))))\nNode1 (J, (F, D))\nNode2 (C,(J,(F,D)))\n```\n\nThe conflict and concordance is also associated with these same node names in the \"hist\" file:\n\n```\nNode0,66.83333333333331,61\nNode1,44.83333333333334,1.0,1.0,47\nNode2,24.333333333333332,1.0,1.0,32\n```\n\nIn order to plot the concordance and conflict information, we will need to name the nodes on our tree very specifically. To do this, we have to figure out how to identify nodes by the subtending subtree. \n\nETE3 has the method `get_topology_id()` which returns a unique MD5-esque value for a particular topology. If the topology of the PhyParts node tree and the ETE subtree is the same, these values should match.", "_____no_output_____" ] ], [ [ "subtree = Tree(\"(J, (F, D));\")\nsubtree.render(\"%%inline\")\n\nfor node in t.traverse():\n if node.get_topology_id() == subtree.get_topology_id():\n print node", "\n /-J\n--|\n | /-F\n \\-|\n \\-D\n" ] ], [ [ "Putting it all together, using some examples modified from the PhyParts website.", "_____no_output_____" ] ], [ [ "sptree = Tree(\"((((A,B),C),D),E);\")\nphyparts_node_key = [\"Node0 (((A,B),C),D)\",\"Node1 ((A,B),C)\",\"Node2 (A,B)\"]\n\n#Node,concord,conflict1,conflict2,totConcord&Conflict\n\nphyparts_hist = [\"Node0,2.0,2.0,1.0,5.0\", \"Node1,6.0,1.0,1.0,8.0\", \"Node2,4.0,2.0,1.0,7.0\"]\n\nphyparts_pies = {}\n\nfor n in phyparts_hist:\n n = n.split(\",\")\n tot_genes = float(n.pop(-1))\n node_name = n.pop(0)\n phyparts_pies[node_name] = [float(x)/tot_genes*100 for x in n]\n\n \nprint phyparts_pies \nsubtrees_dict = {n.split()[0]:Tree(n.split()[1]+\";\") for n in phyparts_node_key}\n\nfor node in sptree.traverse():\n for subtree in subtrees_dict:\n if node.get_topology_id() == subtrees_dict[subtree].get_topology_id():\n node.name = subtree\n \ndef phyparts_pie_layout(mynode):\n if mynode.name in phyparts_pies:\n F= faces.PieChartFace(phyparts_pies[mynode.name],\n colors=COLOR_SCHEMES[\"set1\"],\n width=50, height=50)\n F.border.width = None\n F.opacity = 0.5\n faces.add_face_to_node(F,mynode, 0, position=\"branch-right\")\n\n\nts = TreeStyle()\n \nts.layout_fn = phyparts_pie_layout\nsptree.convert_to_ultrametric()\nsptree.render(\"%%inline\",tree_style=ts) \n\n \n\n\n", "{'Node1': [75.0, 12.5, 12.5], 'Node0': [40.0, 40.0, 20.0], 'Node2': [57.14285714285714, 28.57142857142857, 14.285714285714285]}\n" ] ], [ [ "For an example with real data, see the PhyParts_PieCharts notebook.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nfrom ete3 import Tree, TreeStyle, NodeStyle, faces, COLOR_SCHEMES\n\nsptree = Tree(\"((((A,B),C),D),E);\")\nphyparts_node_key = [\"Node0 (((A,B),C),D)\",\"Node1 ((A,B),C)\",\"Node2 (A,B)\"]\nphyparts_pies = {'Node1': [75.0, 12.5, 12.5], 'Node0': [40.0, 40.0, 20.0], 'Node2': [57.142, 28.57, 14.285]}\nsubtrees_dict = {n.split()[0]:Tree(n.split()[1]+\";\") for n in phyparts_node_key}\n\nnstyle = NodeStyle()\nnstyle[\"size\"] = 0\n\nfor node in sptree.traverse():\n node.set_style(nstyle)\n for subtree in subtrees_dict:\n if node.get_topology_id() == subtrees_dict[subtree].get_topology_id():\n node.name = subtree\n \ndef phyparts_pie_layout(mynode):\n if mynode.name in phyparts_pies:\n F= faces.PieChartFace(phyparts_pies[mynode.name],\n colors=COLOR_SCHEMES[\"set1\"],\n width=50, height=50)\n F.border.width = None\n F.opacity = 0.5\n faces.add_face_to_node(F,mynode, 0, position=\"branch-right\")\n else:\n T = faces.TextFace(mynode.name)\n faces.add_face_to_node(T,mynode,0,position=\"aligned\")\n\n\nts = 
TreeStyle()\nts.show_leaf_name = False \nts.layout_fn = phyparts_pie_layout\nts.draw_guiding_lines = True\nts.guiding_lines_type = 0\nts.guiding_lines_color = \"black\"\nts.show_scale = False\nts.scale = 100\nsptree.render(\"%%inline\",tree_style=ts) ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
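The pie fractions plotted above come from normalising each node's phyparts hist record by its last column (the total number of informative genes). That step is plain Python and can be sketched without ete3, using the same toy values as the notebook:

# Each record: node name, concordant, top conflict, other conflict, total informative genes.
phyparts_hist = ["Node0,2.0,2.0,1.0,5.0", "Node1,6.0,1.0,1.0,8.0", "Node2,4.0,2.0,1.0,7.0"]

phyparts_pies = {}
for record in phyparts_hist:
    fields = record.split(",")
    node_name, counts, total = fields[0], fields[1:-1], float(fields[-1])
    # PieChartFace expects percentages that sum to 100.
    phyparts_pies[node_name] = [float(c) / total * 100 for c in counts]

print(phyparts_pies["Node1"])   # [75.0, 12.5, 12.5], matching the notebook output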
ecb144f8fc6c59a20f3443ab6cf4acc4e6445a14
799,230
ipynb
Jupyter Notebook
Model backlog/Train/57-tweet-train-3fold-roberta-base-jaccard-task.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
0a775abe9a92c4bc2db957519c523be7655df8d8
[ "MIT" ]
11
2020-06-17T07:30:20.000Z
2022-03-25T16:56:01.000Z
Model backlog/Train/57-tweet-train-3fold-roberta-base-jaccard-task.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
0a775abe9a92c4bc2db957519c523be7655df8d8
[ "MIT" ]
null
null
null
Model backlog/Train/57-tweet-train-3fold-roberta-base-jaccard-task.ipynb
dimitreOliveira/Tweet-Sentiment-Extraction
0a775abe9a92c4bc2db957519c523be7655df8d8
[ "MIT" ]
null
null
null
610.099237
246,772
0.925739
[ [ [ "## Dependencies", "_____no_output_____" ] ], [ [ "import json, warnings, shutil\nfrom tweet_utility_scripts import *\nfrom tweet_utility_preprocess_roberta_scripts import *\nfrom transformers import TFRobertaModel, RobertaConfig\nfrom tokenizers import ByteLevelBPETokenizer\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import optimizers, metrics, losses, layers\nfrom tensorflow.keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint\n\nSEED = 0\nseed_everything(SEED)\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "# Load data", "_____no_output_____" ] ], [ [ "database_base_path = '/kaggle/input/tweet-dataset-split-roberta-base-96/'\nk_fold = pd.read_csv(database_base_path + '5-fold.csv')\n\nk_fold['jaccard'] = k_fold.apply(lambda x: jaccard(x['text'], x['selected_text']), axis=1)\n\ndisplay(k_fold.head())\n\n# Unzip files\n!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_1.tar.gz\n!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_2.tar.gz\n!tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_3.tar.gz\n# !tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_4.tar.gz\n# !tar -xvf /kaggle/input/tweet-dataset-split-roberta-base-96/fold_5.tar.gz", "_____no_output_____" ] ], [ [ "# Model parameters", "_____no_output_____" ] ], [ [ "vocab_path = database_base_path + 'vocab.json'\nmerges_path = database_base_path + 'merges.txt'\nbase_path = '/kaggle/input/qa-transformers/roberta/'\n\nconfig = {\n \"MAX_LEN\": 96,\n \"BATCH_SIZE\": 32,\n \"EPOCHS\": 5,\n \"LEARNING_RATE\": 3e-5,\n \"ES_PATIENCE\": 1,\n \"question_size\": 4,\n \"N_FOLDS\": 3,\n \"base_model_path\": base_path + 'roberta-base-tf_model.h5',\n \"config_path\": base_path + 'roberta-base-config.json'\n}\n\nwith open('config.json', 'w') as json_file:\n json.dump(json.loads(json.dumps(config)), json_file)", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)\n\ndef model_fn(MAX_LEN):\n input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')\n attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')\n \n base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name=\"base_model\")\n sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})\n last_state = sequence_output[0]\n \n x_start = layers.Dropout(0.1)(last_state) \n x_start = layers.Conv1D(1, 1)(x_start)\n x_start = layers.Flatten()(x_start)\n y_start = layers.Activation('softmax', name='y_start')(x_start)\n\n x_end = layers.Dropout(0.1)(last_state) \n x_end = layers.Conv1D(1, 1)(x_end)\n x_end = layers.Flatten()(x_end)\n y_end = layers.Activation('softmax', name='y_end')(x_end)\n \n x_jaccard = layers.GlobalAveragePooling1D()(last_state)\n y_jaccard = layers.Dense(1, activation='linear', name='y_jac')(x_jaccard)\n \n model = Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end, y_jaccard])\n model.compile(optimizers.Adam(lr=config['LEARNING_RATE']), \n loss=[losses.CategoricalCrossentropy(), \n losses.CategoricalCrossentropy(), \n losses.MeanSquaredError()], \n metrics=[metrics.CategoricalAccuracy()])\n \n return model", "_____no_output_____" ] ], [ [ "# Tokenizer", "_____no_output_____" ] ], [ [ "tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path, lowercase=True, 
add_prefix_space=True)\ntokenizer.save('./')", "_____no_output_____" ] ], [ [ "# Train", "_____no_output_____" ] ], [ [ "history_list = []\nAUTO = tf.data.experimental.AUTOTUNE\n\nfor n_fold in range(config['N_FOLDS']):\n n_fold +=1\n print('\\nFOLD: %d' % (n_fold))\n # Load data\n base_data_path = 'fold_%d/' % (n_fold)\n x_train = np.load(base_data_path + 'x_train.npy')\n y_train = np.load(base_data_path + 'y_train.npy')\n x_valid = np.load(base_data_path + 'x_valid.npy')\n y_valid = np.load(base_data_path + 'y_valid.npy')\n \n y_train_jac = k_fold[k_fold['fold_%d' % (n_fold)] == 'train']['jaccard'].values.reshape(1, y_train.shape[1])\n y_valid_jac = k_fold[k_fold['fold_%d' % (n_fold)] == 'validation']['jaccard'].values.reshape(1, y_valid.shape[1])\n \n ### Delete data dir\n shutil.rmtree(base_data_path)\n\n # Train model\n model_path = 'model_fold_%d.h5' % (n_fold)\n model = model_fn(config['MAX_LEN'])\n es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], \n restore_best_weights=True, verbose=1)\n checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', \n save_best_only=True, save_weights_only=True)\n\n history = model.fit(list(x_train), list(y_train) + list(y_train_jac),\n validation_data=(list(x_valid), list(y_valid) + list(y_valid_jac)),\n batch_size=config['BATCH_SIZE'], \n callbacks=[checkpoint, es],\n epochs=config['EPOCHS'], \n verbose=2).history\n history_list.append(history)\n \n # Make predictions\n train_preds = model.predict(list(x_train))\n valid_preds = model.predict(list(x_valid))\n \n k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'train', 'start_fold_%d' % (n_fold)] = train_preds[0].argmax(axis=-1)\n k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'train', 'end_fold_%d' % (n_fold)] = train_preds[1].argmax(axis=-1)\n k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'validation', 'start_fold_%d' % (n_fold)] = valid_preds[0].argmax(axis=-1)\n k_fold.loc[k_fold['fold_%d' % (n_fold)] == 'validation', 'end_fold_%d' % (n_fold)] = valid_preds[1].argmax(axis=-1)\n \n k_fold['end_fold_%d' % (n_fold)] = k_fold['end_fold_%d' % (n_fold)].astype(int)\n k_fold['start_fold_%d' % (n_fold)] = k_fold['start_fold_%d' % (n_fold)].astype(int)\n k_fold['end_fold_%d' % (n_fold)].clip(0, k_fold['text_len'], inplace=True)\n k_fold['start_fold_%d' % (n_fold)].clip(0, k_fold['end_fold_%d' % (n_fold)], inplace=True)\n k_fold['prediction_fold_%d' % (n_fold)] = k_fold.apply(lambda x: decode(x['start_fold_%d' % (n_fold)], x['end_fold_%d' % (n_fold)], x['text'], config['question_size'], tokenizer), axis=1)\n k_fold['prediction_fold_%d' % (n_fold)].fillna(k_fold[\"text\"], inplace=True)\n k_fold['jaccard_fold_%d' % (n_fold)] = k_fold.apply(lambda x: jaccard(x['selected_text'], x['prediction_fold_%d' % (n_fold)]), axis=1)", "\nFOLD: 1\nTrain on 21984 samples, validate on 5496 samples\nEpoch 1/5\n21984/21984 - 293s - loss: 2.3062 - y_start_loss: 1.0831 - y_end_loss: 1.1406 - y_jac_loss: 0.0825 - y_start_categorical_accuracy: 0.6482 - y_end_categorical_accuracy: 0.6374 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.7325 - val_y_start_loss: 0.8578 - val_y_end_loss: 0.8048 - val_y_jac_loss: 0.0693 - val_y_start_categorical_accuracy: 0.7043 - val_y_end_categorical_accuracy: 0.7216 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 2/5\n21984/21984 - 274s - loss: 1.7346 - y_start_loss: 0.8454 - y_end_loss: 0.8263 - y_jac_loss: 0.0629 - y_start_categorical_accuracy: 0.6999 - y_end_categorical_accuracy: 0.7108 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6612 - 
val_y_start_loss: 0.8072 - val_y_end_loss: 0.7758 - val_y_jac_loss: 0.0775 - val_y_start_categorical_accuracy: 0.7123 - val_y_end_categorical_accuracy: 0.7242 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 3/5\n21984/21984 - 275s - loss: 1.5353 - y_start_loss: 0.7649 - y_end_loss: 0.7100 - y_jac_loss: 0.0604 - y_start_categorical_accuracy: 0.7195 - y_end_categorical_accuracy: 0.7419 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6250 - val_y_start_loss: 0.8042 - val_y_end_loss: 0.7509 - val_y_jac_loss: 0.0693 - val_y_start_categorical_accuracy: 0.7052 - val_y_end_categorical_accuracy: 0.7229 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 4/5\nRestoring model weights from the end of the best epoch.\n21984/21984 - 273s - loss: 1.4092 - y_start_loss: 0.6927 - y_end_loss: 0.6597 - y_jac_loss: 0.0568 - y_start_categorical_accuracy: 0.7416 - y_end_categorical_accuracy: 0.7546 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6854 - val_y_start_loss: 0.8620 - val_y_end_loss: 0.7591 - val_y_jac_loss: 0.0638 - val_y_start_categorical_accuracy: 0.7051 - val_y_end_categorical_accuracy: 0.7240 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 00004: early stopping\n\nFOLD: 2\nTrain on 21984 samples, validate on 5496 samples\nEpoch 1/5\n21984/21984 - 290s - loss: 2.1891 - y_start_loss: 1.0454 - y_end_loss: 1.0646 - y_jac_loss: 0.0791 - y_start_categorical_accuracy: 0.6571 - y_end_categorical_accuracy: 0.6563 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.7828 - val_y_start_loss: 0.8799 - val_y_end_loss: 0.8395 - val_y_jac_loss: 0.0628 - val_y_start_categorical_accuracy: 0.6976 - val_y_end_categorical_accuracy: 0.7191 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 2/5\n21984/21984 - 275s - loss: 1.6607 - y_start_loss: 0.8272 - y_end_loss: 0.7686 - y_jac_loss: 0.0648 - y_start_categorical_accuracy: 0.7031 - y_end_categorical_accuracy: 0.7265 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.7133 - val_y_start_loss: 0.8423 - val_y_end_loss: 0.8117 - val_y_jac_loss: 0.0588 - val_y_start_categorical_accuracy: 0.6963 - val_y_end_categorical_accuracy: 0.7182 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 3/5\n21984/21984 - 274s - loss: 1.4728 - y_start_loss: 0.7389 - y_end_loss: 0.6743 - y_jac_loss: 0.0596 - y_start_categorical_accuracy: 0.7250 - y_end_categorical_accuracy: 0.7497 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6975 - val_y_start_loss: 0.8411 - val_y_end_loss: 0.7978 - val_y_jac_loss: 0.0581 - val_y_start_categorical_accuracy: 0.7007 - val_y_end_categorical_accuracy: 0.7182 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 4/5\nRestoring model weights from the end of the best epoch.\n21984/21984 - 273s - loss: 1.3173 - y_start_loss: 0.6627 - y_end_loss: 0.5971 - y_jac_loss: 0.0575 - y_start_categorical_accuracy: 0.7468 - y_end_categorical_accuracy: 0.7752 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.7775 - val_y_start_loss: 0.8802 - val_y_end_loss: 0.8402 - val_y_jac_loss: 0.0567 - val_y_start_categorical_accuracy: 0.6949 - val_y_end_categorical_accuracy: 0.7085 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 00004: early stopping\n\nFOLD: 3\nTrain on 21984 samples, validate on 5496 samples\nEpoch 1/5\n21984/21984 - 291s - loss: 2.2849 - y_start_loss: 1.0802 - y_end_loss: 1.1233 - y_jac_loss: 0.0814 - y_start_categorical_accuracy: 0.6493 - y_end_categorical_accuracy: 0.6431 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.7854 - val_y_start_loss: 0.8749 - val_y_end_loss: 0.8430 - val_y_jac_loss: 0.0669 - val_y_start_categorical_accuracy: 0.7031 - 
val_y_end_categorical_accuracy: 0.7196 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 2/5\n21984/21984 - 275s - loss: 1.7186 - y_start_loss: 0.8494 - y_end_loss: 0.8024 - y_jac_loss: 0.0668 - y_start_categorical_accuracy: 0.6989 - y_end_categorical_accuracy: 0.7179 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6621 - val_y_start_loss: 0.8288 - val_y_end_loss: 0.7769 - val_y_jac_loss: 0.0561 - val_y_start_categorical_accuracy: 0.7018 - val_y_end_categorical_accuracy: 0.7193 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 3/5\n21984/21984 - 275s - loss: 1.5571 - y_start_loss: 0.7719 - y_end_loss: 0.7217 - y_jac_loss: 0.0635 - y_start_categorical_accuracy: 0.7174 - y_end_categorical_accuracy: 0.7399 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6521 - val_y_start_loss: 0.8218 - val_y_end_loss: 0.7625 - val_y_jac_loss: 0.0672 - val_y_start_categorical_accuracy: 0.7021 - val_y_end_categorical_accuracy: 0.7342 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 4/5\nRestoring model weights from the end of the best epoch.\n21984/21984 - 273s - loss: 1.4276 - y_start_loss: 0.7131 - y_end_loss: 0.6531 - y_jac_loss: 0.0614 - y_start_categorical_accuracy: 0.7331 - y_end_categorical_accuracy: 0.7548 - y_jac_categorical_accuracy: 1.0000 - val_loss: 1.6586 - val_y_start_loss: 0.8299 - val_y_end_loss: 0.7672 - val_y_jac_loss: 0.0611 - val_y_start_categorical_accuracy: 0.7083 - val_y_end_categorical_accuracy: 0.7338 - val_y_jac_categorical_accuracy: 1.0000\nEpoch 00004: early stopping\n" ] ], [ [ "# Model loss graph", "_____no_output_____" ] ], [ [ "sns.set(style=\"whitegrid\")\nfor n_fold in range(config['N_FOLDS']):\n print('Fold: %d' % (n_fold+1))\n plot_metrics(history_list[n_fold])", "Fold: 1\n" ] ], [ [ "# Model evaluation", "_____no_output_____" ] ], [ [ "display(evaluate_model_kfold(k_fold, config['N_FOLDS']).style.applymap(color_map))", "_____no_output_____" ] ], [ [ "# Visualize predictions", "_____no_output_____" ] ], [ [ "display(k_fold[[c for c in k_fold.columns if not (c.startswith('textID') or \n c.startswith('text_len') or \n c.startswith('selected_text_len') or \n c.startswith('text_wordCnt') or \n c.startswith('selected_text_wordCnt') or \n c.startswith('fold_') or \n c.startswith('start_fold_') or \n c.startswith('end_fold_'))]].head(15))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
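The jaccard columns computed above rely on a word-level Jaccard similarity between the selected text and the prediction. The imported helper is not shown in this notebook, so the snippet below uses the competition's usual formulation as a plausible stand-in:

def jaccard(str1, str2):
    # Word-level Jaccard: |intersection| / |union| of the two token sets.
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    if not a and not b:
        return 0.5          # guard for two empty strings; conventions vary here
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))

print(jaccard("happy mothers day", "happy mothers day!"))   # 0.5 ("day" != "day!")
print(jaccard("so sad today", "so sad today"))              # 1.0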
ecb1462e12e2c0cca1e7ea698321de73f230517c
12,955
ipynb
Jupyter Notebook
titanic.ipynb
iyedg/titanic
7cbf384627d06e492a92238587a91fc747a505d4
[ "Unlicense" ]
null
null
null
titanic.ipynb
iyedg/titanic
7cbf384627d06e492a92238587a91fc747a505d4
[ "Unlicense" ]
null
null
null
titanic.ipynb
iyedg/titanic
7cbf384627d06e492a92238587a91fc747a505d4
[ "Unlicense" ]
null
null
null
27.980562
161
0.498109
[ [ [ "import warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport matplotlib.pyplot as plt\nimport missingno\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom fancyimpute import MICE\nfrom IPython.core.debugger import set_trace\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline\nfrom sklearn.preprocessing import LabelEncoder\n\nplt.style.use(\"fivethirtyeight\")", "Using TensorFlow backend.\n" ], [ "train = pd.read_csv(\"/home/iyed/.kaggle/competitions/titanic/train.csv\")\ntest = pd.read_csv(\"/home/iyed/.kaggle/competitions/titanic/test.csv\")\ntrain.rename(str.lower, axis=1, inplace=True)\ntest.rename(str.lower, axis=1, inplace=True)", "_____no_output_____" ] ], [ [ "### Transformers", "_____no_output_____" ] ], [ [ "# https://zablo.net/blog/post/pandas-dataframe-in-scikit-learn-feature-union\nfrom scipy import sparse\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom sklearn.pipeline import FeatureUnion, _fit_transform_one, _transform_one\n\n\nclass PandasFeatureUnion(FeatureUnion):\n def fit_transform(self, X, y=None, **fit_params):\n self._validate_transformers()\n result = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_transform_one)(trans,\n weight,\n X,\n y,\n **fit_params) for name,\n trans,\n weight in self._iter()\n )\n\n if not result:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n Xs, transformers = zip(*result)\n self._update_transformer_list(transformers)\n if any(sparse.issparse(f) for f in Xs):\n Xs = sparse.hstack(Xs).tocsr()\n else:\n Xs = self.merge_dataframes_by_column(Xs)\n return Xs\n\n def merge_dataframes_by_column(self, Xs):\n return pd.concat(Xs, axis=\"columns\", copy=False)\n\n def transform(self, X):\n Xs = Parallel(n_jobs=self.n_jobs)(\n delayed(_transform_one)(trans,\n weight,\n X) for name,\n trans,\n weight in self._iter()\n )\n if not Xs:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n if any(sparse.issparse(f) for f in Xs):\n Xs = sparse.hstack(Xs).tocsr()\n else:\n Xs = self.merge_dataframes_by_column(Xs)\n return Xs", "_____no_output_____" ], [ "class NoFitMixin():\n def fit(self, X, y=None):\n return self\n\n\nclass MICEImputer(BaseEstimator, TransformerMixin, NoFitMixin):\n def transform(self, X, *args, **kwargs):\n \"\"\"\n Fill a 1-D array missing values with MICE\n \"\"\"\n assert isinstance(X, pd.Series)\n X = X.copy() # Should avoid error of already full for repeat execution\n has_null = X.isnull().any() # TODO: Used to avoid error of no null values from MICE\n if has_null:\n mice = MICE(verbose=False, *args, **kwargs)\n imputed = mice.complete(X.values.reshape(-1, 1))\n X.loc[:] = imputed.reshape(X.loc[:].shape)\n return pd.DataFrame(X)\n else:\n return X\n\n\nclass ColumnSelector(BaseEstimator, TransformerMixin, NoFitMixin):\n def __init__(self, columns, one_col=True):\n self.columns = columns\n self.one_col = one_col\n\n def transform(self, X, y=None):\n if self.one_col:\n return X[self.columns].iloc[:, 0]\n else:\n return X[self.columns]\n\n\nclass ColumnDummifier(BaseEstimator, TransformerMixin, NoFitMixin):\n def transform(self, X, y=None):\n return pd.get_dummies(X, sparse=True, drop_first=True)\n\n\nclass LabelEncoderWNaN(TransformerMixin, BaseEstimator):\n \"\"\"\n Applies the sklearn.LabelEncoder while keeping missing values\n \"\"\"\n\n def fit(self, X, y=None):\n self.le_ = 
LabelEncoder()\n self.le_.fit(X.loc[X.notnull()])\n return self\n\n def transform(self, X, y=None):\n X = X.copy(deep=True) # Do not apply tranform to the actual DF\n X.loc[X.notnull()] = self.le_.transform(X.loc[X.notnull()])\n return X.astype(\"float\")", "_____no_output_____" ] ], [ [ "## Explore", "_____no_output_____" ] ], [ [ "train_x = train.drop(columns=[\"survived\"])\ntrain_y = train.survived", "_____no_output_____" ], [ "benchmark_pipeline = Pipeline([(\n \"prep\",\n PandasFeatureUnion([\n (\"age\",\n make_pipeline(ColumnSelector([\"age\"]),\n MICEImputer())),\n (\"sex_dummy\",\n make_pipeline(ColumnSelector([\"sex\"]),\n ColumnDummifier())),\n (\n \"embarked\",\n make_pipeline(ColumnSelector([\"embarked\"]),\n LabelEncoderWNaN(),\n MICEImputer(),\n ColumnDummifier())\n ),\n (\"fare\",\n make_pipeline(ColumnSelector([\"fare\"]),\n MICEImputer())),\n (\"rest\",\n make_pipeline(ColumnSelector([\"parch\",\n \"sibsp\",\n \"pclass\"],\n one_col=False)))\n ])\n)])", "_____no_output_____" ], [ "train.columns", "_____no_output_____" ] ], [ [ "## Benchmark model", "_____no_output_____" ] ], [ [ "lr_pipeline = make_pipeline(benchmark_pipeline, LogisticRegression())", "_____no_output_____" ], [ "lr_pipeline.fit(train, train_y)", "_____no_output_____" ], [ "accuracy_score(train_y, lr_pipeline.predict(train_x))", "_____no_output_____" ], [ "validate = pd.read_excel(\"/home/iyed/Downloads/titanic3.xls\")", "_____no_output_____" ], [ "accuracy_score(validate.survived, lr_pipeline.predict(validate))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
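The Titanic pipeline above is built from small custom transformers; the pattern they share (BaseEstimator + TransformerMixin with fit/transform, chained by make_pipeline) can be shown on a few synthetic rows, so the Kaggle files and fancyimpute are not needed for the sketch:

import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

class ColumnSelector(BaseEstimator, TransformerMixin):
    """Keep only the listed columns of an incoming DataFrame."""
    def __init__(self, columns):
        self.columns = columns
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.columns]

train = pd.DataFrame({"age": [22, 38, 26, 35], "fare": [7.25, 71.3, 7.9, 53.1],
                      "name": ["a", "b", "c", "d"]})
survived = [0, 1, 1, 1]

pipe = make_pipeline(ColumnSelector(["age", "fare"]), LogisticRegression())
pipe.fit(train, survived)
print(pipe.predict(train))      # predictions on the toy training rows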
ecb16af223c39666f17926da415324e4b47a6ab1
29,746
ipynb
Jupyter Notebook
ETL_AWS_RDS_POSTGRES/ETL.ipynb
almiller95/final-project-machine-learning
b3596d768a1ac61c543193a6d5076484fc77e016
[ "MIT" ]
null
null
null
ETL_AWS_RDS_POSTGRES/ETL.ipynb
almiller95/final-project-machine-learning
b3596d768a1ac61c543193a6d5076484fc77e016
[ "MIT" ]
null
null
null
ETL_AWS_RDS_POSTGRES/ETL.ipynb
almiller95/final-project-machine-learning
b3596d768a1ac61c543193a6d5076484fc77e016
[ "MIT" ]
null
null
null
29,746
29,746
0.423586
[ [ [ "import os\n# Find the latest version of spark 3.0 from http://www-us.apache.org/dist/spark/ and enter as the spark version\n# For example:\n# spark_version = 'spark-3.0.2'\nspark_version = 'spark-3.1.1'\nos.environ['SPARK_VERSION']=spark_version\n\n# Install Spark and Java\n!apt-get update\n!apt-get install openjdk-11-jdk-headless -qq > /dev/null\n!wget -q http://www-us.apache.org/dist/spark/$SPARK_VERSION/$SPARK_VERSION-bin-hadoop2.7.tgz\n!tar xf $SPARK_VERSION-bin-hadoop2.7.tgz\n!pip install -q findspark\n\n# Set Environment Variables\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-11-openjdk-amd64\"\nos.environ[\"SPARK_HOME\"] = f\"/content/{spark_version}-bin-hadoop2.7\"\n\n# Start a SparkSession\nimport findspark\nfindspark.init()", "\r0% [Working]\r \rIgn:1 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\n\r0% [Connecting to archive.ubuntu.com (91.189.88.152)] [Connecting to security.u\r \rIgn:2 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\n\r0% [Connecting to archive.ubuntu.com (91.189.88.152)] [Connecting to security.u\r \rHit:3 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release\nHit:4 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nHit:5 http://archive.ubuntu.com/ubuntu bionic InRelease\nGet:6 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\nHit:8 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\nGet:9 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\nHit:10 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\nHit:12 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease\nGet:13 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\nHit:14 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease\nHit:15 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nGet:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2,616 kB]\nGet:17 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2,184 kB]\nFetched 5,053 kB in 3s (1,602 kB/s)\nReading package lists... Done\n" ], [ " !wget https://jdbc.postgresql.org/download/postgresql-42.2.9.jar", "--2021-06-08 02:00:00-- https://jdbc.postgresql.org/download/postgresql-42.2.9.jar\nResolving jdbc.postgresql.org (jdbc.postgresql.org)... 72.32.157.228, 2001:4800:3e1:1::228\nConnecting to jdbc.postgresql.org (jdbc.postgresql.org)|72.32.157.228|:443... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 914037 (893K) [application/java-archive]\nSaving to: ‘postgresql-42.2.9.jar.2’\n\npostgresql-42.2.9.j 100%[===================>] 892.61K 5.49MB/s in 0.2s \n\n2021-06-08 02:00:00 (5.49 MB/s) - ‘postgresql-42.2.9.jar.2’ saved [914037/914037]\n\n" ], [ "from pyspark.sql import SparkSession\nspark = SparkSession.builder.appName(\"CloudETL\").config(\"spark.driver.extraClassPath\",\"/content/postgresql-42.2.9.jar\").getOrCreate()", "_____no_output_____" ], [ "from pyspark import SparkFiles\n# Load in employee.csv from S3 into a DataFrame\ncan_url = \"https://cancer-data-ml-060721-0001.s3.amazonaws.com/wisconsin_cancer_data.csv\"\nspark.sparkContext.addFile(can_url)\n\ndf = spark.read.option('header', 'true').csv(SparkFiles.get(\"wisconsin_cancer_data.csv\"), inferSchema=True, sep=',', timestampFormat=\"m/dd/yy\")\ndf.show(10)", "+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+----+\n| id|diagnosis|radius_mean|texture_mean|perimeter_mean|area_mean|smoothness_mean|compactness_mean|concavity_mean|concave points_mean|symmetry_mean|fractal_dimension_mean|radius_se|texture_se|perimeter_se|area_se|smoothness_se|compactness_se|concavity_se|concave points_se|symmetry_se|fractal_dimension_se|radius_worst|texture_worst|perimeter_worst|area_worst|smoothness_worst|compactness_worst|concavity_worst|concave points_worst|symmetry_worst|fractal_dimension_worst|_c32|\n+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+----+\n| 842302| M| 17.99| 10.38| 122.8| 1001.0| 0.1184| 0.2776| 0.3001| 0.1471| 0.2419| 0.07871| 1.095| 0.9053| 8.589| 153.4| 0.006399| 0.04904| 0.05373| 0.01587| 0.03003| 0.006193| 25.38| 17.33| 184.6| 2019.0| 0.1622| 0.6656| 0.7119| 0.2654| 0.4601| 0.1189|null|\n| 842517| M| 20.57| 17.77| 132.9| 1326.0| 0.08474| 0.07864| 0.0869| 0.07017| 0.1812| 0.05667| 0.5435| 0.7339| 3.398| 74.08| 0.005225| 0.01308| 0.0186| 0.0134| 0.01389| 0.003532| 24.99| 23.41| 158.8| 1956.0| 0.1238| 0.1866| 0.2416| 0.186| 0.275| 0.08902|null|\n|84300903| M| 19.69| 21.25| 130.0| 1203.0| 0.1096| 0.1599| 0.1974| 0.1279| 0.2069| 0.05999| 0.7456| 0.7869| 4.585| 94.03| 0.00615| 0.04006| 0.03832| 0.02058| 0.0225| 0.004571| 23.57| 25.53| 152.5| 1709.0| 0.1444| 0.4245| 0.4504| 0.243| 0.3613| 0.08758|null|\n|84348301| M| 11.42| 20.38| 77.58| 386.1| 0.1425| 0.2839| 0.2414| 0.1052| 0.2597| 0.09744| 0.4956| 1.156| 3.445| 27.23| 0.00911| 0.07458| 0.05661| 0.01867| 0.05963| 0.009208| 14.91| 26.5| 98.87| 567.7| 0.2098| 0.8663| 0.6869| 0.2575| 0.6638| 0.173|null|\n|84358402| M| 20.29| 14.34| 135.1| 1297.0| 0.1003| 0.1328| 0.198| 0.1043| 0.1809| 0.05883| 0.7572| 0.7813| 5.438| 94.44| 0.01149| 0.02461| 0.05688| 0.01885| 0.01756| 0.005115| 22.54| 16.67| 152.2| 1575.0| 0.1374| 0.205| 0.4| 
0.1625| 0.2364| 0.07678|null|\n| 843786| M| 12.45| 15.7| 82.57| 477.1| 0.1278| 0.17| 0.1578| 0.08089| 0.2087| 0.07613| 0.3345| 0.8902| 2.217| 27.19| 0.00751| 0.03345| 0.03672| 0.01137| 0.02165| 0.005082| 15.47| 23.75| 103.4| 741.6| 0.1791| 0.5249| 0.5355| 0.1741| 0.3985| 0.1244|null|\n| 844359| M| 18.25| 19.98| 119.6| 1040.0| 0.09463| 0.109| 0.1127| 0.074| 0.1794| 0.05742| 0.4467| 0.7732| 3.18| 53.91| 0.004314| 0.01382| 0.02254| 0.01039| 0.01369| 0.002179| 22.88| 27.66| 153.2| 1606.0| 0.1442| 0.2576| 0.3784| 0.1932| 0.3063| 0.08368|null|\n|84458202| M| 13.71| 20.83| 90.2| 577.9| 0.1189| 0.1645| 0.09366| 0.05985| 0.2196| 0.07451| 0.5835| 1.377| 3.856| 50.96| 0.008805| 0.03029| 0.02488| 0.01448| 0.01486| 0.005412| 17.06| 28.14| 110.6| 897.0| 0.1654| 0.3682| 0.2678| 0.1556| 0.3196| 0.1151|null|\n| 844981| M| 13.0| 21.82| 87.5| 519.8| 0.1273| 0.1932| 0.1859| 0.09353| 0.235| 0.07389| 0.3063| 1.002| 2.406| 24.32| 0.005731| 0.03502| 0.03553| 0.01226| 0.02143| 0.003749| 15.49| 30.73| 106.2| 739.3| 0.1703| 0.5401| 0.539| 0.206| 0.4378| 0.1072|null|\n|84501001| M| 12.46| 24.04| 83.97| 475.9| 0.1186| 0.2396| 0.2273| 0.08543| 0.203| 0.08243| 0.2976| 1.599| 2.039| 23.94| 0.007149| 0.07217| 0.07743| 0.01432| 0.01789| 0.01008| 15.09| 40.68| 97.65| 711.4| 0.1853| 1.058| 1.105| 0.221| 0.4366| 0.2075|null|\n+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+----+\nonly showing top 10 rows\n\n" ], [ "can_df_1 = df.drop(\"_c32\")\n\ncan_df_1.show(10)", "+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+\n| id|diagnosis|radius_mean|texture_mean|perimeter_mean|area_mean|smoothness_mean|compactness_mean|concavity_mean|concave points_mean|symmetry_mean|fractal_dimension_mean|radius_se|texture_se|perimeter_se|area_se|smoothness_se|compactness_se|concavity_se|concave points_se|symmetry_se|fractal_dimension_se|radius_worst|texture_worst|perimeter_worst|area_worst|smoothness_worst|compactness_worst|concavity_worst|concave points_worst|symmetry_worst|fractal_dimension_worst|\n+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+\n| 842302| M| 17.99| 10.38| 122.8| 1001.0| 0.1184| 0.2776| 0.3001| 0.1471| 0.2419| 0.07871| 1.095| 0.9053| 8.589| 153.4| 0.006399| 0.04904| 0.05373| 0.01587| 0.03003| 0.006193| 25.38| 17.33| 184.6| 2019.0| 0.1622| 0.6656| 0.7119| 0.2654| 0.4601| 
0.1189|\n| 842517| M| 20.57| 17.77| 132.9| 1326.0| 0.08474| 0.07864| 0.0869| 0.07017| 0.1812| 0.05667| 0.5435| 0.7339| 3.398| 74.08| 0.005225| 0.01308| 0.0186| 0.0134| 0.01389| 0.003532| 24.99| 23.41| 158.8| 1956.0| 0.1238| 0.1866| 0.2416| 0.186| 0.275| 0.08902|\n|84300903| M| 19.69| 21.25| 130.0| 1203.0| 0.1096| 0.1599| 0.1974| 0.1279| 0.2069| 0.05999| 0.7456| 0.7869| 4.585| 94.03| 0.00615| 0.04006| 0.03832| 0.02058| 0.0225| 0.004571| 23.57| 25.53| 152.5| 1709.0| 0.1444| 0.4245| 0.4504| 0.243| 0.3613| 0.08758|\n|84348301| M| 11.42| 20.38| 77.58| 386.1| 0.1425| 0.2839| 0.2414| 0.1052| 0.2597| 0.09744| 0.4956| 1.156| 3.445| 27.23| 0.00911| 0.07458| 0.05661| 0.01867| 0.05963| 0.009208| 14.91| 26.5| 98.87| 567.7| 0.2098| 0.8663| 0.6869| 0.2575| 0.6638| 0.173|\n|84358402| M| 20.29| 14.34| 135.1| 1297.0| 0.1003| 0.1328| 0.198| 0.1043| 0.1809| 0.05883| 0.7572| 0.7813| 5.438| 94.44| 0.01149| 0.02461| 0.05688| 0.01885| 0.01756| 0.005115| 22.54| 16.67| 152.2| 1575.0| 0.1374| 0.205| 0.4| 0.1625| 0.2364| 0.07678|\n| 843786| M| 12.45| 15.7| 82.57| 477.1| 0.1278| 0.17| 0.1578| 0.08089| 0.2087| 0.07613| 0.3345| 0.8902| 2.217| 27.19| 0.00751| 0.03345| 0.03672| 0.01137| 0.02165| 0.005082| 15.47| 23.75| 103.4| 741.6| 0.1791| 0.5249| 0.5355| 0.1741| 0.3985| 0.1244|\n| 844359| M| 18.25| 19.98| 119.6| 1040.0| 0.09463| 0.109| 0.1127| 0.074| 0.1794| 0.05742| 0.4467| 0.7732| 3.18| 53.91| 0.004314| 0.01382| 0.02254| 0.01039| 0.01369| 0.002179| 22.88| 27.66| 153.2| 1606.0| 0.1442| 0.2576| 0.3784| 0.1932| 0.3063| 0.08368|\n|84458202| M| 13.71| 20.83| 90.2| 577.9| 0.1189| 0.1645| 0.09366| 0.05985| 0.2196| 0.07451| 0.5835| 1.377| 3.856| 50.96| 0.008805| 0.03029| 0.02488| 0.01448| 0.01486| 0.005412| 17.06| 28.14| 110.6| 897.0| 0.1654| 0.3682| 0.2678| 0.1556| 0.3196| 0.1151|\n| 844981| M| 13.0| 21.82| 87.5| 519.8| 0.1273| 0.1932| 0.1859| 0.09353| 0.235| 0.07389| 0.3063| 1.002| 2.406| 24.32| 0.005731| 0.03502| 0.03553| 0.01226| 0.02143| 0.003749| 15.49| 30.73| 106.2| 739.3| 0.1703| 0.5401| 0.539| 0.206| 0.4378| 0.1072|\n|84501001| M| 12.46| 24.04| 83.97| 475.9| 0.1186| 0.2396| 0.2273| 0.08543| 0.203| 0.08243| 0.2976| 1.599| 2.039| 23.94| 0.007149| 0.07217| 0.07743| 0.01432| 0.01789| 0.01008| 15.09| 40.68| 97.65| 711.4| 0.1853| 1.058| 1.105| 0.221| 0.4366| 0.2075|\n+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+\nonly showing top 10 rows\n\n" ], [ "# renaming to get rid of spaces in column headers\n# concave points_mean, concave points_se, concave points_worst\n\ncan_df_2 = can_df_1.withColumnRenamed(\"concave points_mean\",\"concave_points_mean\").withColumnRenamed(\"concave points_se\",\"concave_points_se\").withColumnRenamed(\"concave points_worst\",\"concave_points_worst\")\n\ncan_df_2.show(10)", 
"+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+\n| id|diagnosis|radius_mean|texture_mean|perimeter_mean|area_mean|smoothness_mean|compactness_mean|concavity_mean|concave_points_mean|symmetry_mean|fractal_dimension_mean|radius_se|texture_se|perimeter_se|area_se|smoothness_se|compactness_se|concavity_se|concave_points_se|symmetry_se|fractal_dimension_se|radius_worst|texture_worst|perimeter_worst|area_worst|smoothness_worst|compactness_worst|concavity_worst|concave_points_worst|symmetry_worst|fractal_dimension_worst|\n+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+\n| 842302| M| 17.99| 10.38| 122.8| 1001.0| 0.1184| 0.2776| 0.3001| 0.1471| 0.2419| 0.07871| 1.095| 0.9053| 8.589| 153.4| 0.006399| 0.04904| 0.05373| 0.01587| 0.03003| 0.006193| 25.38| 17.33| 184.6| 2019.0| 0.1622| 0.6656| 0.7119| 0.2654| 0.4601| 0.1189|\n| 842517| M| 20.57| 17.77| 132.9| 1326.0| 0.08474| 0.07864| 0.0869| 0.07017| 0.1812| 0.05667| 0.5435| 0.7339| 3.398| 74.08| 0.005225| 0.01308| 0.0186| 0.0134| 0.01389| 0.003532| 24.99| 23.41| 158.8| 1956.0| 0.1238| 0.1866| 0.2416| 0.186| 0.275| 0.08902|\n|84300903| M| 19.69| 21.25| 130.0| 1203.0| 0.1096| 0.1599| 0.1974| 0.1279| 0.2069| 0.05999| 0.7456| 0.7869| 4.585| 94.03| 0.00615| 0.04006| 0.03832| 0.02058| 0.0225| 0.004571| 23.57| 25.53| 152.5| 1709.0| 0.1444| 0.4245| 0.4504| 0.243| 0.3613| 0.08758|\n|84348301| M| 11.42| 20.38| 77.58| 386.1| 0.1425| 0.2839| 0.2414| 0.1052| 0.2597| 0.09744| 0.4956| 1.156| 3.445| 27.23| 0.00911| 0.07458| 0.05661| 0.01867| 0.05963| 0.009208| 14.91| 26.5| 98.87| 567.7| 0.2098| 0.8663| 0.6869| 0.2575| 0.6638| 0.173|\n|84358402| M| 20.29| 14.34| 135.1| 1297.0| 0.1003| 0.1328| 0.198| 0.1043| 0.1809| 0.05883| 0.7572| 0.7813| 5.438| 94.44| 0.01149| 0.02461| 0.05688| 0.01885| 0.01756| 0.005115| 22.54| 16.67| 152.2| 1575.0| 0.1374| 0.205| 0.4| 0.1625| 0.2364| 0.07678|\n| 843786| M| 12.45| 15.7| 82.57| 477.1| 0.1278| 0.17| 0.1578| 0.08089| 0.2087| 0.07613| 0.3345| 0.8902| 2.217| 27.19| 0.00751| 0.03345| 0.03672| 0.01137| 0.02165| 0.005082| 15.47| 23.75| 103.4| 741.6| 0.1791| 0.5249| 0.5355| 0.1741| 0.3985| 0.1244|\n| 844359| M| 18.25| 19.98| 119.6| 1040.0| 0.09463| 0.109| 0.1127| 0.074| 0.1794| 0.05742| 0.4467| 0.7732| 3.18| 53.91| 0.004314| 0.01382| 0.02254| 0.01039| 0.01369| 0.002179| 22.88| 27.66| 153.2| 1606.0| 0.1442| 0.2576| 0.3784| 0.1932| 0.3063| 0.08368|\n|84458202| M| 13.71| 20.83| 90.2| 577.9| 0.1189| 0.1645| 0.09366| 0.05985| 0.2196| 0.07451| 0.5835| 1.377| 3.856| 50.96| 0.008805| 0.03029| 0.02488| 0.01448| 0.01486| 0.005412| 17.06| 28.14| 110.6| 897.0| 0.1654| 0.3682| 0.2678| 0.1556| 0.3196| 0.1151|\n| 844981| M| 13.0| 21.82| 87.5| 519.8| 0.1273| 0.1932| 0.1859| 0.09353| 0.235| 0.07389| 0.3063| 1.002| 
2.406| 24.32| 0.005731| 0.03502| 0.03553| 0.01226| 0.02143| 0.003749| 15.49| 30.73| 106.2| 739.3| 0.1703| 0.5401| 0.539| 0.206| 0.4378| 0.1072|\n|84501001| M| 12.46| 24.04| 83.97| 475.9| 0.1186| 0.2396| 0.2273| 0.08543| 0.203| 0.08243| 0.2976| 1.599| 2.039| 23.94| 0.007149| 0.07217| 0.07743| 0.01432| 0.01789| 0.01008| 15.09| 40.68| 97.65| 711.4| 0.1853| 1.058| 1.105| 0.221| 0.4366| 0.2075|\n+--------+---------+-----------+------------+--------------+---------+---------------+----------------+--------------+-------------------+-------------+----------------------+---------+----------+------------+-------+-------------+--------------+------------+-----------------+-----------+--------------------+------------+-------------+---------------+----------+----------------+-----------------+---------------+--------------------+--------------+-----------------------+\nonly showing top 10 rows\n\n" ], [ "can_df_2.printSchema()", "root\n |-- id: integer (nullable = true)\n |-- diagnosis: string (nullable = true)\n |-- radius_mean: double (nullable = true)\n |-- texture_mean: double (nullable = true)\n |-- perimeter_mean: double (nullable = true)\n |-- area_mean: double (nullable = true)\n |-- smoothness_mean: double (nullable = true)\n |-- compactness_mean: double (nullable = true)\n |-- concavity_mean: double (nullable = true)\n |-- concave_points_mean: double (nullable = true)\n |-- symmetry_mean: double (nullable = true)\n |-- fractal_dimension_mean: double (nullable = true)\n |-- radius_se: double (nullable = true)\n |-- texture_se: double (nullable = true)\n |-- perimeter_se: double (nullable = true)\n |-- area_se: double (nullable = true)\n |-- smoothness_se: double (nullable = true)\n |-- compactness_se: double (nullable = true)\n |-- concavity_se: double (nullable = true)\n |-- concave_points_se: double (nullable = true)\n |-- symmetry_se: double (nullable = true)\n |-- fractal_dimension_se: double (nullable = true)\n |-- radius_worst: double (nullable = true)\n |-- texture_worst: double (nullable = true)\n |-- perimeter_worst: double (nullable = true)\n |-- area_worst: double (nullable = true)\n |-- smoothness_worst: double (nullable = true)\n |-- compactness_worst: double (nullable = true)\n |-- concavity_worst: double (nullable = true)\n |-- concave_points_worst: double (nullable = true)\n |-- symmetry_worst: double (nullable = true)\n |-- fractal_dimension_worst: double (nullable = true)\n\n" ], [ "# Configuration for RDS instance\nmode=\"append\"\njdbc_url = \"jdbc:postgresql://final-project-060521-001.cklzu8aj5kxi.us-east-2.rds.amazonaws.com:5432/breast_cancer\"\nconfig = {\"user\":\"postgres\",\n \"password\": \"<password>\",\n \"driver\":\"org.postgresql.Driver\"}", "_____no_output_____" ], [ "# write DataFrame to RDS\ncan_df_2.write.jdbc(url=jdbc_url, table='wisconsin_breast_cancer', mode=mode, properties=config)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb16bf02bbe8261d954d7beebad437c4ce1b65d
19,315
ipynb
Jupyter Notebook
nbs/32_text.models.awdlstm.ipynb
IamGianluca/fastai2
f3f8e6fe5ac5f345fdc14737d78f9cbbf2577bc6
[ "Apache-2.0" ]
null
null
null
nbs/32_text.models.awdlstm.ipynb
IamGianluca/fastai2
f3f8e6fe5ac5f345fdc14737d78f9cbbf2577bc6
[ "Apache-2.0" ]
null
null
null
nbs/32_text.models.awdlstm.ipynb
IamGianluca/fastai2
f3f8e6fe5ac5f345fdc14737d78f9cbbf2577bc6
[ "Apache-2.0" ]
null
null
null
35.768519
358
0.564484
[ [ [ "#export\nfrom fastai2.data.all import *\nfrom fastai2.text.core import *", "_____no_output_____" ], [ "from nbdev.showdoc import *", "_____no_output_____" ], [ "#default_exp text.models.awdlstm\n#default_cls_lvl 3", "_____no_output_____" ] ], [ [ "# AWD-LSTM\n\n> AWD LSTM from [Smerity et al.](https://arxiv.org/pdf/1708.02182.pdf) ", "_____no_output_____" ], [ "## Basic NLP modules", "_____no_output_____" ], [ "On top of the pytorch or the fastai [`layers`](/layers.html#layers), the language models use some custom layers specific to NLP.", "_____no_output_____" ] ], [ [ "#export\ndef dropout_mask(x, sz, p):\n \"Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element.\"\n return x.new(*sz).bernoulli_(1-p).div_(1-p)", "_____no_output_____" ], [ "t = dropout_mask(torch.randn(3,4), [4,3], 0.25)\ntest_eq(t.shape, [4,3])\nassert ((t == 4/3) + (t==0)).all()", "_____no_output_____" ], [ "#export\nclass RNNDropout(Module):\n \"Dropout with probability `p` that is consistent on the seq_len dimension.\"\n def __init__(self, p=0.5): self.p=p\n\n def forward(self, x):\n if not self.training or self.p == 0.: return x\n return x * dropout_mask(x.data, (x.size(0), 1, x.size(2)), self.p)", "_____no_output_____" ], [ "dp = RNNDropout(0.3)\ntst_inp = torch.randn(4,3,7)\ntst_out = dp(tst_inp)\nfor i in range(4):\n for j in range(7):\n if tst_out[i,0,j] == 0: assert (tst_out[i,:,j] == 0).all()\n else: test_close(tst_out[i,:,j], tst_inp[i,:,j]/(1-0.3))", "_____no_output_____" ], [ "#export\nimport warnings", "_____no_output_____" ], [ "#export\nclass WeightDropout(Module):\n \"A module that warps another layer in which some weights will be replaced by 0 during training.\"\n\n def __init__(self, module, weight_p, layer_names='weight_hh_l0'):\n self.module,self.weight_p,self.layer_names = module,weight_p,L(layer_names)\n for layer in self.layer_names:\n #Makes a copy of the weights of the selected layers.\n w = getattr(self.module, layer)\n self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))\n self.module._parameters[layer] = F.dropout(w, p=self.weight_p, training=False)\n\n def _setweights(self):\n \"Apply dropout to the raw weights.\"\n for layer in self.layer_names:\n raw_w = getattr(self, f'{layer}_raw')\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_p, training=self.training)\n\n def forward(self, *args):\n self._setweights()\n with warnings.catch_warnings():\n #To avoid the warning that comes because the weights aren't flattened.\n warnings.simplefilter(\"ignore\")\n return self.module.forward(*args)\n\n def reset(self):\n for layer in self.layer_names:\n raw_w = getattr(self, f'{layer}_raw')\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_p, training=False)\n if hasattr(self.module, 'reset'): self.module.reset()", "_____no_output_____" ], [ "module = nn.LSTM(5,7).cuda()\ndp_module = WeightDropout(module, 0.4)\nwgts = getattr(dp_module.module, 'weight_hh_l0')\ntst_inp = torch.randn(10,20,5).cuda()\nh = torch.zeros(1,20,7).cuda(), torch.zeros(1,20,7).cuda()\nx,h = dp_module(tst_inp,h)\nnew_wgts = getattr(dp_module.module, 'weight_hh_l0')\ntest_eq(wgts, getattr(dp_module, 'weight_hh_l0_raw'))\nassert 0.2 <= (new_wgts==0).sum().float()/new_wgts.numel() <= 0.6", "_____no_output_____" ], [ "#export\nclass EmbeddingDropout(Module):\n \"Apply dropout with probabily `embed_p` to an embedding layer `emb`.\"\n\n def __init__(self, emb, embed_p):\n self.emb,self.embed_p = emb,embed_p\n\n def forward(self, words, 
scale=None):\n if self.training and self.embed_p != 0:\n size = (self.emb.weight.size(0),1)\n mask = dropout_mask(self.emb.weight.data, size, self.embed_p)\n masked_embed = self.emb.weight * mask\n else: masked_embed = self.emb.weight\n if scale: masked_embed.mul_(scale)\n return F.embedding(words, masked_embed, ifnone(self.emb.padding_idx, -1), self.emb.max_norm,\n self.emb.norm_type, self.emb.scale_grad_by_freq, self.emb.sparse)", "_____no_output_____" ], [ "enc = nn.Embedding(10, 7, padding_idx=1)\nenc_dp = EmbeddingDropout(enc, 0.5)\ntst_inp = torch.randint(0,10,(8,))\ntst_out = enc_dp(tst_inp)\nfor i in range(8):\n assert (tst_out[i]==0).all() or torch.allclose(tst_out[i], 2*enc.weight[tst_inp[i]])", "_____no_output_____" ], [ "#export\nclass AWD_LSTM(Module):\n \"AWD-LSTM inspired by https://arxiv.org/abs/1708.02182\"\n initrange=0.1\n\n def __init__(self, vocab_sz, emb_sz, n_hid, n_layers, pad_token=1, hidden_p=0.2, input_p=0.6, embed_p=0.1,\n weight_p=0.5, bidir=False):\n store_attr(self, 'emb_sz,n_hid,n_layers,pad_token')\n self.bs = 1\n self.n_dir = 2 if bidir else 1\n self.encoder = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)\n self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)\n self.rnns = nn.ModuleList([self._one_rnn(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.n_dir,\n bidir, weight_p, l) for l in range(n_layers)])\n self.encoder.weight.data.uniform_(-self.initrange, self.initrange)\n self.input_dp = RNNDropout(input_p)\n self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])\n self.reset()\n\n def forward(self, inp, from_embeds=False):\n bs,sl = inp.shape[:2] if from_embeds else inp.shape\n if bs!=self.bs: self._change_hidden(bs)\n\n raw_output = self.input_dp(inp if from_embeds else self.encoder_dp(inp))\n new_hidden,raw_outputs,outputs = [],[],[]\n for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):\n raw_output, new_h = rnn(raw_output, self.hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.n_layers - 1: raw_output = hid_dp(raw_output)\n outputs.append(raw_output)\n self.hidden = to_detach(new_hidden, cpu=False, gather=False)\n return raw_outputs, outputs\n\n def _change_hidden(self, bs):\n self.hidden = [self._change_one_hidden(l, bs) for l in range(self.n_layers)]\n self.bs = bs\n\n def _one_rnn(self, n_in, n_out, bidir, weight_p, l):\n \"Return one of the inner rnn\"\n rnn = nn.LSTM(n_in, n_out, 1, batch_first=True, bidirectional=bidir)\n return WeightDropout(rnn, weight_p)\n\n def _one_hidden(self, l):\n \"Return one hidden state\"\n nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir\n return (one_param(self).new_zeros(self.n_dir, self.bs, nh), one_param(self).new_zeros(self.n_dir, self.bs, nh))\n\n def _change_one_hidden(self, l, bs):\n if self.bs < bs:\n nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir\n return tuple(torch.cat([h, h.new_zeros(self.n_dir, bs-self.bs, nh)], dim=1) for h in self.hidden[l])\n if self.bs > bs: return (self.hidden[l][0][:,:bs], self.hidden[l][1][:,:bs])\n return self.hidden[l]\n\n def reset(self):\n \"Reset the hidden states\"\n [r.reset() for r in self.rnns if hasattr(r, 'reset')]\n self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]", "_____no_output_____" ] ], [ [ "This is the core of an AWD-LSTM model, with embeddings from `vocab_sz` and `emb_sz`, `n_layers` LSTMs potentialy `bidir` stacked, the first one going from `emb_sz` to `n_hid`, the last one 
from `n_hid` to `emb_sz` and all the inner ones from `n_hid` to `n_hid`. `pad_token` is passed to the PyTorch embedding layer. The dropouts are applied as such:\n- the embeddings are wrapped in `EmbeddingDropout` of probability `embed_p`;\n- the result of thise embedding layer goes through an `RNNDropout` of probability `input_p`;\n- each LSTM has `WeightDropout` applied with probability `weight_p`;\n- between two of the inner LSTM, an `RNNDropout` is applied with probabilith `hidden_p`.\n\nTHe module returns two lists: the raw outputs (without being applied the dropout of `hidden_p`) of each inner LSTM and the list of outputs with dropout. Since there is no dropout applied on the last output, those two lists have the same last element, which is the output that should be fed to a decoder (in the case of a language model).", "_____no_output_____" ] ], [ [ "tst = AWD_LSTM(100, 20, 10, 2)\nx = torch.randint(0, 100, (10,5))\nr = tst(x)\ntest_eq(tst.bs, 10)\ntest_eq(len(tst.hidden), 2)\ntest_eq([h_.shape for h_ in tst.hidden[0]], [[1,10,10], [1,10,10]])\ntest_eq([h_.shape for h_ in tst.hidden[1]], [[1,10,20], [1,10,20]])\ntest_eq(len(r), 2)\ntest_eq(r[0][-1], r[1][-1]) #No dropout for last output\nfor i in range(2): test_eq([h_.shape for h_ in r[i]], [[10,5,10], [10,5,20]])\nfor i in range(2): test_eq(r[0][i][:,-1], tst.hidden[i][0][0]) #hidden state is the last timestep in raw outputs", "_____no_output_____" ], [ "#hide\n#test bs change\nx = torch.randint(0, 100, (6,5))\nr = tst(x)\ntest_eq(tst.bs, 6)", "_____no_output_____" ], [ "#export\ndef awd_lstm_lm_split(model):\n \"Split a RNN `model` in groups for differential learning rates.\"\n groups = [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]\n groups = L(groups + [nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1])])\n return groups.map(params)", "_____no_output_____" ], [ "splits = awd_lstm_lm_split", "_____no_output_____" ], [ "#export\nawd_lstm_lm_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.1,\n hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)", "_____no_output_____" ], [ "#export\ndef awd_lstm_clas_split(model):\n \"Split a RNN `model` in groups for differential learning rates.\"\n groups = [nn.Sequential(model[0].module.encoder, model[0].module.encoder_dp)]\n groups += [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].module.rnns, model[0].module.hidden_dps)]\n groups = L(groups + [model[1]])\n return groups.map(params)", "_____no_output_____" ], [ "#export\nawd_lstm_clas_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.4,\n hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)", "_____no_output_____" ] ], [ [ "## QRNN", "_____no_output_____" ] ], [ [ "#export\nclass AWD_QRNN(AWD_LSTM):\n \"Same as an AWD-LSTM, but using QRNNs instead of LSTMs\"\n def _one_rnn(self, n_in, n_out, bidir, weight_p, l):\n from fastai2.text.models.qrnn import QRNN\n rnn = QRNN(n_in, n_out, 1, save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True, bidirectional=bidir)\n rnn.layers[0].linear = WeightDropout(rnn.layers[0].linear, weight_p, layer_names='weight')\n return rnn\n\n def _one_hidden(self, l):\n \"Return one hidden state\"\n nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir\n return one_param(self).new_zeros(self.n_dir, self.bs, nh)\n\n def _change_one_hidden(self, l, bs):\n if self.bs < bs:\n nh = (self.n_hid if l != self.n_layers - 1 else 
self.emb_sz) // self.n_dir\n return torch.cat([self.hidden[l], self.hidden[l].new_zeros(self.n_dir, bs-self.bs, nh)], dim=1)\n if self.bs > bs: return self.hidden[l][:bs]\n return self.hidden[l]", "_____no_output_____" ], [ "#export\nawd_qrnn_lm_config = dict(emb_sz=400, n_hid=1552, n_layers=4, pad_token=1, bidir=False, output_p=0.1,\n hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)", "_____no_output_____" ], [ "#export\nawd_qrnn_clas_config = dict(emb_sz=400, n_hid=1552, n_layers=4, pad_token=1, bidir=False, output_p=0.4,\n hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)", "_____no_output_____" ] ], [ [ "## Export -", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.export import notebook2script\nnotebook2script()", "Converted 00_torch_core.ipynb.\nConverted 01_layers.ipynb.\nConverted 02_data.load.ipynb.\nConverted 03_data.core.ipynb.\nConverted 04_data.external.ipynb.\nConverted 05_data.transforms.ipynb.\nConverted 06_data.block.ipynb.\nConverted 07_vision.core.ipynb.\nConverted 08_vision.data.ipynb.\nConverted 09_vision.augment.ipynb.\nConverted 09b_vision.utils.ipynb.\nConverted 09c_vision.widgets.ipynb.\nConverted 10_tutorial.pets.ipynb.\nConverted 11_vision.models.xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_learner.ipynb.\nConverted 13a_metrics.ipynb.\nConverted 14_callback.schedule.ipynb.\nConverted 14a_callback.data.ipynb.\nConverted 15_callback.hook.ipynb.\nConverted 15a_vision.models.unet.ipynb.\nConverted 16_callback.progress.ipynb.\nConverted 17_callback.tracker.ipynb.\nConverted 18_callback.fp16.ipynb.\nConverted 19_callback.mixup.ipynb.\nConverted 20_interpret.ipynb.\nConverted 20a_distributed.ipynb.\nConverted 21_vision.learner.ipynb.\nConverted 22_tutorial.imagenette.ipynb.\nConverted 23_tutorial.transfer_learning.ipynb.\nConverted 24_vision.gan.ipynb.\nConverted 30_text.core.ipynb.\nConverted 31_text.data.ipynb.\nConverted 32_text.models.awdlstm.ipynb.\nConverted 33_text.models.core.ipynb.\nConverted 34_callback.rnn.ipynb.\nConverted 35_tutorial.wikitext.ipynb.\nConverted 36_text.models.qrnn.ipynb.\nConverted 37_text.learner.ipynb.\nConverted 38_tutorial.ulmfit.ipynb.\nConverted 40_tabular.core.ipynb.\nConverted 41_tabular.data.ipynb.\nConverted 42_tabular.learner.ipynb.\nConverted 43_tabular.model.ipynb.\nConverted 45_collab.ipynb.\nConverted 50_datablock_examples.ipynb.\nConverted 60_medical.imaging.ipynb.\nConverted 65_medical.text.ipynb.\nConverted 70_callback.wandb.ipynb.\nConverted 71_callback.tensorboard.ipynb.\nConverted 97_test_utils.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ecb173379cce44d970fb35f9cd5e5888248d949c
40,260
ipynb
Jupyter Notebook
M15_Bruno_Salles_Pereira.ipynb
bsallesp/AnomalyDetection
3675d838cdefa8802a334e51b5fb8aa28f7e1b74
[ "MIT" ]
null
null
null
M15_Bruno_Salles_Pereira.ipynb
bsallesp/AnomalyDetection
3675d838cdefa8802a334e51b5fb8aa28f7e1b74
[ "MIT" ]
null
null
null
M15_Bruno_Salles_Pereira.ipynb
bsallesp/AnomalyDetection
3675d838cdefa8802a334e51b5fb8aa28f7e1b74
[ "MIT" ]
null
null
null
32.972973
246
0.330427
[ [ [ "<a href=\"https://colab.research.google.com/github/bsallesp/AnomalyDetection/blob/main/M15_Bruno_Salles_Pereira.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## Preprocessing:", "_____no_output_____" ], [ "### Unpacking resources and files:", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom google.colab import drive", "_____no_output_____" ], [ "drive.mount('/drive')\ndef getData():\n r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']\n ratings = pd.read_csv('/drive/MyDrive/ColabNotebooks/Mentorama/M15/u.data', sep='\\t', names=r_cols,\n encoding='latin-1')\n m_cols = ['movie_id', 'title', 'release_date', 'video_release_date', 'imdb_url']\n movies = pd.read_csv('/drive/MyDrive/ColabNotebooks/Mentorama/M15/u.item', sep='|', names=m_cols, usecols=range(5),\n encoding='latin-1')\n movie_ratings = pd.merge(movies, ratings)\n temp = movie_ratings[['movie_id', 'user_id', 'rating']].copy()\n temp = temp.pivot_table(columns='movie_id', index='user_id', values='rating').copy()\n temp.index = ['User_'+str(int(i)) for i in temp.index]\n temp.columns = ['Filme_'+str(int(i)) for i in temp.columns]\n qtd_cols = 80\n R = temp.iloc[:, :qtd_cols]\n l=[]\n for i in range(1, R.shape[0]+1):\n if R.iloc[i-1, ].isnull().sum() >= (qtd_cols - 10):\n l.append(i)\n R = R.drop([\"User_\"+str(r) for r in l])\n R.index = ['User_'+str(int(i)) for i in range(R.shape[0])]\n return R", "Drive already mounted at /drive; to attempt to forcibly remount, call drive.mount(\"/drive\", force_remount=True).\n" ] ], [ [ "### Class MatrixFactorization:", "_____no_output_____" ] ], [ [ "import time\n\nclass MatrixFactorization():\n \n def __init__(self, dataframe, K, steps, alpha, beta):\n self.df = dataframe\n self.K = K\n self.steps = steps\n self.alpha = alpha\n self.beta = beta\n \n def fit(self, print_ = False):\n t0 = time.time()\n \n R = self.df.values\n N, M = R.shape\n \n #inicio aleatorio\n P = np.random.rand(N,self.K)\n Q = np.random.rand(self.K,M)\n \n lista_erro_step = []\n \n #loop\n for step in range(self.steps):\n \n mse_total_step = 0\n #varrendo todas as entradas da matriz R\n for i in range(len(R)):\n for j in range(len(R[i])):\n #validando se o valor associado está preenchido\n if R[i][j] > 0:\n\n #calculando o erro:\n eij = R[i][j] - np.dot(P[i,:],Q[:,j])\n mse_total_step += (eij)**2\n #alterando os valores\n for k in range(self.K):\n P[i][k] = P[i][k] + self.alpha * ( 2 * eij * Q[k][j] - self.beta * P[i][k])\n Q[k][j] = Q[k][j] + self.alpha * ( 2 * eij * P[i][k] - self.beta * Q[k][j])\n \n lista_erro_step.append(mse_total_step)\n \n self.P = P\n self.Q = Q\n self.lista_erro_step = lista_erro_step\n t1 = time.time()\n if print_== True:\n print(\"Fatoração concluída. 
Tempo aproximado:\", int((t1-t0)/60)+1, 'minuto(s).')\n \n def predict(self):\n return self.P.dot(self.Q)\n \n def print_MSE_steps(self):\n plt.figure(figsize=[15,6])\n plt.title(\"Custo total por Step\", fontsize = 16, fontweight = 'bold')\n plt.xlabel(\"Step\", fontsize = 14, fontweight = 'bold')\n plt.ylabel(\"Erro\", fontsize = 14, fontweight = 'bold')\n plt.plot(range(1, 1+self.steps), self.lista_erro_step, c = 'blue', lw = 2)\n plt.grid()\n plt.show()", "_____no_output_____" ] ], [ [ "### grid_search / make_chess:", "_____no_output_____" ] ], [ [ "def grid_search(train, test, param_grid, print_ = False, clean_ = False):\n from itertools import product\n from IPython.display import clear_output \n results = []\n count = 0\n\n for param in product(*param_grid):\n \n fat = MatrixFactorization(dataframe = pd.DataFrame(train), K = param[0], steps = param[1], alpha = param[2], beta = param[3])\n fat.fit()\n mse = make_chess(val, fat.predict(), mse_ = True)\n results.append([param, mse])\n if print_ == True:\n print(f\"Lap {count + 1} / {total_loops}:\")\n print(f\"K = {param[0]}\")\n print(f\"steps = {param[1]}\")\n print(f\"alpha = {param[2]}\")\n print(f\"beta = {param[3]}\")\n print(f\"MSE = {mse}\")\n print(\"_\" * 75)\n count = count + 1\n\n if clean_ == True:\n clear_output()\n\n \n \n return results", "_____no_output_____" ], [ "# Select only pred position cells, and return new dataframe with pred position only:\ndef make_chess(df_true, df_pred, print_ = False, mse_ = True, dfs_ = False):\n # new df instances\n clean_df_true = np.array(df_true.copy())\n clean_df_pred = np.array(df_pred.copy())\n count = 0\n \n for i in range(0, len(df_true[0])):\n for j in range(0, len(df_true[1])):\n if df_true[i][j] == 0:\n clean_df_pred[i][j] = 0\n clean_df_true[i][j] = 0\n count = count + 1\n\n if print_ == True:\n total_count_pred = (df_true.shape[0] * df_true.shape[1]) - count\n total_count_original = df_true.shape[0] * df_true.shape[1]\n print(\"Total de instancias no dataset original: \", total_count_original)\n print(\"Total de instancias zeradas: \", count)\n print(\"Total de predicoes em pred e val: \", total_count_pred)\n print(\"% de predicoes em pred e val em comparacao ao dataset original:\", round(((total_count_pred / total_count_original * 100)-100)*-1, 2),\"%\")\n \n if dfs_ == True:\n return clean_df_true, clean_df_pred\n\n if mse_ == True:\n from sklearn import metrics\n mse = metrics.mean_absolute_error(clean_df_true, clean_df_pred)\n return mse", "_____no_output_____" ] ], [ [ "### train_test_split:", "_____no_output_____" ] ], [ [ "def train_test_split(ratings, qtd):\n test = np.zeros(ratings.shape)\n train = ratings.copy()\n for user in range(ratings.shape[0]):\n test_ratings = np.random.choice(ratings[user, :].nonzero()[0], \n size=qtd, \n replace=False)\n train[user, test_ratings] = 0.\n test[user, test_ratings] = ratings[user, test_ratings]\n \n return train, test", "_____no_output_____" ] ], [ [ "### Data splitting:", "_____no_output_____" ] ], [ [ "R = getData()\nprint(R.shape)\nR.sample(3)", "(367, 80)\n" ], [ "ratings = R.fillna(0).values\nratings", "_____no_output_____" ], [ "train, test = train_test_split(ratings, qtd = 2)\ntrain, val = train_test_split(train, qtd = 2)", "_____no_output_____" ], [ "train", "_____no_output_____" ], [ "val", "_____no_output_____" ], [ "test", "_____no_output_____" ] ], [ [ "## Model training:", "_____no_output_____" ], [ "### Final Results / best params:", "_____no_output_____" ], [ "#### Params:", "_____no_output_____" ] ], [ [ "# Set 
de parametros:\nK = np.arange(1, 5, 1)\nsteps = np.arange(1, 5, 2)\nalpha = np.arange(0.0001, 0.0005, 0.0001)\nbeta = np.arange(0.0001, 0.0005, 0.0001)\ntotal_loops = len(K) * len(steps) * len(alpha) * len(beta)\nparam_grid = {}\nparam_grid = K, steps, alpha, beta\nprint(len(steps))\nprint(len(alpha))\nprint(len(beta))\nprint(total_loops)\nprint(param_grid)", "2\n4\n4\n128\n(array([1, 2, 3, 4]), array([1, 3]), array([0.0001, 0.0002, 0.0003, 0.0004]), array([0.0001, 0.0002, 0.0003, 0.0004]))\n" ] ], [ [ "#### Grid search run:", "_____no_output_____" ] ], [ [ "results = grid_search(train, val, param_grid)", "_____no_output_____" ] ], [ [ "## Best results:", "_____no_output_____" ] ], [ [ "results1 = results.copy()", "_____no_output_____" ], [ "results1 = pd.DataFrame(results1, columns=['params','mse'])", "_____no_output_____" ], [ "results1.sort_values(by='mse').head(10)", "_____no_output_____" ], [ "results2 = results1.sort_values(by='mse')", "_____no_output_____" ], [ "results2.mse[1]", "_____no_output_____" ], [ "fat = MatrixFactorization(pd.DataFrame(train), 1, 1, 0.0001, 0.0002)\nfat.fit()\npred_final = fat.predict()", "_____no_output_____" ], [ "final_mse = make_chess(test, pred_final, print_=True)", "Total de instancias no dataset original: 29360\nTotal de instancias zeradas: 6240\nTotal de predicoes em pred e val: 23120\n% de predicoes em pred e val em comparacao ao dataset original: 21.25 %\n" ] ], [ [ "## Final result:", "_____no_output_____" ] ], [ [ "results2.mse[1], final_mse", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ecb17d537db043881419515aa0091166de38554f
10,206
ipynb
Jupyter Notebook
lessons/swc-setdict/setdict-dict-learner.ipynb
leelasd/2014-07-08-stonybrook
0658e583fa8e59bac9162208524f19b24c21316a
[ "CC-BY-3.0" ]
4
2015-02-06T20:46:23.000Z
2021-11-26T08:27:12.000Z
lessons/swc-setdict/setdict-dict-learner.ipynb
geocarpentry/2014-01-21-erdc
a16757d0095faca247e110b9e876d834d6f0992d
[ "CC-BY-3.0" ]
1
2016-09-10T15:42:35.000Z
2016-09-10T15:42:35.000Z
lessons/swc-setdict/setdict-dict-learner.ipynb
geocarpentry/2014-01-21-erdc
a16757d0095faca247e110b9e876d834d6f0992d
[ "CC-BY-3.0" ]
2
2015-06-26T23:07:37.000Z
2016-03-01T15:20:22.000Z
23.901639
316
0.461885
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecb17f32449ace3c7477158ab7b3cb7551362f9d
88,226
ipynb
Jupyter Notebook
notebooks/basic-model/002-hiv-parameter-sweep.ipynb
mjbommar/cscs-530-w2016
beee8aacb0834f2f56612cf1b44e640e2635820e
[ "BSD-2-Clause" ]
18
2015-12-30T17:18:06.000Z
2022-02-09T16:00:49.000Z
notebooks/advanced-model/002-hiv-parameter-sweep.ipynb
mjbommar/cscs-530-w2016
beee8aacb0834f2f56612cf1b44e640e2635820e
[ "BSD-2-Clause" ]
null
null
null
notebooks/advanced-model/002-hiv-parameter-sweep.ipynb
mjbommar/cscs-530-w2016
beee8aacb0834f2f56612cf1b44e640e2635820e
[ "BSD-2-Clause" ]
9
2016-01-07T06:54:42.000Z
2018-10-18T07:47:46.000Z
60.51166
240
0.701777
[ [ [ "## CSCS530 Winter 2015\n#### Complex Systems 530 - Computer Modeling of Complex Systems (Winter 2015)\n\n * Course ID: CMPLXSYS 530\n * Course Title: Computer Modeling of Complex Systems\n * Term: Winter 2015\n * Schedule: Wednesdays and Friday, 1:00-2:30PM ET\n * Location: 120 West Hall (http://www.lsa.umich.edu/cscs/research/computerlab)\n * Teachers: [Mike Bommarito](https://www.linkedin.com/in/bommarito) and [Sarah Cherng](https://www.linkedin.com/pub/sarah-cherng/35/1b7/316)\n\n#### [View this repository on NBViewer](http://nbviewer.ipython.org/github/mjbommar/cscs-530-w2015/tree/master/)\n", "_____no_output_____" ], [ "## Basic ABM Parameter Sweep Experiment\n\n In this notebook, we'll take our simple HIV model from notebook ``001-hiv.ipynb`` and use it to perform a parameter sweep experiment.\n \n Specifically, we'll investigate how the level of institutional subsidy affects the percentage of population infected after 500 time steps.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\n# Standard imports\nimport copy\nimport itertools\n\n# Scientific computing imports\nimport numpy\nimport matplotlib.pyplot as plt\nimport networkx\nimport pandas\nimport seaborn; seaborn.set()\n\n# Import widget methods\nfrom IPython.html.widgets import *", ":0: FutureWarning: IPython widgets are experimental and may change in the future.\n" ] ], [ [ "## Define our Person and Model", "_____no_output_____" ] ], [ [ "class Person(object):\n \"\"\"\n Person class, which encapsulates the entire behavior of a person.\n \"\"\"\n \n def __init__(self, model, person_id, is_infected=False, condom_budget=1.0, prob_hookup=0.5):\n \"\"\"\n Constructor for Person class. By default,\n * not infected\n * will always buy condoms\n * will hookup 50% of the time\n \n Note that we must \"link\" the Person to their \"parent\" Model object.\n \"\"\"\n # Set model link and ID\n self.model = model\n self.person_id = person_id\n \n # Set Person parameters.\n self.is_infected = is_infected\n self.condom_budget = condom_budget\n self.prob_hookup = prob_hookup\n \n def decide_condom(self):\n \"\"\"\n Decide if we will use a condom.\n \"\"\"\n if self.condom_budget >= (self.model.condom_cost - self.model.condom_subsidy):\n return True\n else:\n return False\n \n def decide_hookup(self):\n \"\"\"\n Decide if we want to hookup with a potential partner.\n \"\"\"\n if numpy.random.random() <= self.prob_hookup:\n return True\n else:\n return False\n \n def get_position(self):\n \"\"\"\n Return position, calling through model.\n \"\"\"\n return self.model.get_person_position(self.person_id)\n \n def get_neighbors(self):\n \"\"\"\n Return neighbors, calling through model.\n \"\"\"\n return self.model.get_person_neighbors(self.person_id)\n\n def __repr__(self):\n '''\n Return string representation.\n '''\n skip_none = True\n repr_string = type(self).__name__ + \" [\"\n except_list = \"model\"\n\n elements = [e for e in dir(self) if str(e) not in except_list]\n for e in elements:\n # Make sure we only display \"public\" fields; skip anything private (_*), that is a method/function, or that is a module.\n if not e.startswith(\"_\") and eval('type(self.{0}).__name__'.format(e)) not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:\n value = eval(\"self.\" + e)\n if value != None and skip_none == True:\n repr_string += \"{0}={1}, \".format(e, value)\n\n # Clean up trailing space and comma.\n return repr_string.strip(\" \").strip(\",\") + \"]\"", "_____no_output_____" ], [ "class 
Model(object):\n \"\"\"\n Model class, which encapsulates the entire behavior of a single \"run\" in our HIV ABM.\n \"\"\"\n \n def __init__(self, grid_size, num_people, min_subsidy=0.0, max_subsidy=1.0,\n min_condom_budget=0.0, max_condom_budget=2.0,\n condom_cost=1.0, min_prob_hookup=0.0, max_prob_hookup=1.0,\n prob_transmit=0.9, prob_transmit_condom=0.1):\n \"\"\"\n Class constructor.\n \"\"\"\n # Set our model parameters; this is long but simple!\n self.grid_size = grid_size\n self.num_people = num_people\n self.min_subsidy = min_subsidy\n self.max_subsidy = max_subsidy\n self.min_condom_budget = min_condom_budget\n self.max_condom_budget = max_condom_budget\n self.condom_cost = condom_cost\n self.min_prob_hookup = min_prob_hookup\n self.max_prob_hookup = max_prob_hookup\n self.prob_transmit = prob_transmit\n self.prob_transmit_condom = prob_transmit_condom\n \n # Set our state variables\n self.t = 0\n self.space = numpy.array((0,0))\n self.condom_subsidy = 0.0\n self.people = []\n self.num_interactions = 0\n self.num_interactions_condoms = 0\n self.num_infected = 0\n \n # Setup our history variables.\n self.history_space = []\n self.history_space_infected = []\n self.history_interactions = []\n self.history_num_infected = []\n self.history_num_interactions = []\n self.history_num_interactions_condoms = []\n \n # Call our setup methods to initialize space, people, and institution.\n self.setup_space()\n self.setup_people()\n self.setup_institution()\n \n def setup_space(self):\n \"\"\"\n Method to setup our space.\n \"\"\"\n # Initialize a space with a NaN's\n self.space = numpy.full((self.grid_size, self.grid_size), numpy.nan)\n \n def setup_people(self):\n \"\"\"\n Method to setup our space.\n \"\"\"\n \n # First, begin by creating all agents without placing them.\n for i in xrange(self.num_people):\n self.people.append(Person(model=self,\n person_id=i,\n is_infected=False,\n condom_budget=numpy.random.uniform(self.min_condom_budget, self.max_condom_budget),\n prob_hookup=numpy.random.uniform(self.min_prob_hookup, self.max_prob_hookup)))\n \n # Second, once created, place them into the space.\n for person in self.people:\n # Loop until unique\n is_occupied = True\n while is_occupied:\n # Sample location\n random_x = numpy.random.randint(0, self.grid_size)\n random_y = numpy.random.randint(0, self.grid_size)\n \n # Check if unique\n if numpy.isnan(self.space[random_x, random_y]):\n is_occupied = False\n else:\n is_occupied = True\n \n # Now place the person there by setting their ID.\n self.space[random_x, random_y] = person.person_id\n \n # Third, pick one person to be infected initially.\n random_infected = numpy.random.choice(range(self.num_people))\n self.people[random_infected].is_infected = True\n self.num_infected += 1\n\n def setup_institution(self):\n \"\"\"\n Method to setup our space.\n \"\"\"\n # Randomly sample a subsidy level\n self.condom_subsidy = numpy.random.uniform(self.min_subsidy, self.max_subsidy)\n \n def get_neighborhood(self, x, y, distance=1):\n \"\"\"\n Get a Moore neighborhood of distance from (x, y).\n \"\"\"\n neighbor_pos = [ ( x % self.grid_size, y % self.grid_size)\n for x, y in itertools.product(xrange(x-distance, x+distance+1),\n xrange(y-distance, y+distance+1))]\n return neighbor_pos\n \n def get_neighbors(self, x, y, distance=1):\n \"\"\"\n Get any neighboring persons within distance from (x, y).\n \"\"\"\n neighbor_pos = self.get_neighborhood(x, y, distance)\n neighbor_list = []\n for pos in neighbor_pos:\n # Skip identity\n if pos[0] == x and 
pos[1] == y:\n continue\n \n # Check if empty\n if not numpy.isnan(self.space[pos[0], pos[1]]):\n neighbor_list.append(int(self.space[pos[0], pos[1]]))\n \n return neighbor_list\n \n def get_person_position(self, person_id):\n \"\"\"\n Get the position of a person based on their ID.\n \"\"\"\n # Find the value that matches our ID in self.space, then reshape to a 2-element list.\n return numpy.reshape(numpy.where(self.space == person_id), (1, 2))[0].tolist()\n\n def get_person_neighbors(self, person_id, distance=1):\n \"\"\"\n Get the position of a person based on their ID.\n \"\"\"\n # Find the value that matches our ID in self.space, then reshape to a 2-element list.\n x, y = self.get_person_position(person_id)\n return self.get_neighbors(x, y, distance) \n \n def move_person(self, person_id, x, y):\n \"\"\"\n Move a person to a new (x, y) location.\n \"\"\"\n \n # Get original\n original_position = self.get_person_position(person_id)\n \n # Check target location\n if not numpy.isnan(self.space[x, y]):\n raise ValueError(\"Unable to move person {0} to ({1}, {2}) since occupied.\".format(person_id, x, y))\n \n # Otherwise, move by emptying and setting.\n self.space[original_position[0], original_position[1]] = numpy.nan\n self.space[x, y] = person_id\n \n def step_move(self):\n \"\"\"\n Model step move function, which handles moving agents randomly around.\n \"\"\"\n \n # Get a random order for the agents.\n random_order = range(self.num_people)\n numpy.random.shuffle(random_order)\n \n # Iterate in random order.\n for i in random_order:\n # Get current position\n x, y = self.get_person_position(i)\n \n # Move our agent between -1, 0, +1 in each dimension\n x_new = (x + numpy.random.randint(-1, 2)) % self.grid_size\n y_new = (y + numpy.random.randint(-1, 2)) % self.grid_size\n \n # Try to move them\n try:\n self.move_person(i, x_new, y_new)\n except ValueError:\n # Occupied, so fail.\n pass\n \n def step_interact(self):\n \"\"\"\n \"Interact\" the agents by seeing if they will hookup and spread.\n \"\"\"\n \n # Get a random order for the agents.\n random_order = range(self.num_people)\n numpy.random.shuffle(random_order)\n \n # Track which pairs we've tested. Don't want to \"interact\" them twice w/in one step.\n seen_pairs = []\n \n # Iterate in random order.\n for i in random_order:\n # Get neighbors\n neighbors = self.get_person_neighbors(i)\n \n # Iterate over neighbors\n for neighbor in neighbors:\n # Check if we've already seen.\n a = min(i, neighbor)\n b = max(i, neighbor)\n if (a, b) not in seen_pairs:\n seen_pairs.append((a, b))\n else:\n continue\n \n # Check if hookup if not seen.\n hookup_a = self.people[a].decide_hookup()\n hookup_b = self.people[b].decide_hookup()\n if hookup_a and hookup_b:\n # Hookup going to happen. 
\n self.num_interactions += 1\n \n # Check now for condoms and use resulting rate.\n if self.people[a].decide_condom() or self.people[b].decide_condom():\n # Using a condom.\n self.num_interactions_condoms += 1\n use_condom = True\n \n if self.people[a].is_infected or self.people[b].is_infected:\n is_transmission = numpy.random.random() <= self.prob_transmit_condom\n else:\n is_transmission = False\n else:\n # Not using a condom.\n use_condom = False\n if self.people[a].is_infected or self.people[b].is_infected:\n is_transmission = numpy.random.random() <= self.prob_transmit\n else:\n is_transmission = False\n \n # Now infect.\n self.history_interactions.append((self.t, a, b, use_condom, is_transmission))\n if is_transmission:\n self.people[a].is_infected = True\n self.people[b].is_infected = True\n \n def get_num_infected(self):\n \"\"\"\n Get the number of infected persons.\n \"\"\"\n # Count\n infected = 0\n for person in self.people:\n if person.is_infected:\n infected += 1\n \n return infected\n \n def step(self):\n \"\"\"\n Model step function.\n \"\"\"\n \n # \"Interact\" agents.\n self.step_interact()\n \n # Move agents\n self.step_move()\n \n # Increment steps and track history.\n self.t += 1\n self.history_space.append(copy.deepcopy(self.space))\n self.history_space_infected.append(self.get_space_infected())\n self.num_infected = self.get_num_infected()\n self.history_num_infected.append(self.num_infected)\n self.history_num_interactions.append(self.num_interactions)\n self.history_num_interactions_condoms.append(self.num_interactions_condoms)\n\n def get_space_infected(self, t=None):\n \"\"\"\n Return a projection of the space that shows which cells have an infected person.\n \"\"\"\n if t == None:\n # Initialize empty\n infected_space = numpy.zeros_like(self.space)\n \n # Iterate over persons and set.\n for p in self.people:\n x, y = self.get_person_position(p.person_id)\n if p.is_infected:\n infected_space[x, y] = +1\n else:\n infected_space[x, y] = -1\n \n # Return\n return infected_space\n else:\n # Return historical step\n return self.history_space_infected[t]\n\n def __repr__(self):\n '''\n Return string representation.\n '''\n skip_none = True\n repr_string = type(self).__name__ + \" [\"\n\n elements = dir(self)\n for e in elements:\n # Make sure we only display \"public\" fields; skip anything private (_*), that is a method/function, or that is a module.\n e_type = eval('type(self.{0}).__name__'.format(e))\n if not e.startswith(\"_\") and e_type not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:\n value = eval(\"self.\" + e)\n if value != None and skip_none == True:\n if e_type in ['list', 'set', 'tuple']:\n repr_string += \"\\n\\n\\t{0}={1},\\n\\n\".format(e, value)\n elif e_type in ['ndarray']:\n repr_string += \"\\n\\n\\t{0}=\\t\\n{1},\\n\\n\".format(e, value)\n else:\n repr_string += \"{0}={1}, \".format(e, value)\n\n # Clean up trailing space and comma.\n return repr_string.strip(\" \").strip(\",\") + \"]\"", "_____no_output_____" ] ], [ [ "## Setup our parameter sweep", "_____no_output_____" ] ], [ [ "# Set number of samples per value and steps per sample\nnum_samples = 20\nnum_steps = 100\n\n# Set basic model parameters\ngrid_size = 20\nnum_people = 25\n\n# Set subsidy values to \"sweep\" over\nsubsidy_sweep_values = [0.0, 0.2, 0.4, \n 0.6, 0.8, 1.0]\nsubsidy_sweep_output = []\n\n# Iterate over values\nfor subsidy_value in subsidy_sweep_values:\n print(\"Running {0} samples for sweep value {1}\"\\\n 
.format(num_samples, subsidy_value))\n for n in xrange(num_samples):\n # Output info\n m = Model(grid_size=grid_size,\n num_people=num_people,\n min_condom_budget=0.0,\n max_condom_budget=1.0,\n min_subsidy=subsidy_value,\n max_subsidy=subsidy_value)\n for t in xrange(num_steps):\n m.step()\n subsidy_sweep_output.append((subsidy_value, n, m.num_infected,\n m.num_interactions,\n m.num_interactions_condoms))", "Running 20 samples for sweep value 0.0\nRunning 20 samples for sweep value 0.2\nRunning 20 samples for sweep value 0.4\nRunning 20 samples for sweep value 0.6\nRunning 20 samples for sweep value 0.8\nRunning 20 samples for sweep value 1.0\n" ] ], [ [ "## Pandas DataFrame\n\n Once we've created our list of model samples in ``subsidy_sweep_output``, we'll convert this list into a pandas DataFrame. This format will allow us to easily perform \"group-by\" calculations and other spreadsheet-like operations.", "_____no_output_____" ] ], [ [ "# Setup the pandas DataFrame\nsubsidy_sweep_df = pandas.DataFrame(subsidy_sweep_output,\n columns=[\"subsidy\", \"sample_number\", \"num_infected\", \n \"num_interactions\", \"num_interactions_condoms\"])\nsubsidy_sweep_df[\"percent_condom\"] = subsidy_sweep_df[\"num_interactions_condoms\"].astype(float) / subsidy_sweep_df[\"num_interactions\"]\nsubsidy_sweep_df.head()", "_____no_output_____" ] ], [ [ "## Average run outputs by parameter value\n\n In the code below, we use the pandas __group-by__ logic to calculate the mean and standard deviation sample values for a fixed subsidy value.\n \n [Read the pandas split-apply-combine tutorial](http://pandas.pydata.org/pandas-docs/stable/groupby.html) to get a better idea of what we're doing.", "_____no_output_____" ] ], [ [ "# Get means and std. dev.'s\nsweep_means = subsidy_sweep_df.groupby(\"subsidy\")[\"num_infected\"].mean()\nsweep_std = subsidy_sweep_df.groupby(\"subsidy\")[\"num_infected\"].std()\n\n# % of interactions\nsweep_pct_means = subsidy_sweep_df.groupby(\"subsidy\")[\"percent_condom\"].mean()\nsweep_pct_std = subsidy_sweep_df.groupby(\"subsidy\")[\"percent_condom\"].std()\n\npandas.DataFrame([sweep_means, sweep_std])", "_____no_output_____" ] ], [ [ "## Plotting\n\n Finally, we'll plot our means with error bars as a function of subsidy value.", "_____no_output_____" ] ], [ [ "# Create figure\nf = plt.figure(figsize=(8,6))\n_ = plt.errorbar(sweep_means.index, sweep_means, 2 * sweep_std)\n_ = plt.title(\"Effect of condom subsidy on number of infected\")\n_ = plt.xlabel(\"Subsidy level\")\n_ = plt.ylabel(\"Number of infected at $t=100$ ($2 \\sigma$ errors)\")\n\n# Create figure\nf = plt.figure(figsize=(8,6))\n_ = plt.errorbar(sweep_pct_means.index, sweep_pct_means, 2 * sweep_pct_std)\n_ = plt.title(\"Effect of condom subsidy on percent of protected interactions\")\n_ = plt.xlabel(\"Subsidy level\")\n_ = plt.ylabel(\"Percent of interactions protected at $t=100$ ($2 \\sigma$ errors)\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb1829dd2b87f13f43e6d788129e2827aa7b02c
232,533
ipynb
Jupyter Notebook
lectures/tutorial-week3-graphs_full.ipynb
s183910/comsocsci2021
d8749ebbf0bd9728d6599137de9544800b531153
[ "MIT" ]
null
null
null
lectures/tutorial-week3-graphs_full.ipynb
s183910/comsocsci2021
d8749ebbf0bd9728d6599137de9544800b531153
[ "MIT" ]
null
null
null
lectures/tutorial-week3-graphs_full.ipynb
s183910/comsocsci2021
d8749ebbf0bd9728d6599137de9544800b531153
[ "MIT" ]
null
null
null
198.57643
38,832
0.910426
[ [ [ "## Tutorial\n\nThis guide can help you start working with NetworkX.\n\n### Creating a graph\n\nCreate an empty graph with no nodes and no edges.", "_____no_output_____" ] ], [ [ "import networkx as nx\nG = nx.Graph()", "_____no_output_____" ] ], [ [ "By definition, a `Graph` is a collection of nodes (vertices) along with\nidentified pairs of nodes (called edges, links, etc). In NetworkX, nodes can\nbe any [hashable](https://docs.python.org/3/glossary.html#term-hashable) object e.g., a text string, an image, an XML object,\nanother Graph, a customized node object, etc.\n\n# Nodes\n\nThe graph `G` can be grown in several ways. NetworkX includes many graph\ngenerator functions and facilities to read and write graphs in many formats.\nTo get started though we’ll look at simple manipulations. You can add one node\nat a time,", "_____no_output_____" ] ], [ [ "G.add_node(1)", "_____no_output_____" ] ], [ [ "or add nodes from any [iterable](https://docs.python.org/3/glossary.html#term-iterable) container, such as a list", "_____no_output_____" ] ], [ [ "G.add_nodes_from([2, 3])", "_____no_output_____" ] ], [ [ "You can also add nodes along with node\nattributes if your container yields 2-tuples of the form\n`(node, node_attribute_dict)`:\n\n```\n>>> G.add_nodes_from([\n... (4, {\"color\": \"red\"}),\n... (5, {\"color\": \"green\"}),\n... ])\n```\n\nNode attributes are discussed further below.\n\nNodes from one graph can be incorporated into another:", "_____no_output_____" ] ], [ [ "H = nx.path_graph(10)\nG.add_nodes_from(H)", "_____no_output_____" ] ], [ [ "`G` now contains the nodes of `H` as nodes of `G`.\nIn contrast, you could use the graph `H` as a node in `G`.", "_____no_output_____" ] ], [ [ "G.add_node(H)", "_____no_output_____" ] ], [ [ "The graph `G` now contains `H` as a node. This flexibility is very powerful as\nit allows graphs of graphs, graphs of files, graphs of functions and much more.\nIt is worth thinking about how to structure your application so that the nodes\nare useful entities. Of course you can always use a unique identifier in `G`\nand have a separate dictionary keyed by identifier to the node information if\nyou prefer.\n\n# Edges\n\n`G` can also be grown by adding one edge at a time,", "_____no_output_____" ] ], [ [ "G.add_edge(1, 2)\ne = (2, 3)\nG.add_edge(*e) # unpack edge tuple*", "_____no_output_____" ] ], [ [ "by adding a list of edges,", "_____no_output_____" ] ], [ [ "G.add_edges_from([(1, 2), (1, 3)])", "_____no_output_____" ] ], [ [ "or by adding any ebunch of edges. An *ebunch* is any iterable\ncontainer of edge-tuples. An edge-tuple can be a 2-tuple of nodes or a 3-tuple\nwith 2 nodes followed by an edge attribute dictionary, e.g.,\n`(2, 3, {'weight': 3.1415})`. Edge attributes are discussed further\nbelow.", "_____no_output_____" ] ], [ [ "G.add_edges_from(H.edges)", "_____no_output_____" ] ], [ [ "There are no complaints when adding existing nodes or edges. 
For example,\nafter removing all nodes and edges,", "_____no_output_____" ] ], [ [ "G.clear()", "_____no_output_____" ] ], [ [ "we add new nodes/edges and NetworkX quietly ignores any that are\nalready present.", "_____no_output_____" ] ], [ [ "G.add_edges_from([(1, 2), (1, 3)])\nG.add_node(1)\nG.add_edge(1, 2)\nG.add_node(\"spam\") # adds node \"spam\"\nG.add_nodes_from(\"spam\") # adds 4 nodes: 's', 'p', 'a', 'm'\nG.add_edge(3, 'm')", "_____no_output_____" ] ], [ [ "At this stage the graph `G` consists of 8 nodes and 3 edges, as can be seen by:", "_____no_output_____" ] ], [ [ "G.number_of_nodes()\nG.number_of_edges()", "_____no_output_____" ] ], [ [ "# Examining elements of a graph\n\nWe can examine the nodes and edges. Four basic graph properties facilitate\nreporting: `G.nodes`, `G.edges`, `G.adj` and `G.degree`. These\nare set-like views of the nodes, edges, neighbors (adjacencies), and degrees\nof nodes in a graph. They offer a continually updated read-only view into\nthe graph structure. They are also dict-like in that you can look up node\nand edge data attributes via the views and iterate with data attributes\nusing methods `.items()`, `.data('span')`.\nIf you want a specific container type instead of a view, you can specify one.\nHere we use lists, though sets, dicts, tuples and other containers may be\nbetter in other contexts.", "_____no_output_____" ] ], [ [ "list(G.nodes)\nlist(G.edges)\nlist(G.adj[1]) # or list(G.neighbors(1))\nG.degree[1] # the number of edges incident to 1", "_____no_output_____" ] ], [ [ "One can specify to report the edges and degree from a subset of all nodes\nusing an nbunch. An *nbunch* is any of: `None` (meaning all nodes),\na node, or an iterable container of nodes that is not itself a node in the\ngraph.", "_____no_output_____" ] ], [ [ "G.edges([2, 'm'])\nG.degree([2, 3])", "_____no_output_____" ] ], [ [ "# Removing elements from a graph\n\nOne can remove nodes and edges from the graph in a similar fashion to adding.\nUse methods\n`Graph.remove_node()`,\n`Graph.remove_nodes_from()`,\n`Graph.remove_edge()`\nand\n`Graph.remove_edges_from()`, e.g.", "_____no_output_____" ] ], [ [ "G.remove_node(2)\nG.remove_nodes_from(\"spam\")\nlist(G.nodes)\nG.remove_edge(1, 3)", "_____no_output_____" ] ], [ [ "# Using the graph constructors\n\nGraph objects do not have to be built up incrementally - data specifying\ngraph structure can be passed directly to the constructors of the various\ngraph classes.\nWhen creating a graph structure by instantiating one of the graph\nclasses you can specify data in several formats.", "_____no_output_____" ] ], [ [ "G.add_edge(1, 2)\nH = nx.DiGraph(G) # create a DiGraph using the connections from G\nlist(H.edges())\nedgelist = [(0, 1), (1, 2), (2, 3)]\nH = nx.Graph(edgelist)", "_____no_output_____" ] ], [ [ "# What to use as nodes and edges\n\nYou might notice that nodes and edges are not specified as NetworkX\nobjects. This leaves you free to use meaningful items as nodes and\nedges. 
The most common choices are numbers or strings, but a node can\nbe any hashable object (except `None`), and an edge can be associated\nwith any object `x` using `G.add_edge(n1, n2, object=x)`.\n\nAs an example, `n1` and `n2` could be protein objects from the RCSB Protein\nData Bank, and `x` could refer to an XML record of publications detailing\nexperimental observations of their interaction.\n\nWe have found this power quite useful, but its abuse\ncan lead to surprising behavior unless one is familiar with Python.\nIf in doubt, consider using `convert_node_labels_to_integers()` to obtain\na more traditional graph with integer labels.\n\n# Accessing edges and neighbors\n\nIn addition to the views `Graph.edges`, and `Graph.adj`,\naccess to edges and neighbors is possible using subscript notation.", "_____no_output_____" ] ], [ [ "G = nx.Graph([(1, 2, {\"color\": \"yellow\"})])\nG[1] # same as G.adj[1]\nG[1][2]\nG.edges[1, 2]", "_____no_output_____" ] ], [ [ "You can get/set the attributes of an edge using subscript notation\nif the edge already exists.", "_____no_output_____" ] ], [ [ "G.add_edge(1, 3)\nG[1][3]['color'] = \"blue\"\nG.edges[1, 2]['color'] = \"red\"\nG.edges[1, 2]\n\nnx.draw(G, with_labels=True, font_weight='bold')", "_____no_output_____" ] ], [ [ "Fast examination of all (node, adjacency) pairs is achieved using\n`G.adjacency()`, or `G.adj.items()`.\nNote that for undirected graphs, adjacency iteration sees each edge twice.", "_____no_output_____" ] ], [ [ "FG = nx.Graph()\nFG.add_weighted_edges_from([(1, 2, 0.125), (1, 3, 0.75), (2, 4, 1.2), (3, 4, 0.375)])\nfor n, nbrs in FG.adj.items():\n for nbr, eattr in nbrs.items():\n wt = eattr['weight']\n if wt < 0.5: print(f\"({n}, {nbr}, {wt:.3})\")", "(1, 2, 0.125)\n(2, 1, 0.125)\n(3, 4, 0.375)\n(4, 3, 0.375)\n" ], [ "nx.draw(FG, with_labels=True, font_weight='bold')", "_____no_output_____" ] ], [ [ "Convenient access to all edges is achieved with the edges property.", "_____no_output_____" ] ], [ [ "for (u, v, wt) in FG.edges.data('weight'):\n if wt < 0.5:\n print(f\"({u}, {v}, {wt:.3})\")", "(1, 2, 0.125)\n(3, 4, 0.375)\n" ] ], [ [ "# Adding attributes to graphs, nodes, and edges\n\nAttributes such as weights, labels, colors, or whatever Python object you like,\ncan be attached to graphs, nodes, or edges.\n\nEach graph, node, and edge can hold key/value attribute pairs in an associated\nattribute dictionary (the keys must be hashable). By default these are empty,\nbut attributes can be added or changed using `add_edge`, `add_node` or direct\nmanipulation of the attribute dictionaries named `G.graph`, `G.nodes`, and\n`G.edges` for a graph `G`.\n\n## Graph attributes\n\nAssign graph attributes when creating a new graph", "_____no_output_____" ] ], [ [ "G = nx.Graph(day=\"Friday\")\nG.graph", "_____no_output_____" ] ], [ [ "Or you can modify attributes later", "_____no_output_____" ] ], [ [ "G.graph['day'] = \"Monday\"\nG.graph", "_____no_output_____" ] ], [ [ "# Node attributes\n\nAdd node attributes using `add_node()`, `add_nodes_from()`, or `G.nodes`", "_____no_output_____" ] ], [ [ "G.add_node(1, time='5pm')\nG.add_nodes_from([3], time='2pm')\nG.nodes[1]\nG.nodes[1]['room'] = 714\nG.nodes.data()", "_____no_output_____" ] ], [ [ "Note that adding a node to `G.nodes` does not add it to the graph, use\n`G.add_node()` to add new nodes. 
Similarly for edges.\n\n# Edge Attributes\n\nAdd/change edge attributes using `add_edge()`, `add_edges_from()`,\nor subscript notation.", "_____no_output_____" ] ], [ [ "G.add_edge(1, 2, weight=4.7 )\nG.add_edges_from([(3, 4), (4, 5)], color='red')\nG.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})])\nG[1][2]['weight'] = 4.7\nG.edges[3, 4]['weight'] = 4.2", "_____no_output_____" ] ], [ [ "The special attribute `weight` should be numeric as it is used by\nalgorithms requiring weighted edges.\n\n Directed graphs\n\nThe `DiGraph` class provides additional methods and properties specific\nto directed edges, e.g.,\n`DiGraph.out_edges`, `DiGraph.in_degree`,\n`DiGraph.predecessors()`, `DiGraph.successors()` etc.\nTo allow algorithms to work with both classes easily, the directed versions of\n`neighbors()` is equivalent to `successors()` while `degree` reports\nthe sum of `in_degree` and `out_degree` even though that may feel\ninconsistent at times.", "_____no_output_____" ] ], [ [ "DG = nx.DiGraph()\nDG.add_weighted_edges_from([(1, 2, 0.5), (3, 1, 0.75)])\nDG.out_degree(1, weight='weight')\nDG.degree(1, weight='weight')\nlist(DG.successors(1))\nlist(DG.neighbors(1))\nnx.draw(DG, with_labels=True, font_weight='bold')", "_____no_output_____" ] ], [ [ "Some algorithms work only for directed graphs and others are not well\ndefined for directed graphs. Indeed the tendency to lump directed\nand undirected graphs together is dangerous. If you want to treat\na directed graph as undirected for some measurement you should probably\nconvert it using `Graph.to_undirected()` or with", "_____no_output_____" ] ], [ [ "H = nx.Graph(G) # create an undirected graph H from a directed graph G", "_____no_output_____" ] ], [ [ "# Multigraphs\n\nNetworkX provides classes for graphs which allow multiple edges\nbetween any pair of nodes. The `MultiGraph` and\n`MultiDiGraph`\nclasses allow you to add the same edge twice, possibly with different\nedge data. This can be powerful for some applications, but many\nalgorithms are not well defined on such graphs.\nWhere results are well defined,\ne.g., `MultiGraph.degree()` we provide the function. Otherwise you\nshould convert to a standard graph in a way that makes the measurement\nwell defined.", "_____no_output_____" ] ], [ [ "MG = nx.MultiGraph()\nMG.add_weighted_edges_from([(1, 2, 0.5), (1, 2, 0.75), (2, 3, 0.5)])\ndict(MG.degree(weight='weight'))\nGG = nx.Graph()\nfor n, nbrs in MG.adjacency():\n for nbr, edict in nbrs.items():\n minvalue = min([d['weight'] for d in edict.values()])\n GG.add_edge(n, nbr, weight = minvalue)\n\nnx.shortest_path(GG, 1, 3)", "_____no_output_____" ] ], [ [ "# Graph generators and graph operations\n\nIn addition to constructing graphs node-by-node or edge-by-edge, they\ncan also be generated by\n\n1. Applying classic graph operations, such as:\n\n1. Using a call to one of the classic small graphs, e.g.,\n\n1. Using a (constructive) generator for a classic graph, e.g.,\n\nlike so:", "_____no_output_____" ] ], [ [ "K_5 = nx.complete_graph(5)\nK_3_5 = nx.complete_bipartite_graph(3, 5)\nbarbell = nx.barbell_graph(10, 10)\nlollipop = nx.lollipop_graph(10, 20)", "_____no_output_____" ] ], [ [ "1. Using a stochastic graph generator, e.g,\n\nlike so:", "_____no_output_____" ] ], [ [ "er = nx.erdos_renyi_graph(100, 0.15)\nws = nx.watts_strogatz_graph(30, 3, 0.1)\nba = nx.barabasi_albert_graph(100, 5)\nred = nx.random_lobster(100, 0.9, 0.9)", "_____no_output_____" ] ], [ [ "1. 
Reading a graph stored in a file using common graph formats,\n such as edge lists, adjacency lists, GML, GraphML, pickle, LEDA and others.", "_____no_output_____" ] ], [ [ "nx.write_gml(red, \"path.to.file\")\nmygraph = nx.read_gml(\"path.to.file\")", "_____no_output_____" ] ], [ [ "For details on graph formats see Reading and writing graphs\nand for graph generator functions see Graph generators\n\n# Analyzing graphs\n\nThe structure of `G` can be analyzed using various graph-theoretic\nfunctions such as:", "_____no_output_____" ] ], [ [ "G = nx.Graph()\nG.add_edges_from([(1, 2), (1, 3)])\nG.add_node(\"spam\") # adds node \"spam\"\nlist(nx.connected_components(G))\nsorted(d for n, d in G.degree())\nnx.clustering(G)", "_____no_output_____" ] ], [ [ "Some functions with large output iterate over (node, value) 2-tuples.\nThese are easily stored in a [dict](https://docs.python.org/3/library/stdtypes.html#dict) structure if you desire.", "_____no_output_____" ] ], [ [ "sp = dict(nx.all_pairs_shortest_path(G))\nsp[3]", "_____no_output_____" ] ], [ [ "See Algorithms for details on graph algorithms\nsupported.\n\n# Drawing graphs\n\nNetworkX is not primarily a graph drawing package but basic drawing with\nMatplotlib as well as an interface to use the open source Graphviz software\npackage are included. These are part of the `networkx.drawing` module and will\nbe imported if possible.\n\nFirst import Matplotlib’s plot interface (pylab works too)", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "To test if the import of `networkx.drawing` was successful draw `G` using one of", "_____no_output_____" ] ], [ [ "G = nx.petersen_graph()\nplt.subplot(121)\nnx.draw(G, with_labels=True, font_weight='bold')\nplt.subplot(122)\nnx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold')", "C:\\Users\\bayka\\Anaconda3\\lib\\site-packages\\networkx\\drawing\\nx_pylab.py:611: MatplotlibDeprecationWarning: isinstance(..., numbers.Number)\n if cb.is_numlike(alpha):\n" ] ], [ [ "when drawing to an interactive display. Note that you may need to issue a\nMatplotlib", "_____no_output_____" ] ], [ [ "plt.show()", "_____no_output_____" ] ], [ [ "command if you are not using matplotlib in interactive mode (see\n[Matplotlib FAQ](http://matplotlib.org/faq/installing_faq.html#matplotlib-compiled-fine-but-nothing-shows-up-when-i-use-it)\n).", "_____no_output_____" ] ], [ [ "options = {\n 'node_color': 'black',\n 'node_size': 100,\n 'width': 3,\n}\nplt.subplot(221)\nnx.draw_random(G, **options)\nplt.subplot(222)\nnx.draw_circular(G, **options)\nplt.subplot(223)\nnx.draw_spectral(G, **options)\nplt.subplot(224)\nnx.draw_shell(G, nlist=[range(5,10), range(5)], **options)", "_____no_output_____" ] ], [ [ "You can find additional options via `draw_networkx()` and\nlayouts via `layout`.\nYou can use multiple shells with `draw_shell()`.", "_____no_output_____" ] ], [ [ "G = nx.dodecahedral_graph()\nshells = [[2, 3, 4, 5, 6], [8, 1, 0, 19, 18, 17, 16, 15, 14, 7], [9, 10, 11, 12, 13]]\nnx.draw_shell(G, nlist=shells, **options)", "_____no_output_____" ] ], [ [ "To save drawings to a file, use, for example", "_____no_output_____" ] ], [ [ "nx.draw(G)\nplt.savefig(\"path.png\")", "_____no_output_____" ] ], [ [ "writes to the file `path.png` in the local directory. 
If Graphviz and\neither PyGraphviz or pydot are available on your system, you can also use\n`nx_agraph.graphviz_layout(G)` or `nx_pydot.graphviz_layout(G)` to get the\nnode positions, or write the graph in dot format for further processing.", "_____no_output_____" ] ], [ [ "# !pip install pygraphviz\n", "_____no_output_____" ], [ "from networkx.drawing.nx_pydot import write_dot\npos = nx.nx_agraph.graphviz_layout(G)\nnx.draw(G, pos=pos)\nwrite_dot(G, 'file.dot')", "_____no_output_____" ] ], [ [ "See Drawing for additional details.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ecb18cf760e605ecf43608f78b3f36d711a13d53
36,258
ipynb
Jupyter Notebook
notebooks/externals/kaggle/neural-network-model-for-house-prices-tensorflow.ipynb
gokhankesler/tensorflow-archive
0625c12e3b63bedcad441a503a342697edb40c78
[ "MIT" ]
2
2020-01-25T08:31:14.000Z
2022-03-23T18:24:03.000Z
notebooks/externals/kaggle/neural-network-model-for-house-prices-tensorflow.ipynb
gokhankesler/tensorflow-archive
0625c12e3b63bedcad441a503a342697edb40c78
[ "MIT" ]
null
null
null
notebooks/externals/kaggle/neural-network-model-for-house-prices-tensorflow.ipynb
gokhankesler/tensorflow-archive
0625c12e3b63bedcad441a503a342697edb40c78
[ "MIT" ]
null
null
null
36,258
36,258
0.757874
[ [ [ "# <center> A Neural Network Model for House Prices", "_____no_output_____" ], [ "The purpose of this notebook is to build a model (Deep Neural Network) with Tensorflow. We will see the differents steps to do that. This notebook is split in several parts:\n\n- I. Importation & Devices Available\n- II. Outliers\n- III. Preprocessing\n- IV. DNNRegressor for Contiunuous features\n- V. Predictions\n- VI. Example with Leaky Relu\n- VII. DNNRegressor for Continuous and Categorial\n- VIII. Predictions bis\n- IX. Shallow Neural Network\n- X. Conclusion\n\nWe will expose 3 models. The first one will use just the continuous features, the second one we will add the categorical features and finally we will use a Neural Network with just one layer.\n\nTheey are no tuning and we will use DNNRegressor with Relu for all activations functions and the number of units by layer are: [200, 100, 50, 25, 12]. So we have 5 layers.\n\nIn the part VI I show how to use another activation function with the example of Leaky Relu. Finally I will try to use a Shallow Neural Network (just with one Hidden Layer) just for fun.\n\nIf you have an idea to improve the performance of the model: Share it ! Fork it ! And play with it !", "_____no_output_____" ], [ "# <center> I. Importation & Devices Available", "_____no_output_____" ], [ "Before the importation I prefer to check the devices available. Sometimes we can have a problems with your GPU for example. And if you want to have a good performance you must do use GPU and not CPU. In our example we have just a CPU but now you have the code to check if your devices is detected.", "_____no_output_____" ] ], [ [ "import os\nimport tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = \"99\"", "_____no_output_____" ], [ "from tensorflow.python.client import device_lib\ndevice_lib.list_local_devices()", "_____no_output_____" ] ], [ [ "Now that we have checked the devices available we will test them wth a simple computation. Here we have an example with the computation on the CPU. But you can split the computation on your gpu with '/gpu:0'. If you want more GPU you can do 'with tf.device('/gpu:1'): ', 'with tf.device('/gpu:2'): ' etc...\n\nIn our example I display the log information.", "_____no_output_____" ] ], [ [ "# Test with a simple computation\nimport tensorflow as tf\n\ntf.Session()\n\nwith tf.device('/cpu:0'):\n a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])\n# If you have gpu you can try this line to compute b with your GPU\n#with tf.device('/gpu:0'): \n b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2])\nc = tf.matmul(a, b)\n# Creates a session with log_device_placement set to True.\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n\nprint(sess.run(c))\n\n# Runs the op.\n# Log information\noptions = tf.RunOptions(output_partition_graphs=True)\nmetadata = tf.RunMetadata()\nc_val = sess.run(c, options=options, run_metadata=metadata)\n\nprint(metadata.partition_graphs)\n\nsess.close()", "_____no_output_____" ] ], [ [ "In this tutorial our data is composed to 1460 row with 81 features. 38 continuous features and 43 categorical features. As exposed in the introduction we will use onlly the continuous features to build our first model.\n\nHere the objective is to predict the House Prices. In this case we have a regression model to build.\nSo our first data we will contain 37 features to explain the 'SalePrice'. 
We can see the list of features that we will use to build our first model.", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport matplotlib\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\ntf.logging.set_verbosity(tf.logging.INFO)\nsess = tf.InteractiveSession()\n\ntrain = pd.read_csv('../input/train.csv')\nprint('Shape of the train data with all features:', train.shape)\ntrain = train.select_dtypes(exclude=['object'])\nprint(\"\")\nprint('Shape of the train data with numerical features:', train.shape)\ntrain.drop('Id',axis = 1, inplace = True)\ntrain.fillna(0,inplace=True)\n\ntest = pd.read_csv('../input/test.csv')\ntest = test.select_dtypes(exclude=['object'])\nID = test.Id\ntest.fillna(0,inplace=True)\ntest.drop('Id',axis = 1, inplace = True)\n\nprint(\"\")\nprint(\"List of features contained our dataset:\",list(train.columns))", "_____no_output_____" ] ], [ [ "# <center> II. Outliers", "_____no_output_____" ], [ "In this small part we will isolate the outliers with an IsolationForest (http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html). I tried with and without this step and I had a better performance removing these rows.\n\nI haven't analysed the test set but I suppose that our train set looks like more at our data test without these outliers.\n", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import IsolationForest\n\nclf = IsolationForest(max_samples = 100, random_state = 42)\nclf.fit(train)\ny_noano = clf.predict(train)\ny_noano = pd.DataFrame(y_noano, columns = ['Top'])\ny_noano[y_noano['Top'] == 1].index.values\n\ntrain = train.iloc[y_noano[y_noano['Top'] == 1].index.values]\ntrain.reset_index(drop = True, inplace = True)\nprint(\"Number of Outliers:\", y_noano[y_noano['Top'] == -1].shape[0])\nprint(\"Number of rows without outliers:\", train.shape[0])", "_____no_output_____" ], [ "train.head(10)", "_____no_output_____" ] ], [ [ "# <center> III. Preprocessing", "_____no_output_____" ], [ "To rescale our data we will use the fonction MinMaxScaler of Scikit-learn. I am wondering if it is not interesting to use the same MinMaxScaler for Train and Test !", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')\n\ncol_train = list(train.columns)\ncol_train_bis = list(train.columns)\n\ncol_train_bis.remove('SalePrice')\n\nmat_train = np.matrix(train)\nmat_test = np.matrix(test)\nmat_new = np.matrix(train.drop('SalePrice',axis = 1))\nmat_y = np.array(train.SalePrice).reshape((1314,1))\n\nprepro_y = MinMaxScaler()\nprepro_y.fit(mat_y)\n\nprepro = MinMaxScaler()\nprepro.fit(mat_train)\n\nprepro_test = MinMaxScaler()\nprepro_test.fit(mat_new)\n\ntrain = pd.DataFrame(prepro.transform(mat_train),columns = col_train)\ntest = pd.DataFrame(prepro_test.transform(mat_test),columns = col_train_bis)\n\ntrain.head()", "_____no_output_____" ] ], [ [ "To use Tensorflow we need to transform our data (features) in a special format. As a reminder we have just the continuous features. 
So the first function used is: tf.contrib.layers.real_valued_column.\nMore information here: https://www.tensorflow.org/api_docs/python/tf/contrib/layers/real_valued_column\n\nThe other cells allow us to create a train set and a test set from our training data set.", "_____no_output_____" ] ], [ [ "# List of features\nCOLUMNS = col_train\nFEATURES = col_train_bis\nLABEL = \"SalePrice\"\n\n# Columns for tensorflow\nfeature_cols = [tf.contrib.layers.real_valued_column(k) for k in FEATURES]\n\n# Training set and Prediction set with the features to predict\ntraining_set = train[COLUMNS]\nprediction_set = train.SalePrice\n\n# Train and Test \nx_train, x_test, y_train, y_test = train_test_split(training_set[FEATURES] , prediction_set, test_size=0.33, random_state=42)\ny_train = pd.DataFrame(y_train, columns = [LABEL])\ntraining_set = pd.DataFrame(x_train, columns = FEATURES).merge(y_train, left_index = True, right_index = True)\ntraining_set.head()\n\n# Training for submission\ntraining_sub = training_set[col_train]", "_____no_output_____" ], [ "# Same thing but for the test set\ny_test = pd.DataFrame(y_test, columns = [LABEL])\ntesting_set = pd.DataFrame(x_test, columns = FEATURES).merge(y_test, left_index = True, right_index = True)\ntesting_set.head()", "_____no_output_____" ] ], [ [ "# <center> IV. Deep Neural Network for continuous features", "_____no_output_____" ], [ "With tf.contrib.learn it is very easy to implement a Deep Neural Network. In our example we will have 5 hidden layers with respectively 200, 100, 50, 25 and 12 units and the activation function will be Relu.\n\nThe optimizer used in our case is an Adagrad optimizer (by default).", "_____no_output_____" ] ], [ [ "# Model\ntf.logging.set_verbosity(tf.logging.ERROR)\nregressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols, \n                                          activation_fn = tf.nn.relu, hidden_units=[200, 100, 50, 25, 12])#,\n                                          #optimizer = tf.train.GradientDescentOptimizer( learning_rate= 0.1 ))", "_____no_output_____" ], [ "# Reset the index of training\ntraining_set.reset_index(drop = True, inplace =True)", "_____no_output_____" ], [ "def input_fn(data_set, pred = False):\n    \n    if pred == False:\n        \n        feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}\n        labels = tf.constant(data_set[LABEL].values)\n        \n        return feature_cols, labels\n\n    if pred == True:\n        feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}\n        \n        return feature_cols", "_____no_output_____" ], [ "# Deep Neural Network Regressor with the training set which contains the data split by train test split\nregressor.fit(input_fn=lambda: input_fn(training_set), steps=2000)", "_____no_output_____" ], [ "# Evaluation on the test set created by train_test_split\nev = regressor.evaluate(input_fn=lambda: input_fn(testing_set), steps=1)", "_____no_output_____" ], [ "# Display the score on the testing set\n# 0.002X in average\nloss_score1 = ev[\"loss\"]\nprint(\"Final Loss on the testing set: {0:f}\".format(loss_score1))", "_____no_output_____" ], [ "# Predictions\ny = regressor.predict(input_fn=lambda: input_fn(testing_set))\npredictions = list(itertools.islice(y, testing_set.shape[0]))\n", "_____no_output_____" ] ], [ [ "# <center> V. Predictions and submission", "_____no_output_____" ], [ "Let's prepare our first submission ! Data preprocessed: checked ! Outliers excluded: checked ! Model built: checked ! Next step: use our model to make the predictions on the test data set. 
And add one graphic to see the difference between the reality and the predictions.", "_____no_output_____" ] ], [ [ "predictions = pd.DataFrame(prepro_y.inverse_transform(np.array(predictions).reshape(434,1)),columns = ['Prediction'])", "_____no_output_____" ], [ "reality = pd.DataFrame(prepro.inverse_transform(testing_set), columns = [COLUMNS]).SalePrice", "_____no_output_____" ], [ "matplotlib.rc('xtick', labelsize=30) \nmatplotlib.rc('ytick', labelsize=30) \n\nfig, ax = plt.subplots(figsize=(50, 40))\n\nplt.style.use('ggplot')\nplt.plot(predictions.values, reality.values, 'ro')\nplt.xlabel('Predictions', fontsize = 30)\nplt.ylabel('Reality', fontsize = 30)\nplt.title('Predictions x Reality on dataset Test', fontsize = 30)\nax.plot([reality.min(), reality.max()], [reality.min(), reality.max()], 'k--', lw=4)\nplt.show()", "_____no_output_____" ], [ "y_predict = regressor.predict(input_fn=lambda: input_fn(test, pred = True))\n\ndef to_submit(pred_y,name_out):\n y_predict = list(itertools.islice(pred_y, test.shape[0]))\n y_predict = pd.DataFrame(prepro_y.inverse_transform(np.array(y_predict).reshape(len(y_predict),1)), columns = ['SalePrice'])\n y_predict = y_predict.join(ID)\n y_predict.to_csv(name_out + '.csv',index=False)\n \nto_submit(y_predict, \"submission_continuous\")", "_____no_output_____" ] ], [ [ "# <center> VI. Leaky Relu", "_____no_output_____" ], [ "An example with another activation function: Leaky Relu ! We can create this new function with Relu. As a reminder Relu is Max(x,0) and Leaky Relu is the function Max(x, delta*x). In our case we can take delta = 0.01", "_____no_output_____" ] ], [ [ "def leaky_relu(x):\n return tf.nn.relu(x) - 0.01 * tf.nn.relu(-x)", "_____no_output_____" ], [ "# Model\nregressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols, \n activation_fn = leaky_relu, hidden_units=[200, 100, 50, 25, 12])\n \n# Deep Neural Network Regressor with the training set which contain the data split by train test split\nregressor.fit(input_fn=lambda: input_fn(training_set), steps=2000)\n\n# Evaluation on the test set created by train_test_split\nev = regressor.evaluate(input_fn=lambda: input_fn(testing_set), steps=1)", "_____no_output_____" ], [ "# Display the score on the testing set\n# 0.002X in average\nloss_score2 = ev[\"loss\"]\nprint(\"Final Loss on the testing set with Leaky Relu: {0:f}\".format(loss_score2))", "_____no_output_____" ], [ "# Predictions\ny_predict = regressor.predict(input_fn=lambda: input_fn(test, pred = True))\nto_submit(y_predict, \"Leaky_relu\")", "_____no_output_____" ], [ "# Model\nregressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols, \n activation_fn = tf.nn.elu, hidden_units=[200, 100, 50, 25, 12])\n \n# Deep Neural Network Regressor with the training set which contain the data split by train test split\nregressor.fit(input_fn=lambda: input_fn(training_set), steps=2000)\n\n# Evaluation on the test set created by train_test_split\nev = regressor.evaluate(input_fn=lambda: input_fn(testing_set), steps=1)\n\nloss_score3 = ev[\"loss\"]", "_____no_output_____" ], [ "print(\"Final Loss on the testing set with Elu: {0:f}\".format(loss_score3))", "_____no_output_____" ], [ "# Predictions\ny_predict = regressor.predict(input_fn=lambda: input_fn(test, pred = True))\nto_submit(y_predict, \"Elu\")", "_____no_output_____" ] ], [ [ "So we have 3 submissions with 3 differents activation functions. But we built ours models just with the continuous features. 
If you want to compare the performance, download the 3 submissions and submit them to the leaderboard. \nNow we see how to build another model by adding the categorical features.", "_____no_output_____" ], [ "# <center> VII. Deep Neural Network for continuous and categorical features", "_____no_output_____" ], [ "For this part I repeat the same functions that you can find previously, adding the categorical features.\n", "_____no_output_____" ] ], [ [ "# Import and split\ntrain = pd.read_csv('../input/train.csv')\ntrain.drop('Id',axis = 1, inplace = True)\ntrain_numerical = train.select_dtypes(exclude=['object'])\ntrain_numerical.fillna(0,inplace = True)\ntrain_categoric = train.select_dtypes(include=['object'])\ntrain_categoric.fillna('NONE',inplace = True)\ntrain = train_numerical.merge(train_categoric, left_index = True, right_index = True) \n\ntest = pd.read_csv('../input/test.csv')\nID = test.Id\ntest.drop('Id',axis = 1, inplace = True)\ntest_numerical = test.select_dtypes(exclude=['object'])\ntest_numerical.fillna(0,inplace = True)\ntest_categoric = test.select_dtypes(include=['object'])\ntest_categoric.fillna('NONE',inplace = True)\ntest = test_numerical.merge(test_categoric, left_index = True, right_index = True) ", "_____no_output_____" ], [ "# Remove the outliers\nfrom sklearn.ensemble import IsolationForest\n\nclf = IsolationForest(max_samples = 100, random_state = 42)\nclf.fit(train_numerical)\ny_noano = clf.predict(train_numerical)\ny_noano = pd.DataFrame(y_noano, columns = ['Top'])\ny_noano[y_noano['Top'] == 1].index.values\n\ntrain_numerical = train_numerical.iloc[y_noano[y_noano['Top'] == 1].index.values]\ntrain_numerical.reset_index(drop = True, inplace = True)\n\ntrain_categoric = train_categoric.iloc[y_noano[y_noano['Top'] == 1].index.values]\ntrain_categoric.reset_index(drop = True, inplace = True)\n\ntrain = train.iloc[y_noano[y_noano['Top'] == 1].index.values]\ntrain.reset_index(drop = True, inplace = True)", "_____no_output_____" ], [ "col_train_num = list(train_numerical.columns)\ncol_train_num_bis = list(train_numerical.columns)\n\ncol_train_cat = list(train_categoric.columns)\n\ncol_train_num_bis.remove('SalePrice')\n\nmat_train = np.matrix(train_numerical)\nmat_test = np.matrix(test_numerical)\nmat_new = np.matrix(train_numerical.drop('SalePrice',axis = 1))\nmat_y = np.array(train.SalePrice)\n\nprepro_y = MinMaxScaler()\nprepro_y.fit(mat_y.reshape(1314,1))\n\nprepro = MinMaxScaler()\nprepro.fit(mat_train)\n\nprepro_test = MinMaxScaler()\nprepro_test.fit(mat_new)\n\ntrain_num_scale = pd.DataFrame(prepro.transform(mat_train),columns = col_train)\ntest_num_scale = pd.DataFrame(prepro_test.transform(mat_test),columns = col_train_bis)", "_____no_output_____" ], [ "train[col_train_num] = pd.DataFrame(prepro.transform(mat_train),columns = col_train_num)\ntest[col_train_num_bis] = test_num_scale", "_____no_output_____" ] ], [ [ "The principal changes are here, in the lines beginning with for categorical_features... It is possible to use other functions to prepare your categorical data. 
", "_____no_output_____" ] ], [ [ "# List of features\nCOLUMNS = col_train_num\nFEATURES = col_train_num_bis\nLABEL = \"SalePrice\"\n\nFEATURES_CAT = col_train_cat\n\nengineered_features = []\n\nfor continuous_feature in FEATURES:\n engineered_features.append(\n tf.contrib.layers.real_valued_column(continuous_feature))\n\nfor categorical_feature in FEATURES_CAT:\n sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(\n categorical_feature, hash_bucket_size=1000)\n\n engineered_features.append(tf.contrib.layers.embedding_column(sparse_id_column=sparse_column, dimension=16,combiner=\"sum\"))\n \n# Training set and Prediction set with the features to predict\ntraining_set = train[FEATURES + FEATURES_CAT]\nprediction_set = train.SalePrice\n\n# Train and Test \nx_train, x_test, y_train, y_test = train_test_split(training_set[FEATURES + FEATURES_CAT] ,\n prediction_set, test_size=0.33, random_state=42)\ny_train = pd.DataFrame(y_train, columns = [LABEL])\ntraining_set = pd.DataFrame(x_train, columns = FEATURES + FEATURES_CAT).merge(y_train, left_index = True, right_index = True)\n\n# Training for submission\ntraining_sub = training_set[FEATURES + FEATURES_CAT]\ntesting_sub = test[FEATURES + FEATURES_CAT]", "_____no_output_____" ], [ "# Same thing but for the test set\ny_test = pd.DataFrame(y_test, columns = [LABEL])\ntesting_set = pd.DataFrame(x_test, columns = FEATURES + FEATURES_CAT).merge(y_test, left_index = True, right_index = True)", "_____no_output_____" ], [ "training_set[FEATURES_CAT] = training_set[FEATURES_CAT].applymap(str)\ntesting_set[FEATURES_CAT] = testing_set[FEATURES_CAT].applymap(str)\n\ndef input_fn_new(data_set, training = True):\n continuous_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}\n \n categorical_cols = {k: tf.SparseTensor(\n indices=[[i, 0] for i in range(data_set[k].size)], values = data_set[k].values, dense_shape = [data_set[k].size, 1]) for k in FEATURES_CAT}\n\n # Merges the two dictionaries into one.\n feature_cols = dict(list(continuous_cols.items()) + list(categorical_cols.items()))\n \n if training == True:\n # Converts the label column into a constant Tensor.\n label = tf.constant(data_set[LABEL].values)\n\n # Returns the feature columns and the label.\n return feature_cols, label\n \n return feature_cols\n\n# Model\nregressor = tf.contrib.learn.DNNRegressor(feature_columns = engineered_features, \n activation_fn = tf.nn.relu, hidden_units=[200, 100, 50, 25, 12])", "_____no_output_____" ], [ "categorical_cols = {k: tf.SparseTensor(indices=[[i, 0] for i in range(training_set[k].size)], values = training_set[k].values, dense_shape = [training_set[k].size, 1]) for k in FEATURES_CAT}", "_____no_output_____" ], [ "# Deep Neural Network Regressor with the training set which contain the data split by train test split\nregressor.fit(input_fn = lambda: input_fn_new(training_set) , steps=2000)", "_____no_output_____" ], [ "ev = regressor.evaluate(input_fn=lambda: input_fn_new(testing_set, training = True), steps=1)", "_____no_output_____" ], [ "loss_score4 = ev[\"loss\"]\nprint(\"Final Loss on the testing set: {0:f}\".format(loss_score4))", "_____no_output_____" ] ], [ [ "# <center> VIII. 
Predictions bis", "_____no_output_____" ] ], [ [ "# Predictions\ny = regressor.predict(input_fn=lambda: input_fn_new(testing_set))\npredictions = list(itertools.islice(y, testing_set.shape[0]))\npredictions = pd.DataFrame(prepro_y.inverse_transform(np.array(predictions).reshape(434,1)))", "_____no_output_____" ], [ "matplotlib.rc('xtick', labelsize=30) \nmatplotlib.rc('ytick', labelsize=30) \n\nfig, ax = plt.subplots(figsize=(50, 40))\n\nplt.style.use('ggplot')\nplt.plot(predictions.values, reality.values, 'ro')\nplt.xlabel('Predictions', fontsize = 30)\nplt.ylabel('Reality', fontsize = 30)\nplt.title('Predictions x Reality on dataset Test', fontsize = 30)\nax.plot([reality.min(), reality.max()], [reality.min(), reality.max()], 'k--', lw=4)\nplt.show()", "_____no_output_____" ], [ "y_predict = regressor.predict(input_fn=lambda: input_fn_new(testing_sub, training = False))", "_____no_output_____" ], [ "to_submit(y_predict, \"submission_cont_categ\")", "_____no_output_____" ] ], [ [ "# <center> IX. Shallow Network", "_____no_output_____" ], [ "For this part we will explore the architecture with just one Hidden Layer with several units. The question is: How many units do you need to have a good score on the leaderboard? We will try with 1000 units with the activation function Relu.", "_____no_output_____" ] ], [ [ "# Model\nregressor = tf.contrib.learn.DNNRegressor(feature_columns = engineered_features, \n                                          activation_fn = tf.nn.relu, hidden_units=[1000])", "_____no_output_____" ], [ "# Deep Neural Network Regressor with the training set which contains the data split by train test split\nregressor.fit(input_fn = lambda: input_fn_new(training_set) , steps=2000)", "_____no_output_____" ], [ "ev = regressor.evaluate(input_fn=lambda: input_fn_new(testing_set, training = True), steps=1)\nloss_score5 = ev[\"loss\"]", "_____no_output_____" ], [ "print(\"Final Loss on the testing set: {0:f}\".format(loss_score5))", "_____no_output_____" ], [ "y_predict = regressor.predict(input_fn=lambda: input_fn_new(testing_sub, training = False)) \nto_submit(y_predict, \"submission_shallow\")", "_____no_output_____" ] ], [ [ "# <center> X. Conclusion", "_____no_output_____" ] ], [ [ "list_score = [loss_score1, loss_score2, loss_score3, loss_score4,loss_score5]\nlist_model = ['Relu_cont', 'LRelu_cont', 'Elu_cont', 'Relu_cont_categ','Shallow_1ku']", "_____no_output_____" ], [ "import matplotlib.pyplot as plt; plt.rcdefaults()\n\nplt.style.use('ggplot')\nobjects = list_model\ny_pos = np.arange(len(objects))\nperformance = list_score\n \nplt.barh(y_pos, performance, align='center', alpha=0.9)\nplt.yticks(y_pos, objects)\nplt.xlabel('Loss ')\nplt.title('Model compared without hypertuning')\n \nplt.show()", "_____no_output_____" ] ], [ [ "So, I hope that this small introduction will be useful ! With this code you can build a regression model with Tensorflow with continuous and categorical features, plus add a new activation function (LeakyRelu). \nIf you want to improve the results you can re-build the models on the whole data set. You can see that I used just 67% of the training set to build my models.\n\nTake my code and play with it: more hyperparameters, 100% of the training set to build the next models, try other activation functions, etc...\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ecb19a7984cf3fb3b58061c4e97f316e59fdfef3
190,513
ipynb
Jupyter Notebook
notebooks/T6 - 1 - Distancias.ipynb
diego1193/python-ml-course
f1c026b07d4539b8a466db79a45b1bc7842ae253
[ "MIT" ]
655
2018-04-06T00:27:58.000Z
2022-03-27T09:11:25.000Z
notebooks/T6 - 1 - Distancias.ipynb
diego1193/python-ml-course
f1c026b07d4539b8a466db79a45b1bc7842ae253
[ "MIT" ]
6
2018-11-12T06:06:36.000Z
2021-03-25T00:13:48.000Z
notebooks/T6 - 1 - Distancias.ipynb
diego1193/python-ml-course
f1c026b07d4539b8a466db79a45b1bc7842ae253
[ "MIT" ]
1,413
2018-04-23T07:53:37.000Z
2022-03-31T23:45:40.000Z
64.253963
29,696
0.660007
[ [ [ "# Distancias", "_____no_output_____" ] ], [ [ "from scipy.spatial import distance_matrix\nimport pandas as pd", "_____no_output_____" ], [ "data = pd.read_csv(\"../datasets/movies/movies.csv\", sep=\";\")\ndata", "_____no_output_____" ], [ "movies = data.columns.values.tolist()[1:]\nmovies", "_____no_output_____" ], [ "dd1 = distance_matrix(data[movies], data[movies], p=1)\ndd2 = distance_matrix(data[movies], data[movies], p=2)\ndd10 = distance_matrix(data[movies], data[movies], p=10)", "_____no_output_____" ], [ "def dm_to_df(dd, col_name):\n import pandas as pd\n return pd.DataFrame(dd, index=col_name, columns=col_name)", "_____no_output_____" ], [ "dm_to_df(dd1, data[\"user_id\"])", "_____no_output_____" ], [ "dm_to_df(dd2, data[\"user_id\"])", "_____no_output_____" ], [ "dm_to_df(dd10, data[\"user_id\"])", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D", "_____no_output_____" ], [ "fig = plt.figure()\nax = fig.add_subplot(111, projection=\"3d\")\nax.scatter(xs = data[\"star_wars\"], ys = data[\"lord_of_the_rings\"], zs=data[\"harry_potter\"])", "_____no_output_____" ] ], [ [ "# Enlaces", "_____no_output_____" ] ], [ [ "df = dm_to_df(dd1, data[\"user_id\"])\ndf", "_____no_output_____" ], [ "Z=[]", "_____no_output_____" ], [ "df[11]=df[1]+df[10]\ndf.loc[11]=df.loc[1]+df.loc[10]\nZ.append([1,10,0.7,2])#id1, id2, d, n_elementos_en_cluster -> 11.\ndf", "_____no_output_____" ], [ "for i in df.columns.values.tolist():\n df.loc[11][i] = min(df.loc[1][i], df.loc[10][i])\n df.loc[i][11] = min(df.loc[i][1], df.loc[i][10])\ndf", "_____no_output_____" ], [ "df = df.drop([1,10])\ndf = df.drop([1,10], axis=1)\ndf", "_____no_output_____" ], [ "x = 2\ny = 7\n\nn = 12\n\ndf[n]=df[x]+df[y]\ndf.loc[n]=df.loc[x]+df.loc[y]\nZ.append([x,y,df.loc[x][y],2])#id1, id2, d, n_elementos_en_cluster -> 11.\n\nfor i in df.columns.values.tolist():\n df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])\n df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])\n\ndf = df.drop([x,y])\ndf = df.drop([x,y], axis=1)\ndf", "_____no_output_____" ], [ "x = 5\ny = 8\n\nn = 13\n\ndf[n]=df[x]+df[y]\ndf.loc[n]=df.loc[x]+df.loc[y]\nZ.append([x,y,df.loc[x][y],2])#id1, id2, d, n_elementos_en_cluster -> 11.\n\nfor i in df.columns.values.tolist():\n df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])\n df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])\n\ndf = df.drop([x,y])\ndf = df.drop([x,y], axis=1)\ndf", "_____no_output_____" ], [ "x = 11\ny = 13\n\nn = 14\n\ndf[n]=df[x]+df[y]\ndf.loc[n]=df.loc[x]+df.loc[y]\nZ.append([x,y,df.loc[x][y],2])#id1, id2, d, n_elementos_en_cluster -> 11.\n\nfor i in df.columns.values.tolist():\n df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])\n df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])\n\ndf = df.drop([x,y])\ndf = df.drop([x,y], axis=1)\ndf", "_____no_output_____" ], [ "x = 9\ny = 12\nz = 14\n\nn = 15\n\ndf[n]=df[x]+df[y]\ndf.loc[n]=df.loc[x]+df.loc[y]\nZ.append([x,y,df.loc[x][y],3])#id1, id2, d, n_elementos_en_cluster -> 11.\n\nfor i in df.columns.values.tolist():\n df.loc[n][i] = min(df.loc[x][i], df.loc[y][i], df.loc[z][i])\n df.loc[i][n] = min(df.loc[i][x], df.loc[i][y], df.loc[i][z])\n\ndf = df.drop([x,y,z])\ndf = df.drop([x,y,z], axis=1)\ndf", "_____no_output_____" ], [ "x = 4\ny = 6\nz = 15\n\nn = 16\n\ndf[n]=df[x]+df[y]\ndf.loc[n]=df.loc[x]+df.loc[y]\nZ.append([x,y,df.loc[x][y],3])#id1, id2, d, n_elementos_en_cluster -> 11.\n\nfor i in df.columns.values.tolist():\n df.loc[n][i] = min(df.loc[x][i], df.loc[y][i], df.loc[z][i])\n df.loc[i][n] = 
min(df.loc[i][x], df.loc[i][y], df.loc[i][z])\n\ndf = df.drop([x,y,z])\ndf = df.drop([x,y,z], axis=1)\ndf", "_____no_output_____" ], [ "x = 3\ny = 16\n\nn = 17\n\ndf[n]=df[x]+df[y]\ndf.loc[n]=df.loc[x]+df.loc[y]\nZ.append([x,y,df.loc[x][y],2])#id1, id2, d, n_elementos_en_cluster -> 11.\n\nfor i in df.columns.values.tolist():\n df.loc[n][i] = min(df.loc[x][i], df.loc[y][i])\n df.loc[i][n] = min(df.loc[i][x], df.loc[i][y])\n\ndf = df.drop([x,y])\ndf = df.drop([x,y], axis=1)\ndf", "_____no_output_____" ], [ "Z", "_____no_output_____" ] ], [ [ "# Clustering jerárquico", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nfrom scipy.cluster.hierarchy import dendrogram, linkage", "_____no_output_____" ], [ "movies", "_____no_output_____" ], [ "data[movies]", "_____no_output_____" ], [ "Z = linkage(data[movies], \"ward\")\nZ\nplt.figure(figsize=(25,10))\nplt.title(\"Dendrograma jerárquico para el Clustering\")\nplt.xlabel(\"ID de los usuarios de Netflix\")\nplt.ylabel(\"Distancia\")\ndendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)\nplt.show()", "_____no_output_____" ], [ "Z = linkage(data[movies], \"average\")\nZ\nplt.figure(figsize=(25,10))\nplt.title(\"Dendrograma jerárquico para el Clustering\")\nplt.xlabel(\"ID de los usuarios de Netflix\")\nplt.ylabel(\"Distancia\")\ndendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)\nplt.show()", "_____no_output_____" ], [ "data[movies]", "_____no_output_____" ], [ "Z = linkage(data[movies], \"complete\")\nZ\nplt.figure(figsize=(25,10))\nplt.title(\"Dendrograma jerárquico para el Clustering\")\nplt.xlabel(\"ID de los usuarios de Netflix\")\nplt.ylabel(\"Distancia\")\ndendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)\nplt.show()", "_____no_output_____" ], [ "Z = linkage(data[movies], method=\"single\", metric=\"cosine\")\nZ\nplt.figure(figsize=(25,10))\nplt.title(\"Dendrograma jerárquico para el Clustering\")\nplt.xlabel(\"ID de los usuarios de Netflix\")\nplt.ylabel(\"Distancia\")\ndendrogram(Z, leaf_rotation=90., leaf_font_size=10.0)\nplt.show()", "_____no_output_____" ] ], [ [ "The distance function can be ‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘cityblock’, ‘correlation’, ‘cosine’, ‘dice’, ‘euclidean’, ‘hamming’, ‘jaccard’, ‘kulsinski’, ‘mahalanobis’, ‘matching’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ecb1ad8af88bf5d1924bf3f7a24b45d8cec27457
77,031
ipynb
Jupyter Notebook
df-conveccao.ipynb
mat-esp-2016/diferencas-finitas-conveccao-pratica09-edpf-lpglr007
b95344ddd7ee88b0af594694d2077109e6358025
[ "CC-BY-4.0" ]
null
null
null
df-conveccao.ipynb
mat-esp-2016/diferencas-finitas-conveccao-pratica09-edpf-lpglr007
b95344ddd7ee88b0af594694d2077109e6358025
[ "CC-BY-4.0" ]
null
null
null
df-conveccao.ipynb
mat-esp-2016/diferencas-finitas-conveccao-pratica09-edpf-lpglr007
b95344ddd7ee88b0af594694d2077109e6358025
[ "CC-BY-4.0" ]
null
null
null
136.82238
26,950
0.878439
[ [ [ "# Método das diferenças finitas: Convecção", "_____no_output_____" ], [ "Vamos resolver a equação de convecção:\n\n$$\\frac{\\partial u}{\\partial t} + c \\frac{\\partial u}{\\partial x} = 0$$", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "As células abaixo definem funções que criam o domínio e as condições iniciais.", "_____no_output_____" ] ], [ [ "def cria_dominios(tamanho, Nx, duração, Nt):\n \"\"\"\n Cria o domínio espacial e calcula os intervalos de tempo e espaço.\n \"\"\"\n x = np.linspace(0, tamanho, Nx)\n dx = x[1] - x[0]\n dt = duração/(Nt - 1)\n return x, dx, dt", "_____no_output_____" ], [ "x, dx, dt = cria_dominios(tamanho=2, Nx=51, duração=1, Nt=51)\nprint('dx =', dx, 'dt =', dt)", "dx = 0.04 dt = 0.02\n" ], [ "def cria_cond_inicial(x):\n \"\"\"\n Cria um vetor de condições iniciais u0 com uma função degrau.\n \"\"\"\n u0 = np.ones(x.size)\n u0[(x >= 0.2) & (x <= 0.5)] = 2\n return u0", "_____no_output_____" ], [ "u0 = cria_cond_inicial(x)\n\nplt.figure()\nplt.plot(x, u0, '.-k')\nplt.xlabel('x')\nplt.ylabel('u')\nplt.title('u0')\nplt.ylim(0, 3)", "_____no_output_____" ] ], [ [ "## Tarefa 1\n\nComplete a função abaixo que executa 1 único passo no tempo utilizando diferenças progressivas no tempo e regressivas no espaço.", "_____no_output_____" ] ], [ [ "def passo_no_tempo(u_passado, dx, dt, velocidade):\n \"\"\"\n Executa 1 passo no tempo.\n \n Dado u_passado, utiliza o método das diferenças finitas \n para calcular u_futuro com passo no tempo dt.\n \n Utiliza a condição de contorno u(x=0, t) = 1.\n \"\"\"\n u_futuro = u_passado.copy()\n Nx = len(u_passado)\n u_futuro[0] = 1\n for i in range (1,Nx):\n u_futuro[i] = u_passado[i]-velocidade*(dt/dx)*(u_passado[i]-u_passado[i-1])\n return u_futuro", "_____no_output_____" ] ], [ [ "Use as células abaixo para checar se sua função funciona.", "_____no_output_____" ] ], [ [ "u1 = passo_no_tempo(u0, dx, dt, velocidade=1)", "_____no_output_____" ], [ "plt.figure()\nplt.plot(x, u0, '--r')\nplt.plot(x, u1, '.-k')\nplt.xlabel('x')\nplt.ylabel('u')\nplt.ylim(0, 3)", "_____no_output_____" ] ], [ [ "## Tarefa 2\n\nComplete a função abaixo que executa uma simulação completa de diferenças finitas (utilizando as funções definidas acima) para uma deterimada duração.", "_____no_output_____" ] ], [ [ "def simula(tamanho, Nx, duração, Nt, velocidade):\n \"\"\"\n Executa uma simulação completa da equação de convecção \n utilizando diferenças finitas.\n \n 1. Cria o domínio e a condição inicial\n 2. Executa Nt passos no tempo\n 3. Retorna o domínio (x), a condição inicial (u0) e \n o resultado final da simulação (u_futuro).\n \"\"\"\n x, dx, dt = cria_dominios(tamanho, Nx, duração, Nt)\n u0 = cria_cond_inicial(x)\n u_passado = u0\n for i in range (Nt):\n u_futuro = passo_no_tempo(u_passado, dx, dt, velocidade)\n u_passado = u_futuro\n return x, u0, u_futuro", "_____no_output_____" ] ], [ [ "Utilize as células abaixo para checar o resultado da sua função.", "_____no_output_____" ] ], [ [ "x, u0, u_futuro = simula(tamanho=2, Nx=51, duração=1, Nt=51, velocidade=1)", "_____no_output_____" ], [ "plt.figure()\nplt.plot(x, u0, '--r')\nplt.plot(x, u_futuro, '.-k')\nplt.xlabel('x')\nplt.ylabel('u')\nplt.ylim(0, 3)", "_____no_output_____" ] ], [ [ "### O que aconteceu com o resultado no final da simulação? Isso deveria acontecer?", "_____no_output_____" ] ], [ [ "### Alteração do gráfico. 
Aconteceu devido a variação de um delta pra outro", "_____no_output_____" ] ], [ [ "## Tarefa 3\n\nFaça uma figura com o resultado da simulação para diferentes valores `Nx` (utilize a lista abaixo). Inclua uma legenda no seu gráfico.", "_____no_output_____" ] ], [ [ "valores_de_Nx = [51, 71, 91, 101, 111]", "_____no_output_____" ], [ "plt.figure()\n\nfor Nx in valores_de_Nx:\n x, u0, u_futuro = simula(tamanho = 2, Nx = Nx, Nt = 51, duração = 1, velocidade = 1) \n plt.plot(x, u_futuro, '.-') \n \nplt.legend(valores_de_Nx)\nplt.xlabel('x')\nplt.ylabel('u')\nplt.ylim(0, 3)", "_____no_output_____" ] ], [ [ "### O método é igualmente preciso para todos os valores de Nx?", "_____no_output_____" ], [ "## Bônus\n\nComplete a função abaixo que executa a simulação completa mas dessa vez guarda cada passo da simulação. A função deve gerar uma lista `u` que contem o valor de $u$ de cada iteração.\n\nComplete o código que gera um gráfico com o valor de `u` a cada 10 iterações. Ou seja, o gráfico deve conter `u[0]`, `u[10]`, `u[20]`, etc.", "_____no_output_____" ] ], [ [ "def simula_grava(tamanho, Nx, duração, Nt, velocidade):\n \"\"\"\n Executa uma simulação completa da equação de convecção \n utilizando diferenças finitas e grava cada iteração.\n \n 1. Cria o domínio e a condição inicial\n 2. Executa Nt passos no tempo\n 3. Retorna o domínio (x) e uma lista (u) com o resultado\n de cada passo no tempo (incluindo o u0).\n \"\"\"\n \n return x, u", "_____no_output_____" ], [ "x, u = simula_grava(tamanho=2, Nx=51, duração=1, Nt=51, velocidade=1)", "_____no_output_____" ], [ "plt.figure()\n\nplt.xlabel('x')\nplt.ylabel('u')\nplt.ylim(0, 3)\nplt.title('Simulação completa (cada 10 iterações)')", "_____no_output_____" ] ], [ [ "**Course website**: https://github.com/mat-esp/about\n\n**Note**: This notebook is part of the course \"Matemática Especial I\" of the [Universidade do Estado do Rio de Janeiro](http://www.uerj.br/). All content can be freely used and adapted under the terms of the \n[Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/).\n\n![Creative Commons License](https://i.creativecommons.org/l/by/4.0/88x31.png)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
ecb1cfa057abb7a1b68fb72d9da26d136606cbb7
73,245
ipynb
Jupyter Notebook
homework4-done.ipynb
razorcd/ml-training
1ef5bbe4abd74ccefb896733b5bcfd98d68835ee
[ "MIT" ]
3
2021-11-04T17:41:34.000Z
2021-12-29T15:01:19.000Z
homework4-done.ipynb
razorcd/ml-training
1ef5bbe4abd74ccefb896733b5bcfd98d68835ee
[ "MIT" ]
null
null
null
homework4-done.ipynb
razorcd/ml-training
1ef5bbe4abd74ccefb896733b5bcfd98d68835ee
[ "MIT" ]
1
2021-12-06T03:57:51.000Z
2021-12-06T03:57:51.000Z
40.511615
18,464
0.534753
[ [ [ "## Homework 4\n\nUse this notebook as a starter", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "Data:\n\n- https://github.com/gastonstat/CreditScoring\n- Also available [here](https://raw.githubusercontent.com/alexeygrigorev/mlbookcamp-code/master/chapter-06-trees/CreditScoring.csv)", "_____no_output_____" ] ], [ [ "!wget https://raw.githubusercontent.com/alexeygrigorev/mlbookcamp-code/master/chapter-06-trees/CreditScoring.csv", "--2021-10-01 13:36:36-- https://raw.githubusercontent.com/alexeygrigorev/mlbookcamp-code/master/chapter-06-trees/CreditScoring.csv\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.110.133, 185.199.108.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 182489 (178K) [text/plain]\nSaving to: ‘CreditScoring.csv.4’\n\nCreditScoring.csv.4 100%[===================>] 178,21K --.-KB/s in 0,06s \n\n2021-10-01 13:36:36 (2,84 MB/s) - ‘CreditScoring.csv.4’ saved [182489/182489]\n\n" ] ], [ [ "## Preparation \n\nWe'll talk about this dataset in more details in week 6. But for now, use the following code to get started", "_____no_output_____" ] ], [ [ "df = pd.read_csv('CreditScoring2.csv')\ndf.columns = df.columns.str.lower()\ndf", "_____no_output_____" ] ], [ [ "Some of the features are encoded as numbers. Use the following code to de-code them:", "_____no_output_____" ] ], [ [ "status_values = {\n 1: 'ok',\n 2: 'default',\n 0: 'unk'\n}\n\ndf.status = df.status.map(status_values)\n\n\nhome_values = {\n 1: 'rent',\n 2: 'owner',\n 3: 'private',\n 4: 'ignore',\n 5: 'parents',\n 6: 'other',\n 0: 'unk'\n}\n\ndf.home = df.home.map(home_values)\n\nmarital_values = {\n 1: 'single',\n 2: 'married',\n 3: 'widow',\n 4: 'separated',\n 5: 'divorced',\n 0: 'unk'\n}\n\ndf.marital = df.marital.map(marital_values)\n\nrecords_values = {\n 1: 'no',\n 2: 'yes',\n 0: 'unk'\n}\n\ndf.records = df.records.map(records_values)\n\njob_values = {\n 1: 'fixed',\n 2: 'partime',\n 3: 'freelance',\n 4: 'others',\n 0: 'unk'\n}\n\ndf.job = df.job.map(job_values)\ndf", "_____no_output_____" ] ], [ [ "Prepare the numerical variables:", "_____no_output_____" ] ], [ [ "for c in ['income', 'assets', 'debt']:\n df[c] = df[c].replace(to_replace=99999999, value=0)", "_____no_output_____" ] ], [ [ "Remove clients with unknown default status", "_____no_output_____" ] ], [ [ "df = df[df.status != 'unk'].reset_index(drop=True)", "_____no_output_____" ] ], [ [ "Create the target variable", "_____no_output_____" ] ], [ [ "df['default'] = (df.status == 'default').astype(int)\ndel df['status']\ndf", "_____no_output_____" ] ], [ [ "## Your code", "_____no_output_____" ], [ "What are the categorical variables? What are the numerical?", "_____no_output_____" ] ], [ [ "numerical = [\"seniority\",\"time\",\"age\",\"expenses\",\"income\",\"assets\",\"debt\",\"amount\",\"price\",\"default\"]\ncategorical = [\"home\", \"marital\", \"records\", \"job\"]", "_____no_output_____" ] ], [ [ "Split the data into 3 parts: train/validation/test with 60%/20%/20% distribution. 
Use `train_test_split` funciton for that with `random_state=1`", "_____no_output_____" ] ], [ [ "# Setup validation framework\nfrom sklearn.model_selection import train_test_split\n\ndf_full_train, df_test = train_test_split(df, test_size=0.2, random_state=1)\ndf_train, df_val = train_test_split(df_full_train, test_size=0.25, random_state=1)\n\ndf_train = df_train.reset_index(drop=\"true\")\ndf_val = df_val.reset_index(drop=\"true\")\ndf_test = df_test.reset_index(drop=\"true\")\n\n# y_full_train = df_full_train.churn.values\n# y_train = df_train.churn.values\n# y_val = df_val.churn.values\n# y_test = df_test.churn.values\n\n# del df_test[\"churn\"]\n# del df_val[\"churn\"]\n# del df_train[\"churn\"]\n# df_train.columns\n\nprint(\"train: %.2f, val: %.2f, test: %.2f\" % (len(df_train)/len(df), len(df_val)/len(df), len(df_test)/len(df)))", "train: 0.60, val: 0.20, test: 0.20\n" ] ], [ [ "## Question 1\n\nROC AUC could also be used to evaluate feature importance of numerical variables. \n\nLet's do that\n\n* For each numerical variable, use it as score and compute AUC with the \"default\" variable\n* Use the training dataset for that\n\n\nIf your AUC is < 0.5, invert this variable by putting \"-\" in front\n\n(e.g. `-df_train['expenses']`)\n\nAUC can go below 0.5 if the variable is negatively correlated with the target varialble. You can change the direction of the correlation by negating this variable - then negative correlation becomes positive.", "_____no_output_____" ] ], [ [ "# Q1 (not following question) (see Q1b)\n\nimport warnings\nwarnings.filterwarnings(action='once')\nwarnings.filterwarnings('ignore')\n\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn import linear_model\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import KFold\nfrom tqdm.auto import tqdm\n\ndef train(dataFrame, y):\n dicts = dataFrame.to_dict(orient=\"records\")\n dv = DictVectorizer(sparse=False)\n X = dv.fit_transform(dicts)\n\n model = linear_model.LogisticRegression()\n model.fit(X, y)\n return dv, model\n\ndef predict(dataFrame, dv, model):\n dicts = dataFrame.to_dict(orient=\"records\")\n X = dv.transform(dicts)\n y_pred = model.predict_proba(X)[:,1]\n return y_pred\n\nfields = [\n {\"field\": \"seniority\", \"correlation\": 1},\n {\"field\": \"time\", \"correlation\": 1},\n {\"field\": \"age\", \"correlation\": 1},\n {\"field\": \"expenses\", \"correlation\": 1},\n {\"field\": \"income\", \"correlation\": 1},\n {\"field\": \"assets\", \"correlation\": 1},\n {\"field\": \"debt\", \"correlation\": 1},\n {\"field\": \"amount\", \"correlation\": 1},\n {\"field\": \"price\", \"correlation\": -1},\n {\"field\": \"default\", \"correlation\": 1}\n]\n\nfor f in fields:\n df_train_selected = df_train[[f[\"field\"]]]\n df_val_selected = df_val[[f[\"field\"]]]\n\n y_train = f[\"correlation\"] * df_train[\"default\"].values\n y_val = df_val[\"default\"].values\n\n dv, model = train(df_train_selected, y_train)\n y_pred = predict(df_val_selected, dv, model)\n\n# display(y_val, y_pred)\n auc = metrics.roc_auc_score(y_val, y_pred)\n print(\"field:\", f[\"field\"], \"auc:\",round(auc,3))", "field: seniority auc: 0.69\nfield: time auc: 0.553\nfield: age auc: 0.559\nfield: expenses auc: 0.513\nfield: income auc: 0.664\nfield: assets auc: 0.602\nfield: debt auc: 0.508\nfield: amount auc: 0.588\nfield: price auc: 0.534\nfield: default auc: 1.0\n" ], [ "# Q1b (following question, no model training)\n\nimport 
warnings\nwarnings.filterwarnings(action='once')\nwarnings.filterwarnings('ignore')\n\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn import linear_model\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import KFold\nfrom tqdm.auto import tqdm\n\nfields = [\n {\"field\": \"seniority\", \"correlation\": -1},\n {\"field\": \"time\", \"correlation\": 1},\n {\"field\": \"age\", \"correlation\": -1},\n {\"field\": \"expenses\", \"correlation\": -1},\n {\"field\": \"income\", \"correlation\": -1},\n {\"field\": \"assets\", \"correlation\": -1},\n {\"field\": \"debt\", \"correlation\": -1},\n {\"field\": \"amount\", \"correlation\": 1},\n {\"field\": \"price\", \"correlation\": 1},\n {\"field\": \"default\", \"correlation\": 1}\n]\n\n# df_train[fields[0][\"field\"]].values\n\n\n# metrics.roc_auc_score([1,0,1], [0.3, 0.2, 0.4])\n\nfor f in fields:\n train_selected = f[\"correlation\"] * df_train[f[\"field\"]].values\n# df_val_selected = df_val[[f[\"field\"]]]\n\n# y_train = f[\"correlation\"] * df_train[\"default\"].values\n y = df_train[\"default\"].values\n\n# dv, model = train(df_train_selected, y_train)\n# y_pred = predict(df_val_selected, dv, model)\n\n auc = metrics.roc_auc_score(y, train_selected)\n print(\"field:\", f[\"field\"], \"auc:\",round(auc,3))", "field: seniority auc: 0.709\nfield: time auc: 0.561\nfield: age auc: 0.573\nfield: expenses auc: 0.501\nfield: income auc: 0.682\nfield: assets auc: 0.649\nfield: debt auc: 0.505\nfield: amount auc: 0.591\nfield: price auc: 0.504\nfield: default auc: 1.0\n" ] ], [ [ "Which numerical variable (among the following 4) has the highest AUC?\n\n- seniority\n- time\n- income\n- debt", "_____no_output_____" ], [ "## Training the model\n\nFrom now on, use these columns only:\n\n```\n['seniority', 'income', 'assets', 'records', 'job', 'home']\n```\n\nApply one-hot-encoding using `DictVectorizer` and train the logistic regression with these parameters:\n\n```\nLogisticRegression(solver='liblinear', C=1.0, max_iter=1000)\n```", "_____no_output_____" ] ], [ [ "# Q2\nselected_fields = ['seniority', 'income', 'assets', 'records', 'job', 'home']\n\ndef train(dataFrame, y):\n dicts = dataFrame[selected_fields].to_dict(orient=\"records\")\n dv = DictVectorizer(sparse=False)\n X = dv.fit_transform(dicts)\n\n model = linear_model.LogisticRegression(solver='liblinear', C=1.0, max_iter=1000)\n model.fit(X, y)\n return dv, model\n\ndef predict(dataFrame, dv, model):\n dicts = dataFrame.to_dict(orient=\"records\")\n X = dv.transform(dicts)\n y_pred = model.predict_proba(X)[:,1]\n return y_pred\n\ny_train = df_train[\"default\"].values\ny_val = df_val[\"default\"].values\n\ndv, model = train(df_train, y_train)\ny_pred = predict(df_val, dv, model)\n\nauc = metrics.roc_auc_score(y_val, y_pred)\nprint(\"auc:\",round(auc,3))\n", "auc: 0.812\n" ] ], [ [ "## Question 2\n\nWhat's the AUC of this model on the validation dataset? 
(round to 3 digits)\n\n- 0.512\n- 0.612\n- 0.712\n- 0.812", "_____no_output_____" ], [ "## Question 3\n\nNow let's compute precision and recall for our model.\n\n* Evaluate the model on all thresholds from 0.0 to 1.0 with step 0.01\n* For each threshold, compute precision and recall\n* Plot them", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nprecissions = []\nrecalls = []\n\n# default prediction thrashold:\nthrashholds = np.linspace(0,1, 100)\n\nfor t in thrashholds:\n predict_positive = (y_pred >= t)\n predict_negative = (y_pred < t)\n\n actual_positive = (y_val >= t)\n actual_negative = (y_val < t)\n\n true_positive = (predict_positive & actual_positive).sum()\n true_negative = (predict_negative & actual_negative).sum()\n false_positive = (predict_positive & actual_negative).sum()\n false_negative = (predict_negative & actual_positive).sum()\n\n precission = true_positive / (true_positive + false_positive)\n# print(\"t=\",t, \"precission=\",precission)\n precissions.append(precission)\n\n recall = true_positive / (true_positive + false_negative)\n# print(\"t=\",t, \"recall=\",recall)\n recalls.append(recall)\n\n# precissions, recalls\n\nplt.plot(thrashholds, precissions)\nplt.plot(thrashholds, recalls)", "_____no_output_____" ] ], [ [ "At which threshold precision and recall curves intersect?\n\n* 0.2\n* 0.4\n* 0.6\n* 0.8", "_____no_output_____" ], [ "## Question 4\n\nPrecision and recall are conflicting - when one grows, the other goes down. That's why they are often combined into the F1 score - a metrics that takes into account both\n\nThis is the formula for computing F1:\n\n$$F_1 = 2 \\cdot \\cfrac{P \\cdot R}{P + R}$$\n\nWhere $P$ is precision and $R$ is recall.\n\nLet's compute F1 for all thresholds from 0.0 to 1.0 with increment 0.01", "_____no_output_____" ] ], [ [ "thrashholds_precissions_recalls = zip(thrashholds, precissions, recalls)\n# set(thrashholds_precissions_recalls)\n\nthrashholds_F1s = []\n\nfor t_p_r in thrashholds_precissions_recalls:\n F1 = (2*t_p_r[1]*t_p_r[2])/(t_p_r[1]+t_p_r[2])\n thrashholds_F1s.append((t_p_r[0], F1))\n\nsorted_thrashholds_F1s = sorted(thrashholds_F1s, key=lambda elem: elem[1], reverse=True)\nrounded_sorted_thrashholds_F1s = map(lambda t: (round(t[0],2),round(t[1],2)), sorted_thrashholds_F1s)\nlist(rounded_sorted_thrashholds_F1s)\n\n#threashold #F1\n\n", "_____no_output_____" ] ], [ [ "At which threshold F1 is maximal?\n\n- 0.1\n- 0.3\n- 0.5\n- 0.7", "_____no_output_____" ], [ "## Question 5\n\n\nUse the `KFold` class from Scikit-Learn to evaluate our model on 5 different folds:\n\n```\nKFold(n_splits=5, shuffle=True, random_state=1)\n```\n\n* Iterate over different folds of `df_full_train`\n* Split the data into train and validation\n* Train the model on train with these parameters: `LogisticRegression(solver='liblinear', C=1.0, max_iter=1000)`\n* Use AUC to evaluate the model on validation\n", "_____no_output_____" ] ], [ [ "# !pip install tqdm\nimport warnings\nwarnings.filterwarnings(action='once')\nwarnings.filterwarnings('ignore')\n\n#Cross Validation\nfrom sklearn.model_selection import KFold\nfrom tqdm.auto import tqdm\n\ndef train(dataFrame, y):\n dicts = dataFrame[selected_fields].to_dict(orient=\"records\")\n dv = DictVectorizer(sparse=False)\n X = dv.fit_transform(dicts)\n\n model = linear_model.LogisticRegression(solver='liblinear', C=1.0, max_iter=1000)\n model.fit(X, y)\n return dv, model\n\ndef predict(dataFrame, dv, model):\n dicts = dataFrame.to_dict(orient=\"records\")\n X = dv.transform(dicts)\n y_pred 
= model.predict_proba(X)[:,1]\n return y_pred\n\n# df_full_train_selected1 = df_full_train[selected_fields]\n\nsplits = 5\n\n# for C in tqdm([ 0.001, 0.01, 0.1, 0.5, 1, 5, 10], total=splits):\nkf = KFold(n_splits=splits, shuffle=True, random_state=1)\nauc_scores = []\nfor train_idx, val_idx in kf.split(df_full_train):\n\n df_train_itter = df_full_train.iloc[train_idx]\n df_val_itter = df_full_train.iloc[val_idx]\n\n y_train_iter = df_full_train.iloc[train_idx].default.values\n y_val_iter = df_full_train.iloc[val_idx].default.values\n\n dv, model = train(df_train_itter, y_train_iter)\n y_pred_iter = predict(df_val_itter, dv, model)\n auc = metrics.roc_auc_score(y_val_iter, y_pred_iter)\n auc_scores.append(auc)\n\nauc_scores\nprint(\"AUC mean: %.4f, AUC std: +-%.4f\" % (np.mean(auc_scores), np.std(auc_scores)))", "AUC mean: 0.8142, AUC std: +-0.0146\n" ] ], [ [ "How large is standard devidation of the scores across different folds?\n\n- 0.001\n- 0.014\n- 0.09\n- 0.14", "_____no_output_____" ], [ "## Question 6\n\nNow let's use 5-Fold cross-validation to find the best parameter C\n\n* Iterate over the following C values: `[0.01, 0.1, 1, 10]`\n* Use these parametes for the model: `LogisticRegression(solver='liblinear', C=C, max_iter=1000)`\n* Compute the mean score as well as the std", "_____no_output_____" ] ], [ [ "# !pip install tqdm\nimport warnings\nwarnings.filterwarnings(action='once')\nwarnings.filterwarnings('ignore')\n\n#Cross Validation\nimport sklearn\nfrom sklearn.model_selection import KFold\n\ndef train(dataFrame, y, C):\n dicts = dataFrame[selected_fields].to_dict(orient=\"records\")\n dv = DictVectorizer(sparse=False)\n X = dv.fit_transform(dicts)\n\n model = linear_model.LogisticRegression(solver='liblinear', C=C, max_iter=1000)\n model.fit(X, y)\n return dv, model\n\ndef predict(dataFrame, dv, model):\n dicts = dataFrame.to_dict(orient=\"records\")\n X = dv.transform(dicts)\n y_pred = model.predict_proba(X)[:,1]\n return y_pred\n\n# df_full_train_selected1 = df_full_train[selected_fields]\n\nsplits = 5\n\nfor C in [0.01, 0.1, 1, 10]:\n kf = KFold(n_splits=splits, shuffle=True, random_state=1)\n auc_scores = []\n for train_idx, val_idx in kf.split(df_full_train):\n\n df_train_itter = df_full_train.iloc[train_idx]\n df_val_itter = df_full_train.iloc[val_idx]\n\n y_train_iter = df_full_train.iloc[train_idx].default.values\n y_val_iter = df_full_train.iloc[val_idx].default.values\n\n dv, model = train(df_train_itter, y_train_iter,C)\n y_pred_iter = predict(df_val_itter, dv, model)\n auc = metrics.roc_auc_score(y_val_iter, y_pred_iter)\n auc_scores.append(auc)\n\n print(\"C: %s, AUC mean: %.3f, AUC std: +-%.4f\" % (C, np.mean(auc_scores), np.std(auc_scores)))\nprint(\"sklearn.__version__=\",sklearn.__version__)", "C: 0.01, AUC mean: 0.809, AUC std: +-0.0126\nC: 0.1, AUC mean: 0.813, AUC std: +-0.0137\nC: 1, AUC mean: 0.814, AUC std: +-0.0146\nC: 10, AUC mean: 0.812, AUC std: +-0.0142\nsklearn.__version__= 0.24.1\n" ] ], [ [ "Which C leads to the best mean score?\n\n- 0.01\n- 0.1\n- 1\n- 10\n\nIf you have ties, select the score with the lowest std. If you still have ties, select the smallest C", "_____no_output_____" ], [ "## Submit the results\n\nSubmit your results here: https://forms.gle/e497sR5iB36mM9Cs5\n\nIt's possible that your answers won't match exactly. If it's the case, select the closest one.\n\n## Deadline\n\nThe deadline for submitting is 04 October 2021, 17:00 CET. After that, the form will be closed.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ecb1da3f10424ef9761957e2f31c24e3839d464e
10,391
ipynb
Jupyter Notebook
Map_saildrone_on_satellite_image.ipynb
cgentemann/2021_salinity_paper
73d646cb520aabee1cf59b8e230786a0c24b886f
[ "Apache-2.0" ]
null
null
null
Map_saildrone_on_satellite_image.ipynb
cgentemann/2021_salinity_paper
73d646cb520aabee1cf59b8e230786a0c24b886f
[ "Apache-2.0" ]
null
null
null
Map_saildrone_on_satellite_image.ipynb
cgentemann/2021_salinity_paper
73d646cb520aabee1cf59b8e230786a0c24b886f
[ "Apache-2.0" ]
5
2020-12-13T02:40:28.000Z
2021-06-03T19:07:44.000Z
41.730924
150
0.587431
[ [ [ "# Plot saildrone track data on satellite data", "_____no_output_____" ] ], [ [ "import matplotlib.ticker as mticker\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seawater as sw\nimport cartopy.crs as ccrs # import projections\nimport cartopy.feature as cf # import features\nimport uuid\nfrom scipy import interpolate\nfrom scipy import signal\nfrom glob import glob\nfrom sklearn.linear_model import LinearRegression\nfrom matplotlib import ticker, cm\nimport matplotlib.colors as colors\nimport datetime as dt\nimport scipy.ndimage\nimport spectrum\n\nplt.rcParams['figure.figsize'] = (10.0, 8.0)\n\n#create xarray dataset with saildrone filenames\n#data directory for saildrone data\ndata_dir = './data/'\nsaildrone_filenames = glob(data_dir+'*.nc')\n\ndata_dir2 = './data/collocations_with_8dy_sss_no_repeats/'\nsaildrone_filenames2 = glob(data_dir+'*.nc')\n\ndata_dir3 = './data/collocations_with_8day_sss/'\nsaildrone_filenames3 = glob(data_dir+'*.nc')", "_____no_output_____" ], [ "ds=[]\nfor iusv in range(3):\n fname=saildrone_filenames[iusv]\n ds_usv=xr.open_dataset(fname).isel(trajectory=0).swap_dims({'obs':'time'})\n ds_usv.close()\n\n #make diruanl plot\n xlon=ds_usv.longitude.data\n #time_offset_to_lmt=(xlon/360.)*24.*60\n #tem = ds_usv.time+time_offset_to_lmt*np.timedelta64(1,'m')# dt.timedelta(seconds=1)\n #ds_usv['tlmt']=tem\n #ds_usv2= ds_usv.swap_dims({'time':'tlmt'})\n #ds_usv2a = ds_usv2.where(ds_usv2.tlmt.dt.hour==6)\n #dymn = ds_usv2a.groupby(\"tlmt.dayofyear\").mean()\n #ds_usv3 = ds_usv2.groupby(\"tlmt.dayofyear\") - dymn\n #ds_usv['TEMP_AIR_MEAN_DW'] = ds_usv3.swap_dims({'tlmt':'time'}).drop({'tlmt'}).TEMP_AIR_MEAN\n #ds_usv['TEMP_SBE37_MEAN_DW'] = ds_usv3.swap_dims({'tlmt':'time'}).drop({'tlmt'}).TEMP_SBE37_MEAN\n ds_usv['wspd']=np.sqrt(ds_usv.UWND_MEAN**2+ds_usv.VWND_MEAN**2) \n tem=sw.dens0(ds_usv.SAL_SBE37_MEAN,ds_usv.TEMP_SBE37_MEAN)\n ds_usv['density_mean']=xr.DataArray(tem,dims=('time'),coords={'time':ds_usv.time})\n tem=sw.alpha(ds_usv.SAL_SBE37_MEAN,ds_usv.TEMP_SBE37_MEAN,ds_usv.BARO_PRES_MEAN*0) #pressure =0 at surface\n ds_usv['alpha_ME']=xr.DataArray(tem,dims=('time'),coords={'time':ds_usv.time})\n tem=sw.beta(ds_usv.SAL_SBE37_MEAN,ds_usv.TEMP_SBE37_MEAN,ds_usv.BARO_PRES_MEAN*0) #pressure =0 at surface\n ds_usv['beta_MEAN']=xr.DataArray(tem,dims=('time'),coords={'time':ds_usv.time})\n ds_usv['latitude']=ds_usv.latitude.interpolate_na(dim='time')\n ds_usv['longitude']=ds_usv.longitude.interpolate_na(dim='time')\n xlat=ds_usv.latitude\n xlon=ds_usv.longitude\n dkm2 = abs(np.abs((((xlon[1:].data-xlon[0:-1].data)**2+(xlat[1:].data-xlat[0:-1].data)**2)**.5)*110.567*np.cos(np.pi*xlat[1:].data/180)))\n dkm2=np.append(dkm2,dkm2[66238]) #add on last point\n dkm3 = dkm2.cumsum()\n ds_usv['dist_total']=xr.DataArray(dkm3,dims=('time'),coords={'time':ds_usv.time})\n ds_usv['dist_between']=xr.DataArray(dkm2,dims=('time'),coords={'time':ds_usv.time})\n ds.append(ds_usv)\nds_saildrone = xr.concat(ds, dim='trajectory')\n#ds = xr.concat([ds,ds_usv],dim='trajectory')\n#ds_saildrone = ds.copy(deep=True)\nds_saildrone.wspd.attrs={'units':'m/s'}", "_____no_output_____" ], [ "t1,t2=dt.datetime(2020,1,17),dt.datetime(2020,3,7)\nfnames=[]\nfor i in range(46):\n t=t1+dt.timedelta(days=i)\n tdir = t-dt.timedelta(days=4)\n sdoy = str(tdir.timetuple().tm_yday).zfill(3)\n smon = str(t.month).zfill(2)\n sdy = str(t.day).zfill(2)\n fname = 
'F:/data/sat_data/smap/SSS/L3/JPL/V4.3/8day_running/2020/' + sdoy + '/SMAP_L3_SSS_2020'+smon+sdy+'_8DAYS_V4.3.nc'\n# fname = 'Z:/SalinityDensity/smap/L3/JPL/V4.3/8day_running/2020/' + sdoy + '/SMAP_L3_SSS_2020'+smon+sdy+'_8DAYS_V4.3.nc'\n fnames.append(fname)\ntem = xr.open_mfdataset(fnames,combine='nested',concat_dim='time')\ntem = tem.rename({'latitude':'lat','longitude':'lon'}).sel(lon=slice(-64,-46),lat=slice(16,4))\nds_jpl = tem.load()", "_____no_output_____" ], [ "t1,t2=dt.datetime(2020,1,17),dt.datetime(2020,3,7)\nfnames=[]\nfor i in range(46):\n t=t1+dt.timedelta(days=i)\n tdir = t-dt.timedelta(days=4)\n sdoy = str(tdir.timetuple().tm_yday).zfill(3)\n sdoy2 = str(t.timetuple().tm_yday).zfill(3)\n #\\\\White_home_pc\\f\\data\\sat_data\\smap\\SSS\\L3\\RSS\\V4\\8day_running\\SCI\\2020\n fname = 'F:/data/sat_data/smap/SSS/L3/RSS/V4/8day_running/SCI/2020/' + sdoy + '/RSS_smap_SSS_L3_8day_running_2020_'+sdoy2+'_FNL_v04.0.nc'\n# fname = 'Z:/SalinityDensity/smap/L3/RSS/V4/8day_running/SCI/2020/' + sdoy + '/RSS_smap_SSS_L3_8day_running_2020_'+sdoy2+'_FNL_v04.0.nc'\n fnames.append(fname)\ntem = xr.open_mfdataset(fnames,combine='nested',concat_dim='time')\ntem.coords['lon'] = (tem.coords['lon'] + 180) % 360 - 180\ntem = tem.sortby(tem.lon)\ntem = tem.sel(lon=slice(-64,-46),lat=slice(4,16))\nds_rss = tem.load()", "_____no_output_____" ] ], [ [ "# this goes through the entire dataset and plots the satellite data behind the saildrone dat with a dot on the saildrone postiion\n\n- it would be nice to just only show saildrone cruise color for the day of the data not implemented yet", "_____no_output_____" ] ], [ [ "def map_fig(ds_sat,ds,var,text1,text2,fout,date_str):\n ds_sat = ds_sat.sel(time=date_str)\n fig = plt.figure(figsize=(8,6))\n ax = plt.axes(projection = ccrs.PlateCarree()) # create a set of axes with Mercator projection\n im=ax.pcolormesh(ds_sat.lon,ds_sat.lat,ds_sat[var],vmin=34,vmax=36.5,transform=ccrs.PlateCarree(),cmap='viridis')\n for i in range(3):\n ds2 = ds.isel(trajectory=i)\n ax.scatter(ds2.longitude,ds2.latitude,c=ds2.SAL_SBE37_MEAN,vmin=34,vmax=36.5,\n s=.15,transform=ccrs.PlateCarree(),label=ds2.trajectory.data,cmap='viridis')\n ds2a = ds.isel(trajectory=i).sel(time=date_str)\n ax.plot(ds2a.longitude,ds2a.latitude,'.',transform=ccrs.PlateCarree(),color='w')\n\n ax.coastlines(resolution='10m') \n ax.set_extent([-64,-46,4,16])\n bx1 = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=0, color='grey', alpha=0.5, linestyle='--')\n bx1.xlabels_top = False; bx1.ylabels_left = True\n bx1.ylabels_right = False; bx1.xlines = False\n bx1.xlocator = mticker.FixedLocator([-60,-55,-50,-45])\n bx1.xformatter = LONGITUDE_FORMATTER; bx1.yformatter = LATITUDE_FORMATTER\n\n #ax.legend()\n ax.text(-63.5,7.5,'South America',fontsize=14)\n ax.text(-63.5,5,text1,fontsize=14)\n ax.text(-63.5,4.3,text2,fontsize=14)\n cax = fig.add_axes([0.5, 0.8, 0.37, 0.02])\n cbar = fig.colorbar(im,cax=cax, orientation='horizontal')\n cbar.set_label('Salinity (psu)')\n figs_dir = './figures/'\n fig.savefig(figs_dir+fout) \n\nds = ds_saildrone#.isel(trajectory=0)\ndate_start = ds.time[0]+np.timedelta64(12,'h')\ndate_end = ds.time[-1].data\nlength_cruise = int(np.floor((date_end-date_start)/np.timedelta64(1,'D')))\nfor incr in range(length_cruise):\n dd1 = date_start+np.timedelta64(incr,'D')\n date_str = str(dd1.data)[0:13]\n \n if not date_str=='2020-02-17T12':\n continue\n \n text2 = str(dd1.dt.month.data).zfill(2)+'-'+str(dd1.dt.day.data).zfill(2)+'-'+str(dd1.dt.year.data)+'8-day Average'\n\n 
#date_str = '2020-01-30T1200'\n #text2='7 Feb 2020 8-day Average'\n\n text1='B) RSS SMAP Salinity v4'\n fname = 'map_sss_smap'+date_str+'rss.png'\n map_fig(ds_rss,ds,'sss_smap',text1,text2,fname,date_str)\n\n text1='C) RSS SMAP Salinity v4 - 40 km'\n fname = 'map_sss_smap'+date_str+'_rss40km.png'\n map_fig(ds_rss,ds,'sss_smap_40km',text1,text2,fname,date_str)\n\n text1='A) JPL SMAP Salinity v4.3'\n fname = 'map_sss_smap'+date_str+'_jpl.png'\n map_fig(ds_jpl,ds,'smap_sss',text1,text2,fname,date_str)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ecb1e31c004044569cc1c638d3e008f83df6e641
171,635
ipynb
Jupyter Notebook
_notebooks/2021-01-10-Text Classification.ipynb
MarkleyO/capstone-notebooks
1bfbe58246b02e2aa9dc0bb57a2a0efe5f7e0cd1
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-01-10-Text Classification.ipynb
MarkleyO/capstone-notebooks
1bfbe58246b02e2aa9dc0bb57a2a0efe5f7e0cd1
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-01-10-Text Classification.ipynb
MarkleyO/capstone-notebooks
1bfbe58246b02e2aa9dc0bb57a2a0efe5f7e0cd1
[ "Apache-2.0" ]
null
null
null
95.885475
96,276
0.656888
[ [ [ "# Text Classification \n\n> Chapter 1 - learn about using a neural network to classify text using sentiment (positive or negative).\n\n- toc: true\n- branch: master\n- badges: false\n- comments: false\n- annotations: true", "_____no_output_____" ], [ "# Introduction", "_____no_output_____" ], [ "Text classification is the process of assigning tags or categories to text according to its content. It’s one of the fundamental tasks in natural language processing.\n\nThe text we wanna classify is given as input to an algorithm, the algorithm will then analyze the text’s content, and then categorize the input as one of the tags or categories previously given.\n\n**Input → Classifying Algorithm → Classification of Input**\n\nReal life examples:\n\n- Sentiment analysis: how does the writer of the sentence feel about what they are writing about, do they think positively or negatively of the subject? Ex. restaurant reviews topic labeling: given sentences and a set of topics, which topic does this sentence fall under? Ex. is this essay about history? Math? etc? spam detection Ex. Email filtering: is this email a real important email or spam?\n\nExample. A restaurant wants to evaluate their ratings but don’t want to read through all of them. Therefore, they wanna use a computer algorithm to do all their work. They simply want to know if the customer’s review is positive or negative.\n\nHere’s an example of a customer’s review and a simple way an algorithm could classify their review.\n\nInput: “The food here was too salty and too expensive”\n\nAlgorithm: Goes through every word in the sentence and counts how many positive words and how many negative words are in the sentence.\n\n```\n “The, food, here, was, too, and” are all neutral words\n\n “Salty, expensive” are negative words.\n\n Negative words: 2\n Positive words: 0\n```\n\nClassification: Negative Review, because there are more negative words (2) than positive (0).\n\nHowever, this algorithm obviously doesn’t work in a lot of cases.\n\nFor example, “The food here was good, not expensive and not salty” would be classified as negative but it’s actually a positive review.\n\nLanguage and text can get very complicated which makes creating these algorithms difficult. Some things that make language difficult could be words that have multiple meanings, negation words (words such as not), slang, etc.", "_____no_output_____" ], [ "# Set up data and imports", "_____no_output_____" ], [ "## Library imports\n\nThis section of code is to import any necessary Python libraries that we'll need for the rest of this notebook. 
Some packages may need to be installed since they are not built in to Python3.", "_____no_output_____" ] ], [ [ "# collapse-hide\n!pip3 install seaborn\n!pip3 install plotly --user\n!pip3 install sklearn\n\nimport sys\nimport string\nfrom scipy import sparse\nfrom pprint import pprint\nimport pandas as pd\nimport seaborn as sns\nimport plotly.offline as py\nfrom plotly.offline import init_notebook_mode, iplot\nimport plotly.graph_objs as go\ninit_notebook_mode(connected = True)\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom html import escape\nfrom IPython.core.display import display, HTML\nfrom string import Template\nfrom sklearn.metrics import classification_report\nimport json\n\nHTML('<script src=\"https://d3js.org/d3.v3.min.js\" charset=\"utf-8\"></script>')\n", "Requirement already satisfied: seaborn in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (0.11.1)\nRequirement already satisfied: matplotlib>=2.2 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from seaborn) (3.3.3)\nRequirement already satisfied: pandas>=0.23 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from seaborn) (1.2.1)\nRequirement already satisfied: scipy>=1.0 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from seaborn) (1.6.0)\nRequirement already satisfied: numpy>=1.15 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from seaborn) (1.19.5)\nRequirement already satisfied: python-dateutil>=2.1 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from matplotlib>=2.2->seaborn) (2.8.1)\nRequirement already satisfied: cycler>=0.10 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from matplotlib>=2.2->seaborn) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from matplotlib>=2.2->seaborn) (2.4.7)\nRequirement already satisfied: pillow>=6.2.0 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from matplotlib>=2.2->seaborn) (8.1.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from matplotlib>=2.2->seaborn) (1.3.1)\nRequirement already satisfied: pytz>=2017.3 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from pandas>=0.23->seaborn) (2020.5)\nRequirement already satisfied: six>=1.5 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from python-dateutil>=2.1->matplotlib>=2.2->seaborn) (1.15.0)\n\u001b[33mYou are using pip version 18.1, however version 20.3.3 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\nRequirement already satisfied: plotly in /Users/thuyvynguyen/.local/lib/python3.7/site-packages (4.14.3)\nRequirement already satisfied: retrying>=1.3.3 in /Users/thuyvynguyen/.local/lib/python3.7/site-packages (from plotly) (1.3.3)\nRequirement already satisfied: six in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from plotly) (1.15.0)\n\u001b[33mYou are using pip version 18.1, however version 20.3.3 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\nRequirement already satisfied: sklearn in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (0.0)\nRequirement already satisfied: scikit-learn in 
/Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from sklearn) (0.24.1)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from scikit-learn->sklearn) (2.1.0)\nRequirement already satisfied: joblib>=0.11 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from scikit-learn->sklearn) (1.0.0)\nRequirement already satisfied: scipy>=0.19.1 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from scikit-learn->sklearn) (1.6.0)\nRequirement already satisfied: numpy>=1.13.3 in /Users/thuyvynguyen/.pyenv/versions/3.7.2/lib/python3.7/site-packages (from scikit-learn->sklearn) (1.19.5)\n\u001b[33mYou are using pip version 18.1, however version 20.3.3 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ] ], [ [ "## Getting our data \n\nBelow is a definition of getData, a basic function to pull from the `trainingSet.txt` and `testSet.txt`. The data that we're using for this example is a set of reviews written by users on Yelp, classified as positive (1) or negative (0). \n\nWe open the file, create temporary arrays, and pull from the file line by line.\n\nOpen the cell if you'd like to peek into what the function looks like.", "_____no_output_____" ] ], [ [ "# collapse-hide\n# Our two files that contain our data, split up into a training set and a testing set.\n\ntrainingFile = \"trainingSet.txt\"\ntestingFile = \"testSet.txt\"\n\ndef getData(fileName):\n f = open(fileName)\n file = f.readlines()\n\n sentences = []\n sentiments = []\n\n for line in file:\n sentence, sentiment = line.split('\\t')\n sentences.append(sentence.strip())\n sentiments.append(int(sentiment.strip())) # Sentiment in {0,1}\n\n return sentences, np.array(sentiments)", "_____no_output_____" ], [ "# get data from the training and testing files using the getData function defined above\n\ntrainingSentences, trainingLabels = getData(trainingFile)\ntestingSentences, testingLabels = getData(testingFile) ", "_____no_output_____" ] ], [ [ "Let's take a peek at what this data looks like:", "_____no_output_____" ] ], [ [ "#collapse-hide \n\nf = open(\"trainingSet.txt\")\nfile = f.readlines()\n\nsentences = []\nsentiments = []\n\nfor line in file:\n sentence, sentiment = line.split('\\t')\n sentences.append(sentence.strip())\n sentiments.append(int(sentiment.strip())) \n \nprint(\"Sample sentences:\")\npprint(sentences[:10]) \nprint(\"Corresponding sentiments:\")\npprint(sentiments[:10]) ", "Sample sentences:\n['Wow... 
Loved this place.',\n 'Not tasty and the texture was just nasty.',\n 'Stopped by during the late May bank holiday off Rick Steve recommendation '\n 'and loved it.',\n 'The selection on the menu was great and so were the prices.',\n 'Now I am getting angry and I want my damn pho.',\n \"Honeslty it didn't taste THAT fresh.)\",\n 'The potatoes were like rubber and you could tell they had been made up ahead '\n 'of time being kept under a warmer.',\n 'The fries were great too.',\n 'A great touch.',\n 'Service was very prompt.']\nCorresponding sentiments:\n[1, 0, 1, 1, 0, 0, 0, 1, 1, 1]\n" ] ], [ [ "## Pre-processing our data \n\nWe need to modify these sentences by tokenizing them into individual strings (word by word) so that we can feed our model individual words and their associated sentiment (negative / positive).", "_____no_output_____" ] ], [ [ "def preProcess(sentences):\n\n def cleanText(text):\n # Make lower case\n text = text.lower()\n\n # Replace non-text characters with spaces\n nonText = string.punctuation + (\"\")\n text = text.translate(str.maketrans(nonText, ' ' * (len(nonText))))\n\n # Split sentences into individual words - tokenize\n words = text.split()\n\n return words\n\n return list(map(cleanText, sentences))", "_____no_output_____" ], [ "# Process both the training and testing tokens.\n\ntrainingTokens = preProcess(trainingSentences)\ntestingTokens = preProcess(testingSentences)", "_____no_output_____" ] ], [ [ "Let's look at what these tokenized sentences look like now:", "_____no_output_____" ] ], [ [ "#collapse-hide\nprint(\"Training tokens:\")\npprint(trainingTokens[:2]) \nprint(\"Testing tokens:\")\npprint(testingTokens[:3]) ", "Training tokens:\n[['wow', 'loved', 'this', 'place'],\n ['not', 'tasty', 'and', 'the', 'texture', 'was', 'just', 'nasty']]\nTesting tokens:\n[['crust', 'is', 'not', 'good'],\n ['would', 'not', 'go', 'back'],\n ['i', 'was', 'shocked', 'because', 'no', 'signs', 'indicate', 'cash', 'only']]\n" ] ], [ [ "## Vectorizing our data\n\nNow that we have our sentences tokenized, notice how our training tokens are nested arrays. We want to pull them out of nested arrays and into just one general vocabulary list.", "_____no_output_____" ] ], [ [ "#collapse-hide\n\ndef getVocab(sentences):\n vocab = set()\n for sentence in sentences:\n for word in sentence:\n vocab.add(word)\n return sorted(vocab)", "_____no_output_____" ], [ "# Pull trainingTokens into one vocabulary listed, no nested arrays\n\nvocabulary = getVocab(trainingTokens)", "_____no_output_____" ] ], [ [ "We can peek at our vocabulary list, an alphabetically sorted list of words, now at a random set of indices:", "_____no_output_____" ] ], [ [ "#collapse-hide\n\npprint(vocabulary[50:70])", "['amount',\n 'an',\n 'and',\n 'angry',\n 'another',\n 'anticipated',\n 'any',\n 'anything',\n 'anytime',\n 'anyway',\n 'apologize',\n 'app',\n 'appalling',\n 'appetizers',\n 'apple',\n 'approval',\n 'are',\n 'area',\n 'aren',\n 'aria']\n" ] ], [ [ "We want our arrays to actually be proper vectors to feed to our model, which we'll create below as well. This function, ```createVector``` transforms our arrays into vectors. 
", "_____no_output_____" ] ], [ [ "def createVector(vocab, sentences):\n indices = []\n wordOccurrences = []\n\n for sentenceIndex, sentence in enumerate(sentences):\n alreadyCounted = set() # Keep track of words so we don't double count.\n for word in sentence:\n if (word in vocab) and word not in alreadyCounted:\n # If we just want {0,1} for the presence of the word (bernoulli NB),\n # only count each word once. Otherwise (multinomial NB) count each\n # occurrence of the word.\n \n \n #which sentence, which word\n indices.append((sentenceIndex, vocab.index(word)))\n \n wordOccurrences.append(1)\n alreadyCounted.add(word)\n\n # Unzip\n rows = [row for row, _ in indices]\n columns = [column for _, column in indices]\n\n sentenceVectors = sparse.csr_matrix((wordOccurrences, (rows, columns)), dtype=int, shape=(len(sentences), len(vocab)))\n\n return sentenceVectors", "_____no_output_____" ], [ "training = createVector(vocabulary, trainingTokens)\ntesting = createVector(vocabulary, testingTokens)", "_____no_output_____" ] ], [ [ "Our training and test data has gone through some transformation. Here's what the training data looks like now:", "_____no_output_____" ] ], [ [ "#collapse-hide\n\nprint(\"Training data:\")\nprint(training[:2])", "Training data:\n (0, 694)\t1\n (0, 884)\t1\n (0, 1186)\t1\n (0, 1335)\t1\n (1, 52)\t1\n (1, 640)\t1\n (1, 768)\t1\n (1, 788)\t1\n (1, 1158)\t1\n (1, 1166)\t1\n (1, 1171)\t1\n (1, 1281)\t1\n" ] ], [ [ "# A Naive Bayes model", "_____no_output_____" ], [ "## Creating and Training our Model\n\nBelow is our Naive Bayes classifier, which is the model we've chosen to use for our sentiment analysis of restaurant reviews.", "_____no_output_____" ] ], [ [ "class NaiveBayesClassifier:\n def __init__(self):\n self.priorPositive = None # Probability that a review is positive\n self.priorNegative = None # Probability that a review is negative\n self.positiveLogConditionals = None\n self.negativeLogConditionals = None\n\n def computePriorProbabilities(self, labels):\n self.priorPositive = len([y for y in labels if y == 1]) / len(labels)\n self.priorNegative = 1 - self.priorPositive\n\n def computeConditionProbabilities(self, examples, labels, dirichlet=1):\n _, vocabularyLength = examples.shape\n\n # How many of each word are there in all of the positive reviews\n positiveCounts = np.array([dirichlet for _ in range(vocabularyLength)])\n # How many of each word are there in all of the negative reviews\n negativeCounts = np.array([dirichlet for _ in range(vocabularyLength)])\n\n # Here's how to iterate through a spare array\n coordinates = examples.tocoo() # Converted to a `coordinate` format\n for exampleIndex, featureIndex, observationCount in zip(coordinates.row, coordinates.col, coordinates.data):\n # For sentence {exampleIndex}, for word at index {featureIndex}, the word occurred {observationCount} times\n if labels[exampleIndex] == 1:\n positiveCounts[featureIndex] += observationCount\n else:\n negativeCounts[featureIndex] += observationCount\n\n # [!] 
Make sure to use the logs of the probabilities\n positiveReviewCount = len([y for y in labels if y == 1])\n negativeReviewCount = len([y for y in labels if y == 0])\n\n # We are using bernoulli NB (single occurance of a word)\n self.positiveLogConditionals = np.log(positiveCounts) - np.log(positiveReviewCount + dirichlet*2)\n self.negativeLogConditionals = np.log(negativeCounts) - np.log(negativeReviewCount + dirichlet*2)\n\n # This works for multinomial NB (multiple occurances of a word)\n # self.positiveLogConditionals = np.log(positiveCounts) - np.log(sum(positiveCounts))\n # self.negativeLogConditionals = np.log(negativeCounts) - np.log(sum(negativeCounts))\n\n # Calculate all of the parameters for making a naive bayes classification\n def fit(self, trainingExamples, trainingLabels):\n # Compute the probability of positive/negative review\n self.computePriorProbabilities(trainingLabels)\n\n # Compute\n self.computeConditionProbabilities(trainingExamples, trainingLabels)\n\n def computeLogPosteriors(self, sentence):\n return ((np.log(self.priorPositive) + sum(sentence * self.positiveLogConditionals)),\n (np.log(self.priorNegative) + sum(sentence * self.negativeLogConditionals)))\n \n # Have the model try predicting if a review if positive or negative\n def predict(self, examples):\n totalReviewCount, _ = examples.shape\n conf_list = []\n\n predictions = np.array([0 for _ in range(totalReviewCount)])\n\n for index, sentence in enumerate(examples):\n logProbabilityPositive, logProbabilityNegative = self.computeLogPosteriors(\n sentence)\n conf_list.append([np.exp(logProbabilityPositive), np.exp(logProbabilityNegative)])\n predictions[index] = 1 if logProbabilityPositive > logProbabilityNegative else 0\n\n return conf_list, predictions", "_____no_output_____" ] ], [ [ "Initialize an instance of model and begin to fit the model with our training data and corresponding labels.", "_____no_output_____" ] ], [ [ "nbClassifier = NaiveBayesClassifier()\nnbClassifier.fit(training, trainingLabels)", "_____no_output_____" ], [ "# determine the accuracy of our model\n\ndef accuracy(predictions, actual):\n return sum((predictions == actual)) / len(actual)", "_____no_output_____" ] ], [ [ "Let's take our model for a spin, using both the training set and the testing set. 
You may notice discrepencies in accuracy between training and testing - _why is that_?", "_____no_output_____" ] ], [ [ "# run our training and test using the Naive Bayes classifier\n\ntrain_confidence_scores, trainingPredictions = nbClassifier.predict(training)\ntest_confidence_scores, testingPredictions = nbClassifier.predict(testing)", "_____no_output_____" ], [ "#collapse-hide\nprint(\"Training accuracy:\", accuracy(trainingPredictions, trainingLabels))\nprint(\"Testing accuracy:\", accuracy(testingPredictions, testingLabels))", "Training accuracy: 0.9519038076152304\nTesting accuracy: 0.7947686116700201\n" ] ], [ [ "## Visualizing Results\n\nHere's another to visualize our results using a confusion matrix.", "_____no_output_____" ] ], [ [ "#collapse-hide\n\ndata = {'Actual': testingLabels,\n 'Predicted': testingPredictions\n }\n\ndf = pd.DataFrame(data, columns=['Actual','Predicted'])\nconfusion_matrix = pd.crosstab(df['Actual'], df['Predicted'], rownames=['Actual'], colnames=['Predicted'])\n\nax = sns.heatmap(confusion_matrix, annot=True,cmap=\"YlGnBu\")\nax.set_ylim(2.0, 0)\n\nplt.title('Confusion Matrix of Testing')\nplt.show()", "_____no_output_____" ] ], [ [ "## A Closer Look \n\nLet's look at the general results for our model - notably, we can look at its precision for predicting negative and positive sentiment in a given sentence.", "_____no_output_____" ] ], [ [ "target_names = ['negative', 'positive']\nprint(classification_report(testingLabels, testingPredictions, target_names=target_names))", " precision recall f1-score support\n\n negative 0.74 0.91 0.82 251\n positive 0.88 0.68 0.77 246\n\n accuracy 0.79 497\n macro avg 0.81 0.79 0.79 497\nweighted avg 0.81 0.79 0.79 497\n\n" ] ], [ [ "Now, we want to make an interactive confusion matrix so we can precisely see which results are accurately classified and which are mis-classified, as well as the confidence at which the model has classified that result. 
", "_____no_output_____" ] ], [ [ "# collapse-hide\n\n# work with the model results to create a JSON dump of the data for future use\n\nimport json\n\noutput_filename = \"predict.json\"\ndata = []\nfor i in range(len(testingPredictions)):\n data.append({\n 'index': i,\n 'true_label': int(testingLabels[i]),\n 'predicted_label': int(testingPredictions[i]),\n 'confidence_score': test_confidence_scores[i],\n 'text': testingSentences[i]\n })\n\nwith open(output_filename, 'w') as outfile:\n json.dump(data, outfile, indent=4, sort_keys=False)", "_____no_output_____" ], [ "# collapse-hide\n\nfrom IPython.core.display import display, HTML\nfrom string import Template\n\n\njson_filepath = \"\\\"\" + output_filename + \"\\\"\"\nHTML('<script src=\"https://d3js.org/d3.v3.min.js\" charset=\"utf-8\"></script>')", "_____no_output_____" ], [ "css_text = '''\nbody {\n font-family: Arial, sans-serif;\n font-size: larger;\n}\n.box_highlighted { \n background-color: #ffb; \n border: 1px solid #b53;\n}\n.highlight{\n background-color: yellow;\n}\n.lighthigh{\n background-color: green;\n}\nli{\n font-size: smaller;\n}\ntd{\n min-width: 100px;\n}\n#review{\n border:1px solid pink; \n padding: 5px; \n float: left; \n width: 750px; \n height: 500px; \n background-color: white;\n margin: 20px;\n overflow: scroll;\n }\n\n#matrix{\n border:1px solid pink; \n padding: 5px; \n float: left;\n margin: 20px;\n}\n'''", "_____no_output_____" ], [ "js_template = Template('''\n\nconsole.log(\"Loading JavaScript...\")\nvar conf_data = $confusion_data\n\nconsole.log(\"Loaded Data:\")\nd3.json( $conf_data_filepath, function(d) {\n console.log(d)\n})\n\n/* I've temporarily left out the 'getType' function, since the names of these\ntypes are not included in the JSON file that is given to the JavaScript. More functionality can be incldued later to bring the names in as well as the raw data. Type will be represented by the given numeric identifier for now. 
*/\n\n/* extractTypes: identifies different types each data point can be identified\nas, based off of the 'true_label' attribute in JSON file\ngiven: JSON file\nreturns: dictionary of possible values for 'true_label' */\nfunction extractTypes(data){\n var lookup = {};\n var items = data\n var result = [];\n\n for (var item, i=0; item = items[i++];){\n var name = item.true_label;\n\n if(!(name in lookup)){\n lookup[name] = 1;\n result.push(name);\n }\n }\n\n return lookup;\n}\n\nd3.json(\"predict.json\", function(d) {\n console.log(d); // Ensuring that data is properly read in.\n console.log(Object.keys(extractTypes(d))); // Logging list of possible types\n\n // # of Categories x # of Categories Table\n tableDimension = Object.keys(extractTypes(d)).length\n var table = new Array(tableDimension);\n var dataset = [];\n for(var i=0; i<tableDimension; i++){\n table[i] = new Array(tableDimension);\n for(var j=0; j<tableDimension; j++){\n table[i][j] = 0;\n }\n }\n\n // Filling out table with prediction counts and recording dataset\n /* Code is currently dependent on JSON having the data points: \n \"true_label\" and \"predicted_label\" */\n for(var i=0; i<d.length; i+=1){ // i+=1 or i++ ?\n table[parseInt(d[i][\"true_label\"])][parseInt(d[i][\"predicted_label\"])]+=1;\n dataset.push([d[i][\"true_label\"], d[i][\"predicted_label\"], d[i][\"text\"], d[i][\"index\"]]);\n }\n\n console.log(table); // Reporting current state of table values.\n\n var w = 750;\n var h = 700; // These are dimensions?\n\n // Create SVG element\n var svg = d3.select(\"body\") // This could be problematic later\n .select(\"#matrix\")\n .append(\"svg\")\n .attr(\"width\", w)\n .attr(\"height\", h);\n\n var rect = svg.selectAll(\"rect\")\n .data(dataset)\n .enter()\n .append(\"rect\");\n\n // Give these a more descriptive name later\n var counters = new Array(tableDimension * tableDimension).fill(0);\n var ycounters = new Array(tableDimension * tableDimension).fill(0);\n\n var confusing = h / tableDimension;\n\n // JSON Object Format Guide: \n // d[0] = true_label ; d[1] = predicted_label ; d[2] = text\n rect.attr(\"x\", function (d, i){\n var matrixnum = (parseInt(d[1]) * tableDimension) + parseInt(d[0]);\n var inmatrixcol = counters[matrixnum] % 16;\n\n counters[matrixnum]++;\n\n return 10 + (d[0] * confusing) + (inmatrixcol * 16);\n\n })\n .attr(\"y\", function(d, i){\n var matricvol = d[1];\n var matrixnum = (parseInt(d[1] * tableDimension) + parseInt(d[0]));\n var hm = Math.floor(ycounters[matrixnum]/16);\n\n ycounters[matrixnum]++;\n\n return 10 + (d[1] * confusing) + (hm * 16);\n })\n .attr(\"id\", function(d){\n return \"rect\" + d[3];\n })\n .attr(\"width\", function(d){\n return 15;\n })\n .attr(\"height\", function(d){\n return 15;\n })\n .attr(\"opacity\", function(d){\n return .85;\n })\n .attr(\"fill\", function(d){\n return (\"pink\");\n })\n .attr(\"class\", function(d){\n predicted_label = \"predicted_label_\" + d[1];\n true_label = \"true_label_\" + d[0];\n\n return true_label + \" \" + predicted_label;\n });\n\n d3.select(\"#review\")\n .select(\"testList\")\n .selectAll(\"rect\")\n .data(\n dataset.filter(d => d[0] != d[1]),\n function(d){\n return d[3];\n }\n )\n .enter()\n .append(\"li\")\n .attr(\"id\", function(d){\n return \"text\" + d[3];\n })\n .html(function(d){\n table = \"<table><tr>\"\n\n table += \"<td> True: \";\n table += parseInt(d[0]); //getType(d[0]);\n table += \"</td>\"\n\n table += \"<td> Predict: \";\n table += parseInt(d[1]); //getType(d[1]);\n table += \"</td>\"\n\n table += 
\"<td>\" + d[2].substr(0,200); + \"</td>\"\n table += \"</tr> </table>\"\n\n return table;\n });\n\n rect.on(\"click\", function(d_on){\n d3.select(\"#review\")\n .select(\"#testList\")\n .html(\"\");\n\n if(!this.classList.contains(\"past\")){\n d3.selectAll(\".past\")\n .attr(\"fill\", \"pink\")\n .classed(\"past\", false);\n\n d3.selectAll(\".reclick\")\n .attr(\"fill\", \"pink\")\n .classed(\"reclick\", false)\n }\n\n if(!this.classList.contains(\"reclick\")){\n d3.selectAll(\".reclick\")\n .attr(\"fill\", \"pink\")\n .classed(\"reclick\", false);\n }\n\n d3.select(this);\n\n textId = \"\";\n x = \".\" + this.classList[0];\n y = \".\" + this.classList[1];\n test = x + y;\n\n x1 = x.charAt(x.length - 1);\n y1 = y.charAt(y.length - 1);\n\n if(this.classList.contains(\"past\")){\n d3.select(this)\n .classed(\"reclick\", true)\n Id = this.id;\n textId = \"#text\" + Id.substring(4);\n }\n\n d3.selectAll(test)\n .attr(\"fill\", \"purple\")\n .classed(\"past\", \"true\");\n\n d3.select(\"#review\")\n .select(\"#testList\")\n .selectAll(\"rect\")\n .data(\n dataset\n .filter(d => d[0] == x1)\n .filter(d => d[1] == y1),\n function(d){\n return d[3];\n }\n )\n .enter()\n .append(\"li\")\n .attr(\"id\", function(d){\n return \"text\" + d[3];\n })\n .html(function(d){\n table = \"<table><tr>\"\n\n table += \"<td> True: \";\n table += parseInt(d[0]); //getType(d[0]);\n table += \"</td>\"\n\n table += \"<td> Predict: \";\n table += parseInt(d[1]); //getType(d[1]);\n table += \"</td>\"\n\n table += \"<td>\" + d[2].substr(0,200); + \"</td>\"\n table += \"</tr> </table>\"\n\n return table;\n });\n\n d3.select(\"#review\")\n .select(\"testList\")\n .selectAll(\"li\")\n .on(\"mouseover\", function(d_on){\n d3.select(this)\n .classed(\"lighthigh\", true)\n id = this.id;\n rectId = \"#rect\" + id.substring(4);\n d3.selectAll(rectId)\n .attr(\"fill\", \"green\");\n })\n .on(\"mouseout\", function(d_on){\n d3.select(this)\n .classed(\"lighthigh\", false)\n id = this.id;\n rectId = \"#rect\" + id.substring(4);\n d3.selectAll(rectId)\n .attr(\"fill\", \"purple\");\n });\n });\n\n d3.select(\"#review\")\n .select(\"#testList\")\n .selectAll(\"li\")\n .on(\"mouseover\", function(d_on){\n d3.select(this)\n .classed(\"lighthigh\", true)\n id = this.id;\n rectId = \"#rect\" + id.substring(4);\n d3.selectAll(rectId)\n .attr(\"fill\", \"green\");\n })\n .on(\"mouseout\", function(d_on){\n d3.select(this)\n .classed(\"lighthigh\", false)\n id = this.id;\n rectId = \"#rect\" + id.substring(4);\n d3.selectAll(rectId)\n .attr(\"fill\", \"pink\");\n });\n });\n\n''')", "_____no_output_____" ], [ "html_template = Template('''\n<style> $css_text </style>\n<h1> Interactive Confusion Matrix </h1>\n<div id=\"matrix\"> </div>\n<div id=\"review\">\n Data \n <ul id = \"testList\"></ul>\n</div>\n<script> $js_text </script>\n''')", "_____no_output_____" ], [ "js_text = js_template.substitute({'confusion_data': data,\n 'conf_data_filepath': json_filepath})\n\nHTML(html_template.substitute({'css_text': css_text, 'js_text': js_text}))", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecb204ff9afc165b7c82a2c2e087abf9afc611e4
113,666
ipynb
Jupyter Notebook
PythonCodes/Utilities/IllustrationNumerics/SEM_Quadrature_loc.ipynb
Nicolucas/C-Scripts
2608df5c2e635ad16f422877ff440af69f98f960
[ "MIT" ]
null
null
null
PythonCodes/Utilities/IllustrationNumerics/SEM_Quadrature_loc.ipynb
Nicolucas/C-Scripts
2608df5c2e635ad16f422877ff440af69f98f960
[ "MIT" ]
null
null
null
PythonCodes/Utilities/IllustrationNumerics/SEM_Quadrature_loc.ipynb
Nicolucas/C-Scripts
2608df5c2e635ad16f422877ff440af69f98f960
[ "MIT" ]
null
null
null
327.567723
13,644
0.93107
[ [ [ "# Illustration plotting: 2D GLL distribution under H- & P-refinement\nby JN Hayek\n\nIn the current jupyter notebook I intend to plot the GLL quadrature location under h and p refinement", "_____no_output_____" ] ], [ [ "from gll import gll\n\nimport matplotlib.pyplot as plt\nimport matplotlib.collections\nimport numpy as np\nimport itertools\nfrom matplotlib.gridspec import GridSpec", "_____no_output_____" ], [ "plt.rcParams[\"figure.figsize\"] = (8,8)", "_____no_output_____" ], [ "def PlotMesh(NumLines):\n x, y = np.meshgrid(np.linspace(0,1, NumLines), np.linspace(0, 1, NumLines))\n\n plt.plot(x, y,color='k') # use plot, not scatter\n segs1 = np.stack((x,y), axis=2)\n plt.gca().add_collection(matplotlib.collections.LineCollection(segs1,color='k'))\n\n\ndef PlotNodesGllToGlobal(NumBasis,dx,x0,y0):\n \n list1 = gll(NumBasis)[0]\n list2 = gll(NumBasis)[0]\n\n GllNodes=np.array([list(i) for i in list(itertools.product(list1, list2))])\n y = (GllNodes[:,0] + 1)*dx*0.5+x0\n z = (GllNodes[:,1] + 1)*dx*0.5+y0\n plt.plot(y,z, marker=\"o\", ls=\"\", color=\"crimson\")\n \ndef PlotBasis(NumBasis, dx, x0es):\n xyes = [list(i) for i in list(itertools.product(x0es, x0es))]\n for x in xyes:\n PlotNodesGllToGlobal(NumBasis,dx,x[0],x[1])\n \n\ndef PlotCell_and_GLL_Points(NumCells,NumBasis):\n NumLines=NumCells+1\n \n PlotMesh(NumLines)\n\n x0es = list(np.linspace(0,1, NumLines))[:-1]\n dx = list(np.linspace(0,1, NumLines))[1]\n \n PlotBasis(NumBasis=NumBasis, dx=dx, x0es=x0es)\n \n plt.gca().set_aspect('equal')\n plt.axis('off')\n\n", "_____no_output_____" ], [ "\nPlotCell_and_GLL_Points(NumCells=2,NumBasis=1);plt.show()\nPlotCell_and_GLL_Points(NumCells=2,NumBasis=2);plt.show()\nPlotCell_and_GLL_Points(NumCells=2,NumBasis=3);plt.show()\nPlotCell_and_GLL_Points(NumCells=2,NumBasis=4);plt.show()\nPlotCell_and_GLL_Points(NumCells=3,NumBasis=1);plt.show()\nPlotCell_and_GLL_Points(NumCells=3,NumBasis=2);plt.show()\nPlotCell_and_GLL_Points(NumCells=3,NumBasis=3);plt.show()\nPlotCell_and_GLL_Points(NumCells=3,NumBasis=4);plt.show()\nPlotCell_and_GLL_Points(NumCells=4,NumBasis=1);plt.show()\nPlotCell_and_GLL_Points(NumCells=4,NumBasis=2);plt.show()\nPlotCell_and_GLL_Points(NumCells=4,NumBasis=3);plt.show()\nPlotCell_and_GLL_Points(NumCells=4,NumBasis=4);plt.show()\nPlotCell_and_GLL_Points(NumCells=4,NumBasis=5);plt.show()", "_____no_output_____" ], [ "PlotCell_and_GLL_Points(NumCells=6,NumBasis=3)\nplt.savefig(\"/home/nico/Desktop/schematics/GLL_Field.svg\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ecb20556345221622f70c82f0a83e3e7c4f809f7
26,694
ipynb
Jupyter Notebook
notebooks/.ipynb_checkpoints/reinforcement_q_learning-checkpoint.ipynb
a-regal/tesis_pregrado
501d3a137f305d53e8b4eaec7c4ba6f18d7b7706
[ "MIT" ]
1
2019-11-16T02:32:48.000Z
2019-11-16T02:32:48.000Z
notebooks/.ipynb_checkpoints/reinforcement_q_learning-checkpoint.ipynb
a-regal/tesis_pregrado
501d3a137f305d53e8b4eaec7c4ba6f18d7b7706
[ "MIT" ]
null
null
null
notebooks/.ipynb_checkpoints/reinforcement_q_learning-checkpoint.ipynb
a-regal/tesis_pregrado
501d3a137f305d53e8b4eaec7c4ba6f18d7b7706
[ "MIT" ]
1
2020-09-13T16:17:18.000Z
2020-09-13T16:17:18.000Z
60.668182
2,337
0.622162
[ [ [ "import gym\nimport math\nimport random\nimport numpy as np\nimport city_simulation\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport warnings\nimport os\nimport datetime\nfrom collections import namedtuple\nfrom itertools import count\nfrom gc import collect", "_____no_output_____" ] ], [ [ "%matplotlib inline\noutput_notebook()", "_____no_output_____" ] ], [ [ "warnings.filterwarnings('ignore')", "_____no_output_____" ], [ "step_fn = lambda x: 1 if x > 0.5 else 0", "_____no_output_____" ], [ "step_f = np.vectorize(step_fn)", "_____no_output_____" ], [ "env = gym.make('city_simulation-v0').unwrapped", "_____no_output_____" ], [ "# if gpu is to be used\n#device = torch.device('cpu')\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ], [ "Transition = namedtuple('Transition',\n ('state', 'action', 'next_state', 'reward'))\n\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)", "_____no_output_____" ], [ "class DQN(nn.Module):\n\n def __init__(self, outputs):\n super(DQN, self).__init__()\n\n# self.conv1 = nn.Conv2d(1, 32, kernel_size=2, padding=1)\n# self.mp1 = nn.MaxPool2d(kernel_size=2, padding=1)\n# self.bn1 = nn.BatchNorm2d(32)\n# self.conv2 = nn.Conv2d(32, 64, kernel_size=2, padding=1)\n# self.mp2 = nn.MaxPool2d(kernel_size=2, stride=1, padding=1)\n# self.bn2 = nn.BatchNorm2d(64)\n# self.conv3 = nn.Conv2d(64, 128, kernel_size=2, padding=1)\n# self.mp3 = nn.MaxPool2d(kernel_size=2, stride=1, padding=1)\n# self.bn3 = nn.BatchNorm2d(128)\n self.mlp1 = nn.Linear(12,32)\n# self.mlp2 = nn.Linear(32,64)\n# self.mlp3 = nn.Linear(64,128)\n# self.mlp4 = nn.Linear(128,256)\n# self.mlp5 = nn.Linear(256,256)\n self.head = nn.Linear(32, outputs)\n\n # Called with either one element to determine next action, or a batch\n # during optimization. Returns tensor([[left0exp,right0exp]...]).\n def forward(self, x):\n# x = F.relu(self.bn1(self.mp1(self.conv1(x))))\n# x = F.relu(self.bn2(self.mp2(self.conv2(x))))\n #x = F.relu(self.bn3(self.mp3(self.conv3(x))))\n x = F.relu(self.mlp1(x))\n# x = F.relu(self.mlp2(x))\n# x = F.relu(self.mlp3(x))\n# x = F.relu(self.mlp4(x))\n# x = F.relu(self.mlp5(x))\n \n return self.head(x.view(x.size(0), -1))", "_____no_output_____" ], [ "BATCH_SIZE = 32\nGAMMA = 0.999\nEPS_START = 0.9\nEPS_END = 0.05\nEPS_DECAY = 200\nTARGET_UPDATE = 10", "_____no_output_____" ], [ "# Get number of actions from gym action space\nn_actions = 5981", "_____no_output_____" ], [ "policy_net = DQN(n_actions).to(device)\ntarget_net = DQN(n_actions).to(device)\ntarget_net.load_state_dict(policy_net.state_dict())\ntarget_net.eval()", "_____no_output_____" ], [ "optimizer = optim.Adam(policy_net.parameters())\nmemory = ReplayMemory(500)", "_____no_output_____" ], [ "steps_done = 0", "_____no_output_____" ], [ "def select_action(state):\n global steps_done\n sample = random.random()\n eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n math.exp(-1. 
* steps_done / EPS_DECAY)\n steps_done += 1\n if sample > eps_threshold:\n with torch.no_grad():\n # t.max(1) will return largest column value of each row.\n # second column on max result is index of where max element was\n # found, so we pick action with the larger expected reward.\n return policy_net(state.view([-1,12]))\n else:\n return torch.randn(n_actions, device=device)", "_____no_output_____" ], [ "episode_durations = []", "_____no_output_____" ], [ "def optimize_model():\n if len(memory) < BATCH_SIZE:\n return\n transitions = memory.sample(BATCH_SIZE)\n batch = Transition(*zip(*transitions))\n\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,\n batch.next_state)), device=device, dtype=torch.uint8)\n non_final_next_states = torch.cat([s for s in batch.next_state\n if s is not None])\n \n state_batch = torch.cat(batch.state)#.view(-1,1,12)\n #action_batch = torch.cat(batch.action).view(-1,n_actions)\n reward_batch = torch.cat(batch.reward)\n \n #print(state_batch.shape, action_batch.shape)\n \n # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n # columns of actions taken. These are the actions which would've been taken\n # for each batch state according to policy_net\n state_action_values = policy_net(state_batch)#.gather(1, action_batch)\n\n # Compute V(s_{t+1}) for all next states.\n # Expected values of actions for non_final_next_states are computed based\n # on the \"older\" target_net; selecting their best reward with max(1)[0].\n # This is merged based on the mask, such that we'll have either the expected\n # state value or 0 in case the state was final.\n next_state_values = torch.zeros(BATCH_SIZE, device=device)\n \n nsv = target_net(non_final_next_states).max(1)[0].detach()\n \n #print(state_action_values.shape, nsv.shape, next_state_values.shape, non_final_mask.shape)\n \n next_state_values[non_final_mask] = nsv\n \n \n # Compute the expected Q values\n expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n\n # Optimize the model\n optimizer.zero_grad()\n loss.backward()\n for param in policy_net.parameters():\n param.grad.data.clamp_(-1, 1)\n optimizer.step()\n collect()", "_____no_output_____" ], [ "#p = figure(title=\"Training Reward per Timestep\", plot_height=350, plot_width=800)\n#target = show(p, notebook_handle=True)\ncurrent_ep = 0\n\nwith open('output/rewards_experiment_{}'.format(str(datetime.datetime.today())[:-7]), 'a') as reward_file:\n\n num_episodes = 400\n for i_episode in range(num_episodes):\n print(\"Episode {}\".format(i_episode))\n # Initialize the environment and state\n env.reset() \n for t in count():\n current_ep += 1\n state = env.render()\n action = select_action(torch.tensor(state, device=device, dtype=torch.float))\n next_state, reward, done, _ = env.step(action > 0.5)\n reward = torch.tensor([reward], device=device)\n state = torch.tensor(state, device=device, dtype=torch.float)\n next_state = torch.tensor(next_state, device=device, dtype=torch.float)\n\n print('Reward at timestep {t}: {r}'.format(t=t,r=reward.item()))\n \n reward_file.write(','.join([str(i_episode), str(current_ep), str(reward.item())])+r'\\n')\n #rewards.append(reward.item())\n #episodes.append(current_ep)\n\n #p.line(episodes, rewards)\n #push_notebook(handle=target)\n\n if state.view(-1).shape == 12:\n state = state.view(-1)\n next_state = next_state.view(-1)\n else:\n state = state.view(-1,12)\n 
next_state = next_state.view(-1,12)\n\n action = torch.tensor(action, device=device, dtype=torch.long).view(-1)\n action = (action == 1).nonzero().view(-1)\n\n if state.shape[0] == 0:\n memory.push(torch.zeros((1,12), device=device), action, next_state, reward)\n else:\n memory.push(state, action, next_state, reward)\n\n # Perform one step of the optimization (on the target network)\n optimize_model()\n collect()\n \n if done:\n break\n collect()\n # Update the target network, copying all weights and biases in DQN\n if i_episode % TARGET_UPDATE == 0:\n target_net.load_state_dict(policy_net.state_dict())\n collect()\n \n torch.save(target_net.state_dict(), './output_weights/target/target_net_weights_{}_ep_{}.pt'.format(str(datetime.datetime.today())[:-7], i_episode))\n torch.save(policy_net.state_dict(), './output_weights/policy/policy_net_weights_{}_ep_{}.pt'.format(str(datetime.datetime.today())[:-7], i_episode))\n\nprint('Complete')\nenv.render()\nenv.close()\ntorch.save(target_net.state_dict(), './output_weights/target_net.pt')\ntorch.save(policy_net.state_dict(), './output_weights/policy_net.pt')", "Episode 0\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb2080555cd7414916fc08864b548f4070b894c
5,068
ipynb
Jupyter Notebook
Pandas/DSPD0100ENT-Business Understanding.ipynb
reddyprasade/Data-Science-With-Python
b8e9b16691dc2f35a4d975946f8ca5cc0e0469d0
[ "Apache-2.0" ]
14
2020-04-20T14:17:39.000Z
2021-12-30T11:39:48.000Z
Pandas/DSPD0100ENT-Business Understanding.ipynb
SinghRupa0212/Data-Science-With-Python
5346a2192f9343ee0c8186115795393a32c06a24
[ "Apache-2.0" ]
null
null
null
Pandas/DSPD0100ENT-Business Understanding.ipynb
SinghRupa0212/Data-Science-With-Python
5346a2192f9343ee0c8186115795393a32c06a24
[ "Apache-2.0" ]
3
2020-05-13T10:08:18.000Z
2020-09-01T16:40:32.000Z
64.151899
503
0.705406
[ [ [ "The business understanding stage of the Data Science Process (DSP). This process provides a recommended lifecycle that you can use to structure your data-science projects.\n\n**Goals**\n* Specify the key variables that are to serve as the model targets and whose related metrics are used determine the success of the project.\n* Identify the relevant data sources that the business has access to or needs to obtain.\n\n#### How to do it\nThere are two main tasks addressed in this stage:\n\n* **Define objectives:** Work with your customer and other stakeholders to understand and identify the business problems. Formulate questions that define the business goals that the data science techniques can target.\n* **Identify data sources:** Find the relevant data that helps you answer the questions that define the objectives of the project.\n\n#### Define objectives\n1. A central objective of this step is to identify the key business variables that the analysis needs to predict. We refer to these variables as the model targets, and we use the metrics associated with them to determine the success of the project. Two examples of such targets are sales forecasts or the probability of an order being fraudulent.\n\n2. Define the project goals by asking and refining \"sharp\" questions that are relevant, specific, and unambiguous. Data science is a process that uses names and numbers to answer such questions. You typically use data science or machine learning to answer five types of questions:\n\n * How much or how many? (regression)\n * Which category? (classification)\n * Which group? (clustering)\n * Is this weird? (anomaly detection)\n * Which option should be taken? (recommendation)\nDetermine which of these questions you're asking and how answering it achieves your business goals.\n\n3. Define the project team by specifying the roles and responsibilities of its members. Develop a high-level milestone plan that you iterate on as you discover more information.\n\n4. Define the success metrics. For example, you might want to achieve a customer churn prediction. You need an accuracy rate of \"x\" percent by the end of this three-month project. With this data, you can offer customer promotions to reduce churn. The metrics must be **SMART**:\n\n* **S**pecific\n* **M**easurable\n* **A**chievable\n* **R**elevant\n* **T**ime-bound\n\n#### Identify data sources\nIdentify data sources that contain known examples of answers to your sharp questions. Look for the following data:\n\n* Data that's relevant to the question. Do you have measures of the target and features that are related to the target?\n* Data that's an accurate measure of your model target and the features of interest.\nFor example, you might find that the existing systems need to collect and log additional kinds of data to address the problem and achieve the project goals. In this situation, you might want to look for external data sources or update your systems to collect new data.\n\n#### Artifacts\nHere are the deliverables in this stage:\n\n* **Charter document:** A standard template is provided in the TDSP project structure definition. The charter document is a living document. You update the template throughout the project as you make new discoveries and as business requirements change. The key is to iterate upon this document, adding more detail, as you progress through the discovery process. 
Keep the customer and other stakeholders involved in making the changes and clearly communicate the reasons for the changes to them.\n* **Data sources:** The Raw data sources section of the Data definitions report that's found in the TDSP project Data report folder contains the data sources. This section specifies the original and destination locations for the raw data. In later stages, you fill in additional details like the scripts to move the data to your analytic environment.\n* **Data dictionaries:** This document provides descriptions of the data that's provided by the client. These descriptions include information about the schema (the data types and information on the validation rules, if any) and the entity-relation diagrams, if available.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown" ] ]
ecb22a6c0370cbbaf502c00b710d09bc5338dce4
1,965
ipynb
Jupyter Notebook
nbs/tutorial.ipynb
artste/fastai2
f659baad6b2de422f7656766dcbcc073a8c028e8
[ "Apache-2.0" ]
null
null
null
nbs/tutorial.ipynb
artste/fastai2
f659baad6b2de422f7656766dcbcc073a8c028e8
[ "Apache-2.0" ]
null
null
null
nbs/tutorial.ipynb
artste/fastai2
f659baad6b2de422f7656766dcbcc073a8c028e8
[ "Apache-2.0" ]
null
null
null
35.727273
418
0.640204
[ [ [ "# Tutorials\n\n> To help you get started", "_____no_output_____" ], [ "The most important thing to remember is that each page of this documentation comes from a notebook. You can find them in the \"nbs\" folder in the [main repo](https://github.com/fastai/fastai2/tree/master/nbs). For tutorials, you can play around with the code and tweak if to do your own experiments. For the pages documenting the library, you will be able to see the source code and interact with all the tests.", "_____no_output_____" ], [ "If you are just starting with the libary, checkout the beginners tutorials. They cover how to treat each application using the high-level API:\n\n- [vision](http://dev.fast.ai/tutorial.vision)\n- [text](http://dev.fast.ai/tutorial.text)\n- [tabular](http://dev.fast.ai/tutorial.tabular)\n\nOnce you are comfortable enough and want to start digging in the mid-level API, have a look at the intermediate tutorials:\n\n- [the data block API](http://dev.fast.ai/tutorial.datablock)\n- [a base training on Imagenette](http://dev.fast.ai/tutorial.imagenette)\n- [the mid-level data API in vision](http://dev.fast.ai/tutorial.pets)\n- [the mid-level data API in text](http://dev.fast.ai/tutorial.wikitext)\n\nAnd for even more experienced users that want to customize the library to their needs, check the advanced tutorials:\n\n- [Siamese model data collection and training](http://dev.fast.ai/tutorial.siamese)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
ecb23e7fd2abb28221218a989508765588bbe5c1
241,653
ipynb
Jupyter Notebook
model_builder/notebooks/.ipynb_checkpoints/privacy_text_transform_verbmobil_flair-checkpoint.ipynb
CrossLangNV/text_transformer
45d5092a660125d727fe278b2b50e046ab62813b
[ "Apache-2.0" ]
null
null
null
model_builder/notebooks/.ipynb_checkpoints/privacy_text_transform_verbmobil_flair-checkpoint.ipynb
CrossLangNV/text_transformer
45d5092a660125d727fe278b2b50e046ab62813b
[ "Apache-2.0" ]
null
null
null
model_builder/notebooks/.ipynb_checkpoints/privacy_text_transform_verbmobil_flair-checkpoint.ipynb
CrossLangNV/text_transformer
45d5092a660125d727fe278b2b50e046ab62813b
[ "Apache-2.0" ]
null
null
null
75.658422
1,474
0.528535
[ [ [ "# Privacy Text Transformation on Verbmobil using Flair", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\nfrom flair.data import Sentence\nfrom flair.models import SequenceTagger\nfrom flair.data import Corpus\nfrom flair.datasets import ColumnCorpus\nfrom flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings\nfrom typing import List\nfrom flair.trainers import ModelTrainer\nfrom flair.visual.training_curves import Plotter\n\n", "_____no_output_____" ], [ "# make a sentence\nsentence = Sentence('I love Berlin. Mark is going there .')\n\n# load the NER tagger\ntagger = SequenceTagger.load('ner')\n\n# run NER over sentence\ntagger.predict(sentence)", "2019-08-05 11:09:23,738 https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models-v0.4/NER-conll03-english/en-ner-conll03-v0.4.pt not found in cache, downloading to /tmp/tmpiysxso35\n" ], [ "print(sentence)\nprint('The following NER tags are found:')\n\n# iterate over entities and print\nfor entity in sentence.get_spans('ner'):\n print(entity)", "Sentence: \"I love Berlin. Mark is going there .\" - 8 Tokens\nThe following NER tags are found:\nPER-span [3,4]: \"Berlin. Mark\"\n" ] ], [ [ "## Load training data", "_____no_output_____" ] ], [ [ "columns = {0: 'text', 1: 'ner'}", "_____no_output_____" ], [ "def train_ner(input_dir, output_dir):\n # this is the folder in which train, test and dev files reside\n data_folder = input_dir\n\n # init a corpus using column format, data folder and the names of the train, dev and test files\n corpus: Corpus = ColumnCorpus(data_folder, columns,\n train_file='train.tsv',\n test_file='test.tsv',\n dev_file='valid.tsv')\n\n print(len(corpus.train))\n print(corpus.train[1].to_tagged_string('ner'))\n\n # 1. get the corpus\n print(corpus)\n\n # 2. what tag do we want to predict?\n tag_type = 'ner'\n\n # 3. make the tag dictionary from the corpus\n tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)\n print(tag_dictionary.idx2item)\n\n # 4. initialize embeddings\n embedding_types: List[TokenEmbeddings] = [\n\n WordEmbeddings('glove'),\n\n # comment in this line to use character embeddings\n # CharacterEmbeddings(),\n\n # comment in these lines to use flair embeddings\n # FlairEmbeddings('news-forward'),\n # FlairEmbeddings('news-backward'),\n ]\n\n embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)\n\n # 5. initialize sequence tagger\n tagger: SequenceTagger = SequenceTagger(hidden_size=256,\n embeddings=embeddings,\n tag_dictionary=tag_dictionary,\n tag_type=tag_type,\n use_crf=True)\n\n # 6. initialize trainer\n\n trainer: ModelTrainer = ModelTrainer(tagger, corpus)\n\n # 7. start training\n trainer.train(output_dir,\n learning_rate=0.1,\n mini_batch_size=32,\n max_epochs=80)\n\n\n # 8. 
plot training curves (optional)\n from flair.visual.training_curves import Plotter\n plotter = Plotter()\n plotter.plot_training_curves(output_dir+'loss.tsv')\n plotter.plot_weights(output_dir+'weights.txt')\n ", "_____no_output_____" ] ], [ [ "### uncased model", "_____no_output_____" ] ], [ [ "train_ner(input_dir ='../data/bio_uncased/' , output_dir = 'resources/taggers/uncased-ner/' )", "2019-08-05 12:40:06,742 Reading data from ../data/bio_uncased\n2019-08-05 12:40:06,747 Train: ../data/bio_uncased/train.tsv\n2019-08-05 12:40:06,750 Dev: ../data/bio_uncased/valid.tsv\n2019-08-05 12:40:06,753 Test: ../data/bio_uncased/test.tsv\n9299\nwhat is good for you\nCorpus: 9299 train + 975 dev + 1043 test sentences\n[b'<unk>', b'O', b'B-PER', b'B-DATE', b'I-DATE', b'B-TIME', b'I-TIME', b'B-LOC', b'I-LOC', b'B-ORG', b'I-ORG', b'I-PER', b'<START>', b'<STOP>']\n2019-08-05 12:40:09,463 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:40:09,464 Evaluation method: MICRO_F1_SCORE\n2019-08-05 12:40:09,651 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:40:10,034 epoch 1 - iter 0/291 - loss 40.72638702\n2019-08-05 12:40:12,970 epoch 1 - iter 29/291 - loss 11.91876203\n2019-08-05 12:40:15,729 epoch 1 - iter 58/291 - loss 9.34245343\n2019-08-05 12:40:18,629 epoch 1 - iter 87/291 - loss 8.14197249\n2019-08-05 12:40:21,313 epoch 1 - iter 116/291 - loss 7.24003727\n2019-08-05 12:40:24,321 epoch 1 - iter 145/291 - loss 6.59929649\n2019-08-05 12:40:27,151 epoch 1 - iter 174/291 - loss 6.14317198\n2019-08-05 12:40:29,860 epoch 1 - iter 203/291 - loss 5.72344067\n2019-08-05 12:40:32,708 epoch 1 - iter 232/291 - loss 5.41672699\n2019-08-05 12:40:35,473 epoch 1 - iter 261/291 - loss 5.13786361\n2019-08-05 12:40:38,052 epoch 1 - iter 290/291 - loss 4.89199517\n2019-08-05 12:40:38,346 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:40:38,347 EPOCH 1 done: loss 4.8920 - lr 0.1000 - bad epochs 0\n2019-08-05 12:40:42,771 DEV : loss 2.312211036682129 - score 0.6031\n2019-08-05 12:40:45,861 TEST : loss 2.3921608924865723 - score 0.5885\n2019-08-05 12:41:03,518 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:41:03,893 epoch 2 - iter 0/291 - loss 2.17247176\n2019-08-05 12:41:06,795 epoch 2 - iter 29/291 - loss 2.76418640\n2019-08-05 12:41:09,706 epoch 2 - iter 58/291 - loss 2.72706403\n2019-08-05 12:41:12,612 epoch 2 - iter 87/291 - loss 2.64219268\n2019-08-05 12:41:15,559 epoch 2 - iter 116/291 - loss 2.61982279\n2019-08-05 12:41:18,381 epoch 2 - iter 145/291 - loss 2.55602345\n2019-08-05 12:41:21,001 epoch 2 - iter 174/291 - loss 2.49125010\n2019-08-05 12:41:23,579 epoch 2 - iter 203/291 - loss 2.46483098\n2019-08-05 12:41:26,245 epoch 2 - iter 232/291 - loss 2.47143241\n2019-08-05 12:41:29,051 epoch 2 - iter 261/291 - loss 2.43346836\n2019-08-05 12:41:33,027 epoch 2 - iter 290/291 - loss 2.42069812\n2019-08-05 12:41:33,361 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:41:33,362 EPOCH 2 done: loss 2.4207 - lr 0.1000 - bad epochs 0\n2019-08-05 12:41:36,616 DEV : loss 1.5384644269943237 - score 0.708\n2019-08-05 12:41:39,720 TEST : loss 1.5552465915679932 - score 0.695\n2019-08-05 12:41:57,374 
----------------------------------------------------------------------------------------------------\n2019-08-05 12:41:57,758 epoch 3 - iter 0/291 - loss 2.00301242\n2019-08-05 12:42:00,514 epoch 3 - iter 29/291 - loss 2.22469453\n2019-08-05 12:42:03,362 epoch 3 - iter 58/291 - loss 2.15072232\n2019-08-05 12:42:06,212 epoch 3 - iter 87/291 - loss 2.06519937\n2019-08-05 12:42:09,022 epoch 3 - iter 116/291 - loss 2.06647105\n2019-08-05 12:42:12,099 epoch 3 - iter 145/291 - loss 2.05621137\n2019-08-05 12:42:14,927 epoch 3 - iter 174/291 - loss 2.06432340\n2019-08-05 12:42:17,665 epoch 3 - iter 203/291 - loss 2.02561090\n2019-08-05 12:42:20,622 epoch 3 - iter 232/291 - loss 2.00488910\n2019-08-05 12:42:24,847 epoch 3 - iter 261/291 - loss 2.00848733\n2019-08-05 12:42:27,473 epoch 3 - iter 290/291 - loss 1.99528579\n2019-08-05 12:42:27,800 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:42:27,801 EPOCH 3 done: loss 1.9953 - lr 0.1000 - bad epochs 0\n2019-08-05 12:42:31,210 DEV : loss 1.209535837173462 - score 0.7436\n2019-08-05 12:42:34,371 TEST : loss 1.2542510032653809 - score 0.7483\n2019-08-05 12:42:52,058 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:42:52,460 epoch 4 - iter 0/291 - loss 1.35122037\n2019-08-05 12:42:55,487 epoch 4 - iter 29/291 - loss 1.91258162\n2019-08-05 12:42:58,333 epoch 4 - iter 58/291 - loss 1.81482894\n2019-08-05 12:43:01,328 epoch 4 - iter 87/291 - loss 1.79580216\n2019-08-05 12:43:04,427 epoch 4 - iter 116/291 - loss 1.83305408\n2019-08-05 12:43:07,232 epoch 4 - iter 145/291 - loss 1.80064955\n2019-08-05 12:43:10,436 epoch 4 - iter 174/291 - loss 1.75412574\n2019-08-05 12:43:13,217 epoch 4 - iter 203/291 - loss 1.74836358\n2019-08-05 12:43:16,146 epoch 4 - iter 232/291 - loss 1.72681274\n2019-08-05 12:43:20,076 epoch 4 - iter 261/291 - loss 1.73843149\n2019-08-05 12:43:22,733 epoch 4 - iter 290/291 - loss 1.72432152\n2019-08-05 12:43:23,034 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:43:23,034 EPOCH 4 done: loss 1.7243 - lr 0.1000 - bad epochs 0\n2019-08-05 12:43:26,306 DEV : loss 1.0085558891296387 - score 0.7852\n2019-08-05 12:43:29,413 TEST : loss 1.0170378684997559 - score 0.7984\n2019-08-05 12:43:47,089 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:43:47,482 epoch 5 - iter 0/291 - loss 1.26312733\n2019-08-05 12:43:50,464 epoch 5 - iter 29/291 - loss 1.62831760\n2019-08-05 12:43:53,302 epoch 5 - iter 58/291 - loss 1.67572578\n2019-08-05 12:43:56,060 epoch 5 - iter 87/291 - loss 1.64671320\n2019-08-05 12:43:59,193 epoch 5 - iter 116/291 - loss 1.61759762\n2019-08-05 12:44:01,975 epoch 5 - iter 145/291 - loss 1.58325352\n2019-08-05 12:44:05,059 epoch 5 - iter 174/291 - loss 1.62742410\n2019-08-05 12:44:07,855 epoch 5 - iter 203/291 - loss 1.61883134\n2019-08-05 12:44:10,580 epoch 5 - iter 232/291 - loss 1.60208069\n2019-08-05 12:44:14,512 epoch 5 - iter 261/291 - loss 1.60090111\n2019-08-05 12:44:17,337 epoch 5 - iter 290/291 - loss 1.58876171\n2019-08-05 12:44:17,634 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:44:17,634 EPOCH 5 done: loss 1.5888 - lr 0.1000 - bad epochs 0\n2019-08-05 12:44:20,947 DEV : loss 0.9729970097541809 - score 0.7894\n2019-08-05 12:44:24,054 TEST : loss 0.9809656143188477 - score 
0.7958\n2019-08-05 12:44:41,724 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:44:42,127 epoch 6 - iter 0/291 - loss 1.47312808\n2019-08-05 12:44:44,823 epoch 6 - iter 29/291 - loss 1.40444808\n2019-08-05 12:44:47,573 epoch 6 - iter 58/291 - loss 1.39349261\n2019-08-05 12:44:50,346 epoch 6 - iter 87/291 - loss 1.46899614\n2019-08-05 12:44:53,164 epoch 6 - iter 116/291 - loss 1.48390555\n2019-08-05 12:44:56,113 epoch 6 - iter 145/291 - loss 1.49937827\n2019-08-05 12:44:58,881 epoch 6 - iter 174/291 - loss 1.47923134\n2019-08-05 12:45:01,744 epoch 6 - iter 203/291 - loss 1.48179582\n2019-08-05 12:45:04,681 epoch 6 - iter 232/291 - loss 1.50460259\n2019-08-05 12:45:08,660 epoch 6 - iter 261/291 - loss 1.48673827\n2019-08-05 12:45:11,737 epoch 6 - iter 290/291 - loss 1.49340641\n2019-08-05 12:45:12,058 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:45:12,059 EPOCH 6 done: loss 1.4934 - lr 0.1000 - bad epochs 0\n2019-08-05 12:45:15,598 DEV : loss 0.9314920902252197 - score 0.7926\n2019-08-05 12:45:18,786 TEST : loss 0.9034382104873657 - score 0.7913\n2019-08-05 12:45:36,543 ----------------------------------------------------------------------------------------------------\n2019-08-05 12:45:36,933 epoch 7 - iter 0/291 - loss 0.70404255\n2019-08-05 12:45:39,780 epoch 7 - iter 29/291 - loss 1.37724828\n2019-08-05 12:45:42,563 epoch 7 - iter 58/291 - loss 1.37633718\n2019-08-05 12:45:45,478 epoch 7 - iter 87/291 - loss 1.37491847\n2019-08-05 12:45:48,389 epoch 7 - iter 116/291 - loss 1.39992160\n2019-08-05 12:45:51,226 epoch 7 - iter 145/291 - loss 1.38851801\n" ] ], [ [ "### cased model", "_____no_output_____" ] ], [ [ "train_ner(input_dir ='../data/bio_cased/' , output_dir = 'resources/taggers/cased-ner/' )", "2019-08-05 14:11:02,518 Reading data from ../data/bio_cased\n2019-08-05 14:11:02,524 Train: ../data/bio_cased/train.tsv\n2019-08-05 14:11:02,527 Dev: ../data/bio_cased/valid.tsv\n2019-08-05 14:11:02,529 Test: ../data/bio_cased/test.tsv\n9299\nwhat is good for you\nCorpus: 9299 train + 975 dev + 1043 test sentences\n[b'<unk>', b'O', b'B-PER', b'B-DATE', b'I-DATE', b'B-TIME', b'I-TIME', b'B-LOC', b'I-LOC', b'B-ORG', b'I-ORG', b'I-PER', b'<START>', b'<STOP>']\n2019-08-05 14:11:04,774 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:11:04,775 Evaluation method: MICRO_F1_SCORE\n2019-08-05 14:11:05,020 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:11:05,493 epoch 1 - iter 0/291 - loss 19.21309090\n2019-08-05 14:11:08,366 epoch 1 - iter 29/291 - loss 9.47932544\n2019-08-05 14:11:11,302 epoch 1 - iter 58/291 - loss 8.19926015\n2019-08-05 14:11:15,347 epoch 1 - iter 87/291 - loss 7.21943849\n2019-08-05 14:11:18,027 epoch 1 - iter 116/291 - loss 6.45219372\n2019-08-05 14:11:20,755 epoch 1 - iter 145/291 - loss 5.96712129\n2019-08-05 14:11:23,651 epoch 1 - iter 174/291 - loss 5.55318623\n2019-08-05 14:11:26,398 epoch 1 - iter 203/291 - loss 5.20812501\n2019-08-05 14:11:29,202 epoch 1 - iter 232/291 - loss 4.94515686\n2019-08-05 14:11:31,897 epoch 1 - iter 261/291 - loss 4.71013133\n2019-08-05 14:11:34,638 epoch 1 - iter 290/291 - loss 4.52957345\n2019-08-05 14:11:34,966 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:11:34,967 EPOCH 1 done: loss 4.5296 - lr 0.1000 
- bad epochs 0\n2019-08-05 14:11:38,341 DEV : loss 2.009014368057251 - score 0.6661\n2019-08-05 14:11:41,612 TEST : loss 1.9655473232269287 - score 0.6563\n2019-08-05 14:11:59,329 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:11:59,860 epoch 2 - iter 0/291 - loss 2.41936994\n2019-08-05 14:12:02,808 epoch 2 - iter 29/291 - loss 2.53624182\n2019-08-05 14:12:05,639 epoch 2 - iter 58/291 - loss 2.51289744\n2019-08-05 14:12:08,299 epoch 2 - iter 87/291 - loss 2.49085585\n2019-08-05 14:12:11,094 epoch 2 - iter 116/291 - loss 2.46840062\n2019-08-05 14:12:13,917 epoch 2 - iter 145/291 - loss 2.40099161\n2019-08-05 14:12:18,401 epoch 2 - iter 174/291 - loss 2.38618280\n2019-08-05 14:12:21,194 epoch 2 - iter 203/291 - loss 2.35033375\n2019-08-05 14:12:24,122 epoch 2 - iter 232/291 - loss 2.37601634\n2019-08-05 14:12:26,991 epoch 2 - iter 261/291 - loss 2.35385522\n2019-08-05 14:12:29,900 epoch 2 - iter 290/291 - loss 2.33659413\n2019-08-05 14:12:30,247 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:12:30,248 EPOCH 2 done: loss 2.3366 - lr 0.1000 - bad epochs 0\n2019-08-05 14:12:33,602 DEV : loss 1.6523380279541016 - score 0.6727\n2019-08-05 14:12:36,773 TEST : loss 1.5685831308364868 - score 0.671\n2019-08-05 14:12:54,523 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:12:54,983 epoch 3 - iter 0/291 - loss 2.77401376\n2019-08-05 14:12:58,120 epoch 3 - iter 29/291 - loss 2.33912638\n2019-08-05 14:13:00,951 epoch 3 - iter 58/291 - loss 2.07893725\n2019-08-05 14:13:03,675 epoch 3 - iter 87/291 - loss 2.09454065\n2019-08-05 14:13:06,569 epoch 3 - iter 116/291 - loss 2.06853022\n2019-08-05 14:13:09,268 epoch 3 - iter 145/291 - loss 2.01164407\n2019-08-05 14:13:12,156 epoch 3 - iter 174/291 - loss 1.97615804\n2019-08-05 14:13:15,005 epoch 3 - iter 203/291 - loss 1.97188777\n2019-08-05 14:13:18,031 epoch 3 - iter 232/291 - loss 1.96722984\n2019-08-05 14:13:20,800 epoch 3 - iter 261/291 - loss 1.94767598\n2019-08-05 14:13:23,501 epoch 3 - iter 290/291 - loss 1.90857726\n2019-08-05 14:13:23,834 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:13:23,835 EPOCH 3 done: loss 1.9086 - lr 0.1000 - bad epochs 0\n2019-08-05 14:13:28,975 DEV : loss 1.16249418258667 - score 0.7459\n2019-08-05 14:13:32,353 TEST : loss 1.1228629350662231 - score 0.7388\n2019-08-05 14:13:50,064 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:13:50,567 epoch 4 - iter 0/291 - loss 1.70858026\n2019-08-05 14:13:53,555 epoch 4 - iter 29/291 - loss 1.76551251\n2019-08-05 14:13:56,408 epoch 4 - iter 58/291 - loss 1.72327654\n2019-08-05 14:13:59,109 epoch 4 - iter 87/291 - loss 1.69918669\n2019-08-05 14:14:01,982 epoch 4 - iter 116/291 - loss 1.70108124\n2019-08-05 14:14:04,813 epoch 4 - iter 145/291 - loss 1.70610742\n2019-08-05 14:14:07,737 epoch 4 - iter 174/291 - loss 1.73414706\n2019-08-05 14:14:10,696 epoch 4 - iter 203/291 - loss 1.71865560\n2019-08-05 14:14:13,334 epoch 4 - iter 232/291 - loss 1.70096502\n2019-08-05 14:14:16,538 epoch 4 - iter 261/291 - loss 1.69423927\n2019-08-05 14:14:19,515 epoch 4 - iter 290/291 - loss 1.69833936\n2019-08-05 14:14:19,848 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:14:19,849 EPOCH 4 done: loss 
1.6983 - lr 0.1000 - bad epochs 0\n2019-08-05 14:14:23,308 DEV : loss 1.0320308208465576 - score 0.7669\n2019-08-05 14:14:26,500 TEST : loss 0.9768574237823486 - score 0.7853\n2019-08-05 14:14:44,160 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:14:44,658 epoch 5 - iter 0/291 - loss 1.34376049\n2019-08-05 14:14:47,617 epoch 5 - iter 29/291 - loss 1.65087973\n2019-08-05 14:14:52,553 epoch 5 - iter 58/291 - loss 1.60734183\n2019-08-05 14:14:55,628 epoch 5 - iter 87/291 - loss 1.58324485\n2019-08-05 14:14:58,343 epoch 5 - iter 116/291 - loss 1.58070349\n2019-08-05 14:15:01,062 epoch 5 - iter 145/291 - loss 1.57831483\n2019-08-05 14:15:03,735 epoch 5 - iter 174/291 - loss 1.60212402\n2019-08-05 14:15:06,573 epoch 5 - iter 203/291 - loss 1.61655491\n2019-08-05 14:15:09,373 epoch 5 - iter 232/291 - loss 1.60688255\n2019-08-05 14:15:12,294 epoch 5 - iter 261/291 - loss 1.58924118\n2019-08-05 14:15:15,016 epoch 5 - iter 290/291 - loss 1.56210655\n2019-08-05 14:15:15,937 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:15:15,937 EPOCH 5 done: loss 1.5621 - lr 0.1000 - bad epochs 0\n2019-08-05 14:15:19,351 DEV : loss 0.975492000579834 - score 0.7884\n2019-08-05 14:15:22,668 TEST : loss 0.9232832193374634 - score 0.8081\n2019-08-05 14:15:40,761 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:15:41,230 epoch 6 - iter 0/291 - loss 1.01912153\n2019-08-05 14:15:44,147 epoch 6 - iter 29/291 - loss 1.42918794\n2019-08-05 14:15:46,903 epoch 6 - iter 58/291 - loss 1.42635890\n2019-08-05 14:15:50,176 epoch 6 - iter 87/291 - loss 1.45842137\n2019-08-05 14:15:53,225 epoch 6 - iter 116/291 - loss 1.49470791\n2019-08-05 14:15:55,993 epoch 6 - iter 145/291 - loss 1.48488701\n2019-08-05 14:15:58,835 epoch 6 - iter 174/291 - loss 1.48755954\n2019-08-05 14:16:03,339 epoch 6 - iter 203/291 - loss 1.50419305\n2019-08-05 14:16:05,914 epoch 6 - iter 232/291 - loss 1.49746481\n2019-08-05 14:16:08,581 epoch 6 - iter 261/291 - loss 1.48950117\n2019-08-05 14:16:11,269 epoch 6 - iter 290/291 - loss 1.47464313\n2019-08-05 14:16:11,598 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:16:11,599 EPOCH 6 done: loss 1.4746 - lr 0.1000 - bad epochs 0\n2019-08-05 14:16:14,913 DEV : loss 0.8453320264816284 - score 0.806\n2019-08-05 14:16:18,162 TEST : loss 0.813510000705719 - score 0.835\n2019-08-05 14:16:36,000 ----------------------------------------------------------------------------------------------------\n2019-08-05 14:16:36,481 epoch 7 - iter 0/291 - loss 0.98426563\n2019-08-05 14:16:39,210 epoch 7 - iter 29/291 - loss 1.32413566\n2019-08-05 14:16:42,026 epoch 7 - iter 58/291 - loss 1.39977080\n2019-08-05 14:16:44,872 epoch 7 - iter 87/291 - loss 1.42321777\n2019-08-05 14:16:47,727 epoch 7 - iter 116/291 - loss 1.41896770\n2019-08-05 14:16:50,692 epoch 7 - iter 145/291 - loss 1.40287186\n" ], [ "# load the model you trained\nmodel = SequenceTagger.load('resources/taggers/uncased-ner/final-model.pt')\n\n# create example sentence\nsentence = Sentence('I love Berlin')\n\n# predict tags and print\nmodel.predict(sentence)\n\nprint(sentence.to_tagged_string())", "2019-08-05 16:06:34,823 loading file resources/taggers/uncased-ner/final-model.pt\nI love Berlin <B-LOC>\n" ], [ "sentence = Sentence('MIMVMX_q002nxx0_022_130020_VM1_06 i can\"t come on friday the twenty 
first the weekend is free ')\n\n# predict tags and print\nmodel.predict(sentence)\n\nprint(sentence.to_tagged_string())", "MIMVMX_q002nxx0_022_130020_VM1_06 i can\"t come on friday <B-DATE> the <I-DATE> twenty <I-DATE> first <I-DATE> the <I-DATE> weekend <I-DATE> is free\n" ], [ " # this is the folder in which train, test and dev files reside\ndata_folder = '../data/bio_uncased/'\n\n# init a corpus using column format, data folder and the names of the train, dev and test files\ncorpus: Corpus = ColumnCorpus(data_folder, columns,\n train_file='train.tsv',\n test_file='test.tsv',\n dev_file='valid.tsv')\nbase_path = 'resources/taggers/uncased-ner/'", "2019-08-05 16:34:11,764 Reading data from ../data/bio_uncased\n2019-08-05 16:34:11,770 Train: ../data/bio_uncased/train.tsv\n2019-08-05 16:34:11,773 Dev: ../data/bio_uncased/valid.tsv\n2019-08-05 16:34:11,776 Test: ../data/bio_uncased/test.tsv\n" ], [ "from flair.training_utils import Result\ntest_results, test_loss = model.evaluate(\n corpus.test,\n out_path=base_path +\"test_.tsv\",\n )\n\ntest_results: Result = test_results\nprint(test_results.log_line)\nprint(test_results.detailed_results)\n", "0.8876\t0.8924\t0.89\n\nMICRO_AVG: acc 0.8018 - f1-score 0.89\nMACRO_AVG: acc 0.8658 - f1-score 0.9258200000000001\nDATE tp: 515 - fp: 51 - fn: 66 - tn: 515 - precision: 0.9099 - recall: 0.8864 - accuracy: 0.8149 - f1-score: 0.8980\nLOC tp: 54 - fp: 4 - fn: 6 - tn: 54 - precision: 0.9310 - recall: 0.9000 - accuracy: 0.8438 - f1-score: 0.9152\nORG tp: 25 - fp: 0 - fn: 0 - tn: 25 - precision: 1.0000 - recall: 1.0000 - accuracy: 1.0000 - f1-score: 1.0000\nPER tp: 57 - fp: 4 - fn: 1 - tn: 57 - precision: 0.9344 - recall: 0.9828 - accuracy: 0.9194 - f1-score: 0.9580\nTIME tp: 344 - fp: 67 - fn: 47 - tn: 344 - precision: 0.8370 - recall: 0.8798 - accuracy: 0.7511 - f1-score: 0.8579\n" ], [ "rr[1]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ecb2502e2642434ebe04845a781d3b3c100505f7
47,331
ipynb
Jupyter Notebook
python_examples/09_resource_map.ipynb
DataONEorg/dataone_examples
07e41fc99c3491ee18ed4c4f6b25f148484b78cc
[ "Apache-2.0" ]
2
2018-08-24T13:48:11.000Z
2020-09-16T16:21:08.000Z
python_examples/09_resource_map.ipynb
DataONEorg/dataone_examples
07e41fc99c3491ee18ed4c4f6b25f148484b78cc
[ "Apache-2.0" ]
null
null
null
python_examples/09_resource_map.ipynb
DataONEorg/dataone_examples
07e41fc99c3491ee18ed4c4f6b25f148484b78cc
[ "Apache-2.0" ]
1
2020-08-12T00:54:31.000Z
2020-08-12T00:54:31.000Z
59.461055
245
0.543069
[ [ [ "# d1_pyore Tutorial\n\nThe d1_pyore library facilitates creation and consumption of OAI-ORE documents that are used by DataONE to dscribe data packages (i.e. the combination of Data and Metadata that make up a scientifically meaningful usint of information.\n\nLoad the RDF-XML data from a DataONE Coordinating Node. Note that this direct access is more fragile than using the combination of resolve() and get().", "_____no_output_____" ] ], [ [ "import requests\nimport urllib.parse\n\n#identifier for a resource map\npid = \"ark:/13030/m5dz07z6/2/mrt-dataone-map.rdf\"\nurl = \"https://cn.dataone.org/cn/v2/object/\" + urllib.parse.quote(pid.encode('utf-8'))\nore_document = requests.get(url).text\nprint(ore_document)", "<rdf:RDF\n xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\n xmlns:foaf=\"http://xmlns.com/foaf/0.1/\"\n xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n xmlns:ore=\"http://www.openarchives.org/ore/terms/\"\n xmlns:dcterms=\"http://purl.org/dc/terms/\"\n xmlns:cito=\"http://purl.org/spar/cito/\"\n xmlns:rdfs1=\"http://www.w3.org/2001/01/rdf-schema#\" > \n <rdf:Description rdf:about=\"http://www.openarchives.org/ore/terms/Aggregation\">\n <rdfs1:label>Aggregation</rdfs1:label>\n <rdfs1:isDefinedBy rdf:resource=\"http://www.openarchives.org/ore/terms/\"/>\n </rdf:Description>\n <rdf:Description rdf:about=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\">\n <dcterms:identifier>ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-vuln.csv</dcterms:identifier>\n <cito:isDocumentedBy rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"/>\n </rdf:Description>\n <rdf:Description rdf:about=\"http://store.cdlib.org:35121/content/1001/ark%3A%2F13030%2Fm5dz07z6/2/\">\n <ore:aggregates rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\"/>\n <ore:aggregates rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\"/>\n <ore:aggregates rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\"/>\n <ore:aggregates rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"/>\n <dcterms:title>Simple aggregation of science metadata and data</dcterms:title>\n <rdfs1:isDefinedBy rdf:resource=\"http://www.openarchives.org/ore/terms/\"/>\n </rdf:Description>\n <rdf:Description rdf:about=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\">\n <dcterms:identifier>ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.pdf</dcterms:identifier>\n <cito:isDocumentedBy rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"/>\n </rdf:Description>\n <rdf:Description rdf:about=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fmrt-dataone-map.rdf\">\n <dcterms:creator>Merritt</dcterms:creator>\n <dcterms:created>2013-09-12T07:33:27-07:00</dcterms:created>\n <ore:describes rdf:resource=\"http://store.cdlib.org:35121/content/1001/ark%3A%2F13030%2Fm5dz07z6/2/\"/>\n <dc:format>application/rdf+xml</dc:format>\n <rdf:type rdf:resource=\"http://www.openarchives.org/ore/terms/ResourceMap\"/>\n <dcterms:modified>2013-09-12T07:33:27-07:00</dcterms:modified>\n <dcterms:identifier>ark:/13030/m5dz07z6/2/mrt-dataone-map.rdf</dcterms:identifier>\n </rdf:Description>\n <rdf:Description 
rdf:about=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\">\n <dcterms:identifier>ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-main.csv</dcterms:identifier>\n <cito:isDocumentedBy rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"/>\n </rdf:Description>\n <rdf:Description rdf:about=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\">\n <cito:documents rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\"/>\n <cito:documents rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\"/>\n <dcterms:description>A reference to a science data object using a DataONE identifier.</dcterms:description>\n <dcterms:description>A reference to a science metadata document using a DataONE identifier.</dcterms:description>\n <dcterms:identifier>ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.xml</dcterms:identifier>\n <cito:documents rdf:resource=\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\"/>\n </rdf:Description>\n</rdf:RDF>\n\n" ] ], [ [ "## Convert to JSON-LD\n\nPrint out the OAI-ORE document in JSON-LD format, which is more easily processed using Javascript.", "_____no_output_____" ] ], [ [ "from d1_common import resource_map\nore_object = resource_map.ResourceMap()\nore_object.parse(data=ore_document)\njson_document = ore_object.serialize(format=\"json-ld\").decode() \nprint(json_document)", "[\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\",\n \"http://purl.org/dc/terms/identifier\": [\n {\n \"@value\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.pdf\"\n }\n ],\n \"http://purl.org/spar/cito/isDocumentedBy\": [\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n }\n ]\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\",\n \"http://purl.org/dc/terms/identifier\": [\n {\n \"@value\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-vuln.csv\"\n }\n ],\n \"http://purl.org/spar/cito/isDocumentedBy\": [\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n }\n ]\n },\n {\n \"@id\": \"http://www.openarchives.org/ore/terms/Aggregation\",\n \"http://www.w3.org/2001/01/rdf-schema#isDefinedBy\": [\n {\n \"@id\": \"http://www.openarchives.org/ore/terms/\"\n }\n ],\n \"http://www.w3.org/2001/01/rdf-schema#label\": [\n {\n \"@value\": \"Aggregation\"\n }\n ]\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\",\n \"http://purl.org/dc/terms/description\": [\n {\n \"@value\": \"A reference to a science data object using a DataONE identifier.\"\n },\n {\n \"@value\": \"A reference to a science metadata document using a DataONE identifier.\"\n }\n ],\n \"http://purl.org/dc/terms/identifier\": [\n {\n \"@value\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.xml\"\n }\n ],\n \"http://purl.org/spar/cito/documents\": [\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\"\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\"\n },\n {\n \"@id\": 
\"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\"\n }\n ]\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\",\n \"http://purl.org/dc/terms/identifier\": [\n {\n \"@value\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-main.csv\"\n }\n ],\n \"http://purl.org/spar/cito/isDocumentedBy\": [\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n }\n ]\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fmrt-dataone-map.rdf\",\n \"@type\": [\n \"http://www.openarchives.org/ore/terms/ResourceMap\"\n ],\n \"http://purl.org/dc/elements/1.1/format\": [\n {\n \"@value\": \"application/rdf+xml\"\n }\n ],\n \"http://purl.org/dc/terms/created\": [\n {\n \"@value\": \"2013-09-12T07:33:27-07:00\"\n }\n ],\n \"http://purl.org/dc/terms/creator\": [\n {\n \"@value\": \"Merritt\"\n }\n ],\n \"http://purl.org/dc/terms/identifier\": [\n {\n \"@value\": \"ark:/13030/m5dz07z6/2/mrt-dataone-map.rdf\"\n }\n ],\n \"http://purl.org/dc/terms/modified\": [\n {\n \"@value\": \"2013-09-12T07:33:27-07:00\"\n }\n ],\n \"http://www.openarchives.org/ore/terms/describes\": [\n {\n \"@id\": \"http://store.cdlib.org:35121/content/1001/ark%3A%2F13030%2Fm5dz07z6/2/\"\n }\n ]\n },\n {\n \"@id\": \"http://store.cdlib.org:35121/content/1001/ark%3A%2F13030%2Fm5dz07z6/2/\",\n \"http://purl.org/dc/terms/title\": [\n {\n \"@value\": \"Simple aggregation of science metadata and data\"\n }\n ],\n \"http://www.openarchives.org/ore/terms/aggregates\": [\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\"\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\"\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n },\n {\n \"@id\": \"https://cn.dataone.org/cn/v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\"\n }\n ],\n \"http://www.w3.org/2001/01/rdf-schema#isDefinedBy\": [\n {\n \"@id\": \"http://www.openarchives.org/ore/terms/\"\n }\n ]\n }\n]\n" ] ], [ [ "## Compact the JSON-LD document \n", "_____no_output_____" ] ], [ [ "import json\nfrom pyld import jsonld\n\ncontext = {\n \"@context\":{\n \"ore\":\"http://www.openarchives.org/ore/terms/\",\n \"dc\":\"http://purl.org/dc/elements/\",\n \"dct\":\"http://purl.org/dc/terms/\",\n \"dataone\":\"https://cn.dataone.org/cn/\",\n \"cito\":\"http://purl.org/spar/cito/\"\n }\n}\ndoc = json.loads(json_document)\ncompacted = jsonld.compact(doc, context)\nprint(json.dumps(compacted, indent=2))\n", "{\n \"@context\": {\n \"ore\": \"http://www.openarchives.org/ore/terms/\",\n \"dc\": \"http://purl.org/dc/elements/\",\n \"dct\": \"http://purl.org/dc/terms/\",\n \"dataone\": \"https://cn.dataone.org/cn/\",\n \"cito\": \"http://purl.org/spar/cito/\"\n },\n \"@graph\": [\n {\n \"@id\": \"http://store.cdlib.org:35121/content/1001/ark%3A%2F13030%2Fm5dz07z6/2/\",\n \"dct:title\": \"Simple aggregation of science metadata and data\",\n \"ore:aggregates\": [\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\"\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\"\n },\n {\n \"@id\": 
\"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\"\n }\n ],\n \"http://www.w3.org/2001/01/rdf-schema#isDefinedBy\": {\n \"@id\": \"http://www.openarchives.org/ore/terms/\"\n }\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\",\n \"dct:description\": [\n \"A reference to a science data object using a DataONE identifier.\",\n \"A reference to a science metadata document using a DataONE identifier.\"\n ],\n \"dct:identifier\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.xml\",\n \"cito:documents\": [\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\"\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\"\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\"\n }\n ]\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fmrt-dataone-map.rdf\",\n \"@type\": \"ore:ResourceMap\",\n \"dc:1.1/format\": \"application/rdf+xml\",\n \"dct:created\": \"2013-09-12T07:33:27-07:00\",\n \"dct:creator\": \"Merritt\",\n \"dct:identifier\": \"ark:/13030/m5dz07z6/2/mrt-dataone-map.rdf\",\n \"dct:modified\": \"2013-09-12T07:33:27-07:00\",\n \"ore:describes\": {\n \"@id\": \"http://store.cdlib.org:35121/content/1001/ark%3A%2F13030%2Fm5dz07z6/2/\"\n }\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-vuln.csv\",\n \"dct:identifier\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-vuln.csv\",\n \"cito:isDocumentedBy\": {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n }\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001-main.csv\",\n \"dct:identifier\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-main.csv\",\n \"cito:isDocumentedBy\": {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n }\n },\n {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.pdf\",\n \"dct:identifier\": \"ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.pdf\",\n \"cito:isDocumentedBy\": {\n \"@id\": \"dataone:v1/resolve/ark%3A%2F13030%2Fm5dz07z6%2F2%2Fcadwsap-s3400113-001.xml\"\n }\n },\n {\n \"@id\": \"ore:Aggregation\",\n \"http://www.w3.org/2001/01/rdf-schema#isDefinedBy\": {\n \"@id\": \"http://www.openarchives.org/ore/terms/\"\n },\n \"http://www.w3.org/2001/01/rdf-schema#label\": \"Aggregation\"\n }\n ]\n}\n" ] ], [ [ "## Retrieve Data IDs\n\nRetrieve the entries that are the target of a CITO:documents statement. CITO:documents is used to indicate that the subject, i.e. a metadata document, docments the object, i.e. 
a data object.", "_____no_output_____" ] ], [ [ "from d1_common import resource_map\nimport pprint\n# A list of dc.identifer entries for each entry that is object of CITO:documents \ndocuments = ore_object.getAggregatedScienceDataPids()\npprint.pprint(documents)", "['ark:/13030/m5dz07z6/2/cadwsap-s3400113-001.pdf',\n 'ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-vuln.csv',\n 'ark:/13030/m5dz07z6/2/cadwsap-s3400113-001-main.csv']\n" ] ], [ [ "## Rendered Graph\n\nRender the OAI-ORE document using Graphviz", "_____no_output_____" ] ], [ [ "from graphviz import Source\nfrom io import StringIO\nore_dot = StringIO()\nfrom rdflib.tools import rdf2dot\n#rdf2dot.rdf2dot(ore_object, ore_dot)\nore_object.asGraphvizDot(ore_dot)\n#print(ore_dot.getvalue())\nore_graph = Source(ore_dot.getvalue())\nore_graph", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb256a11c2ba47e14578993fd51d868e0cd9c42
42,418
ipynb
Jupyter Notebook
StrategyBenchmarks.ipynb
jonholdship/WordleBot
16eb715218a73924068f0caa813082cfe5a8bcbc
[ "MIT" ]
1
2022-01-23T04:06:16.000Z
2022-01-23T04:06:16.000Z
StrategyBenchmarks.ipynb
jonholdship/WordleBot
16eb715218a73924068f0caa813082cfe5a8bcbc
[ "MIT" ]
2
2022-01-19T09:15:05.000Z
2022-02-21T20:54:18.000Z
StrategyBenchmarks.ipynb
jonholdship/WordleBot
16eb715218a73924068f0caa813082cfe5a8bcbc
[ "MIT" ]
1
2022-01-15T09:59:14.000Z
2022-01-15T09:59:14.000Z
124.02924
12,128
0.881465
[ [ [ "# Imports", "_____no_output_____" ] ], [ [ "from WordleBot import WordleBot", "_____no_output_____" ], [ "# We'll use these for testing strategies\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool\nfrom time import perf_counter\n\ndef performance_plot(num_of_guesses_to_get_answer):\n success_rate = (np.array(num_of_guesses_to_get_answer)<7).sum()/len(num_of_guesses_to_get_answer)\n print('\\rSuccess rate: {:.2%}'.format(success_rate))\n print('Median guesses: {:.0f}'.format(np.median(num_of_guesses_to_get_answer)))\n print('Mean guesses: {:.2f}'.format(np.mean(num_of_guesses_to_get_answer)))\n\n plt.hist(num_of_guesses_to_get_answer,bins=range(1,12),align=\"left\")\n y_max = plt.ylim()[1]\n plt.vlines(6.5,0,y_max,colors='r')\n plt.xlim([0.5,11.5])\n plt.ylim([0,y_max])\n plt.xlabel('Number of guesses')\n plt.ylabel('Occurence')\n plt.title('Histogram of \"{}\" guesses over all wordles'.format(strategy))\n plt.show()\n", "_____no_output_____" ] ], [ [ "### Make the test list\n\nWe test the final version of the bot strategies against the actual list of wordles, to characterise its real performance on the curated list:", "_____no_output_____" ] ], [ [ "bot = WordleBot()", "_____no_output_____" ], [ "test_list = bot.wordles", "_____no_output_____" ] ], [ [ "# Script for testing out strategies\n\nWhen you initiate WordleBot, you can send the optional argument ```strategy``` which selects a strategy for the guesser. ```strategy``` equals \"entropy\" by default.\n\nThe \"entropy\" strategy guesses the word that most reduces the entropy in the list of possible words at each stage. \nThe \"scored\" strategy chooses the first possible word from a pre-sorted list of words, ranked according to the occurrence of unique common letters. 
\nThe \"random\" strategy chooses a random possible word.", "_____no_output_____" ], [ "### Performance of \"entropy\" strategy:", "_____no_output_____" ] ], [ [ "strategy = \"entropy\"\n\ndef do_a_wordle(wordle):\n bot = WordleBot(wordle=wordle,strategy=strategy,dark_mode=False)\n \n while not bot.solved:\n bot.filter_possible_words()\n guess = bot.make_guess()\n bot.check_guess(guess)\n return bot.num_of_guesses", "_____no_output_____" ], [ "pooling=True\nn = len(test_list)\nif pooling:\n pool=Pool()\n num_of_guesses_to_get_answer=pool.map(do_a_wordle,test_list)\nelse:\n num_of_guesses_to_get_answer = []\n\n i = 0\n for i,wordle in enumerate(test_list):\n do_a_wordle(wordle)\n print('\\r{}/{}'.format(i+1,n),end='')\n num_of_guesses_to_get_answer.append(bot.num_of_guesses)", "_____no_output_____" ], [ "performance_plot(num_of_guesses_to_get_answer)", "Success rate: 96.03%\nMedian guesses: 4\nMean guesses: 4.18\n" ] ], [ [ "### Performance of \"scored\" strategy:", "_____no_output_____" ] ], [ [ "strategy = \"scored\"\n\ndef do_a_wordle(wordle):\n bot = WordleBot(wordle=wordle,strategy=strategy)\n \n while not bot.solved:\n bot.filter_possible_words()\n guess = bot.make_guess()\n bot.check_guess(guess)\n return bot.num_of_guesses", "_____no_output_____" ], [ "pooling=True\nn = len(test_list)\nif pooling:\n pool=Pool()\n num_of_guesses_to_get_answer=pool.map(do_a_wordle,test_list)\nelse:\n num_of_guesses_to_get_answer = []\n\n i = 0\n for i,wordle in enumerate(test_list):\n do_a_wordle(wordle)\n print('\\r{}/{}'.format(i+1,n),end='')\n num_of_guesses_to_get_answer.append(bot.num_of_guesses)", "_____no_output_____" ], [ "performance_plot(num_of_guesses_to_get_answer)", "Success rate: 91.75%\nMedian guesses: 4\nMean guesses: 4.60\n" ] ], [ [ "### Performance of \"random\" strategy:", "_____no_output_____" ] ], [ [ "strategy = \"random\"\n\ndef do_a_wordle(wordle):\n bot = WordleBot(wordle=wordle,strategy=strategy)\n \n while not bot.solved:\n bot.filter_possible_words()\n guess = bot.make_guess()\n bot.check_guess(guess)\n return bot.num_of_guesses", "_____no_output_____" ], [ "pooling=True\nn = len(test_list)\nif pooling:\n pool=Pool()\n num_of_guesses_to_get_answer=pool.map(do_a_wordle,test_list)\nelse:\n num_of_guesses_to_get_answer = []\n\n i = 0\n for i,wordle in enumerate(test_list):\n do_a_wordle(wordle)\n print('\\r{}/{}'.format(i+1,n),end='')\n num_of_guesses_to_get_answer.append(bot.num_of_guesses)", "_____no_output_____" ], [ "performance_plot(num_of_guesses_to_get_answer)", "Success rate: 88.94%\nMedian guesses: 5\nMean guesses: 4.93\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ecb25e5750a31c89b9f3e2e5f5cc37d596f717d9
7,576
ipynb
Jupyter Notebook
lecture_07/05_dataloader.ipynb
yukinaga/ai_programming
456f18146a785fb80124f8117af0ad9972c0b90c
[ "MIT" ]
2
2021-10-03T08:51:34.000Z
2021-10-03T13:33:51.000Z
lecture_07/05_dataloader.ipynb
yukinaga/ai_programming
456f18146a785fb80124f8117af0ad9972c0b90c
[ "MIT" ]
null
null
null
lecture_07/05_dataloader.ipynb
yukinaga/ai_programming
456f18146a785fb80124f8117af0ad9972c0b90c
[ "MIT" ]
1
2021-09-30T07:03:42.000Z
2021-09-30T07:03:42.000Z
29.364341
244
0.44905
[ [ [ "<a href=\"https://colab.research.google.com/github/yukinaga/ai_programming/blob/main/lecture_07/05_dataloader.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# DataLoader\nDataLoaderを使うと、データの読み込みやミニバッチ法の実装などが大幅に楽になります。 \n今回は、DataLoaderを使ってデータを扱い、手書き文字の認識を行います。 \nなお、今回から学習はGPUを使って行います。 \n", "_____no_output_____" ], [ "## データの読み込み\n`torchvision.datasets`を使って手書き文字のデータを読み込み、DataLoaderを設定します。 \n`torchvision.datasets`にはMNISTの他にも様々なデータセットが用意されています。 \nhttps://pytorch.org/docs/stable/torchvision/datasets.html\n", "_____no_output_____" ] ], [ [ "import torch\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n\n# 訓練データを取得\nmnist_train = MNIST(\"./data\", \n train=True, download=True,\n transform=transforms.ToTensor())\n# テストデータの取得\nmnist_test = MNIST(\"./data\",\n train=False, download=True,\n transform=transforms.ToTensor())\nprint(\"訓練データの数:\", len(mnist_train), \"テストデータの数:\", len(mnist_test))\n\n# DataLoaderの設定\nimg_size = 28\nbatch_size = 256\ntrain_loader = DataLoader(mnist_train, \n batch_size=batch_size,\n shuffle=True)\ntest_loader = DataLoader(mnist_test,\n batch_size=batch_size,\n shuffle=False)", "_____no_output_____" ] ], [ [ "手書き文字の画像サイズは、28×28になります。 \n\n", "_____no_output_____" ], [ "## モデルの構築\n今回は、`nn.Module`モジュールを継承したクラスとして、モデルを構築します。 \n`.cuda()`により、モデルの計算はGPU上で行われるようになります。 ", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(img_size*img_size, 1024) # 全結合層\n self.fc2 = nn.Linear(1024, 512)\n self.fc3 = nn.Linear(512, 10)\n\n def forward(self, x):\n x = x.view(-1, img_size*img_size) # バッチサイズ×入力の数\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\nnet = Net()\nnet.cuda() # GPU対応\nprint(net)", "_____no_output_____" ] ], [ [ "## 学習\nモデルを訓練します。 \nDataLoaderを使い、ミニバッチを取り出して訓練および評価を行います。 \n1エポックの中で何度もミニバッチを使って訓練が行われるので、ミニバッチ法が実装されていることになります。 \n学習には時間がかかりますので、編集→ノートブックの設定のハードウェアアクセラレーターでGPUを選択しましょう。\n", "_____no_output_____" ] ], [ [ "from torch import optim\n\n# 交差エントロピー誤差関数\nloss_fnc = nn.CrossEntropyLoss()\n\n# SGD\noptimizer = optim.SGD(net.parameters(), lr=0.01)\n\n# 損失のログ\nrecord_loss_train = []\nrecord_loss_test = []\n\n# 学習\nfor i in range(10): # 10エポック学習\n net.train() # 訓練モード\n loss_train = 0\n for j, (x, t) in enumerate(train_loader): # ミニバッチ(x, t)を取り出す\n x, t = x.cuda(), t.cuda() # GPU対応\n y = net(x)\n loss = loss_fnc(y, t)\n loss_train += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_train /= j+1\n record_loss_train.append(loss_train)\n\n net.eval() # 評価モード\n loss_test = 0\n for j, (x, t) in enumerate(test_loader): # ミニバッチ(x, t)を取り出す\n x, t = x.cuda(), t.cuda()\n y = net(x)\n loss = loss_fnc(y, t)\n loss_test += loss.item()\n loss_test /= j+1\n record_loss_test.append(loss_test)\n\n if i%1 == 0:\n print(\"Epoch:\", i, \"Loss_Train:\", loss_train, \"Loss_Test:\", loss_test)", "_____no_output_____" ] ], [ [ "## 誤差の推移\n訓練データ、テストデータで誤差の推移をグラフ表示します。 ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nplt.plot(range(len(record_loss_train)), record_loss_train, label=\"Train\")\nplt.plot(range(len(record_loss_test)), record_loss_test, label=\"Test\")\nplt.legend()\n\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Error\")\nplt.show()", "_____no_output_____" ] ], [ [ "## 正解率\nモデルの性能を把握するため、テストデータ使い正解率を測定します。 ", 
"_____no_output_____" ] ], [ [ "correct = 0\ntotal = 0\nfor i, (x, t) in enumerate(test_loader):\n x, t = x.cuda(), t.cuda() # GPU対応\n x = x.view(-1, img_size*img_size)\n y = net(x)\n correct += (y.argmax(1) == t).sum().item()\n total += len(x)\nprint(\"正解率:\", str(correct/total*100) + \"%\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb2667af2669486ceb756d6ae5f9cd1e62d26a6
839,685
ipynb
Jupyter Notebook
examples/spawn_analysis.ipynb
blevine37/pySpawn17
4fa65cfc3b4d399bcb586506782d00f86b453139
[ "MIT" ]
18
2018-03-30T16:11:13.000Z
2021-08-22T18:57:12.000Z
examples/spawn_analysis.ipynb
blevine37/pySpawn17
4fa65cfc3b4d399bcb586506782d00f86b453139
[ "MIT" ]
3
2018-03-30T17:26:51.000Z
2021-08-17T08:49:24.000Z
examples/spawn_analysis.ipynb
blevine37/pySpawn17
4fa65cfc3b4d399bcb586506782d00f86b453139
[ "MIT" ]
6
2018-11-21T15:30:38.000Z
2021-07-05T05:37:15.000Z
227.248985
237,667
0.878721
[ [ [ "import numpy as np\nimport os\nimport pyspawn\nimport h5py\nimport matplotlib.pyplot as plt\nimport ipywidgets as widgets\nfrom IPython.display import display\nfrom pyspawn.plotting import traj_plot\nimport MDAnalysis as mda\nimport nglview as nv", "_____no_output_____" ] ], [ [ "# Single simulation analysis", "_____no_output_____" ], [ "Here we create a fafile object that pulls the data from the sim.hdf5 file and outputs the arrays for plotting. ", "_____no_output_____" ] ], [ [ "print \"Currently in directory:\", os.getcwd()", "Currently in directory: /Users/Dmitry/PycharmProjects/pySpawn17/examples\n" ], [ "# THIS IS THE ONLY PART OF THE CODE THAT NEEDS TO BE CHANGED\ndir_name = \"/Users/Dmitry/Documents/Research/MSU/4tce/cis/\"\nh5filename = \"sim.1.hdf5\"", "_____no_output_____" ], [ "os.chdir(dir_name)\nan = pyspawn.fafile(h5filename)\n\nan.fill_electronic_state_populations(column_filename=\"N.dat\")\nan.fill_labels()\nan.fill_istates()\nan.get_numstates()\n\ntimes = an.datasets[\"quantum_times\"]\nel_pop = an.datasets[\"electronic_state_populations\"]\nistates = an.datasets[\"istates\"]\nlabels = an.datasets[\"labels\"]\nntraj = len(an.datasets[\"labels\"])\nnstates = an.datasets['numstates']\nan.fill_nuclear_bf_populations()\n\n# write files with energy data for each trajectory\nan.fill_trajectory_energies(column_file_prefix=\"E\")\n\n# write file with time derivative couplings for each trajectory\nan.fill_trajectory_tdcs(column_file_prefix=\"tdc\")\n\n# compute Mulliken population of each trajectory\nan.fill_mulliken_populations(column_filename=\"mull.dat\")\n\nmull_pop = an.datasets[\"electronic_state_populations\"]\n\n# istates dict\nan.create_istate_dict()\nistates_dict = an.datasets['istates_dict']", "{'00b2': 0, '00b3': 0, '00': 1, '00b1': 0, '00b0': 0}\n" ] ], [ [ "This part takes care of xyz trajectory files, bonds, angles (need to have atoms array for this to work)", "_____no_output_____" ] ], [ [ "# writing xyz files\nan.write_xyzs()\n\n# list of bonds to keep track of\nbonds_list = np.array([[3, 11]])\n# write datasets for bonds\nan.fill_trajectory_bonds(bonds_list, column_file_prefix=\"bonds\")\n\n# dihedral angles list\ndiheds_list = np.array([[2, 6, 9, 10]])\n# write datasets for dihedral angles\nan.fill_trajectory_diheds(diheds_list, column_file_prefix=\"diheds\")", "_____no_output_____" ] ], [ [ "Loading the arrays for plotting", "_____no_output_____" ] ], [ [ "arrays = (\"poten\", \"pop\", \"toten\", \"aven\", \"kinen\", \"time\", \"tdc\", \"bonds\", \"diheds\")\n# creating dictionary for the datasets we want to plot\n# keys are trajectory labels\nfor array in arrays:\n exec(array + \"= dict()\")\n\nfor traj in an.datasets[\"labels\"]:\n \n poten[traj] = an.datasets[traj + \"_poten\"]\n toten[traj] = an.datasets[traj + \"_toten\"]\n kinen[traj] = an.datasets[traj + \"_kinen\"]\n time[traj] = an.datasets[traj + \"_time\"]\n tdc[traj] = an.datasets[traj + \"_tdc\"]\n bonds[traj] = an.datasets[traj + \"_bonds\"]\n diheds[traj] = an.datasets[traj + \"_diheds\"]", "_____no_output_____" ] ], [ [ "Setting plotting parameters\n(Perhaps there is a better way to do it, right now these hardcoded color and styles limit us to 7 electronic states and 16 trajectories. 
However, one could argue that more lines on a single plot would not be very informative anyway)", "_____no_output_____" ] ], [ [ "colors = (\"r\", \"g\", \"b\", \"m\", \"y\", \"k\", \"k\")\nlinestyles = (\"-\", \"--\", \"-.\", \":\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\")\nmarkers=(\"None\",\"None\",\"None\",\"None\",\"d\",\"o\",\"v\",\"^\",\"s\",\"p\",\"d\",\"o\",\"v\",\"^\",\"s\",\"p\")\nlarge_size = 20\nmedium_size = 18\nsmall_size = 16", "_____no_output_____" ] ], [ [ "This widget picks the trajectories we want to plot in case there are many of them", "_____no_output_____" ] ], [ [ "labels_to_plot_widget = widgets.SelectMultiple(\n options=labels,\n value=['00'],\n rows=10,\n description='Trajectories',\n disabled=False\n)", "_____no_output_____" ], [ "display(labels_to_plot_widget)", "_____no_output_____" ] ], [ [ "# Plotting Total Energies", "_____no_output_____" ] ], [ [ "%matplotlib notebook\ntraj_plot.plot_total_energies(time, toten, labels_to_plot_widget.value, istates_dict, colors, markers, linestyles)", "_____no_output_____" ] ], [ [ "# Total Population", "_____no_output_____" ] ], [ [ "populated_states = np.amax(istates) + 1\ntraj_plot.plot_total_pop(times, mull_pop, populated_states, colors)", "_____no_output_____" ] ], [ [ "# Plotting Potential Energies", "_____no_output_____" ] ], [ [ "display(labels_to_plot_widget)", "_____no_output_____" ], [ "%matplotlib notebook\ntraj_plot.plot_energies(labels_to_plot_widget.value, time, poten, nstates, colors, linestyles)", "_____no_output_____" ] ], [ [ "# Plotting Energy gaps", "_____no_output_____" ] ], [ [ "display(labels_to_plot_widget)", "_____no_output_____" ], [ "%matplotlib notebook\n# Gap between ground and first excited states\nstate1 = 0\nstate2 = 1\ntraj_plot.plot_e_gap(time, poten, labels_to_plot_widget.value, state1, state2, istates_dict,\n colors, linestyles, markers)", "_____no_output_____" ], [ "%matplotlib notebook\nspawnthresh = 0.00785\n# plot_tdc(labels, time, tdc, nstates, spawnthresh)\ntraj_plot.plot_tdc(time, tdc, labels_to_plot_widget.value,\n nstates, istates_dict, spawnthresh, colors, linestyles, markers)", "_____no_output_____" ] ], [ [ "# Bonds", "_____no_output_____" ] ], [ [ "display(labels_to_plot_widget)", "_____no_output_____" ], [ "%matplotlib notebook\ntraj_plot.plot_bonds(time, labels_to_plot_widget.value, bonds_list, bonds, colors, linestyles)", "_____no_output_____" ] ], [ [ "# Dihedral Angles", "_____no_output_____" ] ], [ [ "%matplotlib notebook\ntraj_plot.plot_diheds(time, labels_to_plot_widget.value, diheds_list, diheds, colors, linestyles)\nplt.savefig('/Users/Dmitry/Documents/Research/MSU/4tce/cis/angles.png', dpi=300)", "_____no_output_____" ] ], [ [ "# Trajectory visualization", "_____no_output_____" ], [ "In this widget we pick the trajectory label to visualize", "_____no_output_____" ] ], [ [ "xyz_widget = widgets.RadioButtons(\n options=labels,\n# value='pineapple',\n description='Trajectory:',\n disabled=False\n)\ndisplay(xyz_widget)\n", "_____no_output_____" ], [ "print \"Trajectory:\", xyz_widget.value\npath_to_xyz = dir_name + \"/traj_\" + xyz_widget.value + \".xyz\"\nprint \"Path to xyz file:\", path_to_xyz\ntraj = mda.Universe(path_to_xyz)\nw = nv.show_mdanalysis(traj)\nw", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
ecb26e13e1fdc435e922377283fd876787a17ba4
25,642
ipynb
Jupyter Notebook
01-Lesson-Plans/06-Python-APIs/1/Activities/01-Ins_RequestsIntro/Solved/Ins_Requests_Demo.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
01-Lesson-Plans/06-Python-APIs/1/Activities/01-Ins_RequestsIntro/Solved/Ins_Requests_Demo.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
01-Lesson-Plans/06-Python-APIs/1/Activities/01-Ins_RequestsIntro/Solved/Ins_Requests_Demo.ipynb
anirudhmungre/sneaky-lessons
8e48015c50865059db96f8cd369bcc15365d66c7
[ "ADSL" ]
null
null
null
69.86921
8,890
0.612082
[ [ [ "# Dependencies\nimport requests\nimport json", "_____no_output_____" ], [ "# URL for GET requests to retrieve vehicle data\nurl = \"https://api.spacexdata.com/v4/launchpads\"", "_____no_output_____" ], [ "# Print the response object to the console\nprint(requests.get(url))", "<Response [200]>\n" ], [ "# Retrieving data and converting it into JSON\nprint(requests.get(url).json())", "[{'name': 'VAFB SLC 3W', 'full_name': 'Vandenberg Air Force Base Space Launch Complex 3W', 'locality': 'Vandenberg Air Force Base', 'region': 'California', 'timezone': 'America/Los_Angeles', 'latitude': 34.6440904, 'longitude': -120.5931438, 'launch_attempts': 0, 'launch_successes': 0, 'rockets': ['5e9d0d95eda69955f709d1eb'], 'launches': [], 'details': \"SpaceX's original west coast launch pad for Falcon 1. It was used in a static fire test but was never employed for a launch, and was abandoned due to range scheduling conflicts arising from overflying other active pads.\", 'status': 'retired', 'id': '5e9e4501f5090910d4566f83'}, {'name': 'CCSFS SLC 40', 'full_name': 'Cape Canaveral Space Force Station Space Launch Complex 40', 'locality': 'Cape Canaveral', 'region': 'Florida', 'timezone': 'America/New_York', 'latitude': 28.5618571, 'longitude': -80.577366, 'launch_attempts': 66, 'launch_successes': 64, 'rockets': ['5e9d0d95eda69973a809d1ec'], 'launches': ['5eb87cddffd86e000604b32f', '5eb87cdeffd86e000604b330', '5eb87cdfffd86e000604b331', '5eb87ce0ffd86e000604b332', '5eb87ce1ffd86e000604b333', '5eb87ce2ffd86e000604b335', '5eb87ce3ffd86e000604b336', '5eb87ce4ffd86e000604b337', '5eb87ce4ffd86e000604b338', '5eb87ce5ffd86e000604b339', '5eb87ce6ffd86e000604b33a', '5eb87ce7ffd86e000604b33b', '5eb87ce8ffd86e000604b33c', '5eb87ceaffd86e000604b33d', '5eb87ceaffd86e000604b33e', '5eb87cecffd86e000604b33f', '5eb87cedffd86e000604b340', '5eb87ceeffd86e000604b341', '5eb87cefffd86e000604b342', '5eb87cf2ffd86e000604b344', '5eb87cf3ffd86e000604b345', '5eb87cf5ffd86e000604b346', '5eb87cf6ffd86e000604b347', '5eb87cf8ffd86e000604b348', '5eb87cf9ffd86e000604b349', '5eb87cfaffd86e000604b34a', '5eb87cfbffd86e000604b34b', '5eb87d0effd86e000604b35c', '5eb87d10ffd86e000604b35e', '5eb87d11ffd86e000604b35f', '5eb87d15ffd86e000604b362', '5eb87d16ffd86e000604b364', '5eb87d18ffd86e000604b365', '5eb87d1bffd86e000604b368', '5eb87d1cffd86e000604b369', '5eb87d1effd86e000604b36a', '5eb87d20ffd86e000604b36c', '5eb87d22ffd86e000604b36d', '5eb87d26ffd86e000604b371', '5eb87d27ffd86e000604b372', '5eb87d2affd86e000604b374', '5eb87d2effd86e000604b377', '5eb87d30ffd86e000604b378', '5eb87d36ffd86e000604b37b', '5eb87d37ffd86e000604b37c', '5eb87d39ffd86e000604b37d', '5eb87d39ffd86e000604b37e', '5eb87d3bffd86e000604b37f', '5eb87d3cffd86e000604b380', '5eb87d3fffd86e000604b382', '5eb87d41ffd86e000604b383', '5eb87d42ffd86e000604b384', '5eb87d45ffd86e000604b387', '5eb87d46ffd86e000604b389', '5eb87d4affd86e000604b38b', '5eb87d50ffd86e000604b394', '5ed981d91f30554030d45c2a', '5eb87d47ffd86e000604b38a', '5ef6a2e70059c33cee4a8293', '5eb87d4cffd86e000604b38d', '5fb95b3f3a88ae63c954603c', '5eb87d4bffd86e000604b38c', '5eb87d4fffd86e000604b393', '5fd386aa7faea57d297c86c1', '5ff6554f9257f579ee3a6c5f', '600f9a5e8f798e2a4d5f979c'], 'details': \"SpaceX's primary Falcon 9 pad, where all east coast Falcon 9s launched prior to the AMOS-6 anomaly. Previously used alongside SLC-41 to launch Titan rockets for the US Air Force, the pad was heavily damaged by the AMOS-6 anomaly in September 2016. 
It returned to flight with CRS-13 on December 15, 2017, boasting an upgraded throwback-style Transporter-Erector modeled after that at LC-39A.\", 'status': 'active', 'id': '5e9e4501f509094ba4566f84'}, {'name': 'STLS', 'full_name': 'SpaceX South Texas Launch Site', 'locality': 'Boca Chica Village', 'region': 'Texas', 'timezone': 'America/Chicago', 'latitude': 25.9972641, 'longitude': -97.1560845, 'launch_attempts': 0, 'launch_successes': 0, 'rockets': [], 'launches': [], 'details': \"SpaceX's new private launch site currently under construction for suborbital test flights of Starship, and potentially orbital flights of the full super heavy stack (previously referred to as BFR) in the future. Currently planned to enter use with static fires and test hops of the Starhopper, with the first tethered hop successfully occurring in March 2019. It was previously going to be used for Falcon 9 and Falcon Heavy flights, but this no longer appears to be likely due to its construction timeline and existing launch sites being sufficient to handle present demand. Due to the Caribbean islands and off-shore oil wells, it will be limited to very few possible launch trajectories, and current Texas law only allows a limited number of beach closures per year for orbital launches.\", 'status': 'under construction', 'id': '5e9e4502f5090927f8566f85'}, {'name': 'Kwajalein Atoll', 'full_name': 'Kwajalein Atoll Omelek Island', 'locality': 'Omelek Island', 'region': 'Marshall Islands', 'timezone': 'Pacific/Kwajalein', 'latitude': 9.0477206, 'longitude': 167.7431292, 'launch_attempts': 5, 'launch_successes': 2, 'rockets': ['5e9d0d95eda69955f709d1eb'], 'launches': ['5eb87cd9ffd86e000604b32a', '5eb87cdaffd86e000604b32b', '5eb87cdbffd86e000604b32c', '5eb87cdbffd86e000604b32d', '5eb87cdcffd86e000604b32e'], 'details': \"SpaceX's original pad, where all of the Falcon 1 flights occurred (from 2006 to 2009). It would have also been the launch site of the Falcon 1e and the Falcon 9, but it was abandoned as SpaceX ended the Falcon 1 program and decided against upgrading it to support Falcon 9, likely due to its remote location and ensuing logistics complexities.\", 'status': 'retired', 'id': '5e9e4502f5090995de566f86'}, {'name': 'VAFB SLC 4E', 'full_name': 'Vandenberg Air Force Base Space Launch Complex 4E', 'locality': 'Vandenberg Air Force Base', 'region': 'California', 'timezone': 'America/Los_Angeles', 'latitude': 34.632093, 'longitude': -120.610829, 'launch_attempts': 16, 'launch_successes': 16, 'rockets': ['5e9d0d95eda69973a809d1ec'], 'launches': ['5eb87ce1ffd86e000604b334', '5eb87cf0ffd86e000604b343', '5eb87cfdffd86e000604b34c', '5eb87d05ffd86e000604b354', '5eb87d08ffd86e000604b357', '5eb87d0affd86e000604b359', '5eb87d0fffd86e000604b35d', '5eb87d14ffd86e000604b361', '5eb87d16ffd86e000604b363', '5eb87d1affd86e000604b367', '5eb87d1fffd86e000604b36b', '5eb87d23ffd86e000604b36e', '5eb87d25ffd86e000604b370', '5eb87d28ffd86e000604b373', '5eb87d31ffd86e000604b379', '5ed983aa1f30554030d45c31'], 'details': \"SpaceX's primary west coast launch pad for polar orbits and sun-synchronous orbits, primarily used for Iridium NEXT and scientific satellite launches. The pad was used for the debut of Falcon 9 v1.1 in the rocket's first ever non-dragon mission, CASSIOPE, in September 2013. It is SpaceX's only remaining pad with the old-style transporter/erector, which reclines prior to launch instead of using a throwback procedure. 
It is also capable of launching Falcon Heavy (although some pad modifications would be needed, but no west coast Falcon Heavy missions are currently planned).\", 'status': 'active', 'id': '5e9e4502f509092b78566f87'}, {'name': 'KSC LC 39A', 'full_name': 'Kennedy Space Center Historic Launch Complex 39A', 'locality': 'Cape Canaveral', 'region': 'Florida', 'timezone': 'America/New_York', 'latitude': 28.6080585, 'longitude': -80.6039558, 'launch_attempts': 30, 'launch_successes': 30, 'rockets': ['5e9d0d95eda69973a809d1ec', '5e9d0d95eda69974db09d1ed'], 'launches': ['5eb87cfeffd86e000604b34d', '5eb87cfeffd86e000604b34e', '5eb87d00ffd86e000604b34f', '5eb87d01ffd86e000604b350', '5eb87d01ffd86e000604b351', '5eb87d03ffd86e000604b352', '5eb87d04ffd86e000604b353', '5eb87d06ffd86e000604b355', '5eb87d07ffd86e000604b356', '5eb87d09ffd86e000604b358', '5eb87d0cffd86e000604b35a', '5eb87d0dffd86e000604b35b', '5eb87d13ffd86e000604b360', '5eb87d19ffd86e000604b366', '5eb87d24ffd86e000604b36f', '5eb87d2bffd86e000604b375', '5eb87d2dffd86e000604b376', '5eb87d35ffd86e000604b37a', '5eb87d3dffd86e000604b381', '5eb87d43ffd86e000604b385', '5eb87d44ffd86e000604b386', '5eb87d46ffd86e000604b388', '5ed9819a1f30554030d45c29', '5ef6a1e90059c33cee4a828a', '5ef6a2090059c33cee4a828b', '5ef6a2bf0059c33cee4a828c', '5eb87d4dffd86e000604b38e', '5eb87d4effd86e000604b391', '5f8399fb818d8b59f5740d43', '5fbfecce54ceb10a5664c80a'], 'details': \"NASA's historic pad that launched most of the Saturn V and Space Shuttle missions, including Apollo 11. SpaceX initially leased solely for Falcon Heavy and Crew Dragon launches, but the company has also used it for others as well following the damage to SLC-40 in the AMOS-6 explosion. After completing the necessary modifications, the first launch SpaceX performed on the pad was CRS-10 in February 2017. After SLC-40 was back online, 39A was upgraded to support Falcon Heavy and complete the removal of the shuttle-era Rotating Service Structure. More recently, a crew access arm and other safety equipment has been installed in order to launch commercial crew missions. 39A also occasionally launches other Falcon 9 missions between Falcon Heavy and Crew Dragon launches, depending on pad scheduling. The pad may also potentially be upgraded in the future for use with the BFR.\", 'status': 'active', 'id': '5e9e4502f509094188566f88'}]\n" ], [ "# Pretty Print the output of the JSON\nresponse = requests.get(url).json()\nprint(json.dumps(response, indent=4, sort_keys=True))", "[\n {\n \"details\": \"SpaceX's original west coast launch pad for Falcon 1. It was used in a static fire test but was never employed for a launch, and was abandoned due to range scheduling conflicts arising from overflying other active pads.\",\n \"full_name\": \"Vandenberg Air Force Base Space Launch Complex 3W\",\n \"id\": \"5e9e4501f5090910d4566f83\",\n \"latitude\": 34.6440904,\n \"launch_attempts\": 0,\n \"launch_successes\": 0,\n \"launches\": [],\n \"locality\": \"Vandenberg Air Force Base\",\n \"longitude\": -120.5931438,\n \"name\": \"VAFB SLC 3W\",\n \"region\": \"California\",\n \"rockets\": [\n \"5e9d0d95eda69955f709d1eb\"\n ],\n \"status\": \"retired\",\n \"timezone\": \"America/Los_Angeles\"\n },\n {\n \"details\": \"SpaceX's primary Falcon 9 pad, where all east coast Falcon 9s launched prior to the AMOS-6 anomaly. Previously used alongside SLC-41 to launch Titan rockets for the US Air Force, the pad was heavily damaged by the AMOS-6 anomaly in September 2016. 
It returned to flight with CRS-13 on December 15, 2017, boasting an upgraded throwback-style Transporter-Erector modeled after that at LC-39A.\",\n \"full_name\": \"Cape Canaveral Space Force Station Space Launch Complex 40\",\n \"id\": \"5e9e4501f509094ba4566f84\",\n \"latitude\": 28.5618571,\n \"launch_attempts\": 66,\n \"launch_successes\": 64,\n \"launches\": [\n \"5eb87cddffd86e000604b32f\",\n \"5eb87cdeffd86e000604b330\",\n \"5eb87cdfffd86e000604b331\",\n \"5eb87ce0ffd86e000604b332\",\n \"5eb87ce1ffd86e000604b333\",\n \"5eb87ce2ffd86e000604b335\",\n \"5eb87ce3ffd86e000604b336\",\n \"5eb87ce4ffd86e000604b337\",\n \"5eb87ce4ffd86e000604b338\",\n \"5eb87ce5ffd86e000604b339\",\n \"5eb87ce6ffd86e000604b33a\",\n \"5eb87ce7ffd86e000604b33b\",\n \"5eb87ce8ffd86e000604b33c\",\n \"5eb87ceaffd86e000604b33d\",\n \"5eb87ceaffd86e000604b33e\",\n \"5eb87cecffd86e000604b33f\",\n \"5eb87cedffd86e000604b340\",\n \"5eb87ceeffd86e000604b341\",\n \"5eb87cefffd86e000604b342\",\n \"5eb87cf2ffd86e000604b344\",\n \"5eb87cf3ffd86e000604b345\",\n \"5eb87cf5ffd86e000604b346\",\n \"5eb87cf6ffd86e000604b347\",\n \"5eb87cf8ffd86e000604b348\",\n \"5eb87cf9ffd86e000604b349\",\n \"5eb87cfaffd86e000604b34a\",\n \"5eb87cfbffd86e000604b34b\",\n \"5eb87d0effd86e000604b35c\",\n \"5eb87d10ffd86e000604b35e\",\n \"5eb87d11ffd86e000604b35f\",\n \"5eb87d15ffd86e000604b362\",\n \"5eb87d16ffd86e000604b364\",\n \"5eb87d18ffd86e000604b365\",\n \"5eb87d1bffd86e000604b368\",\n \"5eb87d1cffd86e000604b369\",\n \"5eb87d1effd86e000604b36a\",\n \"5eb87d20ffd86e000604b36c\",\n \"5eb87d22ffd86e000604b36d\",\n \"5eb87d26ffd86e000604b371\",\n \"5eb87d27ffd86e000604b372\",\n \"5eb87d2affd86e000604b374\",\n \"5eb87d2effd86e000604b377\",\n \"5eb87d30ffd86e000604b378\",\n \"5eb87d36ffd86e000604b37b\",\n \"5eb87d37ffd86e000604b37c\",\n \"5eb87d39ffd86e000604b37d\",\n \"5eb87d39ffd86e000604b37e\",\n \"5eb87d3bffd86e000604b37f\",\n \"5eb87d3cffd86e000604b380\",\n \"5eb87d3fffd86e000604b382\",\n \"5eb87d41ffd86e000604b383\",\n \"5eb87d42ffd86e000604b384\",\n \"5eb87d45ffd86e000604b387\",\n \"5eb87d46ffd86e000604b389\",\n \"5eb87d4affd86e000604b38b\",\n \"5eb87d50ffd86e000604b394\",\n \"5ed981d91f30554030d45c2a\",\n \"5eb87d47ffd86e000604b38a\",\n \"5ef6a2e70059c33cee4a8293\",\n \"5eb87d4cffd86e000604b38d\",\n \"5fb95b3f3a88ae63c954603c\",\n \"5eb87d4bffd86e000604b38c\",\n \"5eb87d4fffd86e000604b393\",\n \"5fd386aa7faea57d297c86c1\",\n \"5ff6554f9257f579ee3a6c5f\",\n \"600f9a5e8f798e2a4d5f979c\"\n ],\n \"locality\": \"Cape Canaveral\",\n \"longitude\": -80.577366,\n \"name\": \"CCSFS SLC 40\",\n \"region\": \"Florida\",\n \"rockets\": [\n \"5e9d0d95eda69973a809d1ec\"\n ],\n \"status\": \"active\",\n \"timezone\": \"America/New_York\"\n },\n {\n \"details\": \"SpaceX's new private launch site currently under construction for suborbital test flights of Starship, and potentially orbital flights of the full super heavy stack (previously referred to as BFR) in the future. Currently planned to enter use with static fires and test hops of the Starhopper, with the first tethered hop successfully occurring in March 2019. It was previously going to be used for Falcon 9 and Falcon Heavy flights, but this no longer appears to be likely due to its construction timeline and existing launch sites being sufficient to handle present demand. 
Due to the Caribbean islands and off-shore oil wells, it will be limited to very few possible launch trajectories, and current Texas law only allows a limited number of beach closures per year for orbital launches.\",\n \"full_name\": \"SpaceX South Texas Launch Site\",\n \"id\": \"5e9e4502f5090927f8566f85\",\n \"latitude\": 25.9972641,\n \"launch_attempts\": 0,\n \"launch_successes\": 0,\n \"launches\": [],\n \"locality\": \"Boca Chica Village\",\n \"longitude\": -97.1560845,\n \"name\": \"STLS\",\n \"region\": \"Texas\",\n \"rockets\": [],\n \"status\": \"under construction\",\n \"timezone\": \"America/Chicago\"\n },\n {\n \"details\": \"SpaceX's original pad, where all of the Falcon 1 flights occurred (from 2006 to 2009). It would have also been the launch site of the Falcon 1e and the Falcon 9, but it was abandoned as SpaceX ended the Falcon 1 program and decided against upgrading it to support Falcon 9, likely due to its remote location and ensuing logistics complexities.\",\n \"full_name\": \"Kwajalein Atoll Omelek Island\",\n \"id\": \"5e9e4502f5090995de566f86\",\n \"latitude\": 9.0477206,\n \"launch_attempts\": 5,\n \"launch_successes\": 2,\n \"launches\": [\n \"5eb87cd9ffd86e000604b32a\",\n \"5eb87cdaffd86e000604b32b\",\n \"5eb87cdbffd86e000604b32c\",\n \"5eb87cdbffd86e000604b32d\",\n \"5eb87cdcffd86e000604b32e\"\n ],\n \"locality\": \"Omelek Island\",\n \"longitude\": 167.7431292,\n \"name\": \"Kwajalein Atoll\",\n \"region\": \"Marshall Islands\",\n \"rockets\": [\n \"5e9d0d95eda69955f709d1eb\"\n ],\n \"status\": \"retired\",\n \"timezone\": \"Pacific/Kwajalein\"\n },\n {\n \"details\": \"SpaceX's primary west coast launch pad for polar orbits and sun-synchronous orbits, primarily used for Iridium NEXT and scientific satellite launches. The pad was used for the debut of Falcon 9 v1.1 in the rocket's first ever non-dragon mission, CASSIOPE, in September 2013. It is SpaceX's only remaining pad with the old-style transporter/erector, which reclines prior to launch instead of using a throwback procedure. It is also capable of launching Falcon Heavy (although some pad modifications would be needed, but no west coast Falcon Heavy missions are currently planned).\",\n \"full_name\": \"Vandenberg Air Force Base Space Launch Complex 4E\",\n \"id\": \"5e9e4502f509092b78566f87\",\n \"latitude\": 34.632093,\n \"launch_attempts\": 16,\n \"launch_successes\": 16,\n \"launches\": [\n \"5eb87ce1ffd86e000604b334\",\n \"5eb87cf0ffd86e000604b343\",\n \"5eb87cfdffd86e000604b34c\",\n \"5eb87d05ffd86e000604b354\",\n \"5eb87d08ffd86e000604b357\",\n \"5eb87d0affd86e000604b359\",\n \"5eb87d0fffd86e000604b35d\",\n \"5eb87d14ffd86e000604b361\",\n \"5eb87d16ffd86e000604b363\",\n \"5eb87d1affd86e000604b367\",\n \"5eb87d1fffd86e000604b36b\",\n \"5eb87d23ffd86e000604b36e\",\n \"5eb87d25ffd86e000604b370\",\n \"5eb87d28ffd86e000604b373\",\n \"5eb87d31ffd86e000604b379\",\n \"5ed983aa1f30554030d45c31\"\n ],\n \"locality\": \"Vandenberg Air Force Base\",\n \"longitude\": -120.610829,\n \"name\": \"VAFB SLC 4E\",\n \"region\": \"California\",\n \"rockets\": [\n \"5e9d0d95eda69973a809d1ec\"\n ],\n \"status\": \"active\",\n \"timezone\": \"America/Los_Angeles\"\n },\n {\n \"details\": \"NASA's historic pad that launched most of the Saturn V and Space Shuttle missions, including Apollo 11. SpaceX initially leased solely for Falcon Heavy and Crew Dragon launches, but the company has also used it for others as well following the damage to SLC-40 in the AMOS-6 explosion. 
After completing the necessary modifications, the first launch SpaceX performed on the pad was CRS-10 in February 2017. After SLC-40 was back online, 39A was upgraded to support Falcon Heavy and complete the removal of the shuttle-era Rotating Service Structure. More recently, a crew access arm and other safety equipment has been installed in order to launch commercial crew missions. 39A also occasionally launches other Falcon 9 missions between Falcon Heavy and Crew Dragon launches, depending on pad scheduling. The pad may also potentially be upgraded in the future for use with the BFR.\",\n \"full_name\": \"Kennedy Space Center Historic Launch Complex 39A\",\n \"id\": \"5e9e4502f509094188566f88\",\n \"latitude\": 28.6080585,\n \"launch_attempts\": 30,\n \"launch_successes\": 30,\n \"launches\": [\n \"5eb87cfeffd86e000604b34d\",\n \"5eb87cfeffd86e000604b34e\",\n \"5eb87d00ffd86e000604b34f\",\n \"5eb87d01ffd86e000604b350\",\n \"5eb87d01ffd86e000604b351\",\n \"5eb87d03ffd86e000604b352\",\n \"5eb87d04ffd86e000604b353\",\n \"5eb87d06ffd86e000604b355\",\n \"5eb87d07ffd86e000604b356\",\n \"5eb87d09ffd86e000604b358\",\n \"5eb87d0cffd86e000604b35a\",\n \"5eb87d0dffd86e000604b35b\",\n \"5eb87d13ffd86e000604b360\",\n \"5eb87d19ffd86e000604b366\",\n \"5eb87d24ffd86e000604b36f\",\n \"5eb87d2bffd86e000604b375\",\n \"5eb87d2dffd86e000604b376\",\n \"5eb87d35ffd86e000604b37a\",\n \"5eb87d3dffd86e000604b381\",\n \"5eb87d43ffd86e000604b385\",\n \"5eb87d44ffd86e000604b386\",\n \"5eb87d46ffd86e000604b388\",\n \"5ed9819a1f30554030d45c29\",\n \"5ef6a1e90059c33cee4a828a\",\n \"5ef6a2090059c33cee4a828b\",\n \"5ef6a2bf0059c33cee4a828c\",\n \"5eb87d4dffd86e000604b38e\",\n \"5eb87d4effd86e000604b391\",\n \"5f8399fb818d8b59f5740d43\",\n \"5fbfecce54ceb10a5664c80a\"\n ],\n \"locality\": \"Cape Canaveral\",\n \"longitude\": -80.6039558,\n \"name\": \"KSC LC 39A\",\n \"region\": \"Florida\",\n \"rockets\": [\n \"5e9d0d95eda69973a809d1ec\",\n \"5e9d0d95eda69974db09d1ed\"\n ],\n \"status\": \"active\",\n \"timezone\": \"America/New_York\"\n }\n]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ecb27506f39ed70bdab094e90c59b88447cf5efc
82,850
ipynb
Jupyter Notebook
cwatershed.ipynb
robertoalotufo/ia870p3
aa3cca44dfb4f0323d5ccfcbe0ee336f30c23461
[ "BSD-2-Clause" ]
5
2018-10-15T12:02:03.000Z
2022-02-11T12:47:12.000Z
cwatershed.ipynb
robertoalotufo/ia870p3
aa3cca44dfb4f0323d5ccfcbe0ee336f30c23461
[ "BSD-2-Clause" ]
1
2018-10-15T12:04:36.000Z
2019-01-25T12:04:35.000Z
cwatershed.ipynb
robertoalotufo/ia870p3
aa3cca44dfb4f0323d5ccfcbe0ee336f30c23461
[ "BSD-2-Clause" ]
4
2019-01-25T11:13:48.000Z
2020-12-20T01:42:33.000Z
270.751634
27,952
0.92455
[ [ [ "# Cwatershed - watershed from markers - usage illustration", "_____no_output_____" ] ], [ [ "import numpy as np\nimport ia870 as MT\nfrom PIL import Image", "_____no_output_____" ] ], [ [ "## Example 1, numerical", "_____no_output_____" ] ], [ [ "a = np.array([[10, 10, 10, 10, 10, 10, 10],\\\n [10, 9, 6, 18, 6, 5, 10],\\\n [10, 9, 6, 18, 6, 8, 10],\\\n [10, 9, 9, 15, 9, 9, 10],\\\n [10, 9, 9, 15, 12, 10, 10],\\\n [10, 10, 10, 10, 10, 10, 10]],\n dtype = np.uint8)\nb = (a == 6)\nprint('b=\\n',b * 1)\nprint('ws Lines:\\n', MT.iacwatershed(a, b, MT.iasecross()) *1 )\nprint('ws Regions:\\n',MT.iacwatershed(a, b, MT.iasecross(),'REGIONS'))", "b=\n [[0 0 0 0 0 0 0]\n [0 0 1 0 1 0 0]\n [0 0 1 0 1 0 0]\n [0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0]\n [0 0 0 0 0 0 0]]\nws Lines:\n [[0 0 0 0 1 0 0]\n [0 0 0 0 1 0 0]\n [0 0 0 0 1 0 0]\n [0 0 0 0 1 0 0]\n [0 0 0 0 1 0 0]\n [0 0 0 0 1 0 0]]\nws Regions:\n [[1 1 1 1 2 2 2]\n [1 1 1 1 2 2 2]\n [1 1 1 1 2 2 2]\n [1 1 1 1 2 2 2]\n [1 1 1 1 2 2 2]\n [1 1 1 1 2 2 2]]\n" ] ], [ [ "## Example 2 - image", "_____no_output_____" ] ], [ [ "f_pil = Image.open('data/astablet.tif')\nf_pil", "_____no_output_____" ] ], [ [ "### gradient image", "_____no_output_____" ] ], [ [ "f = np.array(f_pil)\ngrad = MT.iagradm(f)\nImage.fromarray(grad)", "_____no_output_____" ] ], [ [ "### Markers from Filtered Regional Minima", "_____no_output_____" ] ], [ [ "mark = MT.iaregmin(MT.iahmin(grad,35))\nImage.fromarray(mark.astype(np.uint8)*255)", "_____no_output_____" ] ], [ [ "### Watershed from markers", "_____no_output_____" ] ], [ [ "w = MT.iacwatershed(grad,mark)\nImage.fromarray(w.astype(np.uint8)*255)", "_____no_output_____" ] ], [ [ "### shown as labeled regions", "_____no_output_____" ] ], [ [ "wlabel = MT.iacwatershed(grad,mark, MT.iasecross(), 'REGIONS')\nImage.fromarray(MT.iaglblshow(wlabel).transpose(1,2,0))", "_____no_output_____" ] ], [ [ "## Reference\n\nR. Lotufo and A. Falcão, \"The ordered queue and the optimality of the watershed approaches\", in Proceedings of the 5th International Symposium on Mathematical Morphology and its Applications to Image and Signal Processing, vol. 18. Kluwer Academic Publishers, June 2000, pp. 341–350.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ecb278c546f191424e37883d84ae95145d7bac4f
47,303
ipynb
Jupyter Notebook
Jupyter_Scrapping.ipynb
tccao/TwitterCrawlerAnalysis
4ae6258648c1cbbcf23b6e219d9fc4c9e3c24adc
[ "MIT" ]
null
null
null
Jupyter_Scrapping.ipynb
tccao/TwitterCrawlerAnalysis
4ae6258648c1cbbcf23b6e219d9fc4c9e3c24adc
[ "MIT" ]
null
null
null
Jupyter_Scrapping.ipynb
tccao/TwitterCrawlerAnalysis
4ae6258648c1cbbcf23b6e219d9fc4c9e3c24adc
[ "MIT" ]
null
null
null
43.397248
84
0.391392
[ [ [ "import csv\n# For parsing the dates received from twitter in readable formats\nimport datetime\nimport dateutil.parser\nimport unicodedata\n#To add wait time between requests\nimport time\nimport networkx as nx\nimport pandas as pd\n\ndef main():\n a=1\n \n \nif __name__ == '__main__':\n main()\n #Create graph object from networkx\n G = nx.Graph()\ndata_original = pd.read_csv(\"original_tweet_data.csv\")\n\ncounter = 0\nfor author in data_original.author_id:\n G.add_nodes_from([\n author, data_original.iloc[counter,4]\n ])\n counter +=1\ndata_original", "_____no_output_____" ], [ "data_retweet_user = pd.read_csv(\"retweet_user_data.csv\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ecb292050b9f999cdaafa33db5382afb70534c72
13,547
ipynb
Jupyter Notebook
dS-ML/week1/Pandas.ipynb
ananthamurthy/playground
38d13c054da081b2f89de476ce4d3e45dc91f663
[ "MIT" ]
null
null
null
dS-ML/week1/Pandas.ipynb
ananthamurthy/playground
38d13c054da081b2f89de476ce4d3e45dc91f663
[ "MIT" ]
null
null
null
dS-ML/week1/Pandas.ipynb
ananthamurthy/playground
38d13c054da081b2f89de476ce4d3e45dc91f663
[ "MIT" ]
null
null
null
22.099511
117
0.403041
[ [ [ "#!pip install pandas\nimport pandas as pd", "_____no_output_____" ], [ "numbers = range(1,4,2)\npd.Series(numbers)", "_____no_output_____" ], [ "numbers = range(1,100,5)\npd.Series(numbers)", "_____no_output_____" ], [ "string = \"Hi\", \"How\", \"are\", \"you\", \"?\"\npd.Series(string)", "_____no_output_____" ], [ "s = pd.Series([345, 'London', 34.5, -34.45, 'Happy Birthday'])\ns", "_____no_output_____" ], [ "marks = [60, 89, 74, 86]\nsubject = [\"maths\", \"science\", \"english\", \"social science\"]\npd.Series(marks, index = subject)", "_____no_output_____" ], [ "data = {'Maths' : 60, 'Science' : 89, 'English': 76, 'Social Science' : 86}\npd.Series(data)", "_____no_output_____" ], [ "subjects = [\"Maths\", \"Science\", \"Art and Craft\", \"Social Science\"]\nmarks_series = pd.Series(data, index = subjects)\nprint(marks_series)", "Maths 60.0\nScience 89.0\nArt and Craft NaN\nSocial Science 86.0\ndtype: float64\n" ], [ "marks_series.isnull()", "_____no_output_____" ], [ "marks_series.notnull()", "_____no_output_____" ], [ "marks_series[marks_series > 75]", "_____no_output_____" ], [ "marks_series[\"Art and Craft\"] = 68", "_____no_output_____" ], [ "marks_series #implicit", "_____no_output_____" ], [ "print(marks_series) #explicit", "Maths 60.0\nScience 89.0\nArt and Craft 68.0\nSocial Science 86.0\ndtype: float64\n" ], [ "marks_series[\"Maths\"] == 73", "_____no_output_____" ], [ "#But look to using methods\nmarks_series.Maths == 73", "_____no_output_____" ], [ "s1 = pd.Series([1, 2, 5, 6.5])\ns2 = pd.Series(['first', 35, 'college', 62.5])", "_____no_output_____" ], [ "#Dataframes are multidimensional analogs of Series\ndata = {'Subject' : ['Maths', 'History', 'Science', 'English', 'Geography', 'Art'],\n 'Marks' : [45, 65, 78, 65, 80, 78],\n 'CGPA' : [2.5, 3.0, 3.5, 2.0, 4.0, 4.0]}\ndf = pd.DataFrame(data)\nprint(df)", " Subject Marks CGPA\n0 Maths 45 2.5\n1 History 65 3.0\n2 Science 78 3.5\n3 English 65 2.0\n4 Geography 80 4.0\n5 Art 78 4.0\n" ], [ "Subject = pd.Series(['Maths', 'History', 'Science', 'English', 'Geography', 'Art'])\nMarks = pd.Series([45, 65, 78, 65, 80, 78])\nCGPA = pd.Series([2.5, 3.0, 3.5, 2.0, 4.0, 4.0])\npd.DataFrame([Subject, Marks, CGPA], index = ['Subject', 'Marks', 'CGPA']).T #Transponse for vertical dataframe", "_____no_output_____" ], [ "a1 = ['Hogwarts', 'Durmstrang', 'Beauxbatons']\na2 = ['Hogwarts', 'Durmstrang', 'Beauxbatons']\na3 = ['Hogwarts', 'Durmstrang', 'Beauxbatons']\n\nschool = [a1, a2, a3]\n\ninst = ['School_1', 'School_2', 'School_3']\n\nMuggle_data = pd.DataFrame(data=school, columns=inst)\n\nMuggle_data", "_____no_output_____" ], [ "#data.iloc[]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb2929eacb54b953dee8ff59ed4059613da76a3
31,999
ipynb
Jupyter Notebook
jupyter/notebooks/least_squares_multiple_lines_gradient.ipynb
edruet/estimation
1ee11e6f4d5dd5abcecc36a8a2f2ed869e1eb051
[ "Apache-2.0" ]
null
null
null
jupyter/notebooks/least_squares_multiple_lines_gradient.ipynb
edruet/estimation
1ee11e6f4d5dd5abcecc36a8a2f2ed869e1eb051
[ "Apache-2.0" ]
null
null
null
jupyter/notebooks/least_squares_multiple_lines_gradient.ipynb
edruet/estimation
1ee11e6f4d5dd5abcecc36a8a2f2ed869e1eb051
[ "Apache-2.0" ]
null
null
null
181.8125
25,548
0.884621
[ [ [ "import numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt", "_____no_output_____" ], [ "# Parameters of the problem\n# The model that will be used is y(x) = s1 + ln(1 + exp((s2 - s1) + s3 * x))\n# The data will be generated with a (decreasing) line followed by a constant value (modeloffset)\nlinerate = -2 # An artificial model y = linerate * x + modeloffset is used to generate data\nmodeloffset = 53 # See here above\nnoiselevel = 2 # standard deviation of the noise applied to the observed values\nXMIN = -10 # Minimum value for input value\nXMAX = 40 # Maximum value for input value\nYMIN = 7 # Minimum observed value\nYMAX = 80 # Maximum observed value\nN = XMAX - XMIN + 1 # Number of samples\nseedinit = 1 # Initialization value for random generation\nalpha = 0.05 # Multiplier for the gradient algorithm\nNLOOPS = 10000 # Number of iterations for the optimization algorithm\nNVIEWS = 20 # Number of printed lines during optimization (for debugging)", "_____no_output_____" ], [ "xin = np.arange(XMIN,XMAX+1,1,dtype=float).reshape(N, 1) # Generating the input values\nAin = np.concatenate((np.ones((N,1),dtype=float), xin), axis=1) # Building the matrix: A x = y for the expected values\nnp.random.seed(seedinit) # Initializing the generator the control the reproducibility of the test\nyin = linerate * xin + modeloffset # Generating observed values\nyin = np.where(yin < YMIN, YMIN, yin) + noiselevel * (np.random.randn(N,1) - 0.5)", "_____no_output_____" ], [ "x = tf.placeholder(tf.float32, shape=(N,1)) # Placeholder for the inputs\ny = tf.placeholder(tf.float32, shape=(N,1)) # Placeholder for the observed values\n#s = tf.Variable([[YMIN],[modeloffset],[linerate]], dtype=tf.float32) # Case with squared_difference \n # (converges only with good initial values)\ns = tf.Variable(tf.zeros([3, 1]), dtype=tf.float32) # Works (with a lot of iterations and alpha = 0.05 for median estimation)\nys = s[0][0] + tf.log(1 + tf.exp((s[1][0] - s[0][0]) + s[2][0] * x))\n#cost_function = tf.reduce_mean(tf.squared_difference(ys, y)) # OK with good initial values and alpha = 0.01\ncost_function = tf.reduce_mean(tf.abs(ys - y)) # Absolute values <=> median estimation\ntrain_step = tf.train.GradientDescentOptimizer(alpha).minimize(cost_function)", "_____no_output_____" ], [ "init = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\nnp.set_printoptions(precision=3, suppress=True)\nfor i in range(NLOOPS+1):\n sess.run(train_step, feed_dict = {x:xin, y:yin})\n sout, ysout, costout = sess.run([s, ys, cost_function], feed_dict = {x:xin, y:yin})\n if (NVIEWS > 0) and ((i % int(NLOOPS/NVIEWS)) == 0):\n print(\"step: \" + str(i) + \", cost:\" + str(costout) + \", s:\" + str(sout.reshape(3)))\n #print(ysout)", "step: 0, cost:25.803238, s:[0.025 0.025 0.375]\nstep: 500, cost:14.111216, s:[ 7.977 5.737 -7.755]\nstep: 1000, cost:13.665284, s:[10.246 7.694 -8.023]\nstep: 1500, cost:13.508095, s:[10.615 9.617 -7.743]\nstep: 2000, cost:13.328767, s:[10.616 11.712 -7.437]\nstep: 2500, cost:13.115754, s:[10.617 13.996 -7.113]\nstep: 3000, cost:12.8832855, s:[10.569 16.383 -6.783]\nstep: 3500, cost:12.584779, s:[10.259 19.064 -6.397]\nstep: 4000, cost:12.223967, s:[10.2 22.03 -5.964]\nstep: 4500, cost:11.727214, s:[10.161 25.457 -5.199]\nstep: 5000, cost:10.986544, s:[ 9.636 29.681 -4.664]\nstep: 5500, cost:9.740974, s:[ 9.319 35.178 -3.989]\nstep: 6000, cost:4.7614665, s:[ 8.415 43.637 -1.557]\nstep: 6500, cost:1.6710263, s:[ 6.239 51.37 -1.905]\nstep: 7000, cost:1.5409365, s:[ 6.239 
51.431 -1.972]\nstep: 7500, cost:1.540931, s:[ 6.239 51.431 -1.972]\nstep: 8000, cost:1.5409659, s:[ 6.238 51.431 -1.972]\nstep: 8500, cost:1.5409598, s:[ 6.238 51.431 -1.972]\nstep: 9000, cost:1.540939, s:[ 6.24 51.431 -1.972]\nstep: 9500, cost:1.5366786, s:[ 6.24 51.43 -1.975]\nstep: 10000, cost:1.5409298, s:[ 6.24 51.431 -1.972]\n" ], [ "plt.figure(figsize=(12,9))\nplt.axis([XMIN, XMAX, 0, YMAX])\nplt.plot(xin, yin, 'b.') # Displaying the samples used for fitting\nplt.plot(xin, ysout, 'g')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ecb2938d7d73b62cc0cee4662eff5bac945e3e27
12,381
ipynb
Jupyter Notebook
examples/name-and-gender/Working Group Emails and Drafts-hrpc.ipynb
nllz/bigbang
d4fef7eb41ae04e51f4e369de5a721c66231202b
[ "MIT" ]
4
2016-05-25T16:13:44.000Z
2017-11-06T15:16:30.000Z
examples/name-and-gender/Working Group Emails and Drafts-hrpc.ipynb
nllz/bigbang
d4fef7eb41ae04e51f4e369de5a721c66231202b
[ "MIT" ]
20
2016-06-13T15:28:56.000Z
2016-07-10T17:58:17.000Z
examples/name-and-gender/Working Group Emails and Drafts-hrpc.ipynb
nllz/bigbang
d4fef7eb41ae04e51f4e369de5a721c66231202b
[ "MIT" ]
8
2016-05-25T11:47:32.000Z
2016-07-06T08:29:00.000Z
23.809615
156
0.527502
[ [ [ "This notebook compares the email activities and draft activites of an IETF working group.", "_____no_output_____" ], [ "Import the BigBang modules as needed. These should be in your Python environment if you've installed BigBang correctly.", "_____no_output_____" ] ], [ [ "import bigbang.mailman as mailman\nfrom bigbang.parse import get_date\n#from bigbang.functions import *\nfrom bigbang.archive import Archive\n\nfrom ietfdata.datatracker import *", "_____no_output_____" ] ], [ [ "Also, let's import a number of other dependencies we'll use later.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport pytz\nimport pickle\nimport os", "_____no_output_____" ] ], [ [ "## Load the HRPC Mailing List\n\nNow let's load the email data for analysis.", "_____no_output_____" ] ], [ [ "wg = \"hrpc\"\n\nurls = [wg]\n\narchives = [Archive(url,mbox=True) for url in urls]\n\nactivities = [arx.get_activity(resolved=False) for arx in archives]\nactivity = activities[0]", "/home/sb/projects/bigbang/bigbang/mailman.py:141: UserWarning: No mailing list name found at hrpc\n warnings.warn(\"No mailing list name found at %s\" % url)\n" ] ], [ [ "## Load IETF Draft Data\n\nNext, we will use the `ietfdata` tracker to look at the frequency of drafts for this working group.", "_____no_output_____" ] ], [ [ "from ietfdata.datatracker import *\nfrom ietfdata.datatracker_ext import *\n\nimport pandas as pd\n\ndt = DataTracker()\n\ng = dt.group_from_acronym(\"hrpc\")\ndrafts = [draft for draft in dt.documents(group=g, doctype=dt.document_type_from_slug(\"draft\"))]\n\n\ndraft_df = pd.DataFrame.from_records([\n {'time' : draft.time, 'title' : draft.title, 'id' : draft.id} for draft in drafts]\n)", "_____no_output_____" ] ], [ [ "We will want to use the data of the drafts. 
Time resolution is too small.", "_____no_output_____" ] ], [ [ "draft_df['date'] = draft_df['time'].dt.date", "_____no_output_____" ] ], [ [ "## Gender score and tendency measures\n\nThis notebook uses the (notably imperfect) method of using first names to guess the gender of each draft author.", "_____no_output_____" ] ], [ [ "from gender_detector import gender_detector as gd\ndetector = gd.GenderDetector('us')\n\ndef gender_score(name):\n \"\"\"\n Takes a full name and returns a score for the guessed\n gender.\n \n 1 - male\n 0 - female\n .5 - unknown\n \"\"\"\n try:\n first_name = name.split(\" \")[0]\n guess = detector.guess(first_name)\n score = 0\n if guess == \"male\":\n return 1.0\n elif guess == \"female\":\n return 0.0\n else:\n # name does not have confidence to guesss\n return 0.5\n except:\n # Some error, \"unknown\"\n return .5", "_____no_output_____" ] ], [ [ "## Gender guesses on mailing list activity\n\nNow to use the gender guesser to track the contributions by differently gendered participants over time.", "_____no_output_____" ] ], [ [ "from bigbang.parse import clean_name", "_____no_output_____" ], [ "gender_activity = activity.groupby(\n by=lambda x: gender_score(clean_name(x)),\n axis=1).sum().rename({0.0 : \"women\", 0.5 : \"unknown\", 1.0 : \"men\"},\n axis=\"columns\")", "_____no_output_____" ] ], [ [ "Note that our gender scoring method currently is unable to get a clear guess for a large percentage of the emails!", "_____no_output_____" ] ], [ [ "print(\"%f.2 percent of emails are from an unknown gender.\" \\\n % (gender_activity[\"unknown\"].sum() / gender_activity.sum().sum()))\n\nplt.bar([\"women\",\"unknown\",\"men\"],gender_activity.sum())\nplt.title(\"Total emails sent by guessed gender\")", "_____no_output_____" ] ], [ [ "## Plotting\n\nSome preprocessing is necessary to get the drafts data ready for plotting.", "_____no_output_____" ] ], [ [ "from matplotlib import cm\n\nviridis = cm.get_cmap('viridis')", "_____no_output_____" ], [ "drafts_per_day = draft_df.groupby('date').count()['title']", "_____no_output_____" ] ], [ [ "For each of the mailing lists we are looking at, plot the rolling average (over `window`) of number of emails sent per day.\n\nThen plot a vertical line with the height of the drafts count and colored by the gender tendency.", "_____no_output_____" ] ], [ [ "window = 100", "_____no_output_____" ], [ "plt.figure(figsize=(12, 6))\n\nfor i, gender in enumerate(gender_activity.columns):\n\n colors = [viridis(0), viridis(.5), viridis(.99)]\n\n ta = gender_activity[gender]\n rmta = ta.rolling(window).mean()\n rmtadna = rmta.dropna()\n plt.plot_date(np.array(rmtadna.index),\n np.array(rmtadna.values),\n color = colors[i],\n linestyle = '-', marker = None,\n label='%s email activity - %s' % (wg, gender),\n xdate=True)\n\n\nvax = plt.vlines(drafts_per_day.index,\n 0,\n drafts_per_day,\n colors = 'r', # draft_gt_per_day,\n cmap = 'viridis',\n label=f'{wg} drafts ({drafts_per_day.sum()} total)')\n\nplt.legend()\nplt.title(f\"{wg} working group emails and drafts\")\n#plt.colorbar(vax, label = \"more womanly <-- Gender Tendency --> more manly\")\n\n#plt.savefig(\"activites-marked.png\")\n#plt.show()", "_____no_output_____" ] ], [ [ "### Is gender diversity correlated with draft output?\n\n", "_____no_output_____" ] ], [ [ "from scipy.stats import pearsonr\nimport pandas as pd\n\ndef calculate_pvalues(df):\n df = df.dropna()._get_numeric_data()\n dfcols = pd.DataFrame(columns=df.columns)\n pvalues = dfcols.transpose().join(dfcols, how='outer')\n 
for r in df.columns:\n for c in df.columns:\n pvalues[r][c] = round(pearsonr(df[r], df[c])[1], 4)\n return pvalues", "_____no_output_____" ], [ "drafts_per_ordinal_day = pd.Series({x[0].toordinal(): x[1] for x in drafts_per_day.items()})", "_____no_output_____" ], [ "drafts_per_ordinal_day", "_____no_output_____" ], [ "ta.rolling(window).mean()", "_____no_output_____" ], [ "garm = np.log1p(gender_activity.rolling(window).mean())", "_____no_output_____" ] ], [ [ "## Measuring diversity\n\nAs a rough measure of gender diversity, we sum the mailing list activity of women and those of unidentified gender, and divide by the activity of men.", "_____no_output_____" ] ], [ [ "garm['diversity'] = (garm['unknown'] + garm['women']) / garm['men']", "_____no_output_____" ], [ "garm['drafts'] = drafts_per_ordinal_day\ngarm['drafts'] = garm['drafts'].fillna(0)", "_____no_output_____" ], [ "garm.corr(method='pearson')", "_____no_output_____" ], [ "calculate_pvalues(garm)", "_____no_output_____" ] ], [ [ "Some variations...", "_____no_output_____" ] ], [ [ "garm_dna = garm.dropna(subset=['drafts'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ecb29db9d071b89af4d71e865dc16086472e30fe
78,294
ipynb
Jupyter Notebook
example.ipynb
Ren-Research/OneProxy
e3c9b2c65113e16f1208988287810b283674f4de
[ "MIT" ]
9
2021-11-07T18:56:43.000Z
2022-03-08T10:18:36.000Z
example.ipynb
Ren-Research/OneProxy
e3c9b2c65113e16f1208988287810b283674f4de
[ "MIT" ]
null
null
null
example.ipynb
Ren-Research/OneProxy
e3c9b2c65113e16f1208988287810b283674f4de
[ "MIT" ]
null
null
null
98.607053
1,672
0.717194
[ [ [ "# This note book take NAS-Bench-201 search space as an example, to show how to use AdaProxy to boost the SRCC (> 0.9) between devices\n# with originally low SRCC", "_____no_output_____" ], [ "# first read and process some latency data, which is stored as a Python list in .pickle file \n\nimport pickle\n\n# all the latency data here are measured and released by recent work\n\n# nn-Meter: https://github.com/microsoft/nn-Meter\n# actual latency of Adreno 630 GPU, Adreno 640 GPU, CortexA76 CPU, Myriad VPU\ntrue_630 = pickle.load(open('./true_630.pickle', 'rb'))\ntrue_640 = pickle.load(open('./true_640.pickle', 'rb'))\ntrue_a76 = pickle.load(open('./true_a76.pickle', 'rb'))\ntrue_vpu = pickle.load(open('./true_vpu.pickle', 'rb'))\n\n# HW-NAS-Bench: https://github.com/RICE-EIC/HW-NAS-Bench\n# actual latency of Pixel3, EdgeGPU, EdgeTPU, eyeriss, FPGA, Raspi4\nrice_pixel3 = pickle.load(open('./rice_pixel3.pickle', 'rb'))\nrice_edgegpu = pickle.load(open('./rice_edgegpu.pickle', 'rb'))\nrice_edgetpu = pickle.load(open('./rice_edgetpu.pickle', 'rb'))\nrice_eyeriss = pickle.load(open('./rice_eyeriss.pickle', 'rb'))\nrice_fpga = pickle.load(open('./rice_fpga.pickle', 'rb'))\nrice_raspi4 = pickle.load(open('./rice_raspi4.pickle', 'rb'))\n\n# Eagle: https://github.com/zheng-ningxin/brp-nas\n# actual latency of GTX, CPU 855, DSP 855, GPU 855, i7, Jetson, Jetson (int16 models)\neagle_gtx = pickle.load(open('./eagle_gtx.pickle', 'rb'))\neagle_cpu_855 = pickle.load(open('./eagle_cpu_855.pickle', 'rb'))\neagle_dsp_855 = pickle.load(open('./eagle_dsp_855.pickle', 'rb'))\neagle_gpu_855 = pickle.load(open('./eagle_gpu_855.pickle', 'rb'))\neagle_i7 = pickle.load(open('./eagle_i7.pickle', 'rb'))\neagle_jetson = pickle.load(open('./eagle_jetson.pickle', 'rb'))\neagle_jetson_16 = pickle.load(open('./eagle_jetson_16.pickle', 'rb'))", "_____no_output_____" ], [ "# process the latency data\n# convert ms to s so that weight is smaller\n\n# nn-Meter latency\ntrue_vpu = [i/1000 for i in true_vpu]\ntrue_630 = [i/1000 for i in true_630]\ntrue_640 = [i/1000 for i in true_640]\ntrue_a76 = [i/1000 for i in true_a76]\n# eagle latency\neagle_gtx = [i/1000 for i in eagle_gtx]\neagle_cpu_855 = [i/1000 for i in eagle_cpu_855]\neagle_dsp_855 = [i/1000 for i in eagle_dsp_855]\neagle_gpu_855 = [i/1000 for i in eagle_gpu_855]\neagle_i7 = [i/1000 for i in eagle_i7]\neagle_jetson = [i/1000 for i in eagle_jetson]\neagle_jetson_16 = [i/1000 for i in eagle_jetson_16]\n# rice latency\nrice_pixel3 = [i/1000 for i in rice_pixel3]\nrice_edgegpu = [i/1000 for i in rice_edgegpu]\nrice_edgetpu = [i/1000 for i in rice_edgetpu]\nrice_eyeriss = [i/1000 for i in rice_eyeriss]\nrice_fpga = [i/1000 for i in rice_fpga]\nrice_raspi4 = [i/1000 for i in rice_raspi4]", "_____no_output_____" ], [ "# read the architecture encoding\n# for the NAS-Bench-201 models, we use simple one-hot encoding, which is 31-dim\n\nencode = pickle.load(open('./encode.pickle', 'rb')) # encode is a list stored in .pickle\n# because nn-Meter only releases latency of 2000 models, so we actually only use part of NAS-Bench-201\nprint(len(encode), len(encode[0]))", "1999 31\n" ], [ "import numpy as np\nfrom scipy import stats\n\n# select proxy and target device\nproxy = true_vpu\ntarget = eagle_gtx\n\n# convert list to numpy array\ntarget = np.array(target).reshape(-1, 1)\nproxy = np.array(proxy).reshape(-1, 1)\n\n# check the original SRCC between proxy and target\nprint(\"=\"*100)\nsrcc = stats.spearmanr(proxy, target)[0]\nprint(\"SRCC between target and proxy: \", srcc)\n\nif 
srcc >= 0.9:\n raise Exception(\"SRCC between target and proxy meets the threshold 0.9, there is no need to do proxy adaptation!\")\nelse:\n print(\"SRCC between target and proxy does not meet the threshold 0.9, proxy adaptation is needed\")\n \n \nprint(\"=\"*100)\n\n# convert list to numpy array\nencode = pickle.load(open('./encode.pickle', 'rb'))\nencode = np.array(encode)\nprint(encode.shape)", "====================================================================================================\nSRCC between target and proxy: 0.690526859861756\nSRCC between target and proxy does not meet the threshold 0.9, proxy adaptation is needed!\n====================================================================================================\n(1999, 31)\n" ], [ "from sklearn.metrics import mean_squared_error\n\n# directly use linear relationship to get the weight on proxy\nweight_linear = np.linalg.pinv(encode).dot(proxy)\n\n# check the linear fitting effect\nprint(\"=\"*100)\nprint(\"MSE for linear fitting on proxy:\", mean_squared_error(encode.dot(weight_linear), proxy))\nprint(\"SRCC between pred and true for linear:\", stats.spearmanr(encode.dot(weight_linear), proxy))\nprint(\"=\"*100)", "====================================================================================================\nMSE for linear fitting on proxy: 8.778622360177611e-08\nSRCC between pred and true for linear: SpearmanrResult(correlation=0.9676540304712693, pvalue=0.0)\n====================================================================================================\n" ], [ "# transfer on target\nimport random\nimport copy\nfrom functions.solver import Solver # the AdaProxy algorithm is in Solver class\n\n# number of models used on target\nn_val_target = 10\nn_train_target = 60\n\n# random selec models\nrandom.seed(1)\nindex = random.sample(range(encode.shape[0]), n_train_target+n_val_target)\n\nx_train_target = np.zeros(shape=(n_train_target, encode.shape[1]))\ny_train_target = np.zeros(shape=(n_train_target, 1))\nfor i in range(n_train_target):\n\tx_train_target[i, :] = encode[index[i]]\n\ty_train_target[i, :] = target[index[i]]\n\nx_val_target = np.zeros(shape=(n_val_target, encode.shape[1]))\ny_val_target = np.zeros(shape=(n_val_target, 1))\nfor i in range(n_val_target):\n\tx_val_target[i, :] = encode[index[i+n_train_target]]\n\ty_val_target[i, :] = target[index[i+n_train_target]]\n\t\nx_all = np.vstack((x_train_target, x_val_target))\ny_all = np.vstack((y_train_target, y_val_target))\n\n# x and y are all the architecture encoding and target latency\nx = copy.deepcopy(encode)\ny = copy.deepcopy(target)\n\n# initial weight on proxy\nw = copy.deepcopy(weight_linear)\n\nsolver = Solver(w, x_train_target, y_train_target, x_val_target, y_val_target, x, y, n_dim=len(encode[0]))\n\n# check the SRCC between training models, validation models, and training & val together\nprint(\"=\"*100)\nprint(\"SRCC between y_train: \", stats.spearmanr(x_train_target.dot(w), y_train_target))\nprint(\"SRCC between y_val: \", stats.spearmanr(x_val_target.dot(w), y_val_target))\nprint(\"SRCC between y_all: \", stats.spearmanr(x_all.dot(w), y_all))\nprint(\"=\"*100)", "====================================================================================================\nSRCC between y_train: SpearmanrResult(correlation=0.6601833842734095, pvalue=9.558357117232713e-09)\nSRCC between y_val: SpearmanrResult(correlation=0.7939393939393938, pvalue=0.0060999233136969115)\nSRCC between y_all: SpearmanrResult(correlation=0.6680255445717784, 
pvalue=2.656765492968506e-10)\n====================================================================================================\n" ], [ "# we use l2 norm\nnorm = 2\n\nlamb_dic = dict()\nmax_srcc_val = float('-inf')\nmax_srcc_all = None\nmax_srcc = None\nmax_lamb = None\nMSE_val = []\nMSE_all = []\nMSE = []\nSRCC_val = []\nSRCC_all = []\nSRCC = []\nfinal_lat_val = None\nfinal_lat_all = None\nfinal_lat = None\n# lamb is hyperparameter that can be tuned\n#lamb_range = np.arange(0, 10.01, 0.01)\nlamb_range = np.arange(1, 100000, 0.001)\n\nfor lamb in lamb_range:\n\testimate_target_train, estimate_target_val, estimate_target_all, estimate_target, srcc_train, srcc_val, srcc_all, srcc = solver.solve(lamb, norm)\n\n\tif srcc_val > max_srcc_val:\n\t\tmax_srcc_val = srcc_val\n\t\tmax_srcc_all = srcc_all\n\t\tmax_srcc = srcc\n\t\tmax_lamb = lamb\n\t\tfinal_lat_val = estimate_target_val\n\t\tfinal_lat_all = estimate_target_all\n\t\tfinal_lat = estimate_target\n\tprint(\"lamb:\", lamb, \"srcc_val:\", srcc_val, \"srcc_all:\", srcc_all, \"srcc_train:\", srcc_train, \"srcc\", srcc)\n\t\n\tmse_val = mean_squared_error(y_val_target, estimate_target_val)\n\tMSE_val.append(mse_val)\n\tmse_all = mean_squared_error(y_all, estimate_target_all)\n\tMSE_val.append(mse_all)\n\tmse = mean_squared_error(target, estimate_target)\n\tMSE.append(mse)\n\t\n\tSRCC_val.append(srcc_val)\n\tSRCC_all.append(srcc_all)\n\tSRCC.append(srcc)\n\t\n\tprint(\"Max SRCC val:\", max_srcc_val, \"lambda:\", max_lamb, \"MSE train:\", mean_squared_error(y_train_target, estimate_target_train), \"SRCC train:\", srcc_train, \"MSE val:\", mse_val, \"SRCC all:\", max_srcc_all, \"MSE all:\", mse_all, \"SRCC 2000 models:\", max_srcc, \"MSE 2000 models:\", mse)\n\tprint()", "1.0\nlamb: 1.0 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.58742488281726e-07 SRCC train: 0.9565434843011951 MSE val: 1.577527844062006e-07 SRCC all: 0.946076458752515 MSE all: 1.586011020137938e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.7706686492197e-07\n\n1.001\nlamb: 1.001 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874249227460823e-07 SRCC train: 0.9565434843011951 MSE val: 1.577528676365699e-07 SRCC all: 0.946076458752515 MSE all: 1.5860111732631707e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770671401059421e-07\n\n1.0019999999999998\nlamb: 1.0019999999999998 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874247301952234e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775299080456665e-07 SRCC all: 0.946076458752515 MSE all: 1.5860111841738582e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770674106579359e-07\n\n1.0029999999999997\nlamb: 1.0029999999999997 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874246942607251e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775313134191774e-07 SRCC all: 0.946076458752515 MSE all: 1.5860113541405042e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770676775996637e-07\n\n1.0039999999999996\nlamb: 1.0039999999999996 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 
0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874245631334096e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775327399482819e-07 SRCC all: 0.946076458752515 MSE all: 1.5860114455355342e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770679749349802e-07\n\n1.0049999999999994\nlamb: 1.0049999999999994 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874245094199794e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775345847147057e-07 SRCC all: 0.946076458752515 MSE all: 1.5860116630335116e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770682958987675e-07\n\n1.0059999999999993\nlamb: 1.0059999999999993 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874244723011292e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775358331393198e-07 SRCC all: 0.946076458752515 MSE all: 1.5860118095637275e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770685641989068e-07\n\n1.0069999999999992\nlamb: 1.0069999999999992 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.587424385661992e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775367918902038e-07 SRCC all: 0.946076458752515 MSE all: 1.5860118722660222e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770688144182646e-07\n\n1.0079999999999991\nlamb: 1.0079999999999991 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874242803340338e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775380428408413e-07 SRCC all: 0.946076458752515 MSE all: 1.5860119606921495e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770690792243892e-07\n\n1.008999999999999\nlamb: 1.008999999999999 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874242496417e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775386349139267e-07 SRCC all: 0.946076458752515 MSE all: 1.5860120189663032e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770693239237089e-07\n\n1.009999999999999\nlamb: 1.009999999999999 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874241276390298e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775403479718221e-07 SRCC all: 0.946076458752515 MSE all: 1.586012159115143e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770695974188343e-07\n\n1.0109999999999988\nlamb: 1.0109999999999988 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874241563843168e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775416497444334e-07 SRCC all: 0.946076458752515 MSE all: 1.586012369721477e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770698768334341e-07\n\n1.0119999999999987\nlamb: 1.0119999999999987 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874240536852637e-07 SRCC train: 0.9565434843011951 MSE val: 
1.577542738414173e-07 SRCC all: 0.946076458752515 MSE all: 1.5860124372179652e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770701280191709e-07\n\n1.0129999999999986\nlamb: 1.0129999999999986 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874238560250036e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775441472300045e-07 SRCC all: 0.946076458752515 MSE all: 1.5860124690542893e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770704064634504e-07\n\n1.0139999999999985\nlamb: 1.0139999999999985 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874237748461658e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775457468047825e-07 SRCC all: 0.946076458752515 MSE all: 1.5860126279831109e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770706910099092e-07\n\n1.0149999999999983\nlamb: 1.0149999999999983 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874237977353874e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775467425411204e-07 SRCC all: 0.946076458752515 MSE all: 1.586012789850492e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770709326639066e-07\n\n1.0159999999999982\nlamb: 1.0159999999999982 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.587423690068139e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775481420179087e-07 SRCC all: 0.946076458752515 MSE all: 1.5860128974895344e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770712349950365e-07\n\n1.0169999999999981\nlamb: 1.0169999999999981 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.587423573608243e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775500260446998e-07 SRCC all: 0.946076458752515 MSE all: 1.5860130668134513e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770715073766676e-07\n\n1.017999999999998\nlamb: 1.017999999999998 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874235375695903e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775500199330516e-07 SRCC all: 0.946076458752515 MSE all: 1.586013035050085e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770717144975497e-07\n\n1.018999999999998\nlamb: 1.018999999999998 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.587423600472542e-07 SRCC train: 0.9565434843011951 MSE val: 1.5775526773375308e-07 SRCC all: 0.946076458752515 MSE all: 1.586013468596112e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770720369769865e-07\n\n1.0199999999999978\nlamb: 1.0199999999999978 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688365729411253\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874233670463896e-07 SRCC train: 0.9565434843011951 MSE val: 1.577552616003063e-07 SRCC all: 0.946076458752515 MSE all: 1.5860132597544861e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 
4.770722053189817e-07\n\n1.0209999999999977\nlamb: 1.0209999999999977 srcc_val: 0.6969696969696969 srcc_all: 0.946076458752515 srcc_train: 0.9565434843011951 srcc 0.8688361488810608\nMax SRCC val: 0.6969696969696969 lambda: 1.0 MSE train: 1.5874233117448094e-07 SRCC train: 0.9565434843011951 MSE val: 1.577554558086464e-07 SRCC all: 0.946076458752515 MSE all: 1.5860134897936178e-07 SRCC 15625: 0.8688365729411253 MSE 15625: 4.770724957987325e-07\n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb2a36fe0911a01259f97e1199ef6d62396a4f0
15,685
ipynb
Jupyter Notebook
txt_generation.ipynb
Aasish1106/MyCaptain-Assignments---Artificial-Intelligence
2dcbf4603db815123106a9f1d04db135d73c3a62
[ "Apache-2.0" ]
3
2020-06-28T12:36:05.000Z
2022-03-09T09:08:54.000Z
txt_generation.ipynb
Aasish1106/MyCaptain-Assignments---Artificial-Intelligence
2dcbf4603db815123106a9f1d04db135d73c3a62
[ "Apache-2.0" ]
null
null
null
txt_generation.ipynb
Aasish1106/MyCaptain-Assignments---Artificial-Intelligence
2dcbf4603db815123106a9f1d04db135d73c3a62
[ "Apache-2.0" ]
1
2021-03-30T07:03:48.000Z
2021-03-30T07:03:48.000Z
33.950216
1,014
0.527957
[ [ [ "<a href=\"https://colab.research.google.com/github/Aashish1106/MyCaptain-Assignments---Artificial-Intelligence/blob/master/txt_generation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "#importing dependencies\nimport numpy\nimport sys\nimport nltk\nnltk.download('stopwords')\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM\nfrom keras.utils import np_utils\nfrom keras.callbacks import ModelCheckpoint", "[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "#loading data \n# loading data and opening our input data in the form of a txt file\n#project Gutenberg/berg is where the data can be found \nfile = open(\"Frankenstein.txt\").read()", "_____no_output_____" ], [ "#tookenisation\n#standardisation \n#what is tokenization? Tokenization is the process of breaking a stream if text up into words phrases symbols or into \n# a meaningful elements.\ndef tokenize_words(input):\n # lowercase everything to a standardize it\n input = input.lower()\n # instantiating the tokenizer\n tokenizer = RegexpTokenizer(r'\\w+')\n # tokenizing the text into tokens\n tokens = tokenizer.tokenize(input)\n # filtering the stopwords using lambda\n filtered = filter(lambda token: token not in stopwords.words('english'), tokens)\n return \"\".join(filtered)\n\n# preprocess the input data, make tokens \nprocessed_inputs = tokenize_words(file)\n", "_____no_output_____" ], [ "# chars to numbers \n# convert characters in our input to numbers\n# we'll sort the list of the set of all characters that appear in our i/p text and then use the enumerate fn\n# to get numbers that represent the characters \n# we'll then create a dictionary that stores the keys and values, or the characters and the numbers that represent them\nchars = sorted(list(set(processed_inputs)))\nchar_to_num = dict((c,i) for i, c in enumerate(chars))", "_____no_output_____" ], [ "# Check if words to chars or chars to num(?!) 
has worked?\n# just so wem get an idea of whether our process of convrting words to characters has worked,\n# we print the length of our variables\ninput_len = len (processed_inputs)\nvocab_len = len (chars)\nprint(\"Total number of characters:\", input_len)\nprint(\"Total vocab:\", vocab_len)\n", "Total number of characters: 233296\nTotal vocab: 42\n" ], [ "#seq length\n# We're defining how long we want an individual sequence here\n# an individual sequence is a complete mapping of input characters as integers\nseq_length = 100\nx_data = []\ny_data = []\n", "_____no_output_____" ], [ "#loop through the sequence \n# here we're going through the entire list of i/ps and converting the chars to numbers with a for loop\n# this will create a bunch of sequences where each sequence starts with the next character in the i/p data \n# begnning with the first character\nfor i in range(0, input_len - seq_length, 1):\n # define i/p and o/p sequences\n # i/p is the current character plus the desired sequence length\n in_seq = processed_inputs[i:i + seq_length]\n # out sequence is the initial character plus total sequence length \n out_seq = processed_inputs[i + seq_length]\n # converting the list of characters to integers based on previous values and appending the values to our lists\n x_data.append([char_to_num[char] for char in in_seq])\n y_data.append(char_to_num[out_seq])\n\n# check to see how many total input sequence we have\nn_patterns = len(x_data)\nprint(\"Total Patterns:\", n_patterns)", "Total Patterns: 233196\n" ], [ "#convert input sequence to np array that our network can use \nX = numpy.reshape(x_data, (n_patterns, seq_length, 1))\nX = X/float(vocab_len)", "_____no_output_____" ], [ "#one hot-encoding our label data\ny = np_utils.to_categorical(y_data)", "_____no_output_____" ], [ "#creating the model\n# creating a sequential model\n# dropout is used to prevent overfitting\nmodel= Sequential()\nmodel.add(LSTM(256,input_shape=(X.shape[1], X.shape[2]), return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(256, return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(128))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(y.shape[1], activation='softmax'))", "_____no_output_____" ], [ "#compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')", "_____no_output_____" ], [ "#saving weights\nfilepath = 'model_weights_saved.hdf5'\ncheckpoint = ModelCheckpoint(filepath, monitor='loss', verbose= 1, save_best_only=True, mode='min')\ndesired_callbacks = (checkpoint)", "_____no_output_____" ], [ "# fit model and let it train \nmodel.fit(X,y, epochs=4, batch_size=256, callbacks=desired_callbacks)", "Epoch 1/4\n911/911 [==============================] - ETA: 0s - loss: 2.9374\nEpoch 00001: loss improved from inf to 2.93740, saving model to model_weights_saved.hdf5\n911/911 [==============================] - 3321s 4s/step - loss: 2.9374\nEpoch 2/4\n911/911 [==============================] - ETA: 0s - loss: 2.9165\nEpoch 00002: loss improved from 2.93740 to 2.91652, saving model to model_weights_saved.hdf5\n911/911 [==============================] - 3347s 4s/step - loss: 2.9165\nEpoch 3/4\n911/911 [==============================] - ETA: 0s - loss: 2.9132\nEpoch 00003: loss improved from 2.91652 to 2.91320, saving model to model_weights_saved.hdf5\n911/911 [==============================] - 3359s 4s/step - loss: 2.9132\nEpoch 4/4\n911/911 [==============================] - ETA: 0s - loss: 2.8851\nEpoch 00004: loss improved from 2.91320 to 2.88515, saving model to 
model_weights_saved.hdf5\n911/911 [==============================] - 3392s 4s/step - loss: 2.8851\n" ], [ "# recompile model with the saved weights\nfilename = 'model_weights_saved.hdf5'\nmodel.load_weights(filename)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')", "_____no_output_____" ], [ "# output of the model back into characters\nnum_to_char = dict((i,c) for i,c in enumerate(chars))", "_____no_output_____" ], [ "# random seed to help generate \nstart = numpy.random.randint(0, len(x_data) - 1)\npattern = x_data[start]\nprint(\"Random seed: \")\nprint(\"\\\"\" , ''.join([num_to_char[value] for value in pattern]), \"\\\"\")", "Random seed: \n\" confidencesinceritylistenedfathersilenceremainedtimeincapableofferingreplyrevolvedrapidlymindmultitu \"\n" ], [ "# generate the text\nfor i in range(1000):\n x = numpy.reshape(pattern, (1,len(pattern), 1))\n x = x/float(vocab_len)\n prediction = model.predict(x, verbose=0)\n index = numpy.argmax(prediction)\n result = num_to_char[index]\n seq_in = [num_to_char[value] for value in pattern]\n sys.stdout.write(result)\n pattern.append(index)\n pattern = pattern[ 1:len(pattern)]\n", "eneeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb2b0f5e338cb7f28aa759fe52fda56130789c6
497,177
ipynb
Jupyter Notebook
Lesson-SQL Intermediate Joins.ipynb
saquib-mehmood/sql_intermediate
7f9ccb4cb124845e71fbcf5a07e21f934f180941
[ "MIT" ]
1
2020-11-29T10:41:45.000Z
2020-11-29T10:41:45.000Z
Lesson-SQL Intermediate Joins.ipynb
saquib-mehmood/sql_intermediate
7f9ccb4cb124845e71fbcf5a07e21f934f180941
[ "MIT" ]
null
null
null
Lesson-SQL Intermediate Joins.ipynb
saquib-mehmood/sql_intermediate
7f9ccb4cb124845e71fbcf5a07e21f934f180941
[ "MIT" ]
null
null
null
830.011686
477,236
0.949662
[ [ [ "# Lesson - Intermediate joins in SQL\n\nMost databases have more than 2 tables, so we'll need strategies to be able to write queries to combine data from 3 or more tables.\n\nIn this lesson we'll learn some new techniques to work with the sort of databases that most businesses will use. We'll be working with a modified version of a database called Chinook. The Chinook database contains information about a fictional digital music shop - kind of like a mini-iTunes store.\n\nThe Chinook database contains information about the artists, songs, and albums from the music shop, as well as information on the shop's employees, customers, and the customers purchases. This information is contained in eleven tables. Like with our Factbook database, a schema diagram will help us understand the available columns and the structure of the data. Here's a schema diagram for the Chinook database:\n\n", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ], [ "Let's take a moment to understand the different parts of the schema diagram:\n\nTables names are shown in bold, with the columns in each table listed below.\n- Each table has one or more columns with shading, which indicates those columns are a primary key. Each row's primary key must be unique.\n- Relationships between tables are shown using lines between the tables. The lines indicate which columns are related. We can notice that at least one 'end' of the relationship will be a primary key.\n- ", "_____no_output_____" ], [ "**Load sql and connect to the Database factbook.db using following magic:**", "_____no_output_____" ] ], [ [ "%%capture\n%reload_ext sql\n%sql sqlite:///chinook.db ", "_____no_output_____" ] ], [ [ "### Query the database to get table information for `chinook.db` directly:\n**Use %%sql magic before query in each code cell to avoid returning an error ", "_____no_output_____" ] ], [ [ "%%sql\nSELECT * \nFROM sqlite_master \nWHERE type='table';", "_____no_output_____" ] ], [ [ "### Joining Three Tables\nWrite a query that For one single purchase (invoice_id=4), for each track purchased gives:\n\n- The id of the track.\n- The name of the track.\n- The name of media type of the track.\n- The price that the customer paid for the track.\n- The quantity of the track that was purchased.\n\nTo gather this information, we will need to write a query that joins 3 tables: `invoice_line`, `track`, and `media_type`.", "_____no_output_____" ] ], [ [ "%%sql\nPRAGMA TABLE_INFO(invoice_line)", "_____no_output_____" ], [ "%%sql\nPRAGMA TABLE_INFO(track)", "_____no_output_____" ], [ "%%sql\nPRAGMA TABLE_INFO(media_type)", "_____no_output_____" ] ], [ [ "Above tables are connected with keys as follows:\ninvoice_line - track: track_id\ntrack - media_type: media_type_id\n- We cannot join invoice_line and media_type directly as no common key exists\n- We can join the three sequentially.", "_____no_output_____" ] ], [ [ "%%sql\nSELECT il.track_id, t.name AS track_name, mt.name AS track_type, il.unit_price, il.quantity\n FROM invoice_line AS il\n INNER JOIN track AS t ON t.track_id=il.track_id\n INNER JOIN media_type AS mt ON mt.media_type_id=t.media_type_id\nWHERE il.invoice_id=4; ", "_____no_output_____" ] ], [ [ "### Joining more than Theee Tables \nIf we want also the artist's name for each track. We will have to join more tables. 
If we examine the schema, we'll notice that the data for the artist's name is not directly connected to the track table.\nArtist name is in `artist` table while `track` table is connected to `album` table which is further connected to artist table. So, if we want the artist's name in results, we will first have to join `album` table to existing join and then join `artist` table to the ensuing join.\n\nWrite a query which \n- Adds a column containing the artists name to previous query. \n- The column should be called artist_name\n- The column should be placed between track_name and track_type", "_____no_output_____" ] ], [ [ "%%sql\nPRAGMA TABLE_INFO(album)", "_____no_output_____" ], [ "%%sql\nPRAGMA TABLE_INFO(artist)", "_____no_output_____" ], [ "%%sql\nSELECT il.track_id, t.name AS track_name, ar.name AS artist_name, mt.name AS track_type, il.unit_price, il.quantity\n FROM invoice_line AS il\n INNER JOIN track AS t ON t.track_id=il.track_id\n INNER JOIN media_type AS mt ON mt.media_type_id=t.media_type_id\n INNER JOIN album AS al ON al.album_id=t.album_id\n INNER JOIN artist AS ar ON ar.artist_id=al.artist_id\nWHERE il.invoice_id=4; ", "_____no_output_____" ] ], [ [ "### Combining Multiple Joins with Subqueries\nBecause the invoice_line table contains each individual song from each customer purchase, it contains information about which songs are purchased the most.\n\nWe can use the table to find out which artists are purchased the most. Specifically, what we want to produce is a query that lists the top 10 artists, calculated by the number of times a track by that artist has been purchased.", "_____no_output_____" ] ], [ [ "%%sql\nSELECT *\n FROM invoice_line\nWHERE invoice_id=4; ", "_____no_output_____" ] ], [ [ "Writing above query will only need the artist name lined up with track_id in `invoice_line` table.But that will require joining `invoice_line`, `track`, `album` and `artist`tables. We can join `album\", 'artist` and `track` tables in a subquery to get `track_id` lined up with `artist_name` and then join the subquery wit `invoice_line` table to find counts of each artist's track purchases.\n\nWrite subquery first and then join with main query:", "_____no_output_____" ] ], [ [ "%%sql\nSELECT t.track_id, ar.name AS artist_name\n FROM track AS t\nINNER JOIN album AS al ON al.album_id = t.album_id\nINNER JOIN artist AS ar ON ar.artist_id = al.artist_id\nORDER BY 1 LIMIT 5;", "_____no_output_____" ], [ "%%sql\nSELECT\n ta.artist_name AS artist,\n COUNT(*) AS tracks_purchased\nFROM invoice_line AS il\nINNER JOIN (\n SELECT\n t.track_id,\n ar.name AS artist_name\n FROM track AS t\n INNER JOIN album AS al ON al.album_id = t.album_id\n INNER JOIN artist AS ar ON ar.artist_id = al.artist_id\n ) AS ta\n ON ta.track_id = il.track_id\nGROUP BY 1\nORDER BY 2 DESC LIMIT 10;", "_____no_output_____" ] ], [ [ "Write a query:\n- that returns the top 5 albums, as calculated by the number of times a track from that album has been purchased. Query should be sorted from most tracks purchased to least tracks purchased and return the following columns, in order:\n - `album`, the title of the album\n - `artist`, the artist who produced the album\n - `tracks_purchased`, the total number of tracks purchased from that album", "_____no_output_____" ], [ "Writing above query will need the album and artist name lined up with track_id in `invoice_line` table.But that will require joining `invoice_line`, `track`, `album` and `artist`tables. 
We can join `album\", 'artist` and `track` tables in a subquery to get `track_id` lined up with `album` and `artist_name`, and then join the subquery wit `invoice_line` table to find counts of each artist's track purchases.\n\nWrite subquery first and then join with main query:", "_____no_output_____" ] ], [ [ "%%sql\nPRAGMA TABLE_INFO(album)", "_____no_output_____" ], [ "%%sql\nSELECT t.track_id, al.title AS album, ar.name AS artist\n FROM track AS t\nINNER JOIN album AS al ON al.album_id=t.album_id\nINNER JOIN artist AS ar ON ar.artist_id=al.artist_id\nORDER BY 1\nLIMIT 5;", "_____no_output_____" ], [ "%%sql\nSELECT ta.album, ta.artist, COUNT(il.track_id) AS tracks_purchased\n FROM invoice_line AS il\nINNER JOIN (\n SELECT t.track_id, al.title AS album, ar.name AS artist\n FROM track AS t\n INNER JOIN album AS al ON al.album_id=t.album_id\n INNER JOIN artist AS ar ON ar.artist_id=al.artist_id\n ) AS ta ON ta.track_id=il.track_id\nGROUP BY 1,2\nORDER BY 3 DESC\nLIMIT 5;", "_____no_output_____" ] ], [ [ "### Recursive joins\n\nIn some cases, there can be a relation between two columns within the same table. We can see that in our employee table, where there is a reports_to column that has a relation to the employee_id column within the same table.\n\nThe reports_to column identifies each employee's supervisor. If we wanted to create a report of each employee and their supervisor's name, we would need some way of joining a table to itself. Doing this is called a recursive join.\n```\nSELECT\n e1.employee_id,\n e2.employee_id supervisor_id\nFROM employee e1\nINNER JOIN employee e2 on e1.reports_to = e2.employee_id\nLIMIT 4;\n```\nTo make our report more meaningful, we'll need to add some extra columns. One thing that would be nice is being able to combine the first_name and last_name columns into a single column. We can do that using the concatenate operator: ||\n\n```\nSELECT\n album_id,\n artist_id,\n \"album id is\" || album_id col_1,\n \"artist id is\" || artist_id col2,\n album_id || artist_id col3\nFROM album LIMIT 3;\n```\nWrite a query:\n- that returns information about each employee and their supervisor.\n- The report should include employees even if they do not report to another employee.\n- The report should be sorted alphabetically by the employee_name column.\n- query should return the following columns, in order:\n `employee_name` - containing the first_name and last_name columns separated by a space, eg Luke Skywalker\n `employee_title` - the title of that employee\n `supervisor_name` - the first and last name of the person the employee reports to, in the same format as employee_name\n`supervisor_title` - the title of the person the employee reports to", "_____no_output_____" ] ], [ [ "%%sql\nPRAGMA TABLE_INFO(employee)", "_____no_output_____" ], [ "%%sql\nSELECT \n e1.first_name || \" \" || e1.last_name AS employee_name,\n e1.title AS employee_title,\n e2.first_name || \" \" || e2.last_name AS supervisor_name,\n e2.title AS supervisor_title\n FROM employee AS e1\nLEFt JOIN employee AS e2 ON e1.reports_to=e2.employee_id\nORDER BY 1;\n ", "_____no_output_____" ] ], [ [ "### Pattern Matchin using `LIKE` Operator\nWe can use the LIKE operator to find pattern matches. 
The syntax for LIKE is as follows:\n```\nWHERE [column_name] LIKE \"[pattern]\"\n```\nPattern should be the substring we want to match for, and one or more % characters:\n`%Jen` - will match Jen at the end of a string, eg Sarah-Jen\n`Jen%`- will match Jen at the start of a string, eg Jenny\n`%Jen%` - will match Jen anywhere within the string, eg Kris Jenner\n\n```\nSELECT\n first_name,\n last_name,\n phone\nFROM customer\nWHERE first_name LIKE \"%Jen%\";\n```\nKeep in mind that in SQLite `LIKE` is case insensitive, so `LIKE \"%jen%\"` will match Jen and JEN and JeN. Other flavors of SQL may be case sensitive, so we may need to use the `LOWER()` function to get a case insensitive match.\n\nQuery\nYou have just returned from lunch to see a phone message on your desk: \"Call Belle.\"\n\nWrite a query \n- that finds the contact details of a customer with a first_name containing Belle from the database. Your query should include the following columns, in order:\n- first_name\n- last_name\n- phone", "_____no_output_____" ] ], [ [ "%%sql\nPRAGMA TABLE_INFO(customer)", "_____no_output_____" ], [ "%%sql\nSELECT \n c.first_name,\n c.last_name,\n c.phone\n FROM customer AS c \nWHERE first_name LIKE \"%Belle%\"; ", "_____no_output_____" ] ], [ [ "### Generating Columns with the CASE statement\nSometimes, we want to add some sort of categorization to our query. Let's say we wanted to generate a report summarizing each customer's purchases. It might be nice to create a column that puts each customer into a category:\n`small spender` - If the customer's total purchases are less than $40.\n`big spender` - If the customer's total purchases are greater than $100.\n`regular` - If the customer's total purchases are between $40 and $100 (inclusive).\n\nTo achieve this, we'll need to use the case statement. The case statement acts like a series of if/then options for a new column. The syntax for CASE is:\n```\nCASE\n WHEN [comparison_1] THEN [value_1]\n WHEN [comparison_2] THEN [value_2]\n ELSE [value_3]\n END\n AS [new_column_name]\n ```\nThere can be 1 or more `WHEN` lines, and the `ELSE` line is optional— without it, rows that don't match any `WHEN` will be assigned a null value. \nWe can't use `aliases` in the `WHEN` line, so when writing a `CASE` statement that deals with aggregate functions, we will need to include the aggregate function in each `WHEN` line.\n\nWrite a query:\n- that summarizes the purchases of each customer. \nQuery should include the following columns, in order:\n- `customer_name` - containing the first_name and last_name columns separated by a space, eg Luke Skywalker\n- `number_of_purchases`, counting the number of purchases made by each customer.\n- `total_spent` - the total sum of money spent by each customer.\n- `customer_category` - a column that categorizes the customer based on their total purchases. 
The column should contain the following values:\n - `small spender` - If the customer's total purchases are less than $40.\n - `big spender` - If the customer's total purchases are greater than $100.\n- `regular` - If the customer's total purchases are between $40 and $100 (inclusive).\nOrder your results by the `customer_name` column.\n \n \nFor above query, we need customer info (`customer_name` and `customer_id`) from `customer` table, invoice info (`invoice_id`, `total` and `customer_id`) from `invoice` table, and customer_category from `CASE` statement.", "_____no_output_____" ] ], [ [ "%%sql\nSELECT * \nFROM sqlite_master \nWHERE type='table';\n ", "_____no_output_____" ], [ "%%sql\nSELECT\n c.first_name || \" \" || c.last_name AS customer_name,\n COUNT(i.invoice_id) AS number_of_purchases,\n ROuND(SUM(i.total),2) AS total_spent,\n CASE\n WHEN sum(i.total) < 40 THEN 'small spender'\n WHEN sum(i.total) > 100 THEN 'big spender'\n ELSE 'regular'\n END\n AS customer_category\nFROM invoice i\nINNER JOIN customer AS c ON i.customer_id = c.customer_id\nGROUP BY 1 \nORDER BY 1;\n ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ecb2d36ac0f82c7e449f6939460a67775ef82616
164,226
ipynb
Jupyter Notebook
IRIS Classification - EDA.ipynb
StormBorn18/Iris-classification
89799bc7d6c4fcc493f6ff37741ef2696b9c0967
[ "Apache-2.0" ]
null
null
null
IRIS Classification - EDA.ipynb
StormBorn18/Iris-classification
89799bc7d6c4fcc493f6ff37741ef2696b9c0967
[ "Apache-2.0" ]
null
null
null
IRIS Classification - EDA.ipynb
StormBorn18/Iris-classification
89799bc7d6c4fcc493f6ff37741ef2696b9c0967
[ "Apache-2.0" ]
null
null
null
249.583587
108,056
0.910483
[ [ [ "# Aim of the analysis is to classify Iris flower species \nWe analyse the different types of variables through univariate and bivariate analysis and gain any inisghts through Exploratory data analysis of variables\n\n ", "_____no_output_____" ], [ "<img src=\"https://www.gardendesign.com/pictures/images/900x705Max/site_3/iris-border-garden-blue-flowers-shutterstock-com_12582.jpg\">\n", "_____no_output_____" ], [ "### Iris flower\n\nIrises are wonderful garden plants. The word Iris means rainbow. Irises come in many colors such as blue and purple, white and yellow, pink and orange, brown and red, and even black.\n\nThe genus Iris has about 200 species and is native to the North Temperate regions of the world. The habitat of irises also varies a lot. Some irises grow in deserts, some in swamps, some in the cold far north, and many in temperate climates. Bearded Iris and Siberian Iris are two of the most common types of irises grown.\n\n### Iris Data Set \n\nEdgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. The data set consists of 150 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters. Based on the combination of these four features, Fisher developed a linear discriminant model to distinguish the species from each other.\n\n### Attribute Information:\n\n1. sepal length in cm \n2. sepal width in cm \n3. petal length in cm \n4. petal width in cm \n5. class: \n-- Iris Setosa \n-- Iris Versicolour \n-- Iris Virginica", "_____no_output_____" ] ], [ [ "# import all essential EDA packages in Python\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mp\nimport seaborn as sb\nos.getcwd()\n", "_____no_output_____" ], [ "iris = pd.read_csv(\"iris.csv\") #loading data set\n\niris = iris[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm','Species']]", "_____no_output_____" ], [ "iris.head(5) # Observing the top five observations in each variable", "_____no_output_____" ], [ "iris.info() # Analysing the types of variables", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150 entries, 0 to 149\nData columns (total 5 columns):\nSepalLengthCm 150 non-null float64\nSepalWidthCm 150 non-null float64\nPetalLengthCm 150 non-null float64\nPetalWidthCm 150 non-null float64\nSpecies 150 non-null object\ndtypes: float64(4), object(1)\nmemory usage: 5.9+ KB\n" ], [ "iris.Species.value_counts() # Lets count the total number of classes or levels in the label or dependent variable i.e. Species\n", "_____no_output_____" ] ], [ [ "### The various classes (i.e. Species of Iris flowers) of observations in the outcome variable are in equal number. 
So this data set is well balanced", "_____no_output_____" ] ], [ [ "iris.isnull().sum()", "_____no_output_____" ] ], [ [ "### There are no missing values present, so we can now proceed with univariate analysis ", "_____no_output_____" ] ], [ [ "iris.describe() #Performing basic univariate analysis", "_____no_output_____" ], [ "sb.boxplot(x=\"SepalLengthCm\", y=\"Species\",data = iris)", "_____no_output_____" ], [ "sb.boxplot(x=\"SepalWidthCm\", y=\"Species\",data = iris)", "_____no_output_____" ], [ "sb.boxplot(x=\"PetalLengthCm\", y=\"Species\",data = iris)", "_____no_output_____" ], [ "sb.boxplot(x=\"PetalWidthCm\", y=\"Species\",data = iris)", "_____no_output_____" ], [ "sb.boxplot(x=\"PetalLengthCm\", y=\"Species\",data = iris)", "_____no_output_____" ] ], [ [ "\n## Insights from Univariate Analysis\n\n1. We can infer from univariate analysis that median sepeal length and sepal width are larger than petal length and petal width. We can now examine if the Sepal length and Sepal width, similarly petal length and petal width exhibit certain relationships through which we can distinguish the three Iris flower species\n\n2. These are median values of sepal length and width, petal length and width\n\n\n Setosa Sepal Length & Sepal Width : ~ 5.0 cm & ~ 3.4 cm\n\n Versicolor Sepal Length & Sepal Width : ~ 5.8 cm & ~ 2.7 cm\n\n Virginica Sepal Length & Sepal Width : ~ 6.5 cm & ~ 3.0 cm\n\n\n\n Setosa Petal Length & Petal Width : ~ 1.5 cm & ~ 0.25 cm\n\n Versicolor Petal Length & Petal Width : ~ 4.5 cm & ~ 1.25 cm\n\n Virginica Petal Length & Petal Width : ~ 5.5 cm & ~ 2.0 cm\n\n\n> Setosa specie has the lowest median petal length. Its petal length is 6 times its petal width\n\n> Setosa Specie median petal length is almost three times less than its sepal length\n\n> Virginica Specie has the longest median Petal & Sepal Lengths\n\n> Versicolor petal length is 3.5 times its petal width\n\n> The data spread or standard deviation for Setosa petal width / petal length is less and does not overlap with that of Virginica or Versicolor data spread, which allow us to use the petal dimensions to distinguish Setosa from the other two species\n\n> Also noticeable is Setose exhibits a higher standard deviation for sepal width data spread\n\n> But the data distributions of Sepal and Petal morphology measurements of the other two species i.e. Virginica and Versicolor overlap and, in such case we can try using median measure to distinguish between Iris flower species\n", "_____no_output_____" ], [ "# Bivariate analysis to study relationships between variables\n\nHere we need to understand the crucial relationships between petal length and petal width & sepal length and sepal width variables. Once we define the relationships we can then segment data points which allow us to distinguish different Iris species", "_____no_output_____" ] ], [ [ "sb.pairplot(iris, hue=\"Species\") ", "_____no_output_____" ] ], [ [ "### Insights from bivariate analysis\n\nWe can observe clear demarkation of data points (refer to the species colors legend in the pair plot) of all three Iris flower species in a scatter plot. Some of the subplots of our interest are\n\n > Petal length vs Petal width \n > Sepal width vs Petal Length \n > Petal Length vs Sepal Length \n\nThese plots offered a clear distinction of data points belonging to different flower species. These distinctions allow us to develop Machine Learning models which allow us to predict Iris flower species using Petal length, Petal Width, Sepal length, Sepal width values. 
\n\nEarlier When we analysed median values of width and length of sepals and petals of these three different species we had observed differences in morphologies between species and these factors form basis for machine learning based classification\n\nTo classify IRIS FLOWER SPECIES, we can develop a IF ELSE process where\n\n > IF Petal Length < 2cm \n > THEN SETOSA\n > ELSE\n > IF Sepal Length >= 6cm & Petal Length >= 5.5 cm \n > THEN VIRGINICA\n > ELSE \n > IF Sepal width = 1.25 cm\n > THEN VERSICOLOR\n \n \n\n\n\n\n\n ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ecb2d94652f1dbbdf64fa48ca7732ba2bb461731
7,782
ipynb
Jupyter Notebook
src/notebook.ipynb
lauraverhoeven/programmeren-met-python
0e27ca6dedc606788e90e37aecade521d390c325
[ "MIT" ]
null
null
null
src/notebook.ipynb
lauraverhoeven/programmeren-met-python
0e27ca6dedc606788e90e37aecade521d390c325
[ "MIT" ]
null
null
null
src/notebook.ipynb
lauraverhoeven/programmeren-met-python
0e27ca6dedc606788e90e37aecade521d390c325
[ "MIT" ]
null
null
null
42.293478
230
0.519661
[ [ [ "# main program file\n\n# import the necessary modules\nimport demo\nimport data_handling as dh\nimport wind_power as wp\nimport change_emission as ce\nimport RQ3_Stoves as st\nimport Energy_efficiency as ee\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# path to file\ncbsdata = \"../data/raw/raw_data_renewable_energy.csv\"\n\n# load CBS data set\ntry:\n dataset = dh.load_data(cbsdata)\nexcept Exception as err:\n print(\"Something went wrong...\")\n print(err)\n\n# create a loop for the entire program, which runs until option 5 is selected.\nwhile True:\n # print options\n choice = input(\"\"\"What do you want to analyse? \n 0\\tShow a demo function of the program.\n 1\\tHow has the total avoided use of fossil energy due to wind energy changed over the period 2010-2018?\\n\\tWhat part of this avoided use of fossil energy can be assigned to wind power on land and wind power at sea?\n 2\\tHow much has the avoidance of CO2 emission in the Netherlands changed in the period 1990 to 2018? \n 3\\tWhat is the difference in usage of wood in a household regarding a freestanding wooden stove and a freestanding pallet stove? \n 4\\tWhich energy source was the most efficient in 2018 in terms of producing energy with the highest prevention of CO2 emission? \n 5\\tExit the program.\\n\n \"\"\")\n\n# evaluate user choice and proceed accordingly\n # demo chart\n if choice == \"0\":\n print(\"Create demo bar chart: total avoided use of fossil energy due to wind power over the period 2010-2018.\")\n\n # call function to make bar chart\n barplot = demo.wind_energy_plot(dataset)\n\n # make python show the plot\n plt.show()\n\n # research question 1\n elif choice == \"1\":\n while True:\n user_choice = input(\"\"\"\\nWhat do you want to do?\n 1\\tMake a bar graph of the total 'absolutely' avoided use of fossil energy due to the total wind power.\n 2\\tMake a bar graph of the total 'relatively' avoided use of fossil energy due to the total wind power.\n 3\\tMake a bar graph of the total 'absolutely' avoided use of fossil energy due to the wind power, with subdivisions on land and at sea.\n 4\\tMake a bar graph of the total 'relatively' avoided use of fossil energy due to the wind power, with subdivisions on land and at sea.\n 5\\tExit the menu.\\n\n \"\"\")\n # make a bar graph absolute\n if user_choice == \"1\":\n dataframe = wp.get_subset_dataframe_total(dataset)\n graph = wp.bar_graph_total(dataframe)\n # saves the graph to a png file inside of results\n plt.savefig(\"../results/output/bargraph_1_1.png\")\n\n # shows the graph\n plt.show()\n\n # make a bar graph relative\n elif user_choice == \"2\":\n dataframe = wp.get_subset_dataframe_total(dataset, 1)\n graph = wp.bar_graph_total(dataframe, 1)\n # saves the graph to a png file inside of results\n plt.savefig(\"../results/output/bargraph_1_2.png\")\n\n # shows the graph\n plt.show()\n\n # make a bar graph absolute divided\n elif user_choice == \"3\":\n dataframe = wp.get_subset_dataframe_parts(dataset)\n graph = wp.wind_energy_plot(dataframe)\n # saves the graph to a png file inside of results\n plt.savefig(\"../results/output/bargraph_1_3.png\")\n\n # shows the graph\n plt.show()\n\n # make a bar graph relative divided\n elif user_choice == \"4\":\n dataframe = wp.get_subset_dataframe_parts(dataset, 1)\n graph = wp.wind_energy_plot(dataframe, 1)\n # saves the graph to a png file inside of results\n plt.savefig(\"../results/output/bargraph_1_4.png\")\n\n # shows the graph\n plt.show()\n\n # exit the program\n elif user_choice == \"5\":\n 
print(\"Thank you.\")\n break\n\n # research question 2\n elif choice == \"2\":\n\n # creates a line graph of how much the emission has changed in the Netherlands\n line_graph = ce.emission_changed(dataset)\n # saves the graph to a png file inside of results\n plt.savefig(\"../results/output/linegraph_2.png\")\n\n # show the line graph\n plt.show()\n\n # research question 3\n elif choice == \"3\":\n\n # creates a bar graph of the biomassa\n graph = st.biomassa_wood_plot(dataset)\n # saves the bar graph\n plt.savefig(\"../results/output/bargraph_3.png\")\n\n # show the bar graph\n plt.show()\n\n # research question 4\n elif choice == \"4\":\n echoice = input(\n \"Do you want to compare efficiency of sources (1) or see how the efficiency of energybsources have developed over the years(2)? \")\n if echoice == \"1\":\n # the efficiency function is connected with the barchart function\n efficient = ee.efficiency(dataset)\n barset = ee.barchart(dataset, efficient)\n # show bargraph\n plt.show()\n\n if echoice == \"2\":\n # the efficiency function is connected with the barchart function\n efficient = ee.efficiency(dataset)\n lineset = ee.linechart(dataset, efficient)\n # some additional parts of the graph are introduced\n plt.title(\"Efficiency over the years\")\n plt.xlabel(\"Years\")\n plt.ylabel(\"Efficiency\")\n # show linegraph\n plt.show()\n\n elif choice == \"5\":\n print(\"Thank you for your participation. Bye!\")\n break\n else:\n print(\"Choice was not recognized, or invalid input. Please try again\")\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
ecb2fcab6d548d7e93250bc4e3275e6dc9714116
34,836
ipynb
Jupyter Notebook
Chapter15/Activity15.01/Activity_15_1_Benchmark_Model_using_Logistic_Regression.ipynb
khieunguyen/The-Data-Science-Workshop
52cab305e6e2e8bb6820cf488ddb6e16b5567ac9
[ "MIT" ]
4
2019-06-24T11:40:40.000Z
2019-08-17T05:47:20.000Z
Chapter15/Activity15.01/Activity_15_1_Benchmark_Model_using_Logistic_Regression.ipynb
khieunguyen/The-Data-Science-Workshop
52cab305e6e2e8bb6820cf488ddb6e16b5567ac9
[ "MIT" ]
1
2022-03-12T01:03:16.000Z
2022-03-12T01:03:16.000Z
Chapter15/Activity15.01/Activity_15_1_Benchmark_Model_using_Logistic_Regression.ipynb
khieunguyen/The-Data-Science-Workshop
52cab305e6e2e8bb6820cf488ddb6e16b5567ac9
[ "MIT" ]
6
2019-10-18T00:42:08.000Z
2022-03-22T04:04:06.000Z
34.321182
483
0.288839
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "#Loading data from the google drive to colab notebook\n\n# Please change the filename as per the location where the file is stored\n\nfilename = '/content/drive/My Drive/Packt_Colab/crx.data'\n\n", "_____no_output_____" ], [ "# Loading the data using pandas\n\ncredData = pd.read_csv(filename,sep=\",\",header = None,na_values = \"?\")\ncredData.head()", "_____no_output_____" ], [ "# Changing the Classes to 1 & 0\ncredData.loc[credData[15] == '+' , 15] = 1\ncredData.loc[credData[15] == '-' , 15] = 0\ncredData.head()", "_____no_output_____" ], [ "# Dropping all the rows with na values\nnewcred = credData.dropna(axis = 0)\nnewcred.shape", "_____no_output_____" ], [ "# Seperating the categorical variables to make dummy variables\n\ncredCat = pd.get_dummies(newcred[[0,3,4,5,6,8,9,11,12]])\n", "_____no_output_____" ], [ "# Seperating the numerical variables\n\ncredNum = newcred[[1,2,7,10,13,14]]\n", "_____no_output_____" ], [ "# Making the X variable which is a concatenation of categorical and numerical data\n\nX = pd.concat([credCat,credNum],axis = 1)\nprint(X.shape)\n\n# Seperating the label as y variable\ny = newcred[15]\nprint(y.shape)", "(653, 46)\n(653,)\n" ], [ "# Normalising the data sets\n# Import library function\nfrom sklearn import preprocessing\n# Creating the scaling function\nminmaxScaler = preprocessing.MinMaxScaler()\n# Transforming with the scaler function\nX_tran = pd.DataFrame(minmaxScaler.fit_transform(X))\n# Printing the output\nX_tran.head()", "_____no_output_____" ], [ "# Splitting the data set to train and test sets\nfrom sklearn.model_selection import train_test_split\n\n# Splitting the data into train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X_tran, y, test_size=0.3, random_state=123)\n\n", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\n# Defining the LogisticRegression function\nbenchmarkModel = LogisticRegression()\n# Fitting the model\nbenchmarkModel.fit(X_train, y_train)", "/usr/local/lib/python3.6/dist-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\n" ], [ "# Prediction and accuracy metrics\npred = benchmarkModel.predict(X_test)\nprint('Accuracy of Logistic regression model prediction on test set: {:.2f}'.format(benchmarkModel.score(X_test, y_test)))", "Accuracy of Logistic regression model prediction on test set: 0.89\n" ], [ "from sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n# Confusion Matrix for the model\nprint(confusion_matrix(y_test, pred))\n# Classification report for the model\nprint(classification_report(y_test, pred))", "[[93 14]\n [ 8 81]]\n precision recall f1-score support\n\n 0 0.92 0.87 0.89 107\n 1 0.85 0.91 0.88 89\n\n accuracy 0.89 196\n macro avg 0.89 0.89 0.89 196\nweighted avg 0.89 0.89 0.89 196\n\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb2fd7d979be869c3c3094cc5556ea122ee221b
400,330
ipynb
Jupyter Notebook
notebooks/Wood plate segmentation sample.ipynb
daevem/tensorflow-research
bee16375186f3b16800986f56dcebfb43d79f293
[ "MIT" ]
null
null
null
notebooks/Wood plate segmentation sample.ipynb
daevem/tensorflow-research
bee16375186f3b16800986f56dcebfb43d79f293
[ "MIT" ]
null
null
null
notebooks/Wood plate segmentation sample.ipynb
daevem/tensorflow-research
bee16375186f3b16800986f56dcebfb43d79f293
[ "MIT" ]
null
null
null
1,202.192192
328,336
0.952909
[ [ [ "## Import dependencies", "_____no_output_____" ] ], [ [ "import sys\nimport pathlib\ncurrent_path = pathlib.Path().absolute()\nroot_path = \"{0}/..\".format(current_path)\nsys.path.append(\"{0}/src\".format(root_path))\n\nimport os\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom utils import Config, ImageUtil, create_model", "_____no_output_____" ], [ "seed_value = 33\nos.environ[\"PYTHONHASHSEED\"] = str(seed_value)\nrandom.seed(seed_value)\nnp.random.seed(seed_value)\ntf.random.set_seed(seed_value)\n\nphysical_devices = tf.config.list_physical_devices(\"GPU\")\ntry:\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\nexcept:\n # Invalid device or cannot modify virtual devices once initialized.\n pass", "_____no_output_____" ] ], [ [ "## Prepare configuration (training and test)", "_____no_output_____" ] ], [ [ "image_util = ImageUtil()\nconfig_path = \"{0}/config/segmentation_wood_plate.json\".format(root_path)\nconfig = Config(config_path)\n\n# Select the predefined model\nconfig.model = \"vanilla_unet\"\n\n# Set training configuration\nconfig.train.files_path = \"{0}/img/wood_plate/train\".format(root_path)\nconfig.train.mask_files_path = \"{0}/masks\".format(config.train.files_path)\nconfig.train.checkpoints_path = \"{0}/checkpoints/wood_plate\".format(root_path)\nconfig.train.learning_rate = 1e-5\nconfig.train.batch_size = 10\nconfig.train.epochs = 50\n\n# Set eval configuration (will be used for prediction)\nconfig.eval.files_path = \"{0}/img/wood_plate/eval\".format(root_path)\nconfig.eval.threshold = 0.1\n\n# We only have few images, so we will create some new augmented\nconfig.train.image_data_generator.loop_count = 10", "_____no_output_____" ] ], [ [ "## Create model", "_____no_output_____" ] ], [ [ "model_container = create_model(config)", "_____no_output_____" ] ], [ [ "## Train model", "_____no_output_____" ] ], [ [ "model_container.train()", "Train on 192 samples, validate on 48 samples\nEpoch 1/50\n192/192 [==============================] - 9s 47ms/sample - loss: 0.1137 - accuracy: 0.9838 - val_loss: 0.0711 - val_accuracy: 0.9911\nEpoch 2/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0798 - accuracy: 0.9910 - val_loss: 0.0580 - val_accuracy: 0.9913\nEpoch 3/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0636 - accuracy: 0.9922 - val_loss: 0.0512 - val_accuracy: 0.9913\nEpoch 4/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0545 - accuracy: 0.9925 - val_loss: 0.0477 - val_accuracy: 0.9913\nEpoch 5/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0490 - accuracy: 0.9927 - val_loss: 0.0456 - val_accuracy: 0.9913\nEpoch 6/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0455 - accuracy: 0.9927 - val_loss: 0.0444 - val_accuracy: 0.9913\nEpoch 7/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0434 - accuracy: 0.9928 - val_loss: 0.0438 - val_accuracy: 0.9913\nEpoch 8/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0420 - accuracy: 0.9928 - val_loss: 0.0434 - val_accuracy: 0.9913\nEpoch 9/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0408 - accuracy: 0.9928 - val_loss: 0.0432 - val_accuracy: 0.9913\nEpoch 10/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0400 - accuracy: 0.9929 - val_loss: 0.0430 - val_accuracy: 0.9913\nEpoch 11/50\n192/192 [==============================] - 3s 18ms/sample 
- loss: 0.0394 - accuracy: 0.9929 - val_loss: 0.0428 - val_accuracy: 0.9913\nEpoch 12/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0390 - accuracy: 0.9929 - val_loss: 0.0425 - val_accuracy: 0.9913\nEpoch 13/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0385 - accuracy: 0.9929 - val_loss: 0.0423 - val_accuracy: 0.9913\nEpoch 14/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0381 - accuracy: 0.9929 - val_loss: 0.0420 - val_accuracy: 0.9913\nEpoch 15/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0377 - accuracy: 0.9929 - val_loss: 0.0416 - val_accuracy: 0.9913\nEpoch 16/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0372 - accuracy: 0.9929 - val_loss: 0.0409 - val_accuracy: 0.9913\nEpoch 17/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0366 - accuracy: 0.9929 - val_loss: 0.0402 - val_accuracy: 0.9913\nEpoch 18/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0358 - accuracy: 0.9929 - val_loss: 0.0392 - val_accuracy: 0.9913\nEpoch 19/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0347 - accuracy: 0.9929 - val_loss: 0.0377 - val_accuracy: 0.9913\nEpoch 20/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0336 - accuracy: 0.9929 - val_loss: 0.0361 - val_accuracy: 0.9913\nEpoch 21/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0315 - accuracy: 0.9930 - val_loss: 0.0333 - val_accuracy: 0.9914\nEpoch 22/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0291 - accuracy: 0.9931 - val_loss: 0.0301 - val_accuracy: 0.9915\nEpoch 23/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0270 - accuracy: 0.9933 - val_loss: 0.0277 - val_accuracy: 0.9917\nEpoch 24/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0242 - accuracy: 0.9935 - val_loss: 0.0247 - val_accuracy: 0.9920\nEpoch 25/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0223 - accuracy: 0.9938 - val_loss: 0.0225 - val_accuracy: 0.9925\nEpoch 26/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0203 - accuracy: 0.9941 - val_loss: 0.0211 - val_accuracy: 0.9929\nEpoch 27/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0186 - accuracy: 0.9944 - val_loss: 0.0193 - val_accuracy: 0.9934\nEpoch 28/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0167 - accuracy: 0.9948 - val_loss: 0.0212 - val_accuracy: 0.9935\nEpoch 29/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0169 - accuracy: 0.9949 - val_loss: 0.0175 - val_accuracy: 0.9938\nEpoch 30/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0166 - accuracy: 0.9950 - val_loss: 0.0172 - val_accuracy: 0.9940\nEpoch 31/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0149 - accuracy: 0.9954 - val_loss: 0.0166 - val_accuracy: 0.9942\nEpoch 32/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0141 - accuracy: 0.9956 - val_loss: 0.0157 - val_accuracy: 0.9942\nEpoch 33/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0165 - accuracy: 0.9955 - val_loss: 0.0153 - val_accuracy: 0.9944\nEpoch 34/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0170 - accuracy: 0.9954 - val_loss: 0.0177 - val_accuracy: 0.9948\nEpoch 35/50\n192/192 [==============================] - 3s 18ms/sample - 
loss: 0.0137 - accuracy: 0.9960 - val_loss: 0.0176 - val_accuracy: 0.9950\nEpoch 36/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0122 - accuracy: 0.9963 - val_loss: 0.0149 - val_accuracy: 0.9952\nEpoch 37/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0123 - accuracy: 0.9963 - val_loss: 0.0162 - val_accuracy: 0.9955\nEpoch 38/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0114 - accuracy: 0.9964 - val_loss: 0.0155 - val_accuracy: 0.9957\nEpoch 39/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0112 - accuracy: 0.9965 - val_loss: 0.0158 - val_accuracy: 0.9958\nEpoch 40/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0110 - accuracy: 0.9968 - val_loss: 0.0146 - val_accuracy: 0.9959\nEpoch 41/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0108 - accuracy: 0.9967 - val_loss: 0.0152 - val_accuracy: 0.9960\nEpoch 42/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0104 - accuracy: 0.9968 - val_loss: 0.0158 - val_accuracy: 0.9961\nEpoch 43/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0103 - accuracy: 0.9967 - val_loss: 0.0151 - val_accuracy: 0.9962\nEpoch 44/50\n192/192 [==============================] - 4s 18ms/sample - loss: 0.0100 - accuracy: 0.9971 - val_loss: 0.0150 - val_accuracy: 0.9963\nEpoch 45/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0098 - accuracy: 0.9969 - val_loss: 0.0177 - val_accuracy: 0.9960\nEpoch 46/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0105 - accuracy: 0.9968 - val_loss: 0.0167 - val_accuracy: 0.9961\nEpoch 47/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0101 - accuracy: 0.9973 - val_loss: 0.0149 - val_accuracy: 0.9963\nEpoch 48/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0097 - accuracy: 0.9971 - val_loss: 0.0148 - val_accuracy: 0.9964\nEpoch 49/50\n192/192 [==============================] - 3s 18ms/sample - loss: 0.0086 - accuracy: 0.9974 - val_loss: 0.0165 - val_accuracy: 0.9962\nEpoch 50/50\n192/192 [==============================] - 3s 17ms/sample - loss: 0.0089 - accuracy: 0.9973 - val_loss: 0.0162 - val_accuracy: 0.9963\n" ] ], [ [ "## Show history", "_____no_output_____" ] ], [ [ "model_container.plot_history()", "_____no_output_____" ] ], [ [ "## Prepare test images", "_____no_output_____" ] ], [ [ "color_mode = image_util.cv2_grayscale\nif config.input_shape[2] == 3:\n color_mode = image_util.cv2_color\ntest_images = image_util.load_images(config.eval.files_path, color_mode)\ntmp_imgs = []\nfor img in test_images:\n res = image_util.resize_image(img, config.input_shape[1], config.input_shape[0])\n norm = image_util.normalize(res, config.input_shape)\n tmp_imgs.append(norm)\ntest_images = np.array(tmp_imgs, dtype=np.float32)", "_____no_output_____" ] ], [ [ "## Predict", "_____no_output_____" ] ], [ [ "model_container.predict(test_images)\nmodel_container.plot_predictions(test_images)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecb2fe6d503242bce952d3b0289b8e8953845e37
191,231
ipynb
Jupyter Notebook
notebooks/Colab DFS of multistep_GridSearch_v2.ipynb
harryli18/hybrid-rnn-models
9baae52985cf21635b5c2e75b785ee6c2eac85d4
[ "MIT" ]
1
2021-03-11T03:45:06.000Z
2021-03-11T03:45:06.000Z
notebooks/Colab DFS of multistep_GridSearch_v2.ipynb
harryli18/hybrid-rnn-models
9baae52985cf21635b5c2e75b785ee6c2eac85d4
[ "MIT" ]
null
null
null
notebooks/Colab DFS of multistep_GridSearch_v2.ipynb
harryli18/hybrid-rnn-models
9baae52985cf21635b5c2e75b785ee6c2eac85d4
[ "MIT" ]
null
null
null
50.310708
7,342
0.418201
[ [ [ "import plaidml.keras\nplaidml.keras.install_backend()\nimport os\nos.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"", "_____no_output_____" ], [ "# Importing useful libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport keras\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional, Conv1D, Flatten, MaxPooling1D\nfrom keras.optimizers import SGD\nimport math\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom keras import optimizers\n\nimport time ", "_____no_output_____" ], [ "# from google.colab import files\n# uploaded = files.upload()", "_____no_output_____" ], [ "df = pd.read_csv('../data/num_data.csv')", "_____no_output_____" ], [ "keras.backend.clear_session()\n", "_____no_output_____" ] ], [ [ "### Data Processing", "_____no_output_____" ] ], [ [ "# df = pd.read_csv('../data/num_data.csv')", "_____no_output_____" ], [ "POLLUTION = ['PM2.5', 'PM10', 'SO2', 'NO2', 'CO', 'O3']", "_____no_output_____" ], [ "WEATHER = ['PM2.5', 'TEMP', 'PRES', 'DEWP', 'RAIN', 'wd', 'WSPM']", "_____no_output_____" ], [ "dataset = df", "_____no_output_____" ], [ "dataset.shape", "_____no_output_____" ], [ "# Useful functions\ndef plot_predictions(test, predicted):\n plt.figure(figsize=(30, 15));\n\n plt.plot(test, color='red', alpha=0.5, label='Actual PM2.5 Concentration',)\n plt.plot(predicted, color='blue', alpha=0.5, label='Predicted PM2.5 Concentation')\n plt.title('PM2.5 Concentration Prediction')\n plt.xlabel('Time')\n plt.ylabel('PM2.5 Concentration')\n plt.legend()\n plt.show()\n \n\ndef return_rmse(test,predicted):\n rmse = math.sqrt(mean_squared_error(test, predicted))\n return rmse", "_____no_output_____" ], [ "data_size = dataset.shape[0]\ntrain_size=int(data_size * 0.6)\ntest_size = int(data_size * 0.2)\nvalid_size = data_size - train_size - test_size\n", "_____no_output_____" ], [ "training_set = dataset[:train_size].iloc[:,4:16].values\nvalid_set = dataset[train_size:train_size+valid_size].iloc[:,4:16].values\ntest_set = dataset[data_size-test_size:].iloc[:,4:16].values", "_____no_output_____" ], [ "y = dataset.iloc[:,0].values\ny = y.reshape(-1,1)\nn_feature = training_set.shape[1]\ny.shape", "_____no_output_____" ], [ "# Scaling the dataset\nsc = MinMaxScaler(feature_range=(0,1))\ntraining_set_scaled = sc.fit_transform(training_set)\nvalid_set_scaled = sc.fit_transform(valid_set)\ntest_set_scaled = sc.fit_transform(test_set)\n\nsc_y = MinMaxScaler(feature_range=(0,1))\ny_scaled = sc_y.fit_transform(y)", "_____no_output_____" ], [ "# split a multivariate sequence into samples\ndef split_sequences(sequences, n_steps_in, n_steps_out):\n X_, y_ = list(), list()\n for i in range(len(sequences)):\n # find the end of this pattern\n end_ix = i + n_steps_in\n out_end_ix = end_ix + n_steps_out-1\n # check if we are beyond the dataset\n if out_end_ix > len(sequences):\n break\n # gather input and output parts of the pattern\n seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix-1:out_end_ix, 0]\n X_.append(seq_x)\n y_.append(seq_y)\n return np.array(X_), np.array(y_)", "_____no_output_____" ], [ "n_steps_in = 12\nn_steps_out = 12\nX_train, y_train = split_sequences(training_set_scaled, n_steps_in, n_steps_out)\nX_valid, y_valid = split_sequences(valid_set_scaled, n_steps_in, n_steps_out)\nX_test, y_test = split_sequences(test_set_scaled, n_steps_in, n_steps_out)", "_____no_output_____" ] ], [ [ 
"## Grid Search Control \n", "_____no_output_____" ] ], [ [ "n_activation = ['tanh', 'sigmoid', 'relu']\nact = n_activation[0]\n\nn_learn_rate = [0.01, 0.1, 0.2]\nlr = n_learn_rate[0]\n\nn_optimizers = [optimizers.Adam(lr=lr), optimizers.RMSprop(lr=lr), optimizers.SGD(lr=lr)]\nopt = n_optimizers[0]\n\nn_epoches = [50]\nepoch = n_epoches[0]\n\nn_batch_size = [32, 256, 1024]\nbatch = n_batch_size[-1]\n\nn_of_neurons = [10, 50, 200]\nneuron = n_of_neurons[1]", "INFO:plaidml:Opening device \"llvm_cpu.0\"\n" ], [ "rmse_df = pd.DataFrame(columns=['Model', 'train_rmse', 'valid_rmse', 'test_rmse', 'train_time', 'epoch', \n 'batch', 'neuron'])", "_____no_output_____" ], [ "DFS = Sequential()\n", "_____no_output_____" ], [ "for act in n_activation:\n for lr in n_learn_rate:\n n_optimizers = [optimizers.Adam(lr=lr), optimizers.RMSprop(lr=lr), optimizers.SGD(lr=lr)]\n for opt in n_optimizers: \n DFS = Sequential()\n DFS.add(Conv1D(filters=64, kernel_size=6, activation='tanh', input_shape=(X_train.shape[1],n_feature)))\n DFS.add(MaxPooling1D(pool_size=4))\n DFS.add(Dropout(0.2)) \n DFS.add(LSTM(units=neuron, return_sequences=False, input_shape=(X_train.shape[1],n_feature), activation=act))\n DFS.add(Dropout(0.190 + 0.0025 * n_steps_in))\n DFS.add(Dense(units=n_steps_out))\n DFS.compile(optimizer=opt,loss='mean_squared_error')\n\n \n regressor = DFS\n model = 'DFS'\n \n print('training start for', model) \n start = time.process_time()\n regressor.fit(X_train,y_train,epochs=epoch,batch_size=batch)\n train_time = round(time.process_time() - start, 2)\n\n print('results for training set')\n y_train_pred = regressor.predict(X_train)\n train_rmse = return_rmse(y_train,y_train_pred)\n\n print('results for valid set')\n y_valid_pred = regressor.predict(X_valid)\n valid_rmse = return_rmse(y_valid,y_valid_pred) \n \n \n print('results for test set')\n y_test_pred = regressor.predict(X_test)\n test_rmse = return_rmse(y_test,y_test_pred)\n \n one_df = pd.DataFrame([[model, train_rmse, valid_rmse, test_rmse, train_time, epoch, batch, neuron]],\n columns=['Model', 'train_rmse', 'valid_rmse', 'test_rmse', 'train_time', 'epoch', \n 'batch', 'neuron'])\n rmse_df = pd.concat([rmse_df, one_df])\n\n# save the rmse results \nrmse_df.to_csv('../dfs_grid_search_v2.csv')\n ", "training start for DFS\nEpoch 1/50\n252438/252438 [==============================] - 22s 89us/step - loss: 0.0039\nEpoch 2/50\n252438/252438 [==============================] - 18s 73us/step - loss: 0.0034\nEpoch 3/50\n252438/252438 [==============================] - 24s 93us/step - loss: 0.0034\nEpoch 4/50\n252438/252438 [==============================] - 22s 88us/step - loss: 0.0033\nEpoch 5/50\n252438/252438 [==============================] - 22s 86us/step - loss: 0.0033\nEpoch 6/50\n252438/252438 [==============================] - 22s 86us/step - loss: 0.0032\nEpoch 7/50\n252438/252438 [==============================] - 22s 85us/step - loss: 0.0032\nEpoch 8/50\n252438/252438 [==============================] - 22s 86us/step - loss: 0.0032\nEpoch 9/50\n252438/252438 [==============================] - 22s 87us/step - loss: 0.0032\nEpoch 10/50\n252438/252438 [==============================] - 22s 86us/step - loss: 0.0031\nEpoch 11/50\n252438/252438 [==============================] - 22s 89us/step - loss: 0.0031\nEpoch 12/50\n252438/252438 [==============================] - 22s 88us/step - loss: 0.0031\nEpoch 13/50\n252438/252438 [==============================] - 22s 89us/step - loss: 0.0031\nEpoch 14/50\n252438/252438 [==============================] - 
22s 87us/step - loss: 0.0031\n[... per-epoch Keras progress bars trimmed: run 1 converges from loss 0.0039 to 0.0029 by epoch 50 (~22-25 s/epoch); run 2 from 0.0057 to 0.0033; run 3 from 0.0112 to 0.0061; run 4 opens at 0.0241 and fluctuates between roughly 0.0056 and 0.0076, with the captured log cut off at Epoch 27/50. Each completed run prints 'results for training set', 'results for valid set' and 'results for test set' before the next 'training start for DFS'. ...]\n" ], [ "rmse_df", "_____no_output_____" ] ] ]
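The grid-search loop above reports errors through a helper called return_rmse that is never defined in the cells captured here. The sketch below is a plausible reconstruction, not the notebook's own code: the name and call signature are taken from the loop, while the body (flattening the multi-step forecasts and printing the score) is an assumption.

import numpy as np
from sklearn.metrics import mean_squared_error

def return_rmse(y_true, y_pred):
    # Hypothetical reconstruction of the undisplayed helper: RMSE over all
    # forecast steps, printed and returned for logging into rmse_df.
    rmse = np.sqrt(mean_squared_error(np.asarray(y_true).reshape(-1),
                                      np.asarray(y_pred).reshape(-1)))
    print('rmse: {:.4f}'.format(rmse))
    return round(rmse, 4)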
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ecb300bb0e6ec7f7c41d06a7160fa1482b954136
273,530
ipynb
Jupyter Notebook
Keras-DeepRecommender-Clothing-Shoes-Jewelry/2_Modeling/*dense_2_Concatenate_10_embeddings_10_epochs.ipynb
zirubak/dse260-CapStone-Amazon
4c90c1cf6979d837af7eba1c9806da5f48b7655a
[ "Apache-2.0" ]
1
2020-02-17T21:15:00.000Z
2020-02-17T21:15:00.000Z
Keras-DeepRecommender-Clothing-Shoes-Jewelry/2_Modeling/*dense_2_Concatenate_10_embeddings_10_epochs.ipynb
zirubak/dse260-CapStone-Amazon
4c90c1cf6979d837af7eba1c9806da5f48b7655a
[ "Apache-2.0" ]
null
null
null
Keras-DeepRecommender-Clothing-Shoes-Jewelry/2_Modeling/*dense_2_Concatenate_10_embeddings_10_epochs.ipynb
zirubak/dse260-CapStone-Amazon
4c90c1cf6979d837af7eba1c9806da5f48b7655a
[ "Apache-2.0" ]
3
2020-02-11T08:03:10.000Z
2020-07-28T16:48:04.000Z
111.690486
157,740
0.816985
[ [ [ "# dense_2_Concatenate_10_embeddings_10_epochs\n\n# Deep recommender on top of Amason’s Clean Clothing Shoes and Jewelry explicit rating dataset\n\nFrame the recommendation system as a rating prediction machine learning problem and create a hybrid architecture that mixes the collaborative and content based filtering approaches:\n- Collaborative part: Predict items ratings in order to recommend to the user items that he is likely to rate high.\n- Content based: use metadata inputs (such as price and title) about items to find similar items to recommend.\n\n### - Create 2 explicit recommendation engine models based on 2 machine learning architecture using Keras: \n 1. a matrix factorization model \n 2. a deep neural network model.\n\n\n### Compare the results of the different models and configurations to find the \"best\" predicting model\n\n### Used the best model for recommending items to users", "_____no_output_____" ] ], [ [ "### name of model\nmodname = 'dense_2_Concatenate_10_embeddings_10_epochs'\n\n### number of epochs\nnum_epochs = 10\n\n### size of embedding\nembedding_size = 10", "_____no_output_____" ], [ "# import sys\n\n# !{sys.executable} -m pip install --upgrade pip\n# !{sys.executable} -m pip install sagemaker-experiments\n# !{sys.executable} -m pip install pandas\n# !{sys.executable} -m pip install numpy\n# !{sys.executable} -m pip install matplotlib\n# !{sys.executable} -m pip install boto3\n# !{sys.executable} -m pip install sagemaker\n# !{sys.executable} -m pip install pyspark\n# !{sys.executable} -m pip install ipython-autotime\n# !{sys.executable} -m pip install surprise\n# !{sys.executable} -m pip install smart_open\n# !{sys.executable} -m pip install pyarrow\n# !{sys.executable} -m pip install fastparquet", "_____no_output_____" ], [ "# Check Jave version \n# !sudo yum -y update", "_____no_output_____" ], [ "# # Need to use Java 1.8.0\n# !sudo yum remove jre-1.7.0-openjdk -y", "_____no_output_____" ], [ "!java -version", "openjdk version \"11.0.1\" 2018-10-16 LTS\nOpenJDK Runtime Environment Zulu11.2+3 (build 11.0.1+13-LTS)\nOpenJDK 64-Bit Server VM Zulu11.2+3 (build 11.0.1+13-LTS, mixed mode)\n" ], [ "# !sudo update-alternatives --config java", "_____no_output_____" ], [ "# !pip install pyarrow fastparquet\n# !pip install ipython-autotime\n# !pip install tqdm pydot pydotplus pydot_ng", "_____no_output_____" ], [ "#### To measure all running time\n# https://github.com/cpcloud/ipython-autotime\n\n%load_ext autotime", "_____no_output_____" ], [ "%pylab inline\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n%matplotlib inline\nimport re\nimport seaborn as sbn\nimport nltk\nimport tqdm as tqdm\nimport sqlite3\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame \nimport string\nimport pydot \nimport pydotplus\nimport pydot_ng\nimport pickle\nimport time\nimport gzip\nimport os\nos.getcwd()\n \nimport matplotlib.pyplot as plt\nfrom math import floor,ceil\n\n#from nltk.corpus import stopwords\n#stop = stopwords.words(\"english\")\nfrom nltk.stem.porter import PorterStemmer\nenglish_stemmer=nltk.stem.SnowballStemmer('english')\nfrom nltk.tokenize import word_tokenize\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix,roc_curve, auc,classification_report, mean_squared_error, mean_absolute_error\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.svm import LinearSVC\nfrom sklearn.neighbors import 
NearestNeighbors\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import neighbors\nfrom scipy.spatial.distance import cosine\nfrom sklearn.feature_selection import SelectKBest\nfrom IPython.display import SVG\n\n# Tensorflow\nimport tensorflow as tf\n\n#Keras\nfrom keras.models import Sequential, Model, load_model, save_model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Embedding\nfrom keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\nfrom keras.optimizers import Adam\nfrom keras.layers.core import Reshape, Dropout, Dense\nfrom keras.layers.merge import Multiply, Dot, Concatenate\nfrom keras.layers.embeddings import Embedding\nfrom keras import optimizers\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils.vis_utils import model_to_dot", "Populating the interactive namespace from numpy and matplotlib\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/tensorflow_core/__init__.py:1467: The name tf.estimator.inputs is deprecated. Please use tf.compat.v1.estimator.inputs instead.\n\ntime: 3.16 s\n" ] ], [ [ "### Set and Check GPUs", "_____no_output_____" ] ], [ [ "#Session\nfrom keras import backend as K\n\ndef set_check_gpu():\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.per_process_gpu_memory_fraction =1 # allow all of the GPU memory to be allocated\n # for 8 GPUs\n # cfg.gpu_options.visible_device_list = \"0,1,2,3,4,5,6,7\" # \"0,1\"\n # for 1 GPU\n cfg.gpu_options.visible_device_list = \"0\"\n #cfg.gpu_options.allow_growth = True # # Don't pre-allocate memory; dynamically allocate the memory used on the GPU as-needed\n #cfg.log_device_placement = True # to log device placement (on which device the operation ran)\n sess = K.tf.Session(config=cfg)\n K.set_session(sess) # set this TensorFlow session as the default session for Keras\n\n print(\"* TF version: \", [tf.__version__, tf.test.is_gpu_available()])\n print(\"* List of GPU(s): \", tf.config.experimental.list_physical_devices() )\n print(\"* Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU'))) \n \n \n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\";\n # set for 8 GPUs\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3,4,5,6,7\";\n # set for 1 GPU\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\";\n\n # Tf debugging option\n tf.debugging.set_log_device_placement(True)\n\n gpus = tf.config.experimental.list_physical_devices('GPU')\n\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\n# print(tf.config.list_logical_devices('GPU'))\n print(tf.config.experimental.list_physical_devices('GPU'))\n print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))", "time: 5.39 ms\n" ], [ "set_check_gpu()", "* TF version: ['1.15.2', True]\n* List of GPU(s): [PhysicalDevice(name='/physical_device:CPU:0', device_type='CPU'), PhysicalDevice(name='/physical_device:XLA_CPU:0', device_type='XLA_CPU'), PhysicalDevice(name='/physical_device:XLA_GPU:0', device_type='XLA_GPU'), PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]\n* Num GPUs Available: 
1\n1 Physical GPUs, 1 Logical GPUs\n[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]\nNum GPUs Available: 1\ntime: 247 ms\n" ], [ "# reset GPU memory& Keras Session\ndef reset_keras():\n try:\n del classifier\n del model \n except:\n pass\n\n K.clear_session()\n \n K.get_session().close()\n# sess = K.get_session()\n\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.per_process_gpu_memory_fraction \n# cfg.gpu_options.visible_device_list = \"0,1,2,3,4,5,6,7\" # \"0,1\"\n cfg.gpu_options.visible_device_list = \"0\" # \"0,1\"\n cfg.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\n\n sess = K.tf.Session(config=cfg)\n K.set_session(sess) # set this TensorFlow session as the default session for Keras\n ", "time: 2.36 ms\n" ] ], [ [ "## Load dataset and analysis using Spark", "_____no_output_____" ], [ "## Download and prepare Data:\n#### 1. Read the data:\n#### Read the data from the reviews dataset of amazon. \n#### Use the dastaset in which all users and items have at least 5 reviews. \n\n### Location of dataset: https://nijianmo.github.io/amazon/index.html", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nimport boto3\nimport sagemaker\nfrom sagemaker import get_execution_role\nfrom sagemaker.session import Session\nfrom sagemaker.analytics import ExperimentAnalytics\n\nimport gzip\nimport json\n\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql.types import StructField, StructType, StringType, DoubleType\nfrom pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler\nfrom pyspark.sql.functions import *\n\n# spark imports\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import UserDefinedFunction, explode, desc\nfrom pyspark.sql.types import StringType, ArrayType\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\nimport os\nimport pandas as pd\nimport pyarrow\nimport fastparquet\n\n# from pandas_profiling import ProfileReport", "time: 461 ms\n" ], [ "# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/Clean_Clothing_Shoes_and_Jewelry_5_clean.parquet ./data/", "time: 804 µs\n" ], [ "!ls -alh ./data", "total 3.3G\ndrwxrwxr-x 5 ec2-user ec2-user 4.0K May 26 16:08 .\ndrwxrwxr-x 8 ec2-user ec2-user 4.0K May 26 21:11 ..\n-rw-rw-r-- 1 ec2-user ec2-user 308M May 26 15:35 Clean_Clothing_Shoes_and_Jewelry_5_clean.parquet\ndrwxrwxr-x 2 ec2-user ec2-user 4.0K May 26 15:46 Cleaned_meta_Clothing_Shoes_and_Jewelry.parquet\n-rw-rw-r-- 1 ec2-user ec2-user 1.2G Nov 21 2019 Clothing_Shoes_and_Jewelry_5.json.gz\ndrwxrwxr-x 2 ec2-user ec2-user 4.0K May 26 15:34 Clothing_Shoes_and_Jewelry_5.parquet\n-rw-rw-r-- 1 ec2-user ec2-user 31 May 26 15:34 for_dataset.txt\ndrwxrwxr-x 2 ec2-user ec2-user 4.0K May 26 15:34 .ipynb_checkpoints\n-rw-rw-r-- 1 ec2-user ec2-user 1.5G Oct 15 2019 meta_Clothing_Shoes_and_Jewelry.json.gz\n-rw-rw-r-- 1 ec2-user ec2-user 71M May 26 16:08 ratings_test.parquet\n-rw-rw-r-- 1 ec2-user ec2-user 282M May 26 16:08 ratings_train.parquet\ntime: 135 ms\n" ] ], [ [ "### Read clened dataset from parquet files", "_____no_output_____" ] ], [ [ "review_data = pd.read_parquet(\"./data/Clean_Clothing_Shoes_and_Jewelry_5_clean.parquet\")", "time: 4.03 s\n" ], [ "review_data[:3]", "_____no_output_____" ], [ "review_data.shape", "_____no_output_____" ] ], [ [ "### 2. 
Arrange and clean the data", "_____no_output_____" ], [ "Rearrange the columns by relevance and rename column names", "_____no_output_____" ] ], [ [ "review_data.columns", "_____no_output_____" ], [ "review_data = review_data[['asin', 'image', 'summary', 'reviewText', 'overall', 'reviewerID', 'reviewerName', 'reviewTime']]\n\nreview_data.rename(columns={ 'overall': 'score','reviewerID': 'user_id', 'reviewerName': 'user_name'}, inplace=True)\n\n#the variables names after rename in the modified data frame\nlist(review_data)", "_____no_output_____" ] ], [ [ "# Add Metadata \n\n### Metadata includes descriptions, price, sales-rank, brand info, and co-purchasing links\n- asin - ID of the product, e.g. 0000031852\n- title - name of the product\n- price - price in US dollars (at time of crawl)\n- imUrl - url of the product image\n- related - related products (also bought, also viewed, bought together, buy after viewing)\n- salesRank - sales rank information\n- brand - brand name\n- categories - list of categories the product belongs to", "_____no_output_____" ] ], [ [ "# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/Cleaned_meta_Clothing_Shoes_and_Jewelry.parquet ./data/", "time: 829 µs\n" ], [ "all_info = pd.read_parquet(\"./data/Cleaned_meta_Clothing_Shoes_and_Jewelry.parquet\")", "time: 35.8 s\n" ], [ "all_info.head(n=5)", "_____no_output_____" ] ], [ [ "### Arrange and clean the data", "_____no_output_____" ], [ "- Cleaning, handling missing data, normalization, etc:\n- For the algorithm in keras to work, remap all item_ids and user_ids to an interger between 0 and the total number of users or the total number of items", "_____no_output_____" ] ], [ [ "all_info.columns", "_____no_output_____" ], [ "items = all_info.asin.unique()\nitem_map = {i:val for i,val in enumerate(items)}\ninverse_item_map = {val:i for i,val in enumerate(items)}\nall_info[\"old_item_id\"] = all_info[\"asin\"] # copying for join with metadata\nall_info[\"item_id\"] = all_info[\"asin\"].map(inverse_item_map)\nitems = all_info.item_id.unique()\nprint (\"We have %d unique items in metadata \"%items.shape[0])", "We have 2681355 unique items in metadata \ntime: 11 s\n" ], [ "all_info['description'] = all_info['description'].fillna(all_info['title'].fillna('no_data'))\nall_info['title'] = all_info['title'].fillna(all_info['description'].fillna('no_data').apply(str).str[:20])\nall_info['image'] = all_info['image'].fillna('no_data')\nall_info['price'] = pd.to_numeric(all_info['price'],errors=\"coerce\")\nall_info['price'] = all_info['price'].fillna(all_info['price'].median()) ", "time: 54.1 s\n" ], [ "users = review_data.user_id.unique()\nuser_map = {i:val for i,val in enumerate(users)}\ninverse_user_map = {val:i for i,val in enumerate(users)}\nreview_data[\"old_user_id\"] = review_data[\"user_id\"] \nreview_data[\"user_id\"] = review_data[\"user_id\"].map(inverse_user_map)\n\nitems_reviewed = review_data.asin.unique()\nreview_data[\"old_item_id\"] = review_data[\"asin\"] # copying for join with metadata\nreview_data[\"item_id\"] = review_data[\"asin\"].map(inverse_item_map)\n\nitems_reviewed = review_data.item_id.unique()\nusers = review_data.user_id.unique()", "time: 9.82 s\n" ], [ "print (\"We have %d unique users\"%users.shape[0])\nprint (\"We have %d unique items reviewed\"%items_reviewed.shape[0])\n# We have 192403 unique users in the \"small\" dataset\n# We have 63001 unique items reviewed in the \"small\" dataset", "We have 513010 unique users\nWe have 245636 unique items reviewed\ntime: 2 ms\n" ], [ 
"review_data.head(3)", "_____no_output_____" ] ], [ [ "## Adding the review count and avarage to the metadata", "_____no_output_____" ] ], [ [ "#items_nb = review_data['old_item_id'].value_counts().reset_index()\nitems_avg = review_data.drop(['summary','reviewText','user_id','asin','user_name','reviewTime','old_user_id','item_id'],axis=1).groupby('old_item_id').agg(['count','mean']).reset_index()\nitems_avg.columns= ['old_item_id','num_ratings','avg_rating']\n#items_avg.head(5)\nitems_avg['num_ratings'].describe()", "_____no_output_____" ], [ "all_info = pd.merge(all_info,items_avg,how='left',left_on='asin',right_on='old_item_id')\npd.set_option('display.max_colwidth', 100)\nall_info.head(2)", "_____no_output_____" ] ], [ [ "# Explicit feedback (Reviewed Dataset) Recommender System", "_____no_output_____" ], [ "### Explicit feedback is when users gives voluntarily the rating information on what they like and dislike.\n\n- In this case, I have explicit item ratings ranging from one to five.\n- Framed the recommendation system as a rating prediction machine learning problem: \n - Predict an item's ratings in order to be able to recommend to a user an item that he is likely to rate high if he buys it. `\n\n### To evaluate the model, I randomly separate the data into a training and test set. ", "_____no_output_____" ] ], [ [ "ratings_train, ratings_test = train_test_split( review_data, test_size=0.1, random_state=0)", "time: 1.72 s\n" ], [ "ratings_train.shape", "_____no_output_____" ], [ "ratings_test.shape", "_____no_output_____" ] ], [ [ "## Adding Metadata to the train set\nCreate an architecture that mixes the collaborative and content based filtering approaches:\n```\n- Collaborative Part: Predict items ratings to recommend to the user items which he is likely to rate high according to learnt item & user embeddings (learn similarity from interactions).\n- Content based part: Use metadata inputs (such as price and title) about items to recommend to the user contents similar to those he rated high (learn similarity of item attributes).\n```\n\n#### Adding the title and price - Add the metadata of the items in the training and test datasets.", "_____no_output_____" ] ], [ [ "# # creating metadata mappings \n# titles = all_info['title'].unique()\n# titles_map = {i:val for i,val in enumerate(titles)}\n# inverse_titles_map = {val:i for i,val in enumerate(titles)}\n\n# price = all_info['price'].unique()\n# price_map = {i:val for i,val in enumerate(price)}\n# inverse_price_map = {val:i for i,val in enumerate(price)}\n\n# print (\"We have %d prices\" %price.shape)\n# print (\"We have %d titles\" %titles.shape)\n\n\n# all_info['price_id'] = all_info['price'].map(inverse_price_map)\n# all_info['title_id'] = all_info['title'].map(inverse_titles_map)\n\n# # creating dict from \n# item2prices = {}\n# for val in all_info[['item_id','price_id']].dropna().drop_duplicates().iterrows():\n# item2prices[val[1][\"item_id\"]] = val[1][\"price_id\"]\n\n# item2titles = {}\n# for val in all_info[['item_id','title_id']].dropna().drop_duplicates().iterrows():\n# item2titles[val[1][\"item_id\"]] = val[1][\"title_id\"]\n \n\n\n# # populating the rating dataset with item metadata info\n# ratings_train[\"price_id\"] = ratings_train[\"item_id\"].map(lambda x : item2prices[x])\n# ratings_train[\"title_id\"] = ratings_train[\"item_id\"].map(lambda x : item2titles[x])\n\n\n# # populating the test dataset with item metadata info\n# ratings_test[\"price_id\"] = ratings_test[\"item_id\"].map(lambda x : item2prices[x])\n# 
ratings_test[\"title_id\"] = ratings_test[\"item_id\"].map(lambda x : item2titles[x])\n", "time: 1.42 ms\n" ] ], [ [ "## create rating train/test dataset and upload into S3", "_____no_output_____" ] ], [ [ "# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/ratings_test.parquet ./data/\n# !aws s3 cp s3://dse-cohort5-group1/2-Keras-DeepRecommender/dataset/ratings_train.parquet ./data/", "time: 1.82 ms\n" ], [ "ratings_test = pd.read_parquet('./data/ratings_test.parquet')\nratings_train = pd.read_parquet('./data/ratings_train.parquet')", "time: 5.13 s\n" ], [ "ratings_train[:3]", "_____no_output_____" ], [ "ratings_train.shape", "_____no_output_____" ] ], [ [ "# **Define embeddings\n### The $\\underline{embeddings}$ are low-dimensional hidden representations of users and items, \n### i.e. for each item I can find its properties and for each user I can encode how much they like those properties so I can determine attitudes or preferences of users by a small number of hidden factors \n\n### Throughout the training, I learn two new low-dimensional dense representations: one embedding for the users and another one for the items.\n", "_____no_output_____" ] ], [ [ "price = all_info['price'].unique()\ntitles = all_info['title'].unique()", "time: 6.86 s\n" ], [ "# declare input embeddings to the model\n# User input\nuser_id_input = Input(shape=[1], name='user')\n# Item Input\nitem_id_input = Input(shape=[1], name='item')\nprice_id_input = Input(shape=[1], name='price')\ntitle_id_input = Input(shape=[1], name='title')\n\n# define the size of embeddings as a parameter\n# Check 5, 10 , 15, 20, 50\nuser_embedding_size = embedding_size \nitem_embedding_size = embedding_size\nprice_embedding_size = embedding_size\ntitle_embedding_size = embedding_size\n\n# apply an embedding layer to all inputs\nuser_embedding = Embedding(output_dim=user_embedding_size, input_dim=users.shape[0],\n input_length=1, name='user_embedding')(user_id_input)\n\nitem_embedding = Embedding(output_dim=item_embedding_size, input_dim=items_reviewed.shape[0],\n input_length=1, name='item_embedding')(item_id_input)\n\nprice_embedding = Embedding(output_dim=price_embedding_size, input_dim=price.shape[0],\n input_length=1, name='price_embedding')(price_id_input)\n\ntitle_embedding = Embedding(output_dim=title_embedding_size, input_dim=titles.shape[0],\n input_length=1, name='title_embedding')(title_id_input)\n\n# reshape from shape (batch_size, input_length,embedding_size) to (batch_size, embedding_size). \nuser_vecs = Reshape([user_embedding_size])(user_embedding)\nitem_vecs = Reshape([item_embedding_size])(item_embedding)\nprice_vecs = Reshape([price_embedding_size])(price_embedding)\ntitle_vecs = Reshape([title_embedding_size])(title_embedding)", "WARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n\ntime: 81.5 ms\n" ] ], [ [ "# 2. 
Deep Recommender\n\n### Instead of taking a dot product of the user and the item embedding, concatenate or multiply them and use them as features for a neural network. \n### Thus, we are not constrained to the dot product way of combining the embeddings, and can learn complex non-linear relationships.\n\n![image.png](attachment:image.png)\n\n\n\n\n", "_____no_output_____" ] ], [ [ "!mkdir -p ./models", "time: 423 ms\n" ], [ "# Try add dense layers on top of the embeddings before merging (Comment to drop this idea.)\nuser_vecs = Dense(64, activation='relu')(user_vecs) \nitem_vecs = Dense(64, activation='relu')(item_vecs)\n# price_vecs = Dense(64, activation='relu')(price_vecs) \n# title_vecs = Dense(64, activation='relu')(title_vecs)", "time: 31.6 ms\n" ], [ "# Concatenate the item embeddings :\n# item_vecs_complete = Concatenate()([item_vecs, price_vecs,title_vecs])\n\n# Concatenate user and item embeddings and use them as features for the neural network:\n# input_vecs = Concatenate()([user_vecs, item_vecs_complete]) # can be changed by Multiply\ninput_vecs = Concatenate()([user_vecs, item_vecs]) # can be changed by Multiply\n\n# Multiply user and item embeddings and use them as features for the neural network:\n# input_vecs = Multiply()([user_vecs, item_vecs]) # can be changed by concat \n\n# Dropout is a technique where randomly selected neurons are ignored during training to prevent overfitting \ninput_vecs = Dropout(0.1)(input_vecs) \n\n# Check one dense 128 or two dense layers (128,128) or (128,64) or three denses layers (128,64,32))\n\n# First layer\n# Dense(128) is a fully-connected layer with 128 hidden units.\n# Use rectified linear units (ReLU) f(x)=max(0,x) as an activation function.\nx = Dense(128, activation='relu')(input_vecs)\nx = Dropout(0.1)(x) # Add droupout or not # To improve the performance\n\n# Next Layers\nx = Dense(128, activation='relu')(x) # Add dense again or not \nx = Dropout(0.1)(x) # Add droupout or not # To improve the performance\n# x = Dense(64, activation='relu')(x) # Add dense again or not \n# x = Dropout(0.1)(x) # Add droupout or not # To improve the performance\n# x = Dense(32, activation='relu')(x) # Add dense again or not #\n# x = Dropout(0.1)(x) # Add droupout or not # To improve the performance\n\n# The output\ny = Dense(1)(x)", "WARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:133: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\ntime: 120 ms\n" ], [ "# create model\nmodel = Model(inputs=\n [\n user_id_input,\n item_id_input\n ], \n outputs=y)\n\n# compile model\nmodel.compile(loss='mse',\n optimizer=\"adam\" )\n\n# set save location for model\nsave_path = \"./models\"\nthename = save_path + '/' + modname + '.h5'\nmcheck = ModelCheckpoint(thename, monitor='val_loss', save_best_only=True)\n\n# fit model\nhistory = model.fit([ratings_train[\"user_id\"]\n , ratings_train[\"item_id\"]\n ]\n , ratings_train[\"score\"]\n , batch_size=64\n , epochs=num_epochs\n , validation_split=0.2\n , callbacks=[mcheck]\n , shuffle=True)", "WARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:986: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:973: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.\n\nTrain on 684523 samples, validate on 171131 samples\nEpoch 1/10\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:174: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:190: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:199: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.\n\nWARNING:tensorflow:From /home/ec2-user/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:206: The name tf.variables_initializer is deprecated. 
Please use tf.compat.v1.variables_initializer instead.\n\n684523/684523 [==============================] - 76s 111us/step - loss: 1.7269 - val_loss: 1.5988\nEpoch 2/10\n684523/684523 [==============================] - 59s 86us/step - loss: 1.1386 - val_loss: 1.7199\nEpoch 3/10\n684523/684523 [==============================] - 58s 85us/step - loss: 0.8872 - val_loss: 1.7561\nEpoch 4/10\n684523/684523 [==============================] - 59s 86us/step - loss: 0.7413 - val_loss: 1.9053\nEpoch 5/10\n684523/684523 [==============================] - 59s 86us/step - loss: 0.6856 - val_loss: 1.9473\nEpoch 6/10\n684523/684523 [==============================] - 59s 86us/step - loss: 0.6254 - val_loss: 1.9180\nEpoch 7/10\n684523/684523 [==============================] - 59s 86us/step - loss: 0.5923 - val_loss: 2.1290\nEpoch 8/10\n684523/684523 [==============================] - 58s 85us/step - loss: 0.5653 - val_loss: 2.1378\nEpoch 9/10\n684523/684523 [==============================] - 59s 86us/step - loss: 0.5453 - val_loss: 2.4168\nEpoch 10/10\n684523/684523 [==============================] - 59s 86us/step - loss: 0.5325 - val_loss: 2.7167\ntime: 10min 5s\n" ], [ "# Save the fitted model history to a file\nwith open('./histories/' + modname + '.pkl' , 'wb') as file_pi: pickle.dump(history.history, file_pi)\n \nprint(\"Save history in \", './histories/' + modname + '.pkl')", "Save history in ./histories/dense_2_Concatenate_10_embeddings_10_epochs.pkl\ntime: 1.93 ms\n" ], [ "def disp_model(path,file,suffix):\n model = load_model(path+file+suffix) \n ## Summarise the model \n model.summary() \n # Extract the learnt user and item embeddings, i.e., a table with number of items and users rows and columns, with number of columns is the dimension of the trained embedding.\n # In our case, the embeddings correspond exactly to the weights of the model:\n weights = model.get_weights()\n print (\"embeddings \\ weights shapes\",[w.shape for w in weights]) \n return model\n \nmodel_path = \"./models/\"", "time: 2 ms\n" ], [ "def plt_pickle(path,file,suffix):\n with open(path+file+suffix , 'rb') as file_pi: \n thepickle= pickle.load(file_pi)\n plot(thepickle[\"loss\"],label ='Train Error ' + file,linestyle=\"--\")\n plot(thepickle[\"val_loss\"],label='Validation Error ' + file) \n plt.legend()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Error\") \n ##plt.ylim(0, 0.1)\n return pd.DataFrame(thepickle,columns =['loss','val_loss'])\n\nhist_path = \"./histories/\"", "time: 2.1 ms\n" ], [ "print(model_path)\nprint(modname)\nmodel=disp_model(model_path, modname, '.h5')", "./models/\ndense_2_Concatenate_10_embeddings_10_epochs\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\nuser (InputLayer) (None, 1) 0 \n__________________________________________________________________________________________________\nitem (InputLayer) (None, 1) 0 \n__________________________________________________________________________________________________\nuser_embedding (Embedding) (None, 1, 10) 5130100 user[0][0] \n__________________________________________________________________________________________________\nitem_embedding (Embedding) (None, 1, 10) 2456360 item[0][0] \n__________________________________________________________________________________________________\nreshape_1 (Reshape) (None, 10) 0 user_embedding[0][0] 
\n__________________________________________________________________________________________________\nreshape_2 (Reshape) (None, 10) 0 item_embedding[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 64) 704 reshape_1[0][0] \n__________________________________________________________________________________________________\ndense_2 (Dense) (None, 64) 704 reshape_2[0][0] \n__________________________________________________________________________________________________\nconcatenate_1 (Concatenate) (None, 128) 0 dense_1[0][0] \n dense_2[0][0] \n__________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 concatenate_1[0][0] \n__________________________________________________________________________________________________\ndense_3 (Dense) (None, 128) 16512 dropout_1[0][0] \n__________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 128) 0 dense_3[0][0] \n__________________________________________________________________________________________________\ndense_4 (Dense) (None, 128) 16512 dropout_2[0][0] \n__________________________________________________________________________________________________\ndropout_3 (Dropout) (None, 128) 0 dense_4[0][0] \n__________________________________________________________________________________________________\ndense_5 (Dense) (None, 1) 129 dropout_3[0][0] \n==================================================================================================\nTotal params: 7,621,021\nTrainable params: 7,621,021\nNon-trainable params: 0\n__________________________________________________________________________________________________\nembeddings \\ weights shapes [(513010, 10), (245636, 10), (10, 64), (64,), (10, 64), (64,), (128, 128), (128,), (128, 128), (128,), (128, 1), (1,)]\ntime: 2.04 s\n" ], [ "# Display the model using keras\nSVG(model_to_dot(model).create(prog='dot', format='svg'))", "_____no_output_____" ], [ "x=plt_pickle(hist_path , modname , '.pkl')\nx.head(20).transpose()", "_____no_output_____" ] ] ]
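The notebook's introduction says the best model is then used for recommending items to users, but no recommendation cell appears above. The helper below is a hedged sketch of that step: it reuses the model, ratings_train, items_reviewed and all_info objects exactly as defined in this notebook, while the scoring logic, batch size and returned columns are illustrative assumptions rather than the author's code.

import numpy as np

def recommend_items(model, user_id, n=10):
    # Score every catalogue item for one user and return the n highest-rated
    # predictions, skipping items the user has already reviewed (hypothetical helper).
    seen = set(ratings_train.loc[ratings_train.user_id == user_id, 'item_id'])
    candidates = np.array([i for i in items_reviewed if i not in seen])
    scores = model.predict([np.full(len(candidates), user_id), candidates],
                           batch_size=4096).ravel()
    top = candidates[np.argsort(-scores)[:n]]
    return all_info.loc[all_info.item_id.isin(top), ['item_id', 'title', 'price', 'avg_rating']]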
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecb3079d55c681918aa706b6f8850310c6f0ff5a
78,039
ipynb
Jupyter Notebook
julia_paresto/ABC_parmest_jbr_io.ipynb
notesofdabbler/learn_Julia
ec8d07f162a9f28c72345aca143159548dd37abf
[ "MIT" ]
null
null
null
julia_paresto/ABC_parmest_jbr_io.ipynb
notesofdabbler/learn_Julia
ec8d07f162a9f28c72345aca143159548dd37abf
[ "MIT" ]
null
null
null
julia_paresto/ABC_parmest_jbr_io.ipynb
notesofdabbler/learn_Julia
ec8d07f162a9f28c72345aca143159548dd37abf
[ "MIT" ]
null
null
null
103.089828
10,463
0.617473
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]