path
stringlengths 7
265
| concatenated_notebook
stringlengths 46
17M
|
---|---|
Geometric Problems/Learning a quadratic pseudo-metric from distance measurements.ipynb | ###Markdown
## Formulation

Let's define: $\Delta_i = x_i - y_i$ and $L_i = \Delta_i^T P \Delta_i$.

The problem is: minimize $\sum_i \left(-2 d_i (L_i)^{0.5} + L_i\right)$ subject to $P \succeq 0$.

### Problem data
###Code
X = [
[1.164954,1.696142,-1.446172,-0.360030,-0.044881,0.513478,0.375041,-0.322940,0.847649,-0.557094,-1.098195,-0.977814,-0.507700,-0.612911,1.133000,0.924159,0.394600,-0.137414,0.039885,-0.786457,-0.127443,-0.620214,-0.262681,0.499521,0.438705,0.292315,-0.759697,0.670292,-0.902031,0.846106,0.526163,0.889164,-1.010674,-1.406949,-0.165923,0.041430,-0.844414,0.336297,1.487524,0.786430,-0.702669,1.802440,-1.931134,-1.238566,1.547177,-2.526706,0.899884,-0.382918,-0.594524,1.879957,0.396727,-0.031243,0.251078,0.862500,0.968992,0.536007,1.524681,1.157783,-1.898778,-1.454749,0.418469,1.766708,-0.159448,-1.179060,-1.226502,0.658153,-0.397914,1.271743,-1.389722,-0.797533,-0.268286,1.939318,1.338814,0.420989,0.139860,-2.014986,-0.267458,0.605540,0.186747,0.434313,0.149996,1.136805,-1.378907,0.086932,-0.066596,0.642066,0.565239,-0.591204,0.393682,0.667201,-0.514013,1.289554,-0.227852,-0.904204,-1.586917,-0.047555,-0.391039,-0.956374,1.042360,-1.024905],
[0.626839,0.059060,-0.701165,-0.135576,-0.798945,0.396681,1.125162,0.317988,0.268101,-0.336706,1.122648,-1.021466,0.885299,-0.209144,0.149994,-1.814115,0.639406,0.615770,-2.482843,0.634809,0.554172,0.237149,0.976490,-1.055375,-1.247344,2.565910,-0.674721,0.420146,-2.053257,-0.184538,-0.184454,-1.299152,-0.960498,1.030812,0.300907,-1.098050,-0.311630,-0.221361,-0.836821,-1.461639,0.356429,-0.642984,0.660300,-1.889236,0.644933,-0.312981,-0.200899,0.155083,0.130246,-1.003849,-0.527115,0.778212,-0.310471,-1.034706,-0.747317,0.298451,-0.195261,0.161908,1.822525,0.466546,0.247349,-0.382104,2.704026,-0.277776,0.069600,0.491314,0.864280,-0.035344,0.229328,-0.936741,-1.082140,-0.895840,1.222299,-0.433373,-0.748089,0.491717,-0.570245,-0.624481,1.594939,-0.386207,0.542038,0.391314,-0.260172,1.955674,0.373381,0.923087,-0.610781,1.691546,-0.905427,-0.067794,1.896261,-0.530575,0.376770,0.698670,-0.920783,-0.614736,-1.382045,0.451807,1.209120,-1.056846],
[0.075080,1.797072,1.245982,-1.349338,-0.765172,0.756219,0.728642,-0.511172,-0.923489,0.415227,0.581667,0.317688,-0.248094,0.562148,0.703144,0.034973,0.874213,0.977894,1.158655,0.820410,-1.097344,-1.586847,0.977815,-0.450743,0.324667,-0.457816,-1.171687,-2.872751,0.089086,1.030714,0.198783,1.182573,0.691160,-0.759874,-0.322467,1.566724,0.397810,0.016649,-1.300982,1.554466,0.652636,0.109555,-1.102510,-0.973585,-2.148359,-0.593618,-0.233735,-0.964648,0.035014,-0.497446,0.344571,2.180484,-0.923004,-0.192673,-2.796024,0.284043,0.017260,1.557064,-1.518415,0.545437,0.704110,-0.911425,-0.198500,-1.581053,-0.396516,0.800734,-0.177618,-1.501329,0.271190,-0.002433,2.014134,-0.304158,-1.595978,0.706252,-0.628975,-1.554975,-0.187267,0.572228,0.321307,-0.112564,0.254409,1.605148,0.994768,0.161454,0.217314,-1.555108,1.231111,0.953356,-1.274473,-1.735660,-0.253230,-0.692971,1.221556,0.482598,-0.614274,0.240362,1.076292,-1.253778,0.780955,2.887723],
[0.351607,0.264069,-0.638977,-1.270450,0.861735,0.400486,-2.377454,-0.002041,-0.070499,1.557814,-0.271354,1.516108,-0.726249,-1.063923,-0.052412,-1.807862,1.752402,-1.115348,-1.026279,-0.176027,-0.731301,-0.401485,1.170021,1.270378,0.390070,-1.610827,2.032930,1.685874,2.087099,-1.527623,1.590427,1.817472,-0.758618,0.874127,-0.368411,-1.048423,1.049786,-1.192361,1.574132,-0.597535,0.215671,-0.719038,-0.102971,0.212116,-1.028845,0.332322,1.449907,0.038756,-0.624674,-1.504397,-0.723291,0.437814,-0.384776,-1.299723,0.696732,0.959664,0.246340,-0.193544,-1.051071,1.320319,0.631939,-0.996090,-0.141405,1.049022,1.388807,-0.767269,1.874381,0.365373,-0.366360,0.396086,1.944031,0.555253,-1.067730,0.227857,1.394831,-0.140609,1.208557,-0.724410,0.866841,-0.964333,-0.307241,0.825892,1.834034,-0.628688,-0.179457,0.663594,0.994300,-1.930055,0.346546,0.806349,-0.174531,-0.859806,1.098288,0.811696,-0.334672,0.125017,1.269466,0.256435,-1.179900,-0.267744],
[-0.696513,0.871673,0.577350,0.984570,-0.056225,-1.341381,-0.273782,1.606511,0.147891,-2.444299,0.414191,0.749432,-0.445040,0.351589,2.018496,1.028193,-0.320051,-0.550021,1.153487,0.562474,1.404732,-0.770692,0.159311,0.898694,-0.405138,-2.669524,0.968481,0.027925,0.365118,0.964939,0.032192,-0.584302,-0.096972,0.761127,1.147895,0.422724,-0.340796,-0.131646,1.166040,-1.210568,-0.263896,0.420628,-1.059802,0.493442,-0.141582,0.558851,1.836132,0.765458,-0.539775,-0.095449,1.268193,1.333329,1.158181,0.306596,3.206908,2.087593,-0.854485,1.651301,0.049931,-0.404494,-0.992362,1.195143,0.411268,0.302689,1.364422,0.364420,0.172400,-0.198660,1.376960,-0.508693,-1.521529,-0.324247,-0.759919,-1.016992,-1.647691,0.244944,-0.638855,1.192196,1.291844,-2.057251,-0.417112,1.470390,-1.715910,-1.438824,0.025673,-0.609500,-0.803475,0.512845,-1.195235,-0.914801,0.978788,0.529038,-0.853014,0.327883,0.080345,-0.223605,0.487268,0.421229,1.001450,-0.488540],
]
Y = [
[0.419420,-0.611729,-0.506138,-2.122378,-0.673263,-1.350292,0.202680,0.186106,1.408075,0.179925,-0.683631,0.450343,-0.201343,-0.906374,-0.179097,0.067372,1.177170,1.173296,-0.574005,-0.081630,1.662312,1.166705,-0.960461,-0.915962,0.427947,0.213963,0.261843,0.144555,-0.972946,-0.534127,-0.310909,-1.719190,-0.345134,-0.785496,-0.275569,-0.744296,2.680118,-0.583258,-2.068566,0.385524,0.610146,-0.226541,0.263481,-0.988875,-0.130638,-1.266094,-0.768533,1.100780,-0.328912,-1.555024,0.698124,1.361879,-1.159160,-1.450383,-1.304731,1.000335,0.125589,-0.260304,-1.212525,-0.265477,-1.474263,-2.366324,1.195417,1.966075,2.955089,-1.133640,-2.032843,-0.902634,-1.327697,0.323356,0.096060,-0.875772,-1.672760,-1.548104,-0.426525,1.189467,0.750603,-1.340946,-0.876102,0.982860,0.016264,-0.934128,0.660062,0.131692,1.855048,-0.835704,-1.685751,-0.632046,1.599021,-0.245918,1.132966,-0.997240,-0.242387,0.082218,0.836056,-2.938220,1.116575,0.750101,-1.146451,-0.040269],
[-0.742745,0.100402,-0.983137,-0.850378,0.989850,0.092861,-0.421487,0.667174,-1.476422,0.029963,0.813277,-0.119641,-1.236394,-0.515380,0.168926,0.184150,-1.298539,1.611950,1.362962,-1.223518,-0.256088,-0.659430,0.130287,1.490409,0.357348,0.100046,-0.440424,0.810685,1.150638,0.688182,0.177537,0.090665,-1.015168,-0.242882,2.150379,-0.200403,-0.384886,-1.039926,1.674866,-0.784288,0.478515,1.329307,0.647336,1.036852,0.015360,1.299368,1.038744,-0.393357,1.739255,0.853390,0.623186,-0.105752,1.466680,0.475028,0.443241,-0.010695,1.233429,1.302974,0.593773,1.787029,0.268053,-0.869979,-1.700570,-2.301185,1.874092,0.462473,0.564390,1.279284,0.786037,0.615560,-0.395321,0.665887,-0.027883,0.612520,-0.508498,-0.363852,0.547452,0.181441,-1.256036,1.333580,-1.287073,0.035241,-1.229566,1.644603,2.084384,-1.018327,0.358190,0.783785,0.990909,1.646456,-0.492347,1.105070,-0.445072,0.732257,-1.958656,-1.104897,-1.372281,-0.728689,1.365443,-0.281505],
[0.572912,-1.112905,-0.437222,-0.424095,-0.577293,1.390198,1.218891,-0.429976,-0.809376,-0.711098,0.137012,0.299881,0.576187,1.181516,0.796552,-0.056014,-0.410115,2.318210,1.148564,-1.084040,-0.807258,0.385522,0.893410,2.148636,-0.513751,0.721376,-1.816264,-0.617132,0.042902,1.722587,-0.976696,1.660189,0.805136,-0.020406,-1.161508,-0.276941,-0.057147,-1.398089,-0.582561,0.384008,0.620631,-1.655171,0.814268,-0.057835,-0.673946,0.531241,0.639814,-0.222340,-1.587151,-0.779175,0.625826,-2.359716,0.255200,-0.586808,-0.400561,-0.832345,1.059862,-1.358675,0.597249,0.034741,1.765786,-0.699984,-0.585549,-0.994563,0.109977,-0.046205,1.156908,-0.346535,1.117178,-1.723877,-1.651466,0.937835,0.438835,0.193465,0.241914,-0.228302,-1.737480,0.030161,-0.269798,-0.926771,-1.553320,1.576796,-0.570628,-0.236055,-0.227784,-0.945054,-0.671933,0.240538,0.066988,1.561130,-0.336589,-1.148196,0.748184,-0.962436,-0.779537,-1.547502,2.074836,-0.648186,-0.632439,-1.227824],
[-0.143680,0.957172,1.640728,-1.029596,-1.278514,0.153271,-1.751493,-1.146584,-0.196892,0.456901,0.403788,0.643104,0.933544,0.200532,-0.464512,0.674326,-1.847284,-0.814667,-0.378804,-0.440653,-0.077400,-0.187585,0.348338,1.129853,-0.086701,0.091444,0.528719,-1.191801,0.664119,-1.273178,-0.765922,-0.653223,-1.287555,-1.440631,-0.001266,-0.975253,-1.728765,0.844010,-0.785796,0.839488,-1.770459,-0.060916,-0.709093,-0.823255,-0.525083,-0.719614,-0.661728,-0.259005,1.013978,0.844147,-1.408990,-0.777909,-0.146252,0.305144,-0.621204,-1.181208,-1.755407,0.359975,-0.294485,0.325478,-1.768180,0.859512,2.205565,0.106741,-1.372973,-1.298474,1.253641,0.081997,-0.540079,1.241964,-1.098188,1.563100,0.800308,-1.093574,-0.408954,-0.824891,-0.296406,0.381845,-0.732456,1.154125,-0.059866,-1.532254,-0.682228,-0.706986,-0.265914,-0.865398,-1.021033,-1.470454,1.993145,-0.758432,0.059077,-0.473563,-2.271209,0.876035,1.224104,-1.434385,1.485758,0.138927,1.959653,1.159358],
[-0.156844,-0.577886,0.530367,-0.349036,0.281387,0.801381,2.945547,-1.873306,-1.127700,-1.077242,1.288723,-1.775968,1.131390,-0.364330,-0.814449,2.557545,-0.899062,0.853069,-2.380476,-1.522343,-1.595641,-1.598445,0.760433,-1.434479,-0.668682,-0.002400,-0.351920,-2.082037,1.577989,-1.126164,-2.372749,2.050284,2.145294,0.780599,-1.142249,0.397122,1.280767,0.248825,-0.594914,0.471760,-0.351909,-0.470341,-0.669992,1.956099,0.711471,-0.107439,-0.170603,-1.262057,1.207766,-0.643000,-1.170419,-0.059081,-0.108548,1.017417,1.200154,-0.766562,-0.554640,1.314540,-0.935959,0.565107,1.627511,-1.296415,-0.827220,1.375143,-1.354662,0.080468,0.309434,-1.756170,-0.920435,0.568060,-1.103339,-0.756307,-0.974407,0.977825,-1.021484,0.610675,-0.369597,1.463439,0.279208,2.135796,0.582646,-0.254342,0.254562,-0.588331,-0.021790,-0.808710,2.498869,0.334877,-0.633251,-0.903983,-0.991926,0.333565,-1.080591,-0.633172,0.576336,0.128015,-0.043598,-0.153634,0.431631,-1.274743],
]
X_test = [
[-0.186647,-0.636795,-0.242266,-1.000994,-1.094656,-0.088627,0.357200,1.236353,-0.657828,-1.081924],
[-2.001022,0.007438,0.545292,1.134810,1.258890,-0.459909,1.365078,-1.397270,-0.864847,0.965412],
[-1.028858,-0.002620,1.980639,0.758663,0.336024,-0.261001,-0.718739,-1.722115,-2.186815,0.701975],
[0.545433,0.056516,0.111102,0.291716,1.496372,0.085050,0.415523,-1.234309,-0.785680,-1.487673],
[0.224813,-1.022040,3.533658,2.245274,-0.665822,-0.009841,0.179097,0.494105,-0.905888,-0.197859],
]
Y_test = [
[-0.294687,-0.689915,-2.285898,-0.938975,0.035156,-0.430063,0.352267,-0.385081,-0.752931,-0.014699],
[-0.222314,0.699612,-0.264101,-0.767007,-0.183959,1.502923,-0.280606,1.081048,0.391797,-0.011185],
[1.801455,0.772426,-0.945537,0.322200,0.057886,0.579556,0.961475,-1.396751,0.404377,-0.063508],
[-0.133797,0.497607,0.310190,0.612367,-0.926959,-0.457190,1.309079,-1.575387,-1.116767,-1.027934],
[-2.243783,0.391663,0.852659,0.069602,2.284313,-0.057675,-1.306810,-0.515741,-1.484789,0.988259],
]
d = [3.105698,9.303920,6.834464,8.535880,6.895867,2.084421,5.802307,6.078630,7.676743,7.889291,1.747867,5.421094,8.056460,5.403059,6.134915,9.260686,11.292909,6.465282,12.659127,6.716904,8.247420,7.677115,2.345364,10.289954,7.556104,9.927747,2.885653,8.667243,10.105910,8.164997,4.403754,10.905269,6.736946,7.881454,9.098149,5.616785,13.511874,8.607833,10.158668,7.828967,6.669338,10.942197,7.102851,12.512170,1.693926,5.316018,6.161766,7.008868,8.568092,13.728702,4.080557,10.282838,6.515821,11.142170,8.083361,4.659479,7.252958,11.903167,9.148000,7.844158,7.144369,12.485157,16.621630,13.365911,10.855162,4.169473,3.658437,6.554199,5.956399,6.189959,15.132870,8.958080,11.450199,6.767207,6.598192,8.818651,8.531837,5.173845,8.337579,10.310235,6.315191,1.352438,12.100806,2.871881,5.391262,5.899694,12.221590,4.330038,5.430671,8.585915,9.817138,8.901824,9.322942,3.233721,4.747448,5.238966,4.640416,5.379597,11.164867,10.616969]
d_test = [7.600672,4.423181,9.997974,8.315172,12.786013,7.426758,11.055029,8.688143,6.585704,4.253190]
# Convert the problem data to numpy arrays. Columns are paired sample points
# and rows are features: X, Y are (5, 100) train / (5, 10) test, and d holds
# the measured distance for each column pair (see the shape printout below).
X = np.array(X)
Y = np.array(Y)
X_test = np.array(X_test)
Y_test = np.array(Y_test)
d = np.array(d)  # measured distances for the training pairs
d_test = np.array(d_test)  # measured distances for the held-out pairs
print('Shapes X,Y,d :', X.shape , Y.shape , d.shape)
print('Shapes X,Y,d test:',X_test.shape, Y_test.shape, d_test.shape)
###Output
Shapes X,Y,d : (5, 100) (5, 100) (100,)
Shapes X,Y,d test: (5, 10) (5, 10) (10,)
###Markdown
Solving
###Code
# Fit a PSD matrix P so that sqrt(delta_i^T P delta_i) matches the measured
# distances d_i. The objective is the mean of the expanded squared residual
# -2 d_i sqrt(L_i) + L_i + d_i^2 with L_i = delta_i^T P delta_i, which is
# convex in P (quad_form is convex for PSD P, and -sqrt of it is convex).
n, m = X.shape
delta = X - Y

P = cp.Variable((n, n), PSD=True)

terms = []
for j in range(m):
    L_j = cp.quad_form(delta[:, j], P)
    terms.append(-2 * d[j] * cp.sqrt(L_j) + L_j + d[j] ** 2)
error = sum(terms) / m

prob = cp.Problem(cp.Minimize(error))
prob.solve()
# Fail loudly if the solver did not reach a certified optimum.
assert prob.status == cp.OPTIMAL

print("P:")
print(P.value)
print('error:', error.value)
def get_error(P, X, Y, d_true):
    """Mean squared residual between metric-predicted and measured distances.

    For each column pair (x_j, y_j) the predicted distance is
    sqrt((x_j - y_j)^T P (x_j - y_j)); the returned value is the mean of
    (predicted - d_true)^2 over all pairs.

    P -- (n, n) matrix defining the (pseudo-)metric
    X, Y -- (n, m) arrays whose columns are the paired points
    d_true -- (m,) measured distances
    """
    diff = X - Y
    # einsum computes diff_j^T P diff_j for every column j in one pass.
    predicted = np.sqrt(np.einsum('ij,ik,kj->j', diff, P, diff))
    return np.mean((predicted - d_true) ** 2)
# Training-set mean squared distance residual of the fitted P.
get_error(P.value, X, Y, d).round(2)
# Held-out test-set residual; comparable magnitude suggests P generalizes.
get_error(P.value, X_test, Y_test, d_test).round(2)
###Output
_____no_output_____ |
notebook/cmx/cmx_sky_ratio.ipynb | ###Markdown
Q: For exposure time calculations, should we be using the (bright sky)/(nominal dark sky) ratio derived at 5000A (b spectrograph) or 6500A (r spectrograph)? In this notebook, I'll measure this sky ratio at 5000A and 6500A for the CMX sky data.

Updates:
- **06/08/2020** sky ratios at more wavelengths, with broader smoothing to erase some of the spectral features
- **06/09/2020** the sky ratios that were brighter at higher wavelengths came from exposures taken during 20200314; I will rerun the sky-ratio wavelength test excluding these exposures.
###Code
import h5py
import numpy as np
from scipy.signal import medfilt
from scipy.signal import medfilt2d
# -- astropy --
import astropy.units as u
# -- desihub --
import desisim.simexp
from desimodel.io import load_throughput
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
# Global matplotlib styling for all figures in this notebook.
# NOTE(review): the key in the commented-out line below ('medfilt2dtex') looks
# garbled in this copy of the notebook -- confirm the intended rcParam.
#mpl.rcParams['medfilt2dtex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
# Nominal dark-sky surface brightness on the full DESI wavelength grid
# (b-arm minimum to z-arm maximum, padded by 10A, 0.8A sampling).
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom

# Build a specsim configuration for that wavelength grid and pull the
# tabulated nominal sky surface brightness from it.
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(config.atmosphere.sky, 'surface_brightness', as_dict=True)

# Read the measured sky surface brightnesses for the CMX BGS exposures,
# materializing every dataset in the HDF5 file into an in-memory dict.
fskies = h5py.File('/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/sky_fibers.cmx.v1.hdf5', 'r')
skies = {key: fskies[key][...] for key in fskies.keys()}
fskies.close()
# Print the non-spectral metadata (everything that is not a wavelength or
# sky-surface-brightness array) for the first sky-fiber entry.
iexp = 0
for key in skies.keys():
    if 'wave' not in key and 'sky_sb' not in key:
        print(key, skies[key][iexp])
# Plot the first entry's measured sky in all three arms on top of the
# nominal dark-sky spectrum (black).
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.scatter(wave, nominal_surface_brightness_dict['dark'], c='k', s=1)
for arm in ['b', 'r', 'z']:
    sub.scatter(skies['wave_%s' % arm], skies['sky_sb_%s' % arm][iexp], s=1)
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('sky surface brightness', fontsize=20)
sub.set_ylim(0., 10)
# Per-exposure sky spectrum for the b and r arms: median over the sky fibers
# of each exposure, then a 501-pixel median filter to suppress sky lines.
expids = np.unique(skies['expid'])

smooth_skies_b = np.array([
    medfilt(np.median(skies['sky_sb_b'][skies['expid'] == e], axis=0), 501)
    for e in expids])
smooth_skies_r = np.array([
    medfilt(np.median(skies['sky_sb_r'][skies['expid'] == e], axis=0), 501)
    for e in expids])
# Overplot the smoothed b/r sky spectra (dashed black) and the smoothed
# nominal dark sky (dashed red) on the raw measurements.
# NOTE(review): smooth_skies_* is indexed by position in `expids` while iexp
# indexes rows of `skies`; both are 0 here, but they coincide only for the
# first entry -- confirm if iexp is ever changed.
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.scatter(wave, nominal_surface_brightness_dict['dark'], c='k', s=1)
sub.plot(wave, medfilt(nominal_surface_brightness_dict['dark'], 501), c='r', ls='--')
for arm in ['b', 'r', 'z']:
    sub.scatter(skies['wave_%s' % arm], skies['sky_sb_%s' % arm][iexp], s=1)
sub.plot(skies['wave_b'], smooth_skies_b[iexp], c='k', ls='--')
sub.plot(skies['wave_r'], smooth_skies_r[iexp], c='k', ls='--')
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('sky surface brightness', fontsize=20)
sub.set_ylim(0., 10)
# Sky ratio (per-exposure smoothed sky / smoothed nominal dark sky) at a few
# fiducial wavelengths, each evaluated as a median in a +/-5A window.
waves = [4000, 5000, 6000, 7000]
sky_ratios = []
for w in waves:
    # Pick the arm whose wavelength coverage includes w: b first, then r.
    if w < skies['wave_b'].max():
        _wave, _smooth_skies = skies['wave_b'], smooth_skies_b
    elif w < skies['wave_r'].max():
        _wave, _smooth_skies = skies['wave_r'], smooth_skies_r
    else:
        raise ValueError
    # Nominal dark-sky level near w, from the median-filtered spectrum.
    nom_near_wave = np.median(medfilt(nominal_surface_brightness_dict['dark'], 501)[(wave.value > w-5.) & (wave.value < w+5.)])
    near_wave = (_wave > w-5.) & (_wave < w+5.)
    sky_ratios.append(np.median(_smooth_skies[:, near_wave], axis=1) / nom_near_wave)
# Three panels: sky ratio at 4000A (y) against the ratio at each of the
# other wavelengths (x), with the one-to-one line for reference.
fig = plt.figure(figsize=(18,6))
for i, w_other in enumerate(waves[1:]):
    sub = fig.add_subplot(1,3,i+1)
    sub.scatter(sky_ratios[i+1], sky_ratios[0])
    sub.plot([0., 6.], [0., 6.], c='k', ls='--')
    sub.set_xlabel('sky ratio at %i' % w_other, fontsize=20)
    sub.set_xlim(0., 6)
    if i == 0:
        sub.set_ylabel('sky ratio at %iA' % waves[0], fontsize=20)
    sub.set_ylim(0., 6)
# Sky ratio at 5000A (y, waves[1]) against the ratio at 7000A (x, waves[3]),
# with the one-to-one line for reference.
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.scatter(sky_ratios[3], sky_ratios[1])
sub.plot([0., 6.], [0., 6.], c='k', ls='--')
sub.set_xlabel('sky ratio at %iA' % waves[3], fontsize=20)
sub.set_xlim(0., 6)
sub.set_ylabel('sky ratio at %iA' % waves[1], fontsize=20)
sub.set_ylim(0., 6)
###Output
_____no_output_____
###Markdown
Exposures taken during 2020/03/14
###Code
# Date (one per unique exposure) of each entry in `expids`, used to flag
# the exposures taken during the night of 2020/03/14.
dates = [skies['date'][skies['expid'] == e][0] for e in expids]
subset = (np.array(dates) == 20200314)

# Same 5000A-vs-7000A ratio plot, highlighting the 20200314 exposures.
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.scatter(sky_ratios[3], sky_ratios[1])
sub.scatter(np.array(sky_ratios[3])[subset], np.array(sky_ratios[1])[subset], c='C1')
sub.plot([0., 6.], [0., 6.], c='k', ls='--')
sub.set_xlabel('sky ratio at %iA' % waves[3], fontsize=20)
sub.set_xlim(0., 6)
sub.set_ylabel('sky ratio at %iA' % waves[1], fontsize=20)
sub.set_ylim(0., 6)
# Same 5000A-vs-7000A ratio plot, highlighting (orange) the exposures whose
# 7000A sky ratio exceeds 5x nominal.
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.scatter(sky_ratios[3], sky_ratios[1])
sub.scatter(np.array(sky_ratios[3])[sky_ratios[3] > 5], np.array(sky_ratios[1])[sky_ratios[3] > 5], c='C1')
sub.plot([0., 6.], [0., 6.], c='k', ls='--')
sub.set_xlabel('sky ratio at %iA' % waves[3], fontsize=20)
sub.set_xlim(0., 6)
sub.set_ylabel('sky ratio at %iA' % waves[1], fontsize=20)
sub.set_ylim(0., 6)
# Median sky spectra of the exposures whose 7000A sky ratio exceeds 5x
# nominal, overplotted on the nominal dark sky (black) and its smoothed
# version (dashed red). One color per exposure.
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.scatter(wave, nominal_surface_brightness_dict['dark'], c='k', s=1)
sub.plot(wave, medfilt(nominal_surface_brightness_dict['dark'], 501), c='r', ls='--')
for i, expid in enumerate(expids[sky_ratios[3] > 5]):
    isexp = (skies['expid'] == expid)
    for band in ['b', 'r', 'z']:
        # 'C%i' named colors only exist for indices 0-9, so wrap the index
        # (the equivalent plot later in the notebook already does this);
        # without the wrap this raises for an 11th highlighted exposure.
        sub.scatter(skies['wave_%s' % band], np.median(skies['sky_sb_%s' % band][isexp], axis=0), s=1, c='C%i' % (i % 10))
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('sky surface brightness', fontsize=20)
sub.set_ylim(0., 10)
# Select the "blue-bright" exposures: 4000A ratio above 2, 7000A ratio below
# 3, and bluer than red (ratio at 4000A exceeds the ratio at 7000A).
subset = (sky_ratios[0] > 2) & (sky_ratios[3] < 3) & (sky_ratios[0] > sky_ratios[3])
# Same 5000A-vs-7000A ratio plot, highlighting that subset in orange.
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.scatter(sky_ratios[3], sky_ratios[1])
sub.scatter(np.array(sky_ratios[3])[subset], np.array(sky_ratios[1])[subset], c='C1')
sub.plot([0., 6.], [0., 6.], c='k', ls='--')
sub.set_xlabel('sky ratio at %iA' % waves[3], fontsize=20)
sub.set_xlim(0., 6)
sub.set_ylabel('sky ratio at %iA' % waves[1], fontsize=20)
sub.set_ylim(0., 6)
# Median sky spectra of the exposures selected by `subset` (high 4000A but
# modest 7000A ratio), on top of the nominal dark sky and its smoothed
# version. Color index wraps at 10 since matplotlib only defines C0-C9.
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.scatter(wave, nominal_surface_brightness_dict['dark'], c='k', s=1)
sub.plot(wave, medfilt(nominal_surface_brightness_dict['dark'], 501), c='r', ls='--')
for i, expid in enumerate(expids[subset]):
    isexp = (skies['expid'] == expid)
    for arm in ['b', 'r', 'z']:
        sub.scatter(skies['wave_%s' % arm], np.median(skies['sky_sb_%s' % arm][isexp], axis=0), s=1, c='C%i' % (i % 10))
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('sky surface brightness', fontsize=20)
sub.set_ylim(0., 10)
###Output
_____no_output_____ |
Convolutional Neural Networks/Week 2/Assignments/Residual+Networks+-+v2.ipynb | ###Markdown
Residual NetworksWelcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible.**In this assignment, you will:**- Implement the basic building blocks of ResNets. - Put together these building blocks to implement and train a state-of-the-art neural network for image classification. This assignment will be done in Keras. Before jumping into the problem, let's run the cell below to load the required packages.
###Code
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
import keras.backend as K
# Image tensors are (batch, height, width, channels).
K.set_image_data_format('channels_last')
# Put the Keras backend in training phase (1) globally.
K.set_learning_phase(1)
###Output
Using TensorFlow backend.
###Markdown
1 - The problem of very deep neural networksLast week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers.The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the lower layers) to very complex features (at the deeper layers). However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent unbearably slow. More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and "explode" to take very large values). During training, you might therefore see the magnitude (or norm) of the gradient for the earlier layers descrease to zero very rapidly as training proceeds: **Figure 1** : **Vanishing gradient** The speed of learning decreases very rapidly for the early layers as the network trains You are now going to solve this problem by building a Residual Network! 2 - Building a Residual NetworkIn ResNets, a "shortcut" or a "skip connection" allows the gradient to be directly backpropagated to earlier layers: **Figure 2** : A ResNet block showing a **skip-connection** The image on the left shows the "main path" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. We also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. 
This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function--even more than skip connections helping with vanishing gradients--accounts for ResNets' remarkable performance.)Two main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them. 2.1 - The identity blockThe identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps: **Figure 3** : **Identity block.** Skip connection "skips over" 2 layers. The upper path is the "shortcut path." The lower path is the "main path." In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! In this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection "skips over" 3 hidden layers rather than 2 layers. It looks like this: **Figure 4** : **Identity block.** Skip connection "skips over" 3 layers.Here're the individual steps.First component of main path: - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. - The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. 
Second component of main path:- The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is "same" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. - The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Third component of main path:- The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. - The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. Final step: - The shortcut and the input are added together.- Then apply the ReLU activation function. This has no name and no hyperparameters. **Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read over this carefully to make sure you understand what it is doing. You should implement the rest. - To implement the Conv2D step: [See reference](https://keras.io/layers/convolutional/conv2d)- To implement BatchNorm: [See reference](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the channels axis))- For the activation, use: `Activation('relu')(X)`- To add the value passed forward by the shortcut: [See reference](https://keras.io/layers/merge/add)
###Code
# GRADED FUNCTION: identity_block

def identity_block(X, f, filters, stage, block):
    """Build a ResNet identity block (skip connection over three conv layers).

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, window size of the middle CONV on the main path
    filters -- list of three integers, the filter counts of the main-path CONVs
    stage -- integer, names the layers by their position in the network
    block -- string/character, names the layers by their position in the network

    Returns:
    X -- output tensor of shape (n_H, n_W, n_C)
    """
    # Layer-name prefixes, shared by all conv / batch-norm layers in the block.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    F1, F2, F3 = filters

    # Remember the input: it is added back after the main path (the "skip").
    X_shortcut = X

    # Main path, component 1: 1x1 conv -> batch norm -> ReLU.
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Component 2: fxf conv with 'same' padding -> batch norm -> ReLU.
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Component 3: 1x1 conv -> batch norm; no activation before the addition.
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Add the shortcut onto the main path, then apply the final ReLU.
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
# Smoke test: run the identity block once on random data in inference mode
# (learning_phase 0) and print a slice of the output for comparison with the
# expected values.
tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a')
    test.run(tf.global_variables_initializer())
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
###Output
out = [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003]
###Markdown
**Expected Output**: **out** [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003] 2.2 - The convolutional blockYou've implemented the ResNet identity block. Next, the ResNet "convolutional block" is the other type of block. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path: **Figure 4** : **Convolutional block** The CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. The details of the convolutional block are as follows. First component of main path:- The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '2a'`. - The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Second component of main path:- The second CONV2D has $F_2$ filters of (f,f) and a stride of (1,1). Its padding is "same" and it's name should be `conv_name_base + '2b'`.- The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`.- Then apply the ReLU activation function. This has no name and no hyperparameters. Third component of main path:- The third CONV2D has $F_3$ filters of (1,1) and a stride of (1,1). 
Its padding is "valid" and it's name should be `conv_name_base + '2c'`.- The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. Shortcut path:- The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '1'`.- The BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '1'`. Final step: - The shortcut and the main path values are added together.- Then apply the ReLU activation function. This has no name and no hyperparameters. **Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader.- [Conv Hint](https://keras.io/layers/convolutional/conv2d)- [BatchNorm Hint](https://keras.io/layers/normalization/batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))- For the activation, use: `Activation('relu')(X)`- [Addition Hint](https://keras.io/layers/merge/add)
###Code
# GRADED FUNCTION: convolutional_block
def convolutional_block(X, f, filters, stage, block, s = 2):
    """
    Implementation of the convolutional block as defined in Figure 4

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    s -- Integer, specifying the stride to be used

    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """
    # Layer-name prefixes follow the ResNet paper convention, e.g. 'res2a_branch2a'.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Unpack the per-layer filter counts for the three main-path convolutions.
    F1, F2, F3 = filters

    # Keep a handle on the block input; the shortcut path transforms it separately.
    shortcut = X

    ##### MAIN PATH #####
    # First component: 1x1 conv with stride s performs the spatial down-sampling.
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), name=conv_name_base + '2a',
               padding="valid", kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component: fxf conv, stride 1, "same" padding keeps the spatial size.
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), name=conv_name_base + '2b',
               padding="same", kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component: 1x1 conv expands to F3 channels; no ReLU before the addition.
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), name=conv_name_base + '2c',
               padding="valid", kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    ##### SHORTCUT PATH #####
    # Linear 1x1 conv with stride s resizes the input so both paths' shapes match.
    shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), name=conv_name_base + '1',
                      padding="valid", kernel_initializer=glorot_uniform(seed=0))(shortcut)
    shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(shortcut)

    # Final step: add the two paths, then apply the closing ReLU.
    X = Add()([X, shortcut])
    X = Activation('relu')(X)

    return X
# Smoke-test convolutional_block on a tiny random batch (TF1 graph mode).
tf.reset_default_graph()
with tf.Session() as sess:
    np.random.seed(1)
    input_ph = tf.placeholder("float", [3, 4, 4, 6])
    batch = np.random.randn(3, 4, 4, 6)
    block_out = convolutional_block(input_ph, f=2, filters=[2, 4, 6], stage=1, block='a')
    sess.run(tf.global_variables_initializer())
    # learning_phase 0 -> inference mode, so BatchNorm uses its moving statistics.
    values = sess.run([block_out], feed_dict={input_ph: batch, K.learning_phase(): 0})
    print("out = " + str(values[0][1][1][0]))
###Output
out = [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603]
###Markdown
**Expected Output**: **out** [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603] 3 - Building your first ResNet model (50 layers)You now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. "ID BLOCK" in the diagram stands for "Identity block," and "ID BLOCK x3" means you should stack 3 identity blocks together. **Figure 5** : **ResNet-50 model** The details of this ResNet-50 model are:- Zero-padding pads the input with a pad of (3,3)- Stage 1: - The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2). Its name is "conv1". - BatchNorm is applied to the channels axis of the input. - MaxPooling uses a (3,3) window and a (2,2) stride.- Stage 2: - The convolutional block uses three set of filters of size [64,64,256], "f" is 3, "s" is 1 and the block is "a". - The 2 identity blocks use three set of filters of size [64,64,256], "f" is 3 and the blocks are "b" and "c".- Stage 3: - The convolutional block uses three set of filters of size [128,128,512], "f" is 3, "s" is 2 and the block is "a". - The 3 identity blocks use three set of filters of size [128,128,512], "f" is 3 and the blocks are "b", "c" and "d".- Stage 4: - The convolutional block uses three set of filters of size [256, 256, 1024], "f" is 3, "s" is 2 and the block is "a". - The 5 identity blocks use three set of filters of size [256, 256, 1024], "f" is 3 and the blocks are "b", "c", "d", "e" and "f".- Stage 5: - The convolutional block uses three set of filters of size [512, 512, 2048], "f" is 3, "s" is 2 and the block is "a". - The 2 identity blocks use three set of filters of size [512, 512, 2048], "f" is 3 and the blocks are "b" and "c".- The 2D Average Pooling uses a window of shape (2,2) and its name is "avg_pool".- The flatten doesn't have any hyperparameters or name.- The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation. 
Its name should be `'fc' + str(classes)`.**Exercise**: Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2.) Make sure you follow the naming convention in the text above. You'll need to use this function: - Average pooling [see reference](https://keras.io/layers/pooling/averagepooling2d)Here're some other functions we used in the code below:- Conv2D: [See reference](https://keras.io/layers/convolutional/conv2d)- BatchNorm: [See reference](https://keras.io/layers/normalization/batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis))- Zero padding: [See reference](https://keras.io/layers/convolutional/zeropadding2d)- Max pooling: [See reference](https://keras.io/layers/pooling/maxpooling2d)- Fully conected layer: [See reference](https://keras.io/layers/core/dense)- Addition: [See reference](https://keras.io/layers/merge/add)
###Code
# GRADED FUNCTION: ResNet50
def ResNet50(input_shape = (64, 64, 3), classes = 6):
    """
    Implementation of the popular ResNet50 the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """
    # Symbolic input tensor for the whole network.
    X_input = Input(input_shape)

    # Pad the input with a border of 3 pixels of zeros on each side.
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1: 7x7 conv (stride 2) -> BatchNorm -> ReLU -> 3x3 max-pool (stride 2).
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2: one convolutional block (s=1, no down-sampling) + 2 identity blocks.
    X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    for blk in ('b', 'c'):
        X = identity_block(X, 3, [64, 64, 256], stage=2, block=blk)

    # Stage 3: convolutional block (s=2 halves H and W) + 3 identity blocks.
    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    for blk in ('b', 'c', 'd'):
        X = identity_block(X, 3, [128, 128, 512], stage=3, block=blk)

    # Stage 4: convolutional block (s=2) + 5 identity blocks.
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)
    for blk in ('b', 'c', 'd', 'e', 'f'):
        X = identity_block(X, 3, [256, 256, 1024], stage=4, block=blk)

    # Stage 5: convolutional block (s=2) + 2 identity blocks.
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)
    for blk in ('b', 'c'):
        X = identity_block(X, 3, [512, 512, 2048], stage=5, block=blk)

    # 2x2 average pooling before the classifier head.
    X = AveragePooling2D((2, 2), name="avg_pool")(X)

    # Output layer: flatten, then a softmax over `classes` categories.
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer=glorot_uniform(seed=0))(X)

    # Wrap the graph into a Keras Model.
    model = Model(inputs=X_input, outputs=X, name='ResNet50')

    return model
###Output
_____no_output_____
###Markdown
Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below.
###Code
model = ResNet50(input_shape = (64, 64, 3), classes = 6)
###Output
_____no_output_____
###Markdown
As seen in the Keras Tutorial Notebook, prior to training a model, you need to configure the learning process by compiling the model.
###Code
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
The model is now ready to be trained. The only thing you need is a dataset. Let's load the SIGNS Dataset. **Figure 6** : **SIGNS dataset**
###Code
# Load the SIGNS dataset and prepare train/test tensors for Keras.
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Scale pixel intensities from [0, 255] down to [0, 1].
X_train = X_train_orig/255.
X_test = X_test_orig/255.

# One-hot encode the labels over 6 classes; transpose to shape (m, 6).
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T

print("number of training examples = {}".format(X_train.shape[0]))
print("number of test examples = {}".format(X_test.shape[0]))
print("X_train shape: {}".format(X_train.shape))
print("Y_train shape: {}".format(Y_train.shape))
print("X_test shape: {}".format(X_test.shape))
print("Y_test shape: {}".format(Y_test.shape))
###Output
number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 6)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 6)
###Markdown
Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch.
###Code
model.fit(X_train, Y_train, epochs = 2, batch_size = 32)
###Output
Epoch 1/2
1080/1080 [==============================] - 272s - loss: 3.1091 - acc: 0.2454
Epoch 2/2
1080/1080 [==============================] - 254s - loss: 2.2991 - acc: 0.3315
###Markdown
**Expected Output**: ** Epoch 1/2** loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours. ** Epoch 2/2** loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing. Let's see how this model (trained on only two epochs) performs on the test set.
###Code
# Evaluate the 2-epoch model on the held-out test set.
preds = model.evaluate(X_test, Y_test)
# evaluate() returns [loss, accuracy] in the order given at compile time.
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
###Output
120/120 [==============================] - 9s
Loss = 2.23173121611
Test Accuracy = 0.166666666667
###Markdown
**Expected Output**: **Test Accuracy** between 0.16 and 0.25 For the purpose of this assignment, we've asked you to train the model only for two epochs. You can see that it achieves poor performance. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well. After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get much better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU. Using a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take ≈1min to load the model.
###Code
# Load a fully trained ResNet-50 from disk (replaces the 2-epoch model) and re-evaluate.
model = load_model('ResNet50.h5')
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
###Output
120/120 [==============================] - 9s
Loss = 0.530178320408
Test Accuracy = 0.866666662693
###Markdown
ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy.Congratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system! 4 - Test on your own image (Optional/Ungraded) If you wish, you can also take a picture of your own hand and see the output of the model. To do this: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the following code 4. Run the code and check if the algorithm is right!
###Code
# Path to the user-supplied test image (replace with your own file).
img_path = 'images/my_image.jpg'
# Resize to the 64x64 input resolution the model was trained on.
img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
# Add a leading batch dimension: (64, 64, 3) -> (1, 64, 64, 3).
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — on newer
# environments use imageio.imread (or matplotlib's imread) instead.
my_image = scipy.misc.imread(img_path)
imshow(my_image)
print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ")
print(model.predict(x))
###Output
Input image shape: (1, 64, 64, 3)
class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] =
[[ 1. 0. 0. 0. 0. 0.]]
###Markdown
You can also print a summary of your model by running the following code.
###Code
model.summary()
###Output
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 64, 64, 3) 0
____________________________________________________________________________________________________
zero_padding2d_1 (ZeroPadding2D) (None, 70, 70, 3) 0 input_1[0][0]
____________________________________________________________________________________________________
conv1 (Conv2D) (None, 32, 32, 64) 9472 zero_padding2d_1[0][0]
____________________________________________________________________________________________________
bn_conv1 (BatchNormalization) (None, 32, 32, 64) 256 conv1[0][0]
____________________________________________________________________________________________________
activation_4 (Activation) (None, 32, 32, 64) 0 bn_conv1[0][0]
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 15, 15, 64) 0 activation_4[0][0]
____________________________________________________________________________________________________
res2a_branch2a (Conv2D) (None, 15, 15, 64) 4160 max_pooling2d_1[0][0]
____________________________________________________________________________________________________
bn2a_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2a[0][0]
____________________________________________________________________________________________________
activation_5 (Activation) (None, 15, 15, 64) 0 bn2a_branch2a[0][0]
____________________________________________________________________________________________________
res2a_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_5[0][0]
____________________________________________________________________________________________________
bn2a_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2a_branch2b[0][0]
____________________________________________________________________________________________________
activation_6 (Activation) (None, 15, 15, 64) 0 bn2a_branch2b[0][0]
____________________________________________________________________________________________________
res2a_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_6[0][0]
____________________________________________________________________________________________________
res2a_branch1 (Conv2D) (None, 15, 15, 256) 16640 max_pooling2d_1[0][0]
____________________________________________________________________________________________________
bn2a_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2a_branch2c[0][0]
____________________________________________________________________________________________________
bn2a_branch1 (BatchNormalization (None, 15, 15, 256) 1024 res2a_branch1[0][0]
____________________________________________________________________________________________________
add_2 (Add) (None, 15, 15, 256) 0 bn2a_branch2c[0][0]
bn2a_branch1[0][0]
____________________________________________________________________________________________________
activation_7 (Activation) (None, 15, 15, 256) 0 add_2[0][0]
____________________________________________________________________________________________________
res2b_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_7[0][0]
____________________________________________________________________________________________________
bn2b_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2a[0][0]
____________________________________________________________________________________________________
activation_8 (Activation) (None, 15, 15, 64) 0 bn2b_branch2a[0][0]
____________________________________________________________________________________________________
res2b_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_8[0][0]
____________________________________________________________________________________________________
bn2b_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2b_branch2b[0][0]
____________________________________________________________________________________________________
activation_9 (Activation) (None, 15, 15, 64) 0 bn2b_branch2b[0][0]
____________________________________________________________________________________________________
res2b_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_9[0][0]
____________________________________________________________________________________________________
bn2b_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2b_branch2c[0][0]
____________________________________________________________________________________________________
add_3 (Add) (None, 15, 15, 256) 0 bn2b_branch2c[0][0]
activation_7[0][0]
____________________________________________________________________________________________________
activation_10 (Activation) (None, 15, 15, 256) 0 add_3[0][0]
____________________________________________________________________________________________________
res2c_branch2a (Conv2D) (None, 15, 15, 64) 16448 activation_10[0][0]
____________________________________________________________________________________________________
bn2c_branch2a (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2a[0][0]
____________________________________________________________________________________________________
activation_11 (Activation) (None, 15, 15, 64) 0 bn2c_branch2a[0][0]
____________________________________________________________________________________________________
res2c_branch2b (Conv2D) (None, 15, 15, 64) 36928 activation_11[0][0]
____________________________________________________________________________________________________
bn2c_branch2b (BatchNormalizatio (None, 15, 15, 64) 256 res2c_branch2b[0][0]
____________________________________________________________________________________________________
activation_12 (Activation) (None, 15, 15, 64) 0 bn2c_branch2b[0][0]
____________________________________________________________________________________________________
res2c_branch2c (Conv2D) (None, 15, 15, 256) 16640 activation_12[0][0]
____________________________________________________________________________________________________
bn2c_branch2c (BatchNormalizatio (None, 15, 15, 256) 1024 res2c_branch2c[0][0]
____________________________________________________________________________________________________
add_4 (Add) (None, 15, 15, 256) 0 bn2c_branch2c[0][0]
activation_10[0][0]
____________________________________________________________________________________________________
activation_13 (Activation) (None, 15, 15, 256) 0 add_4[0][0]
____________________________________________________________________________________________________
res3a_branch2a (Conv2D) (None, 8, 8, 128) 32896 activation_13[0][0]
____________________________________________________________________________________________________
bn3a_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3a_branch2a[0][0]
____________________________________________________________________________________________________
activation_14 (Activation) (None, 8, 8, 128) 0 bn3a_branch2a[0][0]
____________________________________________________________________________________________________
res3a_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_14[0][0]
____________________________________________________________________________________________________
bn3a_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3a_branch2b[0][0]
____________________________________________________________________________________________________
activation_15 (Activation) (None, 8, 8, 128) 0 bn3a_branch2b[0][0]
____________________________________________________________________________________________________
res3a_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_15[0][0]
____________________________________________________________________________________________________
res3a_branch1 (Conv2D) (None, 8, 8, 512) 131584 activation_13[0][0]
____________________________________________________________________________________________________
bn3a_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3a_branch2c[0][0]
____________________________________________________________________________________________________
bn3a_branch1 (BatchNormalization (None, 8, 8, 512) 2048 res3a_branch1[0][0]
____________________________________________________________________________________________________
add_5 (Add) (None, 8, 8, 512) 0 bn3a_branch2c[0][0]
bn3a_branch1[0][0]
____________________________________________________________________________________________________
activation_16 (Activation) (None, 8, 8, 512) 0 add_5[0][0]
____________________________________________________________________________________________________
res3b_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_16[0][0]
____________________________________________________________________________________________________
bn3b_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2a[0][0]
____________________________________________________________________________________________________
activation_17 (Activation) (None, 8, 8, 128) 0 bn3b_branch2a[0][0]
____________________________________________________________________________________________________
res3b_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_17[0][0]
____________________________________________________________________________________________________
bn3b_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3b_branch2b[0][0]
____________________________________________________________________________________________________
activation_18 (Activation) (None, 8, 8, 128) 0 bn3b_branch2b[0][0]
____________________________________________________________________________________________________
res3b_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_18[0][0]
____________________________________________________________________________________________________
bn3b_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3b_branch2c[0][0]
____________________________________________________________________________________________________
add_6 (Add) (None, 8, 8, 512) 0 bn3b_branch2c[0][0]
activation_16[0][0]
____________________________________________________________________________________________________
activation_19 (Activation) (None, 8, 8, 512) 0 add_6[0][0]
____________________________________________________________________________________________________
res3c_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_19[0][0]
____________________________________________________________________________________________________
bn3c_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2a[0][0]
____________________________________________________________________________________________________
activation_20 (Activation) (None, 8, 8, 128) 0 bn3c_branch2a[0][0]
____________________________________________________________________________________________________
res3c_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_20[0][0]
____________________________________________________________________________________________________
bn3c_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3c_branch2b[0][0]
____________________________________________________________________________________________________
activation_21 (Activation) (None, 8, 8, 128) 0 bn3c_branch2b[0][0]
____________________________________________________________________________________________________
res3c_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_21[0][0]
____________________________________________________________________________________________________
bn3c_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3c_branch2c[0][0]
____________________________________________________________________________________________________
add_7 (Add) (None, 8, 8, 512) 0 bn3c_branch2c[0][0]
activation_19[0][0]
____________________________________________________________________________________________________
activation_22 (Activation) (None, 8, 8, 512) 0 add_7[0][0]
____________________________________________________________________________________________________
res3d_branch2a (Conv2D) (None, 8, 8, 128) 65664 activation_22[0][0]
____________________________________________________________________________________________________
bn3d_branch2a (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2a[0][0]
____________________________________________________________________________________________________
activation_23 (Activation) (None, 8, 8, 128) 0 bn3d_branch2a[0][0]
____________________________________________________________________________________________________
res3d_branch2b (Conv2D) (None, 8, 8, 128) 147584 activation_23[0][0]
____________________________________________________________________________________________________
bn3d_branch2b (BatchNormalizatio (None, 8, 8, 128) 512 res3d_branch2b[0][0]
____________________________________________________________________________________________________
activation_24 (Activation) (None, 8, 8, 128) 0 bn3d_branch2b[0][0]
____________________________________________________________________________________________________
res3d_branch2c (Conv2D) (None, 8, 8, 512) 66048 activation_24[0][0]
____________________________________________________________________________________________________
bn3d_branch2c (BatchNormalizatio (None, 8, 8, 512) 2048 res3d_branch2c[0][0]
____________________________________________________________________________________________________
add_8 (Add) (None, 8, 8, 512) 0 bn3d_branch2c[0][0]
activation_22[0][0]
____________________________________________________________________________________________________
activation_25 (Activation) (None, 8, 8, 512) 0 add_8[0][0]
____________________________________________________________________________________________________
res4a_branch2a (Conv2D) (None, 4, 4, 256) 131328 activation_25[0][0]
____________________________________________________________________________________________________
bn4a_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2a[0][0]
____________________________________________________________________________________________________
activation_26 (Activation) (None, 4, 4, 256) 0 bn4a_branch2a[0][0]
____________________________________________________________________________________________________
res4a_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_26[0][0]
____________________________________________________________________________________________________
bn4a_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4a_branch2b[0][0]
____________________________________________________________________________________________________
activation_27 (Activation) (None, 4, 4, 256) 0 bn4a_branch2b[0][0]
____________________________________________________________________________________________________
res4a_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_27[0][0]
____________________________________________________________________________________________________
res4a_branch1 (Conv2D) (None, 4, 4, 1024) 525312 activation_25[0][0]
____________________________________________________________________________________________________
bn4a_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4a_branch2c[0][0]
____________________________________________________________________________________________________
bn4a_branch1 (BatchNormalization (None, 4, 4, 1024) 4096 res4a_branch1[0][0]
____________________________________________________________________________________________________
add_9 (Add) (None, 4, 4, 1024) 0 bn4a_branch2c[0][0]
bn4a_branch1[0][0]
____________________________________________________________________________________________________
activation_28 (Activation) (None, 4, 4, 1024) 0 add_9[0][0]
____________________________________________________________________________________________________
res4b_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_28[0][0]
____________________________________________________________________________________________________
bn4b_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2a[0][0]
____________________________________________________________________________________________________
activation_29 (Activation) (None, 4, 4, 256) 0 bn4b_branch2a[0][0]
____________________________________________________________________________________________________
res4b_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_29[0][0]
____________________________________________________________________________________________________
bn4b_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4b_branch2b[0][0]
____________________________________________________________________________________________________
activation_30 (Activation) (None, 4, 4, 256) 0 bn4b_branch2b[0][0]
____________________________________________________________________________________________________
res4b_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_30[0][0]
____________________________________________________________________________________________________
bn4b_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4b_branch2c[0][0]
____________________________________________________________________________________________________
add_10 (Add) (None, 4, 4, 1024) 0 bn4b_branch2c[0][0]
activation_28[0][0]
____________________________________________________________________________________________________
activation_31 (Activation) (None, 4, 4, 1024) 0 add_10[0][0]
____________________________________________________________________________________________________
res4c_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_31[0][0]
____________________________________________________________________________________________________
bn4c_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2a[0][0]
____________________________________________________________________________________________________
activation_32 (Activation) (None, 4, 4, 256) 0 bn4c_branch2a[0][0]
____________________________________________________________________________________________________
res4c_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_32[0][0]
____________________________________________________________________________________________________
bn4c_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4c_branch2b[0][0]
____________________________________________________________________________________________________
activation_33 (Activation) (None, 4, 4, 256) 0 bn4c_branch2b[0][0]
____________________________________________________________________________________________________
res4c_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_33[0][0]
____________________________________________________________________________________________________
bn4c_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4c_branch2c[0][0]
____________________________________________________________________________________________________
add_11 (Add) (None, 4, 4, 1024) 0 bn4c_branch2c[0][0]
activation_31[0][0]
____________________________________________________________________________________________________
activation_34 (Activation) (None, 4, 4, 1024) 0 add_11[0][0]
____________________________________________________________________________________________________
res4d_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_34[0][0]
____________________________________________________________________________________________________
bn4d_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2a[0][0]
____________________________________________________________________________________________________
activation_35 (Activation) (None, 4, 4, 256) 0 bn4d_branch2a[0][0]
____________________________________________________________________________________________________
res4d_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_35[0][0]
____________________________________________________________________________________________________
bn4d_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4d_branch2b[0][0]
____________________________________________________________________________________________________
activation_36 (Activation) (None, 4, 4, 256) 0 bn4d_branch2b[0][0]
____________________________________________________________________________________________________
res4d_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_36[0][0]
____________________________________________________________________________________________________
bn4d_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4d_branch2c[0][0]
____________________________________________________________________________________________________
add_12 (Add) (None, 4, 4, 1024) 0 bn4d_branch2c[0][0]
activation_34[0][0]
____________________________________________________________________________________________________
activation_37 (Activation) (None, 4, 4, 1024) 0 add_12[0][0]
____________________________________________________________________________________________________
res4e_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_37[0][0]
____________________________________________________________________________________________________
bn4e_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2a[0][0]
____________________________________________________________________________________________________
activation_38 (Activation) (None, 4, 4, 256) 0 bn4e_branch2a[0][0]
____________________________________________________________________________________________________
res4e_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_38[0][0]
____________________________________________________________________________________________________
bn4e_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4e_branch2b[0][0]
____________________________________________________________________________________________________
activation_39 (Activation) (None, 4, 4, 256) 0 bn4e_branch2b[0][0]
____________________________________________________________________________________________________
res4e_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_39[0][0]
____________________________________________________________________________________________________
bn4e_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4e_branch2c[0][0]
____________________________________________________________________________________________________
add_13 (Add) (None, 4, 4, 1024) 0 bn4e_branch2c[0][0]
activation_37[0][0]
____________________________________________________________________________________________________
activation_40 (Activation) (None, 4, 4, 1024) 0 add_13[0][0]
____________________________________________________________________________________________________
res4f_branch2a (Conv2D) (None, 4, 4, 256) 262400 activation_40[0][0]
____________________________________________________________________________________________________
bn4f_branch2a (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2a[0][0]
____________________________________________________________________________________________________
activation_41 (Activation) (None, 4, 4, 256) 0 bn4f_branch2a[0][0]
____________________________________________________________________________________________________
res4f_branch2b (Conv2D) (None, 4, 4, 256) 590080 activation_41[0][0]
____________________________________________________________________________________________________
bn4f_branch2b (BatchNormalizatio (None, 4, 4, 256) 1024 res4f_branch2b[0][0]
____________________________________________________________________________________________________
activation_42 (Activation) (None, 4, 4, 256) 0 bn4f_branch2b[0][0]
____________________________________________________________________________________________________
res4f_branch2c (Conv2D) (None, 4, 4, 1024) 263168 activation_42[0][0]
____________________________________________________________________________________________________
bn4f_branch2c (BatchNormalizatio (None, 4, 4, 1024) 4096 res4f_branch2c[0][0]
____________________________________________________________________________________________________
add_14 (Add) (None, 4, 4, 1024) 0 bn4f_branch2c[0][0]
activation_40[0][0]
____________________________________________________________________________________________________
activation_43 (Activation) (None, 4, 4, 1024) 0 add_14[0][0]
____________________________________________________________________________________________________
res5a_branch2a (Conv2D) (None, 2, 2, 512) 524800 activation_43[0][0]
____________________________________________________________________________________________________
bn5a_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2a[0][0]
____________________________________________________________________________________________________
activation_44 (Activation) (None, 2, 2, 512) 0 bn5a_branch2a[0][0]
____________________________________________________________________________________________________
res5a_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_44[0][0]
____________________________________________________________________________________________________
bn5a_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5a_branch2b[0][0]
____________________________________________________________________________________________________
activation_45 (Activation) (None, 2, 2, 512) 0 bn5a_branch2b[0][0]
____________________________________________________________________________________________________
res5a_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_45[0][0]
____________________________________________________________________________________________________
res5a_branch1 (Conv2D) (None, 2, 2, 2048) 2099200 activation_43[0][0]
____________________________________________________________________________________________________
bn5a_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5a_branch2c[0][0]
____________________________________________________________________________________________________
bn5a_branch1 (BatchNormalization (None, 2, 2, 2048) 8192 res5a_branch1[0][0]
____________________________________________________________________________________________________
add_15 (Add) (None, 2, 2, 2048) 0 bn5a_branch2c[0][0]
bn5a_branch1[0][0]
____________________________________________________________________________________________________
activation_46 (Activation) (None, 2, 2, 2048) 0 add_15[0][0]
____________________________________________________________________________________________________
res5b_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_46[0][0]
____________________________________________________________________________________________________
bn5b_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2a[0][0]
____________________________________________________________________________________________________
activation_47 (Activation) (None, 2, 2, 512) 0 bn5b_branch2a[0][0]
____________________________________________________________________________________________________
res5b_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_47[0][0]
____________________________________________________________________________________________________
bn5b_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5b_branch2b[0][0]
____________________________________________________________________________________________________
activation_48 (Activation) (None, 2, 2, 512) 0 bn5b_branch2b[0][0]
____________________________________________________________________________________________________
res5b_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_48[0][0]
____________________________________________________________________________________________________
bn5b_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5b_branch2c[0][0]
____________________________________________________________________________________________________
add_16 (Add) (None, 2, 2, 2048) 0 bn5b_branch2c[0][0]
activation_46[0][0]
____________________________________________________________________________________________________
activation_49 (Activation) (None, 2, 2, 2048) 0 add_16[0][0]
____________________________________________________________________________________________________
res5c_branch2a (Conv2D) (None, 2, 2, 512) 1049088 activation_49[0][0]
____________________________________________________________________________________________________
bn5c_branch2a (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2a[0][0]
____________________________________________________________________________________________________
activation_50 (Activation) (None, 2, 2, 512) 0 bn5c_branch2a[0][0]
____________________________________________________________________________________________________
res5c_branch2b (Conv2D) (None, 2, 2, 512) 2359808 activation_50[0][0]
____________________________________________________________________________________________________
bn5c_branch2b (BatchNormalizatio (None, 2, 2, 512) 2048 res5c_branch2b[0][0]
____________________________________________________________________________________________________
activation_51 (Activation) (None, 2, 2, 512) 0 bn5c_branch2b[0][0]
____________________________________________________________________________________________________
res5c_branch2c (Conv2D) (None, 2, 2, 2048) 1050624 activation_51[0][0]
____________________________________________________________________________________________________
bn5c_branch2c (BatchNormalizatio (None, 2, 2, 2048) 8192 res5c_branch2c[0][0]
____________________________________________________________________________________________________
add_17 (Add) (None, 2, 2, 2048) 0 bn5c_branch2c[0][0]
activation_49[0][0]
____________________________________________________________________________________________________
activation_52 (Activation) (None, 2, 2, 2048) 0 add_17[0][0]
____________________________________________________________________________________________________
avg_pool (AveragePooling2D) (None, 1, 1, 2048) 0 activation_52[0][0]
____________________________________________________________________________________________________
flatten_1 (Flatten) (None, 2048) 0 avg_pool[0][0]
____________________________________________________________________________________________________
fc6 (Dense) (None, 6) 12294 flatten_1[0][0]
====================================================================================================
Total params: 23,600,006
Trainable params: 23,546,886
Non-trainable params: 53,120
____________________________________________________________________________________________________
###Markdown
Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png".
###Code
plot_model(model, to_file='model.png')  # write a PNG diagram of the network architecture to disk
SVG(model_to_dot(model).create(prog='dot', format='svg'))  # render the same graph inline as SVG via graphviz's 'dot'
###Output
_____no_output_____ |
notebooks_vacios/Clase1c_ElSaltoDeLaRana.ipynb | ###Markdown
El juego del salto de la rana Objetivos del ejercicio En todo curso de programación se enseñan tarde o temprano las funciones, y este por supuesto no ha sido una excepción. Lo que realmente diferencia un curso de programación de un curso de algorítmica o de métodos numéricos es como se enseñan dichas funciones, o más importante, como se saca partido de las mismas.En ese sentido, La armada americana (US Navy) ya introdujo en 1960 el principio KISS
###Code
Image(filename='../static/kiss.jpg')  # illustration of the KISS principle mentioned above
###Output
_____no_output_____
###Markdown
Principio que de alguna forma toma prestado el zen de Python. Desafortunadamente, ese principio no siempre se cumple, y a menudo es debido a una deficiente o incluso negligente formación.Bien es cierto, y es de hecho el origen del problema, que el principio KISS para un profano en programación simplemente carece de significado. Por eso, durante este ejercicio vamos a intentar demostrar de forma práctica las ventajas de tomar como propio ese principio, usando un paradigma que yo he dado en describir como:**“_Programar como humanos, no como máquinas_** Descripción del juego El juego es realmente un acertijo. Supongamos que tenemos un tablero como el siguiente:
###Code
Image(filename='../static/juego_rana_001.png')  # starting board: 7 slots, red pieces on the left, blue on the right
###Output
_____no_output_____
###Markdown
Un tablero con 7 huecos, en los que los 3 huecos de la izquierda contienen fichas rojas, mientras que los 3 huecos de la derecha contienen fichas azules.El objetivo del juego/acertijo, es conseguir que todas las fichas rojas ocupen las posiciones de las fichas azules, y viceversa. Para alcanzar el objetivo las fichas rojas sólo pueden mover hacia la derecha mientras que las fichas azules sólo pueden mover hacia la izquierda.Los movimientos permitidos son los siguientes:* Se puede mover una ficha una única casilla hasta el hueco, que por supuesto deberá ser contiguo:
###Code
Image(filename='../static/juego_rana_002.png')  # move type 1: slide one piece into the adjacent empty slot
###Output
_____no_output_____
###Markdown
* O se permite avanzar dos casillas con una ficha, saltando otra ficha del color contrario:
###Code
Image(filename='../static/juego_rana_003.png')  # move type 2: jump two slots over one piece of the opposite colour
###Output
_____no_output_____ |
Resistor Calculations/Resistors in Parallel.ipynb | ###Markdown
Contents* [Total resistance of resistors in parallel](#Total-resistance-of-resistors-in-parallel) Total resistance of resistors in parallel. Calculate the total resistance of resistors in parallel.
###Code
import sys
import os

# Make the project root importable so the shared helper module can be loaded.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

import common_util

# Collect resistor values interactively; an empty entry ends the input loop.
# Entries may be plain numbers ("100") or coded values ("1.2k") that
# common_util knows how to decode.
entries = []
while True:
    raw = input("Resistor " + str(len(entries) + 1) + ": ")
    if raw.strip() == '':
        break
    entries.append(raw)

if len(entries) > 0:
    # Fold the entries into a running parallel combination, seeded with the
    # first value: R_total = (R_total * R) / (R_total + R).
    total = 0
    for entry in entries:
        if common_util.is_float_number(entry):
            value = float(entry)
        else:
            value = common_util.decode_resistor_value(entry)
        if total == 0:
            total = value
        else:
            total = (total * value) / (value + total)
    print("Total resistance = " + common_util.format_resistor_value(total))
###Output
Resistor 1: 100
Resistor 2: 220
Resistor 3: 1.2k
Resistor 4:
Total resistance = 65.024631Ω
|
Course1_Neural-Networks-and-Deep-Learning/Week2/Logistic-Regression-with-Neural-Network/Logistic+Regression+with+a+Neural+Network+mindset+v5.ipynb | ###Markdown
Logistic Regression with a Neural Network mindsetWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.**Instructions:**- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.**You will learn to:**- Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. 1 - Packages First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
###Code
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
###Output
_____no_output_____
###Markdown
2 - Overview of the Problem set **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.Let's get more familiar with the dataset. Load the data by running the following code.
###Code
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()  # raw image arrays, 0/1 label rows, and the class names
###Output
_____no_output_____
###Markdown
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
###Code
# Example of a picture
index = 20
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
###Output
y = [0], it's a 'non-cat' picture.
###Markdown
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. **Exercise:** Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image)Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
###Code
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]  # number of training examples (first axis of the image array)
m_test = test_set_x_orig.shape[0]  # number of test examples
num_px = train_set_x_orig.shape[1]  # images are square, so height == width == num_px
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
###Output
Number of training examples: m_train = 209
Number of testing examples: m_test = 50
Height/Width of each image: num_px = 64
Each image is of size: (64, 64, 3)
train_set_x shape: (209, 64, 64, 3)
train_set_y shape: (1, 209)
test_set_x shape: (50, 64, 64, 3)
test_set_y shape: (1, 50)
###Markdown
**Expected Output for m_train, m_test and num_px**: **m_train** 209 **m_test** 50 **num_px** 64 For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```pythonX_flatten = X.reshape(X.shape[0], -1).T X.T is the transpose of X```
###Code
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
# reshape(m, -1) flattens each (num_px, num_px, 3) image into one row; the
# transpose makes each image a column, giving shape (num_px*num_px*3, m).
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
###Output
train_set_x_flatten shape: (12288, 209)
train_set_y shape: (1, 209)
test_set_x_flatten shape: (12288, 50)
test_set_y shape: (1, 50)
sanity check after reshaping: [17 31 56 22 33]
###Markdown
**Expected Output**: **train_set_x_flatten shape** (12288, 209) **train_set_y shape** (1, 209) **test_set_x_flatten shape** (12288, 50) **test_set_y shape** (1, 50) **sanity check after reshaping** [17 31 56 22 33] To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). Let's standardize our dataset.
###Code
train_set_x = train_set_x_flatten/255.  # 255 is the max pixel-channel value: rescales the data to [0, 1]
test_set_x = test_set_x_flatten/255.
###Output
_____no_output_____
###Markdown
**What you need to remember:**Common steps for pre-processing a new dataset are:- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)- "Standardize" the data 3 - General Architecture of the learning algorithm It's time to design a simple algorithm to distinguish cat images from non-cat images.You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!****Mathematical expression of the algorithm**:For one example $x^{(i)}$:$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$The cost is then computed by summing over all training examples:$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$**Key steps**:In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude 4 - Building the parts of our algorithm The main steps for building a Neural Network are:1. Define the model structure (such as number of input features) 2. Initialize the model's parameters3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent)You often build 1-3 separately and integrate them into one function we call `model()`. 4.1 - Helper functions**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
###Code
# GRADED FUNCTION: sigmoid

def sigmoid(z):
    """Return the logistic sigmoid 1 / (1 + e^{-z}), applied element-wise.

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z), same shape as z.
    """
    # np.exp broadcasts, so scalars and arrays are handled identically.
    return 1.0 / (1.0 + np.exp(-z))

print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
###Output
sigmoid([0, 2]) = [ 0.5 0.88079708]
###Markdown
**Expected Output**: **sigmoid([0, 2])** [ 0.5 0.88079708] 4.2 - Initializing parameters**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
###Code
# GRADED FUNCTION: initialize_with_zeros

def initialize_with_zeros(dim):
    """Build the zero-initialized parameters of the model.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    # Zero initialization is fine for logistic regression: there is no
    # symmetry to break as there would be in a multi-layer network.
    w, b = np.zeros((dim, 1)), 0

    # Guard the contract promised to callers.
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))

    return w, b

dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
###Output
w = [[ 0.]
[ 0.]]
b = 0
###Markdown
**Expected Output**: ** w ** [[ 0.] [ 0.]] ** b ** 0 For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1). 4.3 - Forward and Backward propagationNow that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.**Hints**:Forward Propagation:- You get X- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$Here are the two formulas you will be using: $$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
###Code
# GRADED FUNCTION: propagate

def propagate(w, b, X, Y):
    """One forward and backward pass for logistic regression.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    grads -- dictionary with "dw" (same shape as w) and "db" (a scalar),
             the gradients of the cost with respect to w and b
    cost -- negative log-likelihood cost for logistic regression
    """
    m = X.shape[1]

    # FORWARD: activations, then the cross-entropy cost averaged over the batch.
    A = sigmoid(np.dot(w.T, X) + b)
    cost = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m

    # BACKWARD: analytic gradients; A - Y appears in both, compute it once.
    residual = A - Y
    dw = np.dot(X, residual.T) / m
    db = np.sum(residual) / m

    # Shape/type sanity checks on what we return.
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())

    return {"dw": dw, "db": db}, cost

w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
###Output
dw = [[ 0.99845601]
[ 2.39507239]]
db = 0.00145557813678
cost = 5.801545319394553
###Markdown
**Expected Output**: ** dw ** [[ 0.99845601] [ 2.39507239]] ** db ** 0.00145557813678 ** cost ** 5.801545319394553 4.4 - Optimization- You have initialized your parameters.- You are also able to compute a cost function and its gradient.- Now, you want to update the parameters using gradient descent.**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
###Code
# GRADED FUNCTION: optimize

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """Learn w and b with plain (batch) gradient descent.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of the costs recorded every 100 iterations, used to plot the learning curve
    """
    costs = []

    for i in range(num_iterations):
        # Forward/backward pass at the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]

        # Gradient-descent step (rebinds, so callers' arrays are not mutated).
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record (and optionally report) the cost every 100 iterations.
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(i, cost))

    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}

    return params, grads, costs

params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)

print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
###Output
w = [[ 0.19033591]
[ 0.12259159]]
b = 1.92535983008
dw = [[ 0.67752042]
[ 1.41625495]]
db = 0.219194504541
###Markdown
**Expected Output**: **w** [[ 0.19033591] [ 0.12259159]] **b** 1.92535983008 **dw** [[ 0.67752042] [ 1.41625495]] **db** 0.219194504541 **Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$2. Convert the entries of a into 0 (if activation 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
###Code
# GRADED FUNCTION: predict
def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)
    # Probabilities that each example is a cat: sigmoid of the linear scores.
    A = sigmoid(np.dot(w.T, X) + b)
    # Vectorized thresholding at 0.5 replaces the per-example Python loop:
    # entries become 1.0 where A > 0.5 and 0.0 elsewhere (same float dtype as
    # the np.zeros initialization the loop version filled in).
    Y_prediction = (A > 0.5).astype(float)
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
# Sanity-check predict() on a small hand-built example (expected: [[1. 1. 0.]]).
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
###Output
predictions = [[ 1. 1. 0.]]
###Markdown
**Expected Output**: **predictions** [[ 1. 1. 0.]] **What to remember:**You've implemented several functions that:- Initialize (w,b)- Optimize the loss iteratively to learn parameters (w,b): - computing the cost and its gradient - updating the parameters using gradient descent- Use the learned (w,b) to predict the labels for a given set of examples 5 - Merge all functions into a model You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.**Exercise:** Implement the model function. Use the following notation: - Y_prediction_test for your predictions on the test set - Y_prediction_train for your predictions on the train set - w, costs, grads for the outputs of optimize()
###Code
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Train and evaluate the logistic-regression classifier end to end.

    Arguments:
    X_train -- training features, numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels, numpy array of shape (1, m_train)
    X_test -- test features, numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels, numpy array of shape (1, m_test)
    num_iterations -- number of gradient-descent steps taken by optimize()
    learning_rate -- step size used in the gradient-descent update
    print_cost -- if True, print the cost every 100 iterations

    Returns:
    d -- dictionary with the recorded costs, the train/test predictions and the
         learned parameters w and b.
    """
    # Start from all-zero parameters sized to the feature dimension.
    w, b = initialize_with_zeros(X_train.shape[0])
    # Fit the parameters by gradient descent.
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w, b = parameters["w"], parameters["b"]
    # Label both data splits with the learned parameters.
    Y_prediction_train = predict(w, b, X_train)
    Y_prediction_test = predict(w, b, X_test)
    # Accuracy = 100% minus the mean absolute prediction error (labels are 0/1).
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    return {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations,
    }
###Output
_____no_output_____
###Markdown
Run the following cell to train your model.
###Code
# Train the full model on the cat/non-cat dataset loaded earlier in the notebook.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
###Output
Cost after iteration 0: 0.693147
Cost after iteration 100: 0.584508
Cost after iteration 200: 0.466949
Cost after iteration 300: 0.376007
Cost after iteration 400: 0.331463
Cost after iteration 500: 0.303273
Cost after iteration 600: 0.279880
Cost after iteration 700: 0.260042
Cost after iteration 800: 0.242941
Cost after iteration 900: 0.228004
Cost after iteration 1000: 0.214820
Cost after iteration 1100: 0.203078
Cost after iteration 1200: 0.192544
Cost after iteration 1300: 0.183033
Cost after iteration 1400: 0.174399
Cost after iteration 1500: 0.166521
Cost after iteration 1600: 0.159305
Cost after iteration 1700: 0.152667
Cost after iteration 1800: 0.146542
Cost after iteration 1900: 0.140872
train accuracy: 99.04306220095694 %
test accuracy: 70.0 %
###Markdown
**Expected Output**: **Cost after iteration 0 ** 0.693147 $\vdots$ $\vdots$ **Train Accuracy** 99.04306220095694 % **Test Accuracy** 70.0 % **Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week! Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
###Code
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
# Cast the predicted label to int before indexing `classes`: indexing a numpy
# array with a float is deprecated (this is the DeprecationWarning shown in the
# cell output) and becomes an error in newer numpy releases.
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[int(d["Y_prediction_test"][0,index])].decode("utf-8") + "\" picture.")
###Output
/opt/conda/lib/python3.5/site-packages/ipykernel/__main__.py:4: DeprecationWarning: using a non-integer number instead of an integer will result in an error in the future
###Markdown
Let's also plot the cost function and the gradients.
###Code
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')  # costs were recorded every 100 iterations
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
###Output
_____no_output_____
###Markdown
**Interpretation**:You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. 6 - Further analysis (optional/ungraded exercise) Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. Choice of learning rate **Reminder**:In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
###Code
# Compare convergence for several learning rates; `models` maps the rate
# (as a string) to the fitted results returned by model().
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print ("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
    print ('\n' + "-------------------------------------------------------" + '\n')
# Overlay the learning curves, one line per learning rate.
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
###Output
learning rate is: 0.01
train accuracy: 99.52153110047847 %
test accuracy: 68.0 %
-------------------------------------------------------
learning rate is: 0.001
train accuracy: 88.99521531100478 %
test accuracy: 64.0 %
-------------------------------------------------------
learning rate is: 0.0001
train accuracy: 68.42105263157895 %
test accuracy: 36.0 %
-------------------------------------------------------
###Markdown
**Interpretation**: - Different learning rates give different costs and thus different predictions results.- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.- In deep learning, we usually recommend that you: - Choose the learning rate that better minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) 7 - Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg"   # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
# NOTE(review): scipy.ndimage.imread and scipy.misc.imresize were removed in
# SciPy >= 1.2; on modern environments this cell needs imageio/PIL instead.
image = np.array(ndimage.imread(fname, flatten=False))
# Resize to the training resolution and flatten to a (num_px*num_px*3, 1) column.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
y = 0.0, your algorithm predicts a "non-cat" picture.
|
notebooks/readme_example.ipynb | ###Markdown
Validation skconfig creates a DSL for defining the search space for a sklearn model. For example, we can defined a LogRegressionValidator as follows:
###Code
class LogRegressionValidator(BaseValidator):
    # Search-space declaration for sklearn's LogisticRegression in the
    # skconfig DSL: each class attribute names an estimator hyper-parameter
    # and declares its admissible values.
    estimator = LogisticRegression
    penalty = StringParam("l2", "l1")
    dual = BoolParam()
    tol = FloatIntervalParam(lower=0, include_lower=False)
    C = FloatIntervalParam(lower=0)
    fit_intercept = BoolParam()
    intercept_scaling = FloatIntervalParam(lower=0, include_lower=False)
    class_weight = NoneParam()
    random_state = UnionParam(IntParam(), NoneParam())
    solver = StringParam("newton-cg", "lbfgs", "liblinear", "sag", "saga", "warn")
    max_iter = IntIntervalParam(lower=1)
    multi_class = StringParam("ovr", "multinomial", "auto", "warn")
    verbose = IntParam()
    warm_start = BoolParam()
    n_jobs = UnionParam(NoneParam(), IntIntervalParam(lower=-1))
    # Parameter combinations declared invalid: l1 penalty with the
    # newton-cg/sag/lbfgs solvers, and liblinear with multinomial targets.
    forbiddens = [
        ForbiddenAnd([ForbiddenEquals("penalty", "l1"), ForbiddenIn("solver", ["newton-cg", "sag", "lbfgs"])]),
        ForbiddenAnd([ForbiddenEquals("solver", "liblinear"), ForbiddenEquals("multi_class", "multinomial")]),
    ]
###Output
_____no_output_____
###Markdown
With this validator object, we can validate a set of parameters:
###Code
validator = LogRegressionValidator()
validator.validate_params(multi_class="ovr") # Does not raise
# NOTE(review): the calls below use invalid or forbidden combinations and are
# presumably meant to demonstrate validation errors -- confirm each raises.
validator.validate_params(penalty="hello world")
validator.validate_params(solver="liblinear", multi_class="multinomial")
validator.validate_params(penalty="l1", solver="sag")
params_dict = {"penalty": "l1", "solver": "sag"}
validator.validate_params(**params_dict)
###Output
_____no_output_____
###Markdown
Or validate a estimator:
###Code
# Validate an already-constructed estimator instance against the search space.
est = LogisticRegression()
validator.validate_estimator(est)
###Output
_____no_output_____
###Markdown
SamplingTo sample the parameter space, a skconfig has a DSL for defining the distribution to be sampled from:
###Code
# Attach a sampling distribution to each hyper-parameter to be explored;
# parameters not listed here keep their estimator defaults.
validator = LogRegressionValidator()
sampler = Sampler(validator,
    dual=UniformBoolDistribution(),
    C=UniformFloatDistribution(0.0, 1.0),
    solver=CategoricalDistribution(["newton-cg", "lbfgs", "liblinear", "sag", "saga"]),
    random_state=UnionDistribution(ConstantDistribution(None), UniformIntDistribution(0, 10)),
    penalty=CategoricalDistribution(["l2", "l1"]),
    multi_class=CategoricalDistribution(["ovr", "multinomial"])
)
# Draw 5 parameter dictionaries from the joint distribution.
params_sample = sampler.sample(5)
params_sample
###Output
_____no_output_____
###Markdown
Create an estimator from the first param from params_sample
###Code
# Build an estimator from the first sampled parameter set.
est = LogisticRegression(**params_sample[0])
est.get_params()
###Output
_____no_output_____
###Markdown
SerializationThe sampler can be serialized into a json:
###Code
import json
from IPython.display import JSON
# Round-trip the sampler through its JSON representation.
serialized = sampler.to_dict()
json_serialized = json.dumps(serialized, indent=2)
JSON(serialized)
sampler_dict = json.loads(json_serialized)
sampler_new = Sampler(validator).from_dict(sampler_dict)
sampler_new
###Output
_____no_output_____ |
examples/iv_using-formulas.ipynb | ###Markdown
Using formulas to specify models Basic UsageFormulas provide an alternative method to specify a model. The formulas used here utilize [patsy](http://patsy.readthedocs.io/en/latest/) are similar to those in [statsmodels](http://www.statsmodels.org), although they use an enhanced syntax to allow identification of endogenous regressors. The basis formula syntax for a single variable regression would be```y ~ 1 + x```where the `1` indicates that a constant should be included and `x` is the regressor. In the context of an instrumental variables model, it is necessary to mark variables as endogenous and to provide a list of instruments that are included only in the model for the endogenous variables. In a basic single regressor model, this would be specified using `[]` to surround an inner model.```y ~ 1 + [x ~ z]```In this expression, `x` is now marked as endogenous and `z` is an instrument. Any exogenous variable will automatically be used when instrumenting `x` so there is no need to repeat these here (in this example, the "first stage" would include a constant and z). Multiple Endogenous VariablesMultiple endogenous variables are specified in a similar manner. The basic concept is that any model can be expressed as ```dep ~ exog + [ endog ~ instruments]```and it must be the case that ```dep ~ exog + endog```and```dep ~ exog + instruments```are valid patsy formulas. This means that multiple endogenous regressors or instruments should be joined with `+`, but that the first endogenous or first instrument should not have a leading `+`. A simple example with 2 endogenous variables and 3 instruments would be ```y ~ 1 + x1 + x2 + x3 + [ x4 + x5 ~ z1 + z2 + z3]```In this example, the "submodels" `y ~ 1 + x1 + x2 +x3 + x4 + x5` and `y ~ 1 + x1 + x2 + x3 + z1 + z2 +z3` are both valid patsy expressions. Standard patsyAside from this change, the standard rules of patsy apply, and so it is possible to use mathematical expression or other patsy-specific features. 
See the [patsy quickstart](http://patsy.readthedocs.io/en/latest/quickstart.html) for some examples of what is possible. MEPS dataThis example shows the use of formulas to estimate both IV and OLS models using the [medical expenditure panel survey](https://meps.ahrq.gov). The model measures the effect of various characteristics on the log of drug expenditure and instruments the variable that measures where a subject was insured through a union with their social security to income ratio.This first block imports the data and numpy.
###Code
import numpy as np
from linearmodels.datasets import meps
from linearmodels.iv import IV2SLS
# Load the Medical Expenditure Panel Survey data and drop incomplete rows.
data = meps.load()
data = data.dropna()
print(meps.DESCR)
###Output
_____no_output_____
###Markdown
Estimating a model with a formulaThis model uses a formula which is input using the `from_formula` interface. Unlike direct initialization, this interface takes the formula and a DataFrame containing the data necessary to evaluate the formula.
###Code
# hi_empunion is the endogenous regressor; ssiratio is its excluded instrument.
formula = (
    "ldrugexp ~ 1 + totchr + female + age + linc + blhisp + [hi_empunion ~ ssiratio]"
)
mod = IV2SLS.from_formula(formula, data)
iv_res = mod.fit(cov_type="robust")
print(iv_res)
###Output
_____no_output_____
###Markdown
Mathematical expression in formulasStandard patsy expression, such as using mathematical expressions, can be readily used.
###Code
# Same IV model, with the dependent variable transformed inside the formula.
formula = (
    "np.log(drugexp) ~ 1 + totchr + age + linc + blhisp + [hi_empunion ~ ssiratio]"
)
mod = IV2SLS.from_formula(formula, data)
iv_res2 = mod.fit(cov_type="robust")
###Output
_____no_output_____
###Markdown
OLSOmitting the block that marks a variable as endogenous will produce OLS -- just like using `None` for both `endog` and `instruments`.
###Code
# No endogenous block in the formula, so IV2SLS reduces to OLS.
formula = "ldrugexp ~ 1 + totchr + female + age + linc + blhisp + hi_empunion"
ols = IV2SLS.from_formula(formula, data)
ols_res = ols.fit(cov_type="robust")
print(ols_res)
###Output
_____no_output_____
###Markdown
Comparing resultsThe function `compare` can be used to compare the result of multiple models. Here dropping `female` from the IV regression improves the $R^2$.
###Code
from linearmodels.iv import compare
# Side-by-side summary table of the three fitted models.
print(compare({"IV": iv_res, "OLS": ols_res, "IV-formula": iv_res2}))
###Output
_____no_output_____
###Markdown
Using formulas to specify models Basic UsageFormulas provide an alternative method to specify a model. The formulas used here utilize [formulaic](https://github.com/matthewwardrop/formulaic/) ([documentation](https://matthewwardrop.github.io/formulaic/)) are similar to those in [statsmodels](http://www.statsmodels.org), although they use an enhanced syntax to allow identification of endogenous regressors. The basis formula syntax for a single variable regression would be```y ~ 1 + x```where the `1` indicates that a constant should be included and `x` is the regressor. In the context of an instrumental variables model, it is necessary to mark variables as endogenous and to provide a list of instruments that are included only in the model for the endogenous variables. In a basic single regressor model, this would be specified using `[]` to surround an inner model.```y ~ 1 + [x ~ z]```In this expression, `x` is now marked as endogenous and `z` is an instrument. Any exogenous variable will automatically be used when instrumenting `x` so there is no need to repeat these here (in this example, the "first stage" would include a constant and z). Multiple Endogenous VariablesMultiple endogenous variables are specified in a similar manner. The basic concept is that any model can be expressed as ```dep ~ exog + [ endog ~ instruments]```and it must be the case that ```dep ~ exog + endog```and```dep ~ exog + instruments```are valid formulaic formulas. This means that multiple endogenous regressors or instruments should be joined with `+`, but that the first endogenous or first instrument should not have a leading `+`. A simple example with 2 endogenous variables and 3 instruments would be```y ~ 1 + x1 + x2 + x3 + [ x4 + x5 ~ z1 + z2 + z3]```In this example, the "submodels" `y ~ 1 + x1 + x2 +x3 + x4 + x5` and `y ~ 1 + x1 + x2 + x3 + z1 + z2 +z3` are both valid formulaic expressions. 
Standard formulaicAside from this change, the standard rules of formulaic apply, and so it is possible to use mathematical expression or other formulaic-specific features. See the [formulaic quickstart](https://matthewwardrop.github.io/formulaic/guides/quickstart/) for some examples of what is possible. MEPS dataThis example shows the use of formulas to estimate both IV and OLS models using the [medical expenditure panel survey](https://meps.ahrq.gov). The model measures the effect of various characteristics on the log of drug expenditure and instruments the variable that measures where a subject was insured through a union with their social security to income ratio.This first block imports the data and numpy.
###Code
import numpy as np
from linearmodels.datasets import meps
from linearmodels.iv import IV2SLS
# Load the Medical Expenditure Panel Survey data and drop incomplete rows.
data = meps.load()
data = data.dropna()
print(meps.DESCR)
###Output
_____no_output_____
###Markdown
Estimating a model with a formulaThis model uses a formula which is input using the `from_formula` interface. Unlike direct initialization, this interface takes the formula and a DataFrame containing the data necessary to evaluate the formula.
###Code
# hi_empunion is the endogenous regressor; ssiratio is its excluded instrument.
formula = (
    "ldrugexp ~ 1 + totchr + female + age + linc + blhisp + [hi_empunion ~ ssiratio]"
)
mod = IV2SLS.from_formula(formula, data)
iv_res = mod.fit(cov_type="robust")
print(iv_res)
###Output
_____no_output_____
###Markdown
Mathematical expression in formulasStandard formulaic syntax, such as using mathematical expressions, can be readily used.
###Code
# Same IV model, with the dependent variable transformed inside the formula.
formula = (
    "np.log(drugexp) ~ 1 + totchr + age + linc + blhisp + [hi_empunion ~ ssiratio]"
)
mod = IV2SLS.from_formula(formula, data)
iv_res2 = mod.fit(cov_type="robust")
###Output
_____no_output_____
###Markdown
OLSOmitting the block that marks a variable as endogenous will produce OLS -- just like using `None` for both `endog` and `instruments`.
###Code
# No endogenous block in the formula, so IV2SLS reduces to OLS.
formula = "ldrugexp ~ 1 + totchr + female + age + linc + blhisp + hi_empunion"
ols = IV2SLS.from_formula(formula, data)
ols_res = ols.fit(cov_type="robust")
print(ols_res)
###Output
_____no_output_____
###Markdown
Comparing resultsThe function `compare` can be used to compare the result of multiple models. Here dropping `female` from the IV regression improves the $R^2$.
###Code
from linearmodels.iv import compare
# Side-by-side summary table of the three fitted models.
print(compare({"IV": iv_res, "OLS": ols_res, "IV-formula": iv_res2}))
###Output
_____no_output_____ |
IoTFuzzyThreeState/IoT_Python_Fuzzy_SMS.ipynb | ###Markdown
Crisp output for inputs of temp and gas('temp=', 71.1, ' ,gas=', 1.34)66.73498401885462 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
###Code
# if-else code for IoTFuzzyFourStateTALKBACK
# NOTE(review): the two triple-quoted strings below are inert (never executed)
# reference snippets.  In the first one, the second `elif` repeats the
# t1..t2 condition, so its branch would be unreachable -- presumably it was
# meant to test a t2..t3 band; confirm before reusing this code.
'''
if crispO/P<=t1:
requests.put(url,data=dataAlarmOn1)
#client.send_message({'from': 'Nexmo', 'to': '971563201593', 'text': 'Emergency Shutdown Reqd'})
elif crispO/P>t1 and crispO/P<=t2:
requests.put(url,data=dataAlarmOn2)
#client.send_message({'from': 'Nexmo', 'to': '971563201593', 'text': 'Situation:Critical'})
elif crispO/P>t1 and crispO/P<=t2:
requests.put(url,data=dataAlarmOn3)
client.send_message({'from': 'Nexmo', 'to': '971563201593', 'text': 'Inspection Regd'})
else:
requests.put(url,data=dataAlarmOff)'''
# A demo showing only the fuzzy inference system computing output based on sample input values of temperature and gas
'''
#inputs
tempField_data=30
gasField_data=0.5
FgcsIot.input['temp_sense']=tempField_data
FgcsIot.input['gas_sense']=gasField_data*100
#computation
FgcsIot.compute()
#df2.ix[3,6]=FgcsIot.output['fire_gasLeak_sense']
#output
crispOP=FgcsIot.output['fire_gasLeak_sense'] #Crisp output
print(crispOP)
fire_gasLeak_sense.view(sim=FgcsIot)
'''
###Output
45.0
|
data/compare_data/compare_data.ipynb | ###Markdown
Compare Spectra Plot spectrum of transitional disks, separate laterfig=plt.figure(figsize=(18,8))Disks wavelengths within range of 4.645 - 4.785ax1=fig.add_subplot(211)Transitional Disksax1.plot(citau_wave, citau_flux, label='Ci Tau')ax1.plot(lkha330_wave, lkha330_flux, label='LkHa 330')ax1.plot(twhya_wave, twhya_flux, label='TW Hya')ax1.plot(doar44_wave, doar44_flux, label='DoAr 44')ax1.plot(hd135344_wave, hd135344_flux, label='HD 135344')ax1.plot(uxtau_wave, uxtau_flux, label='UX Tau')Classical Disksax1.plot(doar24_wave, doar24_flux, label='DoAr 24')ax1.plot(dftau_wave, dftau_flux, label='DF Tau')ax1.plot(dltau_wave, dltau_flux, label='DL Tau')Rangeax1.set_xlim(4.645,4.785)ax1.set_ylim(-0.5,4.5)for i,mywave in enumerate(hitran_data['wave']): if( (mywave>4.645) & (mywave<4.785) ): ax1.axvline(mywave,color='C1') ax1.text(hitran_data['wave'][i],4.2,hitran_data['Qpp'][i].strip()) Labelsax1.set_ylabel('Flux [Jy]',fontsize=14)ax1.legend(loc="upper center", bbox_to_anchor=(0.5, 1.6), ncol=2)ax1.set_title('Composite Lineshape of Classical Disks')Disks wavelengths within range of 4.95 - 5.10ax2=fig.add_subplot(212)Transitional Disksax2.plot(citau_wave, citau_flux, label='Ci Tau')ax2.plot(lkha330_wave, lkha330_flux, label='LkHa 330')ax2.plot(twhya_wave, twhya_flux, label='TW Hya')ax2.plot(doar44_wave, doar44_flux, label='DoAr 44')ax2.plot(hd135344_wave, hd135344_flux, label='HD 135344')ax2.plot(uxtau_wave, uxtau_flux, label='UX Tau')Classical Disksax2.plot(doar24_wave, doar24_flux, label='DoAr 24')ax2.plot(dftau_wave, dftau_flux, label='DF Tau')ax2.plot(dltau_wave, dltau_flux, label='DL Tau')ax2.set_xlim(4.95,5.10)ax2.set_ylim(-0.5,4.5)for i,mywave in enumerate(hitran_data['wave']): if( (mywave>4.95) & (mywave<5.10) ): ax2.axvline(mywave,color='C1') ax2.text(hitran_data['wave'][i],4.2,hitran_data['Qpp'][i].strip())ax2.set_xlabel('Wavelength [$\mu$m]',fontsize=14)ax2.set_ylabel('Flux [Jy]',fontsize=14)ax2.legend(loc="upper center", bbox_to_anchor=(0.5, 1.6), 
ncol=2)ax2.set_title('Composite Lineshape of Classical Disks')
###Code
# Plot the transitional- and classical-disk spectra over the two CO wavelength
# windows.  The three figures below (combined, transitional-only,
# classical-only) previously duplicated ~40 lines of plotting code each; they
# now share two helpers.  This also fixes a copy-paste bug: the combined
# figure was titled 'Composite Lineshape of Classical Disks' even though it
# shows both disk classes.

# (wavelength, flux, label) for each disk, loaded earlier in the notebook.
transitional_spectra = [
    (citau_wave, citau_flux, 'Ci Tau'),
    (lkha330_wave, lkha330_flux, 'LkHa 330'),
    (twhya_wave, twhya_flux, 'TW Hya'),
    (doar44_wave, doar44_flux, 'DoAr 44'),
    (hd135344_wave, hd135344_flux, 'HD 135344'),
    (uxtau_wave, uxtau_flux, 'UX Tau'),
]
classical_spectra = [
    (doar24_wave, doar24_flux, 'DoAr 24'),
    (dftau_wave, dftau_flux, 'DF Tau'),
    (dltau_wave, dltau_flux, 'DL Tau'),
]


def _plot_spectra_panel(ax, spectra, wave_range, title, show_legend, show_xlabel):
    """Plot one wavelength window of disk spectra, marking HITRAN lines.

    spectra is a list of (wave, flux, label, color) tuples; color None means
    "use the default matplotlib color cycle".
    """
    for wave, flux, label, color in spectra:
        if color is None:
            ax.plot(wave, flux, label=label)
        else:
            ax.plot(wave, flux, label=label, color=color)
    ax.set_xlim(*wave_range)
    ax.set_ylim(-0.5, 4.5)
    # Mark and annotate every HITRAN transition inside the plotted window.
    for i, mywave in enumerate(hitran_data['wave']):
        if wave_range[0] < mywave < wave_range[1]:
            ax.axvline(mywave, color='C1')
            ax.text(hitran_data['wave'][i], 4.2, hitran_data['Qpp'][i].strip())
    if show_xlabel:
        ax.set_xlabel('Wavelength [$\mu$m]', fontsize=14)
    ax.set_ylabel('Flux [Jy]', fontsize=14)
    if show_legend:
        ax.legend()
    ax.set_title(title)


def _plot_spectra_figure(spectra, title, figsize, show_legend=False):
    """One figure with two stacked panels: 4.645-4.785 um and 4.95-5.10 um."""
    fig = plt.figure(figsize=figsize)
    for position, wave_range in ((211, (4.645, 4.785)), (212, (4.95, 5.10))):
        ax = fig.add_subplot(position)
        _plot_spectra_panel(ax, spectra, wave_range, title,
                            show_legend=show_legend,
                            show_xlabel=(position == 212))


# Both disk classes on shared axes: transitional in firebrick, classical in
# steelblue.
_plot_spectra_figure(
    [(w, f, lab, 'firebrick') for w, f, lab in transitional_spectra]
    + [(w, f, lab, 'steelblue') for w, f, lab in classical_spectra],
    'Composite Lineshape of Transitional and Classical Disks', (18, 8))

# Transitional disks only, default color cycle, with a per-disk legend.
_plot_spectra_figure([(w, f, lab, None) for w, f, lab in transitional_spectra],
                     'Composite Lineshape of Transitional Disks', (18, 10),
                     show_legend=True)

# Classical disks only.
_plot_spectra_figure([(w, f, lab, None) for w, f, lab in classical_spectra],
                     'Composite Lineshape of Classical Disks', (18, 8))
###Output
_____no_output_____
###Markdown
Compare Composite Lineshapes
###Code
#Transitional Disks
# For each disk: unpickle the precomputed line-flux data, then build the
# velocity-space composite lineshape with make_lineshape().
# NOTE(review): these absolute, user-specific paths will not work on other
# machines -- consider paths relative to the repository root.
#CI Tau data
citau_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/transitional_disks/citau_lineflux_data.p','rb'))
citau_lineshape_data=make_lineshape(citau_wave, citau_flux, citau_lineflux_data)
#LkHa 330 data
lkha330_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/transitional_disks/lkha330_lineflux_data.p','rb'))
lkha330_lineshape_data=make_lineshape(lkha330_wave, lkha330_flux, lkha330_lineflux_data)
#TW Hya data
twhya_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/transitional_disks/twhya_lineflux_data.p','rb'))
twhya_lineshape_data=make_lineshape(twhya_wave, twhya_flux, twhya_lineflux_data)
#DoAr 44 data
doar44_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/transitional_disks/doar44_lineflux_data.p','rb'))
doar44_lineshape_data=make_lineshape(doar44_wave, doar44_flux, doar44_lineflux_data)
#HD 135344 data
hd135344_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/transitional_disks/hd135344_lineflux_data.p','rb'))
hd135344_lineshape_data=make_lineshape(hd135344_wave, hd135344_flux, hd135344_lineflux_data)
#UX Tau data
uxtau_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/transitional_disks/uxtau_lineflux_data.p','rb'))
uxtau_lineshape_data=make_lineshape(uxtau_wave, uxtau_flux, uxtau_lineflux_data)
#Classical Disks
#DoAr 24 data
doar24_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/classical_disks/doar24_lineflux_data.p','rb'))
doar24_lineshape_data=make_lineshape(doar24_wave, doar24_flux, doar24_lineflux_data)
#DF Tau data
dftau_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/classical_disks/dftau_lineflux_data.p','rb'))
dftau_lineshape_data=make_lineshape(dftau_wave, dftau_flux, dftau_lineflux_data)
#DL Tau data
dltau_lineflux_data=pickle.load(open('/Users/erichegonzales/Desktop/eriche-thesis/data/classical_disks/dltau_lineflux_data.p','rb'))
dltau_lineshape_data=make_lineshape(dltau_wave, dltau_flux, dltau_lineflux_data)
#Plotting composite lineshapes
fig=plt.figure(figsize=(16,8))
ax1=fig.add_subplot(121)
ax1.plot(citau_lineshape_data[0], citau_lineshape_data[1], label='Ci Tau')
ax1.plot(lkha330_lineshape_data[0], lkha330_lineshape_data[1], label='DoAr 44')
ax1.plot(twhya_lineshape_data[0], twhya_lineshape_data[1], label='TW Hya')
ax1.plot(doar44_lineshape_data[0], doar44_lineshape_data[1], label='DoAr 44')
ax1.plot(hd135344_lineshape_data[0], hd135344_lineshape_data[1], label='HD 135344')
ax1.plot(uxtau_lineshape_data[0], uxtau_lineshape_data[1], label='UX Tau')
#Setting labels, limits, legend
ax1.set_xlabel('Velocity [km/s]')
ax1.set_ylabel('Arbitrary flux')
ax1.set_title('Composite Lineshape of Transitional Disks')
ax1.legend(loc="upper center", bbox_to_anchor=(0.5, 1.2), ncol=2)
ax1.set_ylim(0, 4)
#Plotting composite lineshapes
ax2=fig.add_subplot(122)
ax2.plot(doar24_lineshape_data[0], doar24_lineshape_data[1], label='DoAr 24')
ax2.plot(dftau_lineshape_data[0], dftau_lineshape_data[1], label='DF Tau')
ax2.plot(dltau_lineshape_data[0], dltau_lineshape_data[1], label='DL Tau')
#Setting labels, limits, legend
ax2.set_xlabel('Velocity [km/s]')
ax2.set_ylabel('Arbitrary flux')
ax2.set_title('Composite Lineshape of Classical Disks')
ax2.legend(loc="upper center", bbox_to_anchor=(0.5, 1.2), ncol=1)
ax2.set_ylim(0, 4)
###Output
_____no_output_____
###Markdown
Compare Star Properties and Parameters
###Code
data = pd.read_csv("/Users/erichegonzales/Desktop/eriche-thesis/data/star_data.csv")
print(data)
fig=plt.figure(figsize=(12,8))
markers = ['x', 'o', '^']
groups = data.groupby("disk_type")
for name, group in groups:
plt.plot(group["solar_mass"], group["temp"], marker='o', linestyle="", label=name)
plt.legend()
data
mass_t = data['solar_mass'][data['disk_type'] == 'Transitional']
radius_t = data['disk_radius'][data['disk_type'] == 'Transitional']
print(mass_t)
plt.scatter(mass_t, radius_t)
(data.loc[data['disk_type'] == 'Transitional'])['solar_mass']
data_t = data.loc[data['disk_type'] == 'Transitional']
data_c = data.loc[data['disk_type'] == 'Classical']
data_h = data.loc[data['disk_type'] == 'Herbig']
fig=plt.figure()
ax=fig.add_subplot(111)
ax.plot(data_t['solar_mass'], data_t['temp'], 'ro')
ax.set_ylim(0,3000)
###Output
_____no_output_____ |
FinalAssignment/01_assignment.ipynb | ###Markdown
Projekt zaliczeniowy - Przetwarzanie obrazów cyfrowychAutor: **Patryk Ciepiela**
###Code
# Ładowanie bibliotek
import numpy as np
import matplotlib.pyplot as plt
from skimage import segmentation, exposure, morphology, io, img_as_ubyte
from skimage.color import rgb2gray
from scipy.spatial import distance
import cv2
import warnings
import time
import math
import colorsys
# Pixel-value conventions used throughout: the binarized image is white
# objects (255) on black (0); the *_INV constants describe the same image
# after cv2.bitwise_not (black objects on a white background).
COLOR_FOREGROUND = 255
COLOR_BACKGROUND = 0
COLOR_FOREGROUND_INV = 0
COLOR_BACKGROUND_INV = 255
# Suppress library warnings unless debugging is enabled.
DEBUG = False
if not DEBUG:
    warnings.filterwarnings('ignore')
# Helper methods
def is_in_image(shape, px=0, py=0):
    """Return True when pixel (px, py) lies inside an image of the given shape."""
    rows, cols = shape[0], shape[1]
    return 0 <= px < rows and 0 <= py < cols
def bfs(i,j,image,color):
    """Breadth-first flood fill.

    Recolours the 8-connected region of foreground pixels
    (== COLOR_FOREGROUND_INV) reachable from (i, j) with `color`,
    mutating `image` in place.
    """
    from collections import deque
    # deque.popleft() is O(1); the original list.pop(0) shifted the whole
    # queue on every pop (O(n) per pixel).
    q = deque([(i, j)])
    while q:
        ii, jj = q.popleft()
        # Visit all 8 neighbours of (ii, jj).
        for dx in range(-1, 2):
            for dy in range(-1, 2):
                if dx == 0 and dy == 0:
                    continue
                a = ii + dx
                b = jj + dy
                if is_in_image(image.shape, a, b) and image[a][b] == COLOR_FOREGROUND_INV:
                    image[a][b] = color
                    q.append((a, b))
def segment(image):
    """Label connected foreground components of a binary image.

    Scans pixels row-major; every still-unlabelled foreground pixel
    (== COLOR_FOREGROUND_INV) starts a new component, which bfs() then
    flood-fills with the current label.  A second pass maps background
    pixels (== COLOR_BACKGROUND_INV) to label 0.

    Returns (component_count, label_image).

    NOTE(review): labels are written into a copy of `image` and therefore
    share its dtype; with uint8 input a label reaching
    COLOR_BACKGROUND_INV (255) would collide with the background sentinel
    -- confirm the object count stays well below 255.
    """
    segment_table = image.copy()
    cnt = 1   # next label value to assign
    x = 0     # number of components found so far
    for i in range(segment_table.shape[0]):
        for j in range(segment_table.shape[1]):
            v = segment_table[i][j]
            if v == COLOR_FOREGROUND_INV:
                x += 1
                segment_table[i][j] = cnt
                bfs(i,j,segment_table,cnt)
                cnt += 1
    # Second pass: zero out everything that is still background.
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            v = segment_table[i][j]
            if v == COLOR_BACKGROUND_INV:
                segment_table[i][j] = 0
    return x,segment_table
# Helper: display an image inline in the notebook
def showimg(img, title="Obraz", verbose=False, cmap="gray"):
    """Show *img* on a fresh 9x6 matplotlib figure with axes hidden.

    img     -- array-like image accepted by plt.imshow
    title   -- figure suptitle
    verbose -- when True, also print the image's shape and dtype
    cmap    -- colormap passed to imshow (ignored for RGB input)
    """
    if verbose:
        print(img.shape, img.dtype)
    plt.figure(figsize=(9,6))
    plt.imshow(img, cmap=cmap)
    plt.axis('off')
    plt.suptitle(title)
    plt.show()
sourceimg = io.imread("source.jpg")
showimg(sourceimg, title="Obraz źródłowy")
processedimg = sourceimg.copy()
processedimg = img_as_ubyte(rgb2gray(processedimg))
processedimg = cv2.blur(processedimg, (11,11))
bwblurredimg = processedimg.copy()
showimg(bwblurredimg, title="Obraz po wstępnym przetworzeniu")
th = 128
th, bim = cv2.threshold(bwblurredimg, thresh=th, maxval=255, type=cv2.THRESH_OTSU)
processedimg = bim
count = np.count_nonzero(processedimg)
print("Obiekty zajmują %.3f procent obrazu" % ((count/(bim.shape[0] * bim.shape[1]))*100) )
showimg(processedimg, title="Obraz poddany binaryzacji metodą Otsu")
morphKernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
processedimg = cv2.bitwise_not(processedimg)
processedimg = cv2.morphologyEx(processedimg, op=cv2.MORPH_OPEN, kernel=morphKernel, iterations=4)
processedimg = cv2.morphologyEx(processedimg, op=cv2.MORPH_DILATE, kernel=morphKernel, iterations=5)
processedimg = cv2.bitwise_not(processedimg)
binaryimg = processedimg.copy()
showimg(processedimg, title="Obraz po wykonaniu łańcucha operacji morfologicznych")
distimg = cv2.distanceTransform(binaryimg, cv2.DIST_L2, 5)
distimg = np.uint8(distimg)
_, distimg = cv2.threshold(distimg, thresh=46, maxval=255, type=cv2.THRESH_BINARY)
distimg = cv2.morphologyEx(distimg, op=cv2.MORPH_DILATE, kernel=morphKernel, iterations=10)
showimg(distimg, title="Zbinaryzowany obraz po wykonaniu transformacji odległościowej")
# NOTE(review): the 3-value unpacking matches the OpenCV 3.x findContours
# API; OpenCV 4.x returns only (contours, hierarchy) -- confirm the
# installed version.
_, contours, hierarchy = cv2.findContours(distimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Draw the minimum enclosing circle of every region found in the
# thresholded distance-transform image; these rings will be subtracted
# from the binary mask to split touching objects.
contoursimg = np.zeros_like(binaryimg)
for i in range(len(contours)):
    mycircle = contours[i]
    (x,y),radius = cv2.minEnclosingCircle(mycircle)
    center = (int(x),int(y))
    radius = int(radius)
    cv2.circle(contoursimg,center,radius,255,3)
# Close gaps so the drawn cut lines are continuous.
contoursimg = cv2.morphologyEx(contoursimg, op=cv2.MORPH_CLOSE, kernel=morphKernel, iterations=15)
showimg(contoursimg, title="Znalezione kontury")
# Erase the ring pixels from the mask, separating touching objects.
separated_img = cv2.bitwise_and(processedimg, cv2.bitwise_not(contoursimg))
showimg(separated_img, title="Obraz z rozdzielonymi obiektami")
time_now = time.time()
_, segment_table = segment(cv2.bitwise_not(separated_img))
time_delta = time.time() - time_now
print("Segmentacja ukończona w %.3f sekund" % time_delta)
denoised_segment = morphology.remove_small_objects(segment_table, min_size=10000)
unique_elements, counts_elements = np.unique(denoised_segment, return_counts=True)
obj_area = dict(zip(unique_elements[1:], counts_elements[1:]))
count = np.count_nonzero(denoised_segment)
print("Liczba obiektów: %d" % len(obj_area))
print("Obiekty po segmentacji zajmują %.3f procent obrazu" % ((count/(bim.shape[0] * bim.shape[1]))*100))
print(obj_area)
showimg(denoised_segment, cmap="tab20", title="Wizualizacja segmentacji po usunięciu nieznaczących obiektów (< 0,16% powierzchni obrazu)")
detected_coins = sourceimg.copy()
bbox_margin = 10;
obj_data = []
for key in obj_area.keys():
_, contours, hier = cv2.findContours(np.uint8(denoised_segment==key), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for c in contours:
x, y, w, h = cv2.boundingRect(c)
(x2,y2),radius = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
obj_data.append({'id':key, 'x': x, 'y': y, 'width': w, 'height': h, 'centroid': (cX, cY), 'contours':c, 'area': obj_area[key]})
cv2.circle(detected_coins, (cX,cY), 5, (0,0,255), 5)
cv2.circle(detected_coins, (int(x2),int(y2)), int(radius), (0,255,0), 3)
cv2.rectangle(detected_coins, (x+bbox_margin, y+bbox_margin), (x+w-bbox_margin, y+h-bbox_margin), (0, 255, 0), 3)
cv2.putText(detected_coins, str(key), (x+bbox_margin, y+bbox_margin+25), cv2.FONT_HERSHEY_SIMPLEX, 3, (255,0,0), 5)
showimg(detected_coins, title="Wykryte monety")
def getFigure(labelledImage, obj_id):
    """Return the (row, col) coordinates of every pixel labelled *obj_id*.

    Vectorized: np.argwhere scans the label image in C and yields matches
    in row-major order -- the same order the original double Python loop
    produced.  Coordinates are converted back to plain ints so the return
    value is an identical list of (y, x) tuples.
    """
    return [(int(y), int(x)) for y, x in np.argwhere(labelledImage == obj_id)]
def BlairBlissCoeff(points, obj_centroid):
    """Blair-Bliss shape coefficient: area / sqrt(2*pi * sum of squared
    pixel-to-centroid distances).  Close to 1 for a disc-like object.

    points       -- iterable of (y, x) pixel coordinates
    obj_centroid -- (x, y) centroid as produced by cv2 moments (cX, cY)
    """
    s = len(points)
    mx, my = obj_centroid
    # Accumulate squared distances directly; the original computed
    # euclidean() (a sqrt) and then squared it again for every pixel.
    r = sum((y - my) ** 2 + (x - mx) ** 2 for y, x in points)
    return s / (math.sqrt(2 * math.pi * r))
def FeretCoeff(points):
    """Feret ratio of a pixel set: vertical extent divided by horizontal extent.

    points -- iterable of (y, x) pixel coordinates.
    """
    ys, xs = zip(*points)
    horizontal_extent = max(xs) - min(xs)
    vertical_extent = max(ys) - min(ys)
    return float(vertical_extent) / float(horizontal_extent)
def HaralickCoeff(centroid, contours):
    """Haralick circularity coefficient of an object outline.

    centroid -- (x, y) centre of mass of the object
    contours -- OpenCV-style contour: sequence of [[x, y]] points

    d1 accumulates contour-to-centroid distances and d2 their squared
    distances (shifted by -1 each); the distance is now computed once per
    point instead of twice (the original called euclidean() twice per
    iteration).
    """
    n = len(contours)
    mx, my = centroid
    d1 = 0
    d2 = 0
    for pt in contours:
        # pt[0] is an [x, y] pair in OpenCV contour layout.
        d = math.hypot(pt[0][1] - my, pt[0][0] - mx)
        d1 += d
        d2 += d * d - 1
    return math.sqrt((d1 ** 2) / (n * d2))
def AverageColor(image):
    """Mean colour of an RGB image as (hue in degrees, saturation %, value %)."""
    # Averaging over rows, then columns, leaves per-channel means (R, G, B).
    mean_rgb = image.mean(axis=0).mean(axis=0)
    red, green, blue = mean_rgb[0] / 255, mean_rgb[1] / 255, mean_rgb[2] / 255
    hue, sat, val = colorsys.rgb_to_hsv(red, green, blue)
    return (hue * 360, sat * 100, val * 100)
def is_within(value, desired, margin=0):
    """True when value lies in the half-open interval [desired - margin, desired + margin)."""
    lower_bound = desired - margin
    upper_bound = desired + margin
    return lower_bound <= value < upper_bound
def TryToGuessValue(area, color):
    """Heuristically map a coin's pixel area and mean colour to a PLN value.

    area  -- segmented pixel count of the coin
    color -- (hue_deg, sat_pct, val_pct) as returned by AverageColor
    Returns the guessed denomination in zloty, or 0 when nothing matches.

    Branch order matters: several area windows overlap, so the first
    matching rule wins.  NOTE(review): the area thresholds are tuned to
    this photo's scale and will not transfer to images taken at another
    distance/resolution -- confirm before reuse.
    """
    # Reference hues: copper-ish coins (~38 deg) vs silver-ish coins (~27 deg).
    penny_h = 38
    dime_h = 27
    h_margin = 6
    h,s,v = color  # only the hue is used by the rules below
    if(area < 20000 and is_within(h,penny_h,h_margin)): # 1gr
        return 0.01
    elif(is_within(area, 25000, 1000) and is_within(h, penny_h,h_margin)): # 2gr
        return 0.02
    elif(is_within(area, 34000, 4000) and is_within(h, penny_h,h_margin)): # 5gr
        return 0.05
    elif(is_within(area, 23000, 1500) and is_within(h, dime_h, h_margin)): # 10gr
        return 0.1
    elif(is_within(area, 29000, 2000) and is_within(h, dime_h, h_margin)): # 20gr
        return 0.2
    elif(is_within(area, 36000, 3000) and is_within(h, dime_h, h_margin)): # 50gr
        return 0.5
    elif(area > 45000 and is_within(h, dime_h, h_margin)): # 1zl
        return 1
    elif(is_within(area, 45000, 2500) and is_within(h, penny_h, h_margin)): # 2zl
        return 2
    elif(area > 50000 and is_within(h, penny_h, h_margin)): # 5zl
        return 5
    else:
        return 0
print("Obliczanie współczynników dla obiektów")
for obj in obj_data:
points = getFigure(denoised_segment, obj["id"])
centroid = obj["centroid"]
feretCoeff = FeretCoeff(points)
bbCoeff = BlairBlissCoeff(points, centroid)
haraCoeff = HaralickCoeff(centroid, obj["contours"])
print("ID: %d\t| Centroid: (%d,%d)\t| Feret: %.9f\t| Blair-Bliss: %.9f\t| Haralick: %.9f"
% (obj["id"], centroid[0], centroid[1], feretCoeff, bbCoeff, haraCoeff))
moneysum = 0
for myObj in obj_data:
objColorImg = sourceimg[myObj['y']:myObj['y']+myObj['height'],myObj['x']:myObj['x']+myObj['width'],:]
objColorImg = cv2.blur(objColorImg, (9,9))
imgColor = AverageColor(objColorImg)
moneysum += TryToGuessValue(myObj["area"], imgColor)
print("Łącznie na obrazku \"jest\" około " + str(round(moneysum, 2)) + " zł")
###Output
Łącznie na obrazku "jest" około 15.11 zł
|
chapter_2/distributions.ipynb | ###Markdown
Some common distributions to know
###Code
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
Discrete distributions The binomial distribution$$f(k|n,\theta) = \binom{n}{k}\theta^k(1-\theta)^{n-k}$$e.g. Toss a coin n times
###Code
from scipy.stats import binom
n,theta = 100, 0.5
mean, var, skew, kurt = binom.stats(n, theta, moments='mvsk')
fig, ax = plt.subplots(1, 1)
x = np.arange(binom.ppf(0.01, n, theta), binom.ppf(0.99, n, theta))
ax.vlines(x, 0, binom.pmf(x, n, theta), colors='b', lw=5, alpha=0.5)
plt.ylabel('Mass')
plt.xlabel('k')
plt.xlim(25,75)
plt.ylim(0.0 ,.1)
###Output
_____no_output_____
###Markdown
The bernoulli distribution\begin{align*} f(x|\theta) = \begin{cases} \theta & \text{if $x=1$} \\ 1-\theta & \text{if $x=0$} \end{cases}\end{align*}e.g. Toss a coin once
###Code
from scipy.stats import bernoulli
theta = 0.5
mean, var, skew, kurt = bernoulli.stats(theta, moments='mvsk')
fig, ax = plt.subplots(1, 1)
x = np.arange(0,1.1)
ax.vlines(x, 0, bernoulli.pmf(x, theta), colors='b', lw=5, alpha=0.5)
plt.ylabel('Mass')
plt.xlabel('x')
plt.xlim(-0.1 ,1.1)
plt.ylim(0.0 ,1)
###Output
_____no_output_____
###Markdown
The poisson distribution$$f(x|\theta) = e^{-\lambda} \frac{\lambda^x}{x!}$$e.g. rare events, radioactive decay
###Code
from scipy.stats import poisson
lambda_ = 0.6
mean, var, skew, kurt = poisson.stats(lambda_, moments='mvsk')
fig, ax = plt.subplots(1, 1)
x = np.arange(0,3)
ax.vlines(x, 0, poisson.pmf(x, lambda_), colors='b', lw=5, alpha=0.5)
plt.ylabel('Mass')
plt.xlabel('x')
plt.xlim(-0.1 ,3)
plt.ylim(0.0 ,1)
###Output
_____no_output_____
###Markdown
The emperical distribution$$f(A) = \frac{1}{N}\sum^{N}_{i=1}\delta_{x_{i}}(A)$$\begin{align*} \delta_{x_{i}}(A) = \begin{cases} 0 & \text{if $x\notin A$} \\ 1 & \text{if $x\in A$} \end{cases}\end{align*} Continuous Distributions Gaussian (normal)$$f(x|\mu, \sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}}e^{-\frac{1}{2\sigma^2}(x-\mu)^2}$$
###Code
from scipy.stats import norm
fig, ax = plt.subplots(1, 1)
mean, var, skew, kurt = norm.stats(moments='mvsk')
x = np.linspace(norm.ppf(0.01),norm.ppf(0.99), 100)
ax.plot(x, norm.pdf(x),
'b-', lw=3, alpha=0.6, label='norm pdf')
plt.xlim(-20 ,20)
plt.ylim(0.0 ,1)
plt.ylabel('Density')
plt.xlabel('x')
###Output
_____no_output_____
###Markdown
 Students t (special case: Cauchy–Lorentz when $v=1$)$$ f(x|v) = \frac{\Gamma(\frac{v+1}{2})}{\sqrt{v\pi}\Gamma(\frac{v}{2})} \Big( 1+\frac{x^2}{v} \Big)^{-\frac{v+1}{2}}, v= df $$ e.g. small-sample inference, heavy-tailed data
###Code
from scipy.stats import t
fig, ax = plt.subplots(1, 1)
df = 3
mean, var, skew, kurt = t.stats(df, moments='mvsk')
x = np.linspace(t.ppf(0.01, df),
t.ppf(0.99, df), 100)
ax.plot(x, t.pdf(x, df),
'b-', lw=3, alpha=0.6, label='t pdf')
plt.ylabel('Density')
plt.xlabel('x')
###Output
_____no_output_____
###Markdown
Laplace$$ f(x|\mu,b) = \frac{1}{2b}e^{\big(-\frac{|x-\mu|)}{b}\big)}$$e.g. like normal but with more sparsity, brownian motion
###Code
from scipy.stats import laplace
mean, var, skew, kurt = laplace.stats(moments='mvsk')
fig, ax = plt.subplots(1, 1)
x = np.linspace(laplace.ppf(0.01),
laplace.ppf(0.99), 100)
ax.plot(x, laplace.pdf(x),
'b-', lw=3, alpha=0.6, label='laplace pdf')
plt.ylabel('Density')
plt.xlabel('x')
###Output
_____no_output_____
###Markdown
 Gamma$$ f(x|a,b) = \frac{b^a}{\Gamma(a)}x^{a-1}e^{-xb} $$, where the shape a > 0 and the rate b > 0, e.g. waiting times between Poisson events
###Code
from scipy.stats import gamma
fig, ax = plt.subplots(1, 1)
a = 2
mean, var, skew, kurt = gamma.stats(a, moments='mvsk')
x = np.linspace(gamma.ppf(0.01, a),
gamma.ppf(0.99, a), 100)
ax.plot(x, gamma.pdf(x, a),
'b-', lw=3, alpha=0.6, label='gamma pdf')
plt.ylabel('Density')
plt.xlabel('x')
###Output
_____no_output_____
###Markdown
The beta distribution$$f(x|a,b) = \frac{1}{B(a,b)}x^{a-1}(1-x)^{b-1}$$$$B(a,b) = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}$$
###Code
from scipy.stats import beta
a, b = 2, 0.8
mean, var, skew, kurt = beta.stats(a, b, moments='mvsk')
fig, ax = plt.subplots(1, 1)
x = np.linspace(beta.ppf(0.01, a, b),
beta.ppf(0.99, a, b), 100)
ax.plot(x, beta.pdf(x, a, b),
'b-', lw=3, alpha=0.6, label='beta pdf')
plt.ylabel('Density')
plt.xlabel('x')
###Output
_____no_output_____
###Markdown
pareto\begin{align*} f(x| k,m) = \begin{cases} \frac{kx_m^k}{x^{k+1}} & \text{if $x \ge x_m$} \\ 0 & \text{if $x < x_m$} \end{cases}\end{align*}
###Code
from scipy.stats import pareto
fig, ax = plt.subplots(1, 1)
b = 2.62
mean, var, skew, kurt = pareto.stats(b, moments='mvsk')
x = np.linspace(pareto.ppf(0.01, b),
pareto.ppf(0.99, b), 100)
ax.plot(x, pareto.pdf(x, b),
'b-', lw=3, alpha=0.6, label='pareto pdf')
plt.ylabel('Density')
plt.xlabel('x')
###Output
_____no_output_____
###Markdown
The multivariate Gaussian$$f(x|\mu, \Sigma) = \frac{1}{(2\pi)^{d/2}|\Sigma|^{1/2}}e^{\frac{1}{2}(x-\mu)^T\Sigma^{-1}(x-\mu)}$$
###Code
from scipy.stats import multivariate_normal
mean, cov = [0, 1], [(1, .5), (.5, 1)]
x, y = np.random.multivariate_normal(mean, cov, 1000).T
with sns.axes_style("white"):
sns.jointplot(x=x, y=y, kind="hex", color="b");
###Output
_____no_output_____
###Markdown
The Dirichlet distribution$$f(x|\alpha) = \frac{1}{B(\alpha)}\prod_{k=1}^{K}x_k^{\alpha_k-1}I(x\in S_k)$$$$B(\alpha) = \frac{\prod_{k=1}^{K}\Gamma(\alpha_k)}{\Gamma(\alpha_0)}$$e.g. multivariate generalization of beta distribution
###Code
#The code below to visualize was taken from Thomas boggs elegant contours here:http://blog.bogatron.net/blog/2014/02/02/visualizing-dirichlet-distributions/
import matplotlib.tri as tri
_corners = np.array([[0, 0], [1, 0], [0.5, 0.75**0.5]])
_triangle = tri.Triangulation(_corners[:, 0], _corners[:, 1])
_midpoints = [(_corners[(i + 1) % 3] + _corners[(i + 2) % 3]) / 2.0 \
for i in range(3)]
def xy2bc(xy, tol=1.e-3):
    """Convert a 2D Cartesian point to (clipped) barycentric coordinates.

    Arguments:
        `xy`: A length-2 sequence containing the x and y value.
    """
    bary = []
    for corner, midpoint in zip(_corners, _midpoints):
        bary.append((corner - midpoint).dot(xy - midpoint) / 0.75)
    # Clip away exact zeros/ones so downstream pdf evaluation stays finite.
    return np.clip(bary, tol, 1.0 - tol)
class Dirichlet(object):
    """Minimal Dirichlet distribution: pdf evaluation and sampling."""
    def __init__(self, alpha):
        '''Creates Dirichlet distribution with parameter `alpha`.'''
        from math import gamma
        from operator import mul
        # BUGFIX: `reduce` is not a builtin on Python 3; import it.
        from functools import reduce
        self._alpha = np.array(alpha)
        # Normalising constant 1/B(alpha) = Gamma(sum a_k) / prod Gamma(a_k).
        self._coef = gamma(np.sum(self._alpha)) / \
                     reduce(mul, [gamma(a) for a in self._alpha])
    def pdf(self, x):
        '''Returns pdf value for `x` (a point on the simplex).'''
        from operator import mul
        from functools import reduce  # BUGFIX: Python 3 compatibility
        return self._coef * reduce(mul, [xx ** (aa - 1)
                                         for (xx, aa) in zip(x, self._alpha)])
    def sample(self, N):
        '''Generates a random sample of size `N`.'''
        return np.random.dirichlet(self._alpha, N)
def draw_pdf_contours(dist, border=False, nlevels=200, subdiv=8, **kwargs):
    '''Draws pdf contours over an equilateral triangle (2-simplex).
    Arguments:
        `dist`: A distribution instance with a `pdf` method.
        `border` (bool): If True, the simplex border is drawn.
        `nlevels` (int): Number of contours to draw.
        `subdiv` (int): Number of recursive mesh subdivisions to create.
        kwargs: Keyword args passed on to `plt.triplot`.
    '''
    # (Removed unused local imports of matplotlib.ticker/cm and math.)
    # Refine the base triangle into a fine mesh and evaluate the pdf on it.
    refiner = tri.UniformTriRefiner(_triangle)
    trimesh = refiner.refine_triangulation(subdiv=subdiv)
    pvals = [dist.pdf(xy2bc(xy)) for xy in zip(trimesh.x, trimesh.y)]
    plt.tricontourf(trimesh, pvals, nlevels, **kwargs)
    plt.axis('equal')
    plt.xlim(0, 1)
    plt.ylim(0, 0.75**0.5)
    plt.axis('off')
    if border is True:
        # BUGFIX: plt.hold(1) removed -- pyplot.hold was deleted in
        # matplotlib >= 3.0, and holding is the default behaviour anyway.
        plt.triplot(_triangle, linewidth=1)
def plot_points(X, barycentric=True, border=True, **kwargs):
    '''Plots a set of points in the simplex.
    Arguments:
        `X` (ndarray): A 2xN array (if in Cartesian coords) or 3xN array
            (if in barycentric coords) of points to plot.
        `barycentric` (bool): Indicates if `X` is in barycentric coords.
        `border` (bool): If True, the simplex border is drawn.
        kwargs: Keyword args passed on to `plt.plot`.
    '''
    if barycentric is True:
        # Project barycentric coordinates onto the triangle's Cartesian plane.
        X = X.dot(_corners)
    plt.plot(X[:, 0], X[:, 1], 'k.', ms=1, **kwargs)
    plt.axis('equal')
    plt.xlim(0, 1)
    plt.ylim(0, 0.75**0.5)
    plt.axis('off')
    if border is True:
        # BUGFIX: plt.hold(1) removed -- pyplot.hold was deleted in
        # matplotlib >= 3.0, and holding is the default behaviour anyway.
        plt.triplot(_triangle, linewidth=1)
if __name__ == '__main__':
    # Top row: pdf contour plots; bottom row: 5000 samples -- one column
    # per alpha setting.
    f = plt.figure(figsize=(8, 6))
    alphas = [[0.999] * 3,
              [5] * 3,
              [2, 5, 15]]
    for (i, alpha) in enumerate(alphas):
        plt.subplot(2, len(alphas), i + 1)
        dist = Dirichlet(alpha)
        draw_pdf_contours(dist)
        title = r'$\alpha$ = (%.3f, %.3f, %.3f)' % tuple(alpha)
        plt.title(title, fontdict={'fontsize': 8})
        plt.subplot(2, len(alphas), i + 1 + len(alphas))
        plot_points(dist.sample(5000))
    # BUGFIX: Python 2 print statement -> print() function (SyntaxError on Py3).
    # NOTE(review): nothing here calls savefig('dirichlet_plots.png'); the
    # message appears to be vestigial from the code's original source -- confirm.
    print('Wrote plots to "dirichlet_plots.png".')
draw_pdf_contours(Dirichlet([5, 5, 5]))
###Output
Wrote plots to "dirichlet_plots.png".
###Markdown
The multinomial distribution$$f(x|n,\theta) = \binom{n}{x_1 \ldots x_K}\prod^{K}_{j=1}\theta^{x_j}_j$$e.g. Roll a K-sided die n times
###Code
# NOTE(review): this cell cannot run as written -- `multinomial` is never
# imported (the import below is commented out awaiting scipy 0.19), so the
# calls to multinomial.stats/pmf raise NameError.
#from scipy.stats import multinomial $ waiting until scipy 0.19 is released
x = np.arange(0,6)
# NOTE(review): on Python 2 (this notebook uses `print` statements
# elsewhere) 1/6 is integer division and evaluates to 0 -- verify the
# interpreter, or write 1.0/6.
theta = [1/6, 1/6, 1/6, 1/6, 1/6, 1/6]
n = 100 # number of trials
# NOTE(review): scipy's API is multinomial(n, p); .stats needs n as well,
# and .pmf expects a vector of category counts, not a scalar x -- this
# cell needs reworking even once the import is available.
mean, var, skew, kurt = multinomial.stats(theta, moments='mvsk')
fig, ax = plt.subplots(1, 1)
ax.vlines(x, 0, multinomial.pmf(x, theta), colors='b', lw=5, alpha=0.5)
plt.ylabel('Mass')
plt.xlabel('x')
plt.xlim(-0.1 ,1.1)
plt.ylim(0.0 ,1)
###Output
_____no_output_____ |
data-science/python-plotting/Week3.ipynb | ###Markdown
Subplots
###Code
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
plt.subplot?
plt.figure()
# subplot with 1 row, 2 columns, and current axis is 1st subplot axes
plt.subplot(1, 2, 1)
linear_data = np.array([1,2,3,4,5,6,7,8])
plt.plot(linear_data, '-o')
exponential_data = linear_data**2
# subplot with 1 row, 2 columns, and current axis is 2nd subplot axes
plt.subplot(1, 2, 2)
plt.plot(exponential_data, '-o')
# plot exponential data on 1st subplot axes
plt.subplot(1, 2, 1)
plt.plot(exponential_data, '-x')
plt.figure()
ax1 = plt.subplot(1, 2, 1)
plt.plot(linear_data, '-o')
# pass sharey=ax1 to ensure the two subplots share the same y axis
ax2 = plt.subplot(1, 2, 2, sharey=ax1)
plt.plot(exponential_data, '-x')
plt.figure()
# the right hand side is equivalent shorthand syntax
plt.subplot(1,2,1) == plt.subplot(121)
# create a 3x3 grid of subplots
fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True)
# plot the linear_data on the 5th subplot axes
ax5.plot(linear_data, '-')
# set inside tick labels to visible
for ax in plt.gcf().get_axes():
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_visible(True)
# necessary on some systems to update the plot
plt.gcf().canvas.draw()
###Output
_____no_output_____
###Markdown
Histograms
###Code
# create 2x2 grid of axis subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
# draw n = 10, 100, 1000, and 10000 samples from the normal distribution and plot corresponding histograms
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample)
axs[n].set_title('n={}'.format(sample_size))
# repeat with number of bins set to 100
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True)
axs = [ax1,ax2,ax3,ax4]
for n in range(0,len(axs)):
sample_size = 10**(n+1)
sample = np.random.normal(loc=0.0, scale=1.0, size=sample_size)
axs[n].hist(sample, bins=100)
axs[n].set_title('n={}'.format(sample_size))
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
plt.scatter(X,Y)
# use gridspec to partition the figure into subplots
import matplotlib.gridspec as gridspec
plt.figure()
gspec = gridspec.GridSpec(3, 3)
top_histogram = plt.subplot(gspec[0, 1:])
side_histogram = plt.subplot(gspec[1:, 0])
lower_right = plt.subplot(gspec[1:, 1:])
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
lower_right.scatter(X, Y)
top_histogram.hist(X, bins=100)
s = side_histogram.hist(Y, bins=100, orientation='horizontal')
# clear the histograms and plot normed histograms
top_histogram.clear()
top_histogram.hist(X, bins=100, normed=True)
side_histogram.clear()
side_histogram.hist(Y, bins=100, orientation='horizontal', normed=True)
# flip the side histogram's x axis
side_histogram.invert_xaxis()
# change axes limits
for ax in [top_histogram, lower_right]:
ax.set_xlim(0, 1)
for ax in [side_histogram, lower_right]:
ax.set_ylim(-5, 5)
%%HTML
<img src='http://educationxpress.mit.edu/sites/default/files/journal/WP1-Fig13.jpg' />
###Output
_____no_output_____
###Markdown
Box and Whisker Plots
###Code
import pandas as pd
normal_sample = np.random.normal(loc=0.0, scale=1.0, size=10000)
random_sample = np.random.random(size=10000)
gamma_sample = np.random.gamma(2, size=10000)
df = pd.DataFrame({'normal': normal_sample,
'random': random_sample,
'gamma': gamma_sample})
df.describe()
plt.figure()
# create a boxplot of the normal data, assign the output to a variable to supress output
_ = plt.boxplot(df['normal'], whis='range')
# clear the current figure
plt.clf()
# plot boxplots for all three of df's columns
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
plt.figure()
_ = plt.hist(df['gamma'], bins=100)
import mpl_toolkits.axes_grid1.inset_locator as mpl_il
plt.figure()
plt.boxplot([ df['normal'], df['random'], df['gamma'] ], whis='range')
# overlay axis on top of another
ax2 = mpl_il.inset_axes(plt.gca(), width='60%', height='40%', loc=2)
ax2.hist(df['gamma'], bins=100)
ax2.margins(x=0.5)
# switch the y axis ticks for ax2 to the right side
ax2.yaxis.tick_right()
# if `whis` argument isn't passed, boxplot defaults to showing 1.5*interquartile (IQR) whiskers with outliers
plt.figure()
_ = plt.boxplot([ df['normal'], df['random'], df['gamma'] ] )
###Output
_____no_output_____
###Markdown
Heatmaps
###Code
plt.figure()
Y = np.random.normal(loc=0.0, scale=1.0, size=10000)
X = np.random.random(size=10000)
_ = plt.hist2d(X, Y, bins=25)
plt.figure()
_ = plt.hist2d(X, Y, bins=100)
# add a colorbar legend
plt.colorbar()
###Output
_____no_output_____
###Markdown
Animations
###Code
import matplotlib.animation as animation
n = 100
x = np.random.randn(n)
# Frame callback for FuncAnimation; `curr` is the current frame number.
def update(curr):
    """Redraw the histogram of the first `curr` samples of x.

    Relies on notebook globals: n (total sample count), x (the samples)
    and a (the FuncAnimation instance created after this definition).
    """
    # Stop the animation once all samples have been shown.
    # NOTE(review): execution falls through and redraws one final frame
    # after stopping -- harmless here, but an early `return` would be tidier.
    if curr == n:
        a.event_source.stop()
    plt.cla()
    bins = np.arange(-4, 4, 0.5)
    plt.hist(x[:curr], bins=bins)
    # Fixed axes so the plot does not rescale between frames.
    plt.axis([-4,4,0,30])
    plt.gca().set_title('Sampling the Normal Distribution')
    plt.gca().set_ylabel('Frequency')
    plt.gca().set_xlabel('Value')
    plt.annotate('n = {}'.format(curr), [3,27])
fig = plt.figure()
a = animation.FuncAnimation(fig, update, interval=100)
###Output
_____no_output_____
###Markdown
Interactivity
###Code
plt.figure()
data = np.random.rand(10)
plt.plot(data)
def onclick(event):
    """Mouse-press handler: replot the notebook-global `data` and show the
    click's pixel and data coordinates in the axes title."""
    plt.cla()
    plt.plot(data)
    plt.gca().set_title('Event at pixels {},{} \nand data {},{}'.format(event.x, event.y, event.xdata, event.ydata))
# tell mpl_connect we want to pass a 'button_press_event' into onclick when the event is detected
plt.gcf().canvas.mpl_connect('button_press_event', onclick)
from random import shuffle
origins = ['China', 'Brazil', 'India', 'USA', 'Canada', 'UK', 'Germany', 'Iraq', 'Chile', 'Mexico']
shuffle(origins)
df = pd.DataFrame({'height': np.random.rand(10),
'weight': np.random.rand(10),
'origin': origins})
df
plt.figure()
# picker=5 means the mouse doesn't have to click directly on an event, but can be up to 5 pixels away
plt.scatter(df['height'], df['weight'], picker=5)
plt.gca().set_ylabel('Weight')
plt.gca().set_xlabel('Height')
def onpick(event):
    """Show the origin country of the picked scatter point in the axes title."""
    # event.ind holds the indices of the picked points; take the first one
    origin = df.iloc[event.ind[0]]['origin']
    plt.gca().set_title('Selected item came from {}'.format(origin))
# tell mpl_connect we want to pass a 'pick_event' into onpick when the event is detected
plt.gcf().canvas.mpl_connect('pick_event', onpick)
###Output
_____no_output_____ |
Section 6 - Analyzing Big Data/6.2/data_exploration.ipynb | ###Markdown
First look at the test data
###Code
# Count of data
print(f"Overall data count: {raw_test_data.count()}")
# Data summary; toPandas() collects the small summary to the driver for notebook rendering
display(raw_test_data.summary().toPandas())
print("Data schema")
raw_test_data.printSchema()
# Let's look at 50 rows of data (limit() avoids collecting the whole DataFrame)
display(raw_test_data.limit(50).toPandas())
###Output
_____no_output_____
###Markdown
First look at the training data
###Code
# Count of data
print(f"Overall data count: {raw_training_data.count()}")
# Data summary; toPandas() collects the small summary to the driver for notebook rendering
display(raw_training_data.summary().toPandas())
print("Data schema")
raw_training_data.printSchema()
# Let's look at 50 rows of data (limit() avoids collecting the whole DataFrame)
display(raw_training_data.limit(50).toPandas())
###Output
_____no_output_____
###Markdown
Test data:- 498 rows of test_dataTraining data:- 1600000 rows of training_data Initial Findings:- We need to apply a proper schema- The date column needs fixing- We need to extract twitter user names/handles (we'll extract it and call the output column `users_mentioned`)- We need to extract hashtags and replace them with the words from the hashtag (we'll extract it and call the output column `hashtags`)- We need to extract URLs, as our algorithm won't need that or use that (we'll simply remove it from the data)- The same goes for email-address- HTML does not appear properly unescaped, we're going to have to fix that (example: `<3` and `s&^t`)- Encoding seems to be 'broken' (example: `�����ߧ�ǿ�����ж���� <<----I DID NOT KNOW I CUD or HOW TO DO ALL DAT ON MY PHONE TIL NOW. WOW..MY LIFE IS NOW COMPLETE. JK.`) Detailed statistics PolarityAccording to Sentiment140 documentation, we would expect the `polarity` column to have one of three values representing user sentiment:- 0 = negative- 2 = neutral- 4 = positiveOnce we train our own model, we don't want data-skew to introduce bias. So let's see how polarity is distributed in the data that we have. Polarity column (test data)Let's first look at the test data.
###Code
# Keep only the polarity column and drop rows where it is missing
df = raw_test_data.select("polarity").na.drop()
print(f"No of rows with Polarity: {df.count()}/{raw_test_data.count()}")
# NOTE(review): seaborn.distplot is deprecated in newer seaborn; histplot/displot is the modern replacement
sns.distplot(df.toPandas())
###Output
_____no_output_____
###Markdown
Polarity column (training data)Now let's look at the training data.
###Code
# Keep only the polarity column and drop rows where it is missing
df = raw_training_data.select("polarity").na.drop()
print(f"No of rows with Polarity: {df.count()} / {raw_training_data.count()}")
# NOTE(review): seaborn.distplot is deprecated in newer seaborn; histplot/displot is the modern replacement
sns.distplot(df.toPandas())
###Output
_____no_output_____
###Markdown
Results:We can clearly see that the training data only has polarity data centered around 0 (Negative) and 4 (Positive).Let's confirm this:
###Code
# cache() keeps the single-column projection in memory; it is scanned again by the groupBy below
polarity_df = raw_training_data.select("polarity").cache()
# Row count per polarity value (expected: even 0/4 split)
polarity_df.groupBy("polarity").count().toPandas()
###Output
_____no_output_____
###Markdown
Very nice! We have a nice even 50/50 split between polarity. Conclusions:- As 498 rows is way too little for us to train a model on, we're going to disregard this dataset and focus on the Training Data. - We've determined the steps that need to be taken to clean the data Store our raw dataNow it's time for us to write the raw data we intend to use to disk. We're going to:- keep the format CSV- partition the data by polarity, this will create 2 subfolders inside our output folder - repartition the data in 20 partitions: This will ensure that we have 20 smaller csv files per partition
###Code
# 20 partitions -> 20 CSV part-files inside each polarity=... subfolder; overwrite any previous run
raw_training_data.repartition(20).write.partitionBy("polarity").csv(
    OUTPUT_PATH, mode="overwrite"
)
###Output
_____no_output_____ |
onem2m-02-basic-resources.ipynb | ###Markdown
oneM2M - Basic Resources and InteractionsThis notebook shows the basic interactions with a CSE using REST calls. Examples include:- Create an <AE> resource- Create a <Container> resource- Create one or more <ContentInstance> resources- Retrieve the latest <ContentInstance> resource- Update the <Container> resource- Retrieve the <Container> resource InitializationThis section imports the necessary modules and configurations.
###Code
%run init.py
###Output
_____no_output_____
###Markdown
Create an <AE> ResourceThis example creates a new <AE> resource in the CSE. <AE>'s represent applications or services.Creating this (or other) resource is done using a POST request and with providing a couple of mandatory attributes:- **api** : Application Identifier. An Application Identifier uniquely identifies an M2M Application in a given context.- **rr** : Request Reachability. This attribute indicates whether a resource can receive requests.- **srv** : Supported Release Versions. An array that specifies the supported oneM2M specification releases.Note, that the request target is the <CSEBase> resource. All create requests target a parent resource.
###Code
# CREATE an <AE> resource under the <CSEBase>.
# Build the header and body dictionaries first, then issue the request.
ae_headers = {
    'X-M2M-Origin' : 'C',                        # 'C' asks the CSE to assign a fresh AE originator
    'X-M2M-RI'     : '123',                      # Request identifier
    'X-M2M-RVI'    : '3',                        # Release version indicator
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=2'     # JSON body describing an <AE> resource (ty=2)
}
ae_body = {
    'm2m:ae': {
        'rn'  : 'Notebook-AE',    # resource name
        'api' : 'NnotebookAE',    # application identifier
        'rr'  : True,             # request reachability
        'srv' : [ '3' ]           # supported oneM2M release versions
    }
}
CREATE(url, ae_headers, ae_body)
###Output
_____no_output_____
###Markdown
The response introduces some new attributes:- **pi** : This is the identifier of the parent resource.- **et** : Expiration time/date after which the CSE will delete the resource.- **aei** : An Application Entity Identifier uniquely identifies an AE globally.**Note**: If you see an error "409" or "Name already present" then don't worry. It just means that an <AE> resource with the same name already exists in the CSE, perhaps from a previous run of this notebook cell. Create a <Container> ResourceIn this section we add a <Container> resource to the <AE> resource. A <Container> represents a data point that can hold a configurable number of data instances. A <Container> may also hold sub-containers.If created with no further attributes, the CSE will assign defaults to the <Container> resource.
###Code
# CREATE a <Container> resource under the <AE> created above.
cnt_headers = {
    'X-M2M-Origin' : originator,                 # Set the originator
    'X-M2M-RI'     : '123',                      # Request identifier
    'X-M2M-RVI'    : '3',                        # Release version indicator
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=3'     # JSON body describing a <Container> resource (ty=3)
}
cnt_body = {
    'm2m:cnt': {
        'rn' : 'Container'    # resource name; all other attributes get CSE defaults
    }
}
CREATE(f'{url}/Notebook-AE', cnt_headers, cnt_body)
###Output
_____no_output_____
###Markdown
The following attributes are used with the <Container> resource:- **st** : The State Tag is incremented every time the resource is modified.- **mni** : Maximum number of direct data instances in the <Container> resource.- **mbs** : Maximum size in bytes of data.- **mia** : Maximum age of a direct data instances in the <Container> resource.- **cni** : Current number of direct data instances in the <Container> resource.- **cbs** : Current size in bytes of data.- **ol** : Resource identifier of a virtual resource that points to the oldest data instance of the <Container> resource.- **la** : Resource identifier of a virtual resource that points to the latest data instance of the <Container> resource.**Note**: If you see an error "409" or "Name already present" then don't worry. It just means that an <Container> resource with the same name already exists in the CSE, perhaps from a previous run of this notebook cell. Add a <ContentInstance> to the <Container>Now, we add an actual value to the *myContainer* <Container>. These attributes are part of the request:- **cnf** : This specifies the content format. It specifies the media type as well as an encoding type.- **con** : This is the actual content (ie. the value) that will be stored in the <Container resource. It must contain media information and may optionally specify an optional content encoding (here 0 means "plain, no transfer encoding"), and content security.<ContentInstance>'s can only be added and read, but not updated or deleted.**Note**: You can execute the following code as often as you like in order to create more <ContentInstance> resources.
###Code
# CREATE a <ContentInstance> (a data value) inside the <Container>.
cin_headers = {
    'X-M2M-Origin' : originator,                 # Set the originator
    'X-M2M-RI'     : '123',                      # Request identifier
    'X-M2M-RVI'    : '3',                        # Release version indicator
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=4'     # JSON body describing a <ContentInstance> resource (ty=4)
}
cin_body = {
    'm2m:cin': {
        'cnf': 'text/plain:0',     # media type + encoding (0 = plain, no transfer encoding)
        'con': 'Hello, World!'     # the actual content
    }
}
CREATE(f'{url}/Notebook-AE/Container', cin_headers, cin_body)
###Output
_____no_output_____
###Markdown
A new attribute:- **cs** : This attribute contains the size of the content of the **con** attribute. Retrieve the latest <ContentInstance> resourceThis request will retrieve the latest data instance from the <Container>.
###Code
# RETRIEVE the latest <ContentInstance> via the virtual 'la' (latest) child resource.
la_headers = {
    'X-M2M-Origin' : originator,          # Set the originator
    'X-M2M-RI'     : '123',               # Unique request identifier
    'X-M2M-RVI'    : '3',                 # Release version indicator
    'Accept'       : 'application/json'   # Response shall be JSON
}
RETRIEVE(url + '/Notebook-AE/Container/la', la_headers)
###Output
_____no_output_____
###Markdown
Update the <Container> ResourceWith this request we will set the *MinimumNumberOfInstances* (**mni**) attribute to a new value.
###Code
# UPDATE the <Container>: only the attributes present in the body are changed.
upd_headers = {
    'X-M2M-Origin' : originator,                 # Set the originator
    'X-M2M-RI'     : '123',                      # Request identifier
    'X-M2M-RVI'    : '3',                        # Release version indicator
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=3'     # JSON body describing a <Container> resource (ty=3)
}
upd_body = {
    'm2m:cnt': {
        'mni': 10001    # new maximum number of direct data instances
    }
}
UPDATE(f'{url}/Notebook-AE/Container', upd_headers, upd_body)
###Output
_____no_output_____
###Markdown
The CSE returns the resource. Also note the change of the *lastModificationTime* (lt) and *status* (st) attributes. Check the <Container> resourceRetrieve the <Container> resource to see all the changes and its current state.
###Code
# RETRIEVE the <Container> itself to inspect its current state after the update.
get_headers = {
    'X-M2M-Origin' : originator,          # Set the originator
    'X-M2M-RI'     : '123',               # Unique request identifier
    'X-M2M-RVI'    : '3',                 # Release version indicator
    'Accept'       : 'application/json'   # Response shall be JSON
}
RETRIEVE(f'{url}/Notebook-AE/Container', get_headers)
###Output
_____no_output_____
###Markdown
oneM2M - Basic Resources and InteractionsThis notebook shows the basic interactions with a CSE using REST calls. Examples include:- Create an <AE> resource- Create a <Container> resource- Create one or more <ContentInstance> resources- Retrieve the latest <ContentInstance> resource- Update the <Container> resource- Retrieve the <Container> resource InitializationThis section imports the necessary modules and configurations.
###Code
%run init.py
###Output
_____no_output_____
###Markdown
Create an <AE> ResourceThis example creates a new <AE> resource in the CSE. <AE>'s represent applications or services.Creating this (or other) resource is done using a POST request and with providing a couple of mandatory attributes:- **api** : Application Identifier. An Application Identifier uniquely identifies an M2M Application in a given context.- **rr** : Request Reachability. This attribute indicates whether a resource can receive requests.- **srv** : Supported Release Versions. An array that specifies the supported oneM2M specification releases.Note, that the request target is the <CSEBase> resource. All create requests target a parent resource.
###Code
# CREATE an <AE> resource under the <CSEBase>.
ae_headers = {
    'X-M2M-Origin' : 'C',                        # 'C' asks the CSE to assign a fresh AE originator
    'X-M2M-RI'     : '0',                        # Request identifier
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=2'     # JSON body describing an <AE> resource (ty=2)
}
ae_body = {
    "m2m:ae": {
        "rn"  : "Notebook-AE",    # resource name
        "api" : "AE",             # application identifier
        "rr"  : True,             # request reachability
        "srv" : [ "3" ]           # supported oneM2M release versions
    }
}
CREATE(url, ae_headers, ae_body)
###Output
_____no_output_____
###Markdown
The response introduces some new attributes:- **pi** : This is the identifier of the parent resource.- **et** : Expiration time/date after which the CSE will delete the resource.- **aei** : An Application Entity Identifier uniquely identifies an AE globally.**Note**: If you see an error "409" or "Name already present" then don't worry. It just means that an <AE> resource with the same name already exists in the CSE, perhaps from a previous run of this notebook cell. Create a <Container> ResourceIn this section we add a <Container> resource to the <AE> resource. A <Container> represents a data point that can hold a configurable number of data instances. A <Container> may also hold sub-containers.If created with no further attributes, the CSE will assign defaults to the <Container> resource.
###Code
# CREATE a <Container> resource under the <AE> created above.
cnt_headers = {
    'X-M2M-Origin' : originator,                 # Set the originator
    'X-M2M-RI'     : '0',                        # Request identifier
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=3'     # JSON body describing a <Container> resource (ty=3)
}
cnt_body = {
    "m2m:cnt": {
        "rn" : "Container"    # resource name; all other attributes get CSE defaults
    }
}
CREATE(url + '/Notebook-AE', cnt_headers, cnt_body)
###Output
_____no_output_____
###Markdown
The following attributes are used with the <Container> resource:- **st** : The State Tag is incremented every time the resource is modified.- **mni** : Maximum number of direct data instances in the <Container> resource.- **mbs** : Maximum size in bytes of data.- **mia** : Maximum age of a direct data instances in the <Container> resource.- **cni** : Current number of direct data instances in the <Container> resource.- **cbs** : Current size in bytes of data.- **ol** : Resource identifier of a virtual resource that points to the oldest data instance of the <Container> resource.- **la** : Resource identifier of a virtual resource that points to the latest data instance of the <Container> resource.**Note**: If you see an error "409" or "Name already present" then don't worry. It just means that an <Container> resource with the same name already exists in the CSE, perhaps from a previous run of this notebook cell. Add a <ContentInstance> to the <Container>Now, we add an actual value to the *myContainer* <Container>. These attributes are part of the request:- **cnf** : This specifies the content format. It specifies the media type as well as an encoding type.- **con** : This is the actual content (ie. the value) that will be stored in the <Container resource. It must contain media information and may optionally specify an optional content encoding (here 0 means "plain, no transfer encoding"), and content security.<ContentInstance>'s can only be added and read, but not updated or deleted.**Note**: You can execute the following code as often as you like in order to create more <ContentInstance> resources.
###Code
# CREATE a <ContentInstance> (a data value) inside the <Container>.
cin_headers = {
    'X-M2M-Origin' : originator,                 # Set the originator
    'X-M2M-RI'     : '0',                        # Request identifier
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=4'     # JSON body describing a <ContentInstance> resource (ty=4)
}
cin_body = {
    "m2m:cin": {
        "cnf": "text/plain:0",     # media type + encoding (0 = plain, no transfer encoding)
        "con": "Hello, World!"     # the actual content
    }
}
CREATE(url + '/Notebook-AE/Container', cin_headers, cin_body)
###Output
_____no_output_____
###Markdown
A new attribute:- **cs** : This attribute contains the size of the content of the **con** attribute. Retrieve the latest <ContentInstance> resourceThis request will retrieve the latest data instance from the <Container>.
###Code
# RETRIEVE the latest <ContentInstance> via the virtual 'la' (latest) child resource.
la_headers = {
    'X-M2M-Origin' : originator,          # Set the originator
    'X-M2M-RI'     : '0',                 # Unique request identifier
    'Accept'       : 'application/json'   # Response shall be JSON
}
RETRIEVE(url + '/Notebook-AE/Container/la', la_headers)
###Output
_____no_output_____
###Markdown
Update the <Container> ResourceWith this request we will set the *MaxNrOfInstances* (**mni**) attribute to a new value.
###Code
# UPDATE the <Container>: only the attributes present in the body are changed.
upd_headers = {
    'X-M2M-Origin' : originator,                 # Set the originator
    'X-M2M-RI'     : '0',                        # Request identifier
    'Accept'       : 'application/json',         # Response shall be JSON
    'Content-Type' : 'application/json;ty=3'     # JSON body describing a <Container> resource (ty=3)
}
upd_body = {
    "m2m:cnt": {
        "mni": 10001    # new maximum number of direct data instances
    }
}
UPDATE(url + '/Notebook-AE/Container', upd_headers, upd_body)
###Output
_____no_output_____
###Markdown
The CSE returns the resource. Also note the change of the *lastModificationTime* (lt) and *status* (st) attributes. Check the <Container> resourceRetrieve the <Container> resource to see all the changes and its current state.
###Code
# RETRIEVE the <Container> itself to inspect its current state after the update.
get_headers = {
    'X-M2M-Origin' : originator,          # Set the originator
    'X-M2M-RI'     : '0',                 # Unique request identifier
    'Accept'       : 'application/json'   # Response shall be JSON
}
RETRIEVE(url + '/Notebook-AE/Container', get_headers)
###Output
_____no_output_____ |
FIFA_Project_Student-Template.ipynb | ###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
train_data = pd.read_csv(r'C:\Users\kamlesh\Downloads\machine learing\train.csv')
submission_data = pd.read_csv(r'C:\Users\kamlesh\Downloads\machine learing\sample_submission.csv')
# Column dtypes: a mix of numeric and object (string) columns
train_data.dtypes
# Missing values per column before imputation
train_data.isna().sum()
# Replace every NaN with the placeholder string "Unknown"
# NOTE(review): applies to ALL columns; a numeric column with NaNs would become object dtype — verify none exist
train_data = train_data.fillna("Unknown")
train_data.isna().sum()
# min/max of each numeric column
train_data.describe().loc[['min','max']]
train_data.head(5)
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
sns.set_style("whitegrid")
plt.figure(figsize=(15,5))
# Age per club for the first 11 rows of the training data
sns.barplot(x='Club', y='Age', data= train_data.loc[0:10])
# Code Starts here
Numeric_cols = ['Id','Age','Overall','Potential','Wage (M)']
Categorical_cols = ['Name','Nationality','Club','Position']
from sklearn.preprocessing import LabelEncoder
# Label-encode every categorical column in place.
# fit_transform(col) is equivalent to fit(col) followed by transform(col),
# so the repeated fit/transform pairs collapse into one loop.
le = LabelEncoder()
for col in Categorical_cols:
    train_data[col] = le.fit_transform(train_data[col])
# Code ends here
# Heatmap of pairwise correlations between the (now all-numeric) columns
corr = train_data.corr()
sns.heatmap(corr, xticklabels = corr.columns, yticklabels = corr.columns, annot = True, cmap= 'viridis')
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
X = train_data.drop(['Value (M)'],axis=1)
y = train_data['Value (M)']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Baseline: plain linear regression on all features
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
r2 = r2_score(y_test, y_pred)
print("r2", r2)
# BUG FIX: this metric is the mean *squared* error (mean_squared_error),
# not the mean absolute error — label the variable and the output accordingly.
mse = mean_squared_error(y_test, y_pred)
print("mse", mse)
from sklearn.preprocessing import PolynomialFeatures
# Degree-4 polynomial feature expansion.
# (Variable name `ploy` is a typo for `poly`, but it is reused by a later cell, so it is kept.)
ploy = PolynomialFeatures(4)
X_train_2 = ploy.fit_transform(X_train)
X_test_2 = ploy.transform(X_test)  # transform only — reuse the fit from the training split
model = LinearRegression()
model.fit(X_train_2, y_train)
y_pred_2 = model.predict(X_test_2)
r2 = r2_score(y_test,y_pred_2)
print("r2", r2)
mse = mean_squared_error(y_test, y_pred_2)
print("mse", mse)
# Code ends here
###Output
r2 0.7995328796583645
mae 5.135528618563141
r2 0.9656068307365638
mae 0.8810776786460299
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
# Apply the same preprocessing to the held-out test set, then predict with the fitted model
test_data = pd.read_csv(r'C:\Users\kamlesh\Downloads\machine learing\test.csv')
test_id = test_data['Id']
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# NOTE(review): encoders are re-fitted on the test set, so the integer codes here do NOT
# share the train-time mapping — confirm this is acceptable for the model being used
le.fit(test_data['Name'])
test_data['Name']=le.transform(test_data['Name'])
le.fit(test_data['Nationality'])
test_data['Nationality']=le.transform(test_data['Nationality'])
le.fit(test_data['Club'])
test_data['Club']=le.transform(test_data['Club'])
le.fit(test_data['Position'])
test_data['Position']=le.transform(test_data['Position'])
X = test_data
# `ploy` and `model` are the degree-4 expansion and polynomial regression fitted in the previous cell
X_test = ploy.transform(X)
y_pred_test = model.predict(X_test)
print(y_pred_test)
# Code ends here
# Build the submission indexed by the original Ids and write it out
submission_result = pd.DataFrame(y_pred_test, index = test_id, columns = ['Value (M)'])
print(round(submission_result,2))
submission_result.to_csv('FIFO Value Prediction.csv', index = True)
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
submission = pd.read_csv('sample_submission.csv')
# Drop the identifier column — it carries no predictive signal
train.drop('Id', axis = 1, inplace = True)
train.head()
train.shape, test.shape
train.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 14384 entries, 0 to 14383
Data columns (total 9 columns):
Name 14384 non-null object
Age 14384 non-null int64
Nationality 14384 non-null object
Overall 14384 non-null int64
Potential 14384 non-null int64
Club 14173 non-null object
Value (M) 14384 non-null float64
Wage (M) 14384 non-null float64
Position 14384 non-null object
dtypes: float64(2), int64(3), object(4)
memory usage: 1011.5+ KB
###Markdown
Check Numerical and Categorical columns
###Code
def numeric_features (dataframe):
    """Return the names of *dataframe*'s numeric columns as a list."""
    return list(dataframe.select_dtypes(include = np.number).columns)
num_col = numeric_features(train)
def categorical_features(dataframe):
    """Return the names of *dataframe*'s non-numeric (categorical) columns as a list."""
    return list(dataframe.select_dtypes(exclude = np.number).columns)
# Names of the categorical columns in the training data
cat_col = categorical_features(train)
num_col, cat_col
# Summary statistics for the numeric columns only
train[num_col].describe()
# Pairwise correlations between the numeric columns
corr_num = train[num_col].corr()
print(corr_num)
###Output
Age Overall Potential Value (M) Wage (M)
Age 1.000000 0.459678 -0.224055 0.082716 0.149757
Overall 0.459678 1.000000 0.678228 0.635618 0.589736
Potential -0.224055 0.678228 1.000000 0.595095 0.512910
Value (M) 0.082716 0.635618 0.595095 1.000000 0.845124
Wage (M) 0.149757 0.589736 0.512910 0.845124 1.000000
###Markdown
No highly correlated observations Check Missing Values
###Code
# Missing values per column
train.isnull().sum()
# Inspect the rows whose 'Club' is missing
train[train['Club'].isnull()]
# Check: the zero-wage rows are exactly the rows with a missing 'Club'
train[train['Wage (M)'] == 0].equals(train[train['Club'].isnull()])
###Output
_____no_output_____
###Markdown
where ever Club data is null, Wage data is '0'
###Code
train[train['Nationality'] == 'Ivory Coast'].head(10)
def missing_data(dataframe):
    """Summarise missing values per column of *dataframe*.

    Returns a DataFrame indexed by column name with two columns, both
    sorted in descending order of missingness:
    - 'Total': number of missing entries in the column
    - 'Percentage': fraction of missing entries relative to the total row count
    """
    total = dataframe.isnull().sum().sort_values(ascending = False)
    # BUG FIX: divide by the full row count (len) rather than count(), which excludes
    # NaNs and therefore overstates the missing fraction (it could even exceed 1.0).
    percentage = (dataframe.isnull().sum()/len(dataframe)).sort_values(ascending = False)
    missing_data = pd.concat([total, percentage], axis = 1, keys = ['Total', 'Percentage'])
    return missing_data
missing_data = missing_data(train)
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Count of players per preferred position
plt.figure(figsize = (16,8))
plt.title('Grouping players by preferred position', fontsize = 15, fontweight = 'bold')
plt.xlabel('Position', fontsize = 12)
plt.ylabel('count')
sns.countplot(x = 'Position', data = train)
plt.show()
# Distribution of player wages
plt.figure(figsize = (10,8))
plt.title('Wage distribution of players', fontsize = 15,fontweight = 'bold' )
plt.xlabel('Wage')
plt.ylabel('frequency')
sns.distplot(train['Wage (M)'])
# Wages of the 100 best-paid players, highest first
value_dist = train.sort_values('Wage (M)', ascending = False).reset_index().head(100)[['Name', 'Wage (M)']]
plt.figure(figsize=(16,8))
sns.set_style("whitegrid")
plt.ylabel('Player Wage', fontsize = 15)
plt.plot(value_dist['Wage (M)'])
plt.figure()
# Mean market value per Overall rating
overall = train.sort_values('Overall')['Overall'].unique()
over_all_value = train.groupby('Overall')['Value (M)'].mean()
plt.figure(figsize = (16,8))
plt.title('Overall vs Value', fontsize=20, fontweight='bold')
plt.xlabel('Overall', fontsize=15)
plt.ylabel('Value', fontsize=15)
plt.plot(overall, over_all_value, label = 'Value in (M)')
# Code Starts here
# Mean market value per Potential rating
Potential = train.sort_values('Potential')['Potential'].unique()
potential_values = train.groupby('Potential')['Value (M)'].mean()
plt.figure(figsize= (10,6))
plt.plot(Potential, potential_values)
plt.xlabel('Potential')
plt.ylabel("Value")
plt.title('Potential Vs Value')
plt.show()
# Code ends here
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
X = train.drop('Value (M)', axis = 1)
y = train['Value (M)']
# independent variables
X = X[['Overall','Potential','Wage (M)', 'Age']]
# 70/30 train/validation split with a fixed seed for reproducibility
X_train, X_test, Y_train, Y_test = train_test_split(X, y, random_state = 40, test_size = 0.3)
model = LinearRegression()
model.fit(X_train, Y_train)
y_pred = model.predict(X_test)
# Report MSE, R-squared and MAE on the validation split
mse = mean_squared_error(Y_test, y_pred)
r2 = r2_score(Y_test, y_pred)
mae = mean_absolute_error(Y_test, y_pred)
print(mse, r2, mae)
# Code ends here
X_train.head()
###Output
_____no_output_____
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
# Predict on the test set with the linear model and write the submission file
Id = test['Id']
Y_pred = model.predict(test[['Overall', 'Potential', 'Wage (M)', 'Age']])
submission_sample = pd.DataFrame({'Id' : Id, 'Value (M)' : Y_pred})
submission_sample.to_csv('sample_submission.csv', index = False)
# Code ends here
## Instantiate second-degree polynomial features (degree=2)
poly = PolynomialFeatures(degree=2)
# fit and transform polynomial features on X_train
X_train_2 = poly.fit_transform(X_train)
# instantiate Linear regression model
model=LinearRegression()
# fit the model
model.fit(X_train_2,Y_train)
# transform on x_test (reuse the fit from the training split)
X_test_2 = poly.transform(X_test)
# predict on the transformed validation split
y_pred_2=model.predict(X_test_2)
# Calculate the mean absolute error
mae= mean_absolute_error(Y_test, y_pred_2)
print (mae)
# calculate the r2 score
r2= r2_score(Y_test, y_pred_2)
print(r2)
# Predict on the test set with the polynomial model and write the submission file
Id = test['Id']
test_obsev = test[['Overall', 'Potential', 'Wage (M)', 'Age']]
# NOTE(review): fit_transform re-fits `poly` on the test features; for PolynomialFeatures
# this is presumably harmless (only the column count matters) — transform() alone would be cleaner
test_data = poly.fit_transform(test_obsev)
Y_pred = model.predict(test_data)
submission_sample = pd.DataFrame({'Id' : Id, 'Value (M)' : Y_pred})
submission_sample.to_csv('sample_submission.csv', index = False)
# Replace the literal string 'unknown' in Club with the most frequent club
# NOTE(review): this targets the string 'unknown', not NaN — confirm missing clubs
# were filled with that placeholder earlier, otherwise this is a no-op
train['Club']= train['Club'].str.replace('unknown', train.Club.mode()[0])
# Integer-encode the categorical features used below
le = LabelEncoder()
train['Position'] = le.fit_transform(train['Position'])
train['Nationality'] = le.fit_transform(train['Nationality'])
poly = PolynomialFeatures(degree=2)
X = train.drop('Value (M)', axis = 1)
y = train['Value (M)']
# independent variables (now including the encoded Position and Nationality)
X = X[['Overall','Potential','Wage (M)', 'Age', 'Position', 'Nationality']]
X_train, X_test, Y_train, Y_test = train_test_split(X, y, random_state = 40, test_size = 0.3)
X_train_2 = poly.fit_transform(X_train)
model = LinearRegression()
model.fit(X_train_2, Y_train)
X_test_2 = poly.transform(X_test)
y_pred = model.predict(X_test_2)
# Report MSE, R-squared and MAE on the validation split
mse = mean_squared_error(Y_test, y_pred)
r2 = r2_score(Y_test, y_pred)
mae = mean_absolute_error(Y_test, y_pred)
print(mse, r2, mae)
# Encode the test set the same way and write the final submission
Id = test['Id']
# NOTE(review): encoders are re-fitted on the test set, so test integer codes do not
# share the train-time mapping — verify this is acceptable
test['Position'] = le.fit_transform(test['Position'])
test['Nationality'] = le.fit_transform(test['Nationality'])
Y_pred = model.predict(poly.transform(test[['Overall', 'Potential', 'Wage (M)', 'Age', 'Position', 'Nationality']]))
submission_sample = pd.DataFrame({'Id' : Id, 'Value (M)' : Y_pred})
submission_sample.to_csv('sample_submission.csv', index = False)
## Instantiate second-degree polynomial features (degree=2)
## NOTE: this cell repeats the earlier polynomial-model cell, now on the expanded feature set
poly = PolynomialFeatures(degree=2)
# fit and transform polynomial features on X_train
X_train_2 = poly.fit_transform(X_train)
# instantiate Linear regression model
model=LinearRegression()
# fit the model
model.fit(X_train_2,Y_train)
# transform on x_test (reuse the fit from the training split)
X_test_2 = poly.transform(X_test)
# predict on the transformed validation split
y_pred_2=model.predict(X_test_2)
# Calculate the mean absolute error
mae= mean_absolute_error(Y_test, y_pred_2)
print (mae)
# calculate the r2 score
r2= r2_score(Y_test, y_pred_2)
print(r2)
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
train = pd.read_csv('E:/GreyAtom/glab proj/FIFA/train.csv')
#train_data.head(10)
train.head(10)
# Shape of the data
print("Shape of the data is:", train.shape)
#Checking statistical properties of data
print("Statistical properties of data are as follows")
print(train.describe())
print("Skewness for different features is shown as below")
print(train.skew())
# Split into features and target
# NOTE(review): 'Id' is used as a model feature here — it is a row identifier and
# likely leaks ordering information (the heatmap below shows it strongly correlated); confirm intent
X = train[['Id','Overall','Potential','Wage (M)']]
y = train['Value (M)']
#Reading features (X)
X.head(10)
#Reading Target (y)
y.head(10)
# Separate into train and test data (70/30 split, fixed seed)
X_train,X_test,y_train,y_test=train_test_split(X,y ,test_size=0.3,random_state=6)
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Code Starts here
#Checking the best plots for plotting between continuous features and try making some inferences from these plots.
cols = X_train.columns
print("Below columns are present in dataset:")
print(cols)
# fig, axes = plt.subplots(nrows = 3, ncols = 3, figsize=(20,20))
# for i in range(0,3):
#     for j in range(0,3):
#         col = cols[i*3 + j]
#         axes[i,j].set_title(col)
#         #axes[i,j].scatter(X_train[col],y_train)
#         axes[i,j].set_xlabel(col)
#         axes[i,j].set_ylabel('Wage (M)')
# plt.show()
#Feature Selection
#selecting suitable threshold and dropping columns
# Plot a heatmap to check for correlation between the features
sns.heatmap(train.corr())
# Selecting upper and lower threshold for "strong" correlation
upper_threshold = 0.5
lower_threshold = -0.5
# List the correlation pairs, sorted ascending
correlation = train.corr().unstack().sort_values(kind='quicksort')
correlation
# Select the highest correlation pairs having correlation greater than upper threshold and lower than lower threshold
# (the !=1 filter drops each feature's self-correlation)
corr_var_list = correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]
print(corr_var_list)
###Output
Id Overall -0.975595
Overall Id -0.975595
Id Potential -0.653503
Potential Id -0.653503
Id Value (M) -0.548213
Value (M) Id -0.548213
Id Wage (M) -0.519570
Wage (M) Id -0.519570
Potential Wage (M) 0.512910
Wage (M) Potential 0.512910
Overall Wage (M) 0.589736
Wage (M) Overall 0.589736
Potential Value (M) 0.595095
Value (M) Potential 0.595095
Overall Value (M) 0.635618
Value (M) Overall 0.635618
Overall Potential 0.678228
Potential Overall 0.678228
Wage (M) Value (M) 0.845124
Value (M) Wage (M) 0.845124
dtype: float64
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
# Fit a plain linear regression on the train split and inspect error metrics
# plus the residual distribution.
#Instantiate linear regression model
regressor = LinearRegression()
# fit the model
regressor.fit(X_train,y_train)
# predict the result
y_pred = regressor.predict(X_test)
y_pred
# Calculate mse
mse = mean_squared_error(y_test,y_pred)
mse
# Calculate r2_score
r2 = r2_score(y_test,y_pred)
r2
#Residual Check
# A roughly symmetric, zero-centred histogram suggests unbiased errors.
residual = y_test - y_pred
print("Residual : ",residual)
plt.figure(figsize=(15,8))
plt.hist(residual, bins=30)
plt.xlabel("Residual")
plt.ylabel("Frequency")
plt.title("Residual Plot")
plt.show()
###Output
Residual : 13328 -0.685999
2639 -2.859364
5353 0.750055
13126 -5.949938
5554 -1.144565
...
10011 -0.846442
12592 -0.321063
3598 -0.687268
13385 -0.707041
8482 -0.486819
Name: Value (M), Length: 4316, dtype: float64
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
# Predict on the held-out test file with the linear model fitted above and
# write the submission CSV (Id + predicted Value (M)).
test = pd.read_csv("E:/GreyAtom/glab proj/FIFA/test.csv")
test.head(10)
id_ = test['Id']
# Dropping these columns leaves Id, Overall, Potential, Wage (M) — the same
# four columns the regressor was trained on.
# NOTE(review): the positional axis argument to drop() is deprecated in newer
# pandas — prefer axis=1 (or columns=[...]).
test.drop(['Name','Age', 'Nationality', 'Club', 'Position'],1,inplace=True)
test.head()
y_pred_test = regressor.predict(test)
y_pred_test
final_submission = pd.DataFrame({'Id':id_,'Value (M)':y_pred_test})
final_submission.head(10)
final_submission.to_csv('final_submission.csv',index=False)
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
train_data=pd.read_csv('train.csv')
train_data
# Summary statistics of the numeric columns.
train_data.describe()
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Code Starts here
# Exploratory plots: age distribution, value by nationality, position counts,
# top earners and the feature correlation heatmap.
# Age counts
train_data.Age.value_counts().plot(kind='bar')
# Top 10 mean values (M) w.r.t Nationality
g_age=train_data.groupby(['Nationality'])['Value (M)'].mean().sort_values(ascending=False).head(10)
g_age.plot(kind='bar')
plt.ylabel('Value (M)')
plt.title('Top 10 values (M) w.r.t Nationality')
plt.show()
# Count of players by their position
plt.figure(figsize=(8,5))
plt.title('Grouping players by Prefered Position', fontsize=12, fontweight='bold',y=1.06)
sns.countplot(x="Position", data= train_data)
plt.xlabel('Position', fontsize=12)
plt.ylabel('Number of players', fontsize=12)
plt.show()
# Wage distribution of the top 10 earners (head(10) below)
distribution_values = train_data.sort_values("Wage (M)",ascending=False).reset_index().head(10)[["Name", "Wage (M)"]]
distribution_values.plot.barh(x='Name', y='Wage (M)')
plt.xlabel('Value (M)')
plt.show()
# Age vs market value scatter
plt.scatter(x=train_data['Age'], y=train_data['Value (M)'], c='c')
plt.xlabel('Age of players', fontsize=12)
plt.ylabel('Value (M)', fontsize=12)
# Fix: the original referenced `plt.show` without calling it, so this figure
# was never explicitly rendered.
plt.show()
#sns.scatterplot(x=train_data['Age'], y=train_data['Value (M)'])
# Correlation heatmap of the numeric columns
plt.figure(figsize=(8,6))
sns.heatmap(train_data.corr(), annot = True, vmin=-1, vmax=1, center= 0, cmap= 'coolwarm')
# Code ends here
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
# Separate target from features (drop the Id column too — it is just an index).
x = train_data.drop(['Value (M)', 'Id'], axis=1)
y = train_data['Value (M)']
# independent variables used for modelling
X = x[['Overall', 'Potential', 'Wage (M)']]
# Separate into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)
print(X_train.head(10))
print("-------------------------------------")
print(y_train.head(5))
# Linear regression
model = LinearRegression()
# fit the model on training data
model.fit(X_train, y_train)
# make prediction
y_pred = model.predict(X_test)
y_pred
# Plot outputs: actual vs predicted values.
# Fix: the original passed the 3-column X_test frame directly to plt.scatter,
# which fails because x and y must have the same size; plotting y_test against
# y_pred (with the identity line) is the meaningful diagnostic here.
plt.scatter(y_test, y_pred, color='black')
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# Mean_absolute_error
mae = mean_absolute_error(y_test, y_pred)
mae
# r2 score
r2 = r2_score(y_test, y_pred)
r2
# Code ends here
###Output
_____no_output_____
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
# Build the submission from the model fitted in the previous cell.
# Read the test data
test = pd.read_csv('test.csv')
test.head(5)
# Storing the id from the test file
id_ = test['Id']
# Dropping the same columns from the test data
# (keeps exactly the three features the model was trained on)
test = test[['Overall','Potential','Wage (M)']]
test.head()
# Predict on the test data
y_pred_test = model.predict(test)
y_pred_test
# Create a sample submission file
sample_submission = pd.DataFrame({'Id':id_,'Value (M)':y_pred_test})
# Convert the sample submission file into a csv file
sample_submission.to_csv('sample_submission.csv',index=False)
# Code ends here
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
train=pd.read_csv('train.csv')
test=pd.read_csv('test.csv')
# Count missing values per column before modelling.
train.isna().sum()
# Drop the identifier and categorical columns, keeping only numeric features.
train=train.drop(['Id','Name','Nationality','Club','Position'],axis=1)
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Code Starts here
# Histogram of every numeric column, then a lower-triangle correlation heatmap.
numcols= train.select_dtypes(include=['number']).columns.tolist()
for i in numcols:
    plt.figure(figsize=(8,4))
    sns.set_style('whitegrid')
    # NOTE(review): sns.distplot is deprecated in recent seaborn releases —
    # histplot/displot is the modern equivalent.
    sns.distplot(train[i],kde=False,color='blue')
    plt.show()
# Code ends here
corr = train.corr()
# Mask the upper triangle so each correlation is shown only once.
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
    f, ax = plt.subplots(figsize=(9, 7))
    ax = sns.heatmap(corr,mask=mask,square=True,annot=True,fmt='0.2f',linewidths=.8,cmap="hsv")
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
# Degree-5 polynomial regression on the numeric features of `train`
# (Overall, Potential, Wage (M) after the drops below).
Y = train['Value (M)']
X = train.drop(['Value (M)','Age'],axis=1)
X_train,X_test,y_train,y_test=train_test_split(X,Y,train_size=0.8,random_state=0)
poly = PolynomialFeatures(5)
X_train_2=poly.fit_transform(X_train)
# Fix: only transform the held-out data with the expander already fitted on
# the training split; the original called fit_transform on X_test, re-fitting
# the transformer on test data.
X_test_2 = poly.transform(X_test)
model=LinearRegression()
model.fit(X_train_2,y_train)
y_pred=model.predict(X_test_2)
rme=mean_squared_error(y_test,y_pred)
r2=r2_score(y_test,y_pred)
print(rme,r2)
# Code ends here
###Output
0.8199533541796538 0.9549481143923584
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
id_ = test['Id']
# Dropping the same columns from the test data
test = test[['Overall','Potential','Wage (M)']]
# Applying the polynomial transform (fitted on the train split) to the test
# data — the earlier "rfe" comment was inaccurate, no recursive feature
# elimination happens here.
test_poly = poly.transform(test)
# Predict on the test data
y_pred_test = model.predict(test_poly)
print(y_pred_test)
# flatten() is a no-op for a 1-D prediction array but guards against a 2-D one.
y_pred_test = y_pred_test.flatten()
print(y_pred_test)
# Create a sample submission file
sample_submission = pd.DataFrame({'Id':id_,'Value (M)':y_pred_test})
# Code ends here
sample_submission.to_csv('FIFA.csv',index=False)
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
df_train = pd.read_csv("train.csv")
df_train.head()
# Count missing values per column.
df_train.isnull().sum()
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Identify the categorical (object-dtype) columns, then drop them so only
# numeric features remain.
categorical_var = df_train.select_dtypes(include = 'object')
categorical_var
df_train.drop(["Name","Nationality","Club","Position"],axis = 1, inplace = True)
df_train.head()
# Code Starts here
numerical_var = df_train.select_dtypes(include = 'number')
numerical_var
# Code ends here
# Scatter each numeric feature against the target, two subplots per figure;
# the final odd feature (if any) is plotted on its own.
numerical = ["Potential","Age","Overall"]
for i in range(0,len(numerical),2):
    if len(numerical) > i+1:
        plt.figure(figsize=(10,4))
        plt.subplot(121)
        plt.scatter(df_train[numerical[i]],df_train["Value (M)"])
        plt.title('Plotting target against '+numerical[i])
        plt.xlabel(numerical[i])
        plt.ylabel("Value (M)")
        plt.subplot(122)
        plt.scatter(df_train[numerical[i+1]],df_train["Value (M)"])
        plt.title('Plotting target against '+numerical[i+1])
        plt.xlabel(numerical[i+1])
        plt.ylabel("Value (M)")
        plt.tight_layout()
        plt.show()
    else:
        plt.scatter(df_train[numerical[i]],df_train["Value (M)"])
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
df_train.columns
# Code Starts here
# NOTE(review): 'Id' is kept as a feature here even though it is a row
# identifier — the correlation analysis elsewhere in this notebook suggests
# dropping it.
X = df_train[['Id', 'Age', 'Overall', 'Potential', 'Wage (M)']]
y = df_train["Value (M)"]
X_train,X_val,y_train,y_val = train_test_split(X, y, test_size = 0.3,random_state = 6)
print(X_train.shape)
print(y_train.shape)
# Code ends here
df_train.info()
# Baseline linear model.
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_val)
y_pred
#Calculate R^2
r2 = r2_score(y_val,y_pred)
r2
# Degree-2 polynomial expansion; the same `regressor` instance is re-fitted
# on the expanded features, so from here on it expects polynomial input.
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(2)
X_train_poly = poly.fit_transform(X_train)
X_val_poly = poly.transform(X_val)
regressor.fit(X_train_poly, y_train)
y_pred_poly = regressor.predict(X_val_poly)
r2 = r2_score(y_val,y_pred_poly)
r2
###Output
_____no_output_____
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
df_test = pd.read_csv("test.csv")
df_test.shape
df_test.columns
# Code Starts here
# Drop the same non-numeric columns that were dropped from the training data,
# leaving Id, Age, Overall, Potential and Wage (M) — the columns the model saw.
df_test.drop(["Name","Nationality","Club","Position"],axis = 1, inplace = True)
# Code ends here
# Keep the ids for the submission (renamed from `id`, which shadowed the builtin).
test_ids = df_test["Id"]
# Fix: the original predicted on the raw frame (wrong feature space — the
# regressor was last fitted on the degree-2 polynomial expansion) and then on
# an undefined `df_test_poly` variable (NameError). Transform the test frame
# with the SAME `poly` instance that was fitted on X_train, then predict.
df_test_poly = poly.transform(df_test)
y_pred_poly = regressor.predict(df_test_poly)
y_pred_poly
final_sub = pd.DataFrame({"Id": test_ids, "Value (M)": y_pred_poly})
#final_sub
final_sub.to_csv("FIFA_Submission.csv", index=False)
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
fifa = pd.read_csv('train.csv')
print(fifa.sample(n=20))
fifa.info()
fifa.describe()
fifa.columns
# Cardinality of the categorical columns.
print(fifa['Nationality'].nunique())
print(fifa['Club'].nunique())
print(fifa['Position'].nunique())
# Integer-encode the categorical columns (category codes; NaN becomes -1).
fifa['Nationality_cat'] = fifa['Nationality'].astype('category').cat.codes
fifa['Club_cat'] = fifa['Club'].astype('category').cat.codes
fifa['Position_cat'] = fifa['Position'].astype('category').cat.codes
###Output
Id Name Age Nationality Overall Potential \
14307 12268 C. Dickinson 30 England 63 63
12057 4058 S. Filip 23 Romania 71 78
7264 10639 E. Upson 27 England 65 66
4600 3598 D. Andrade 26 Colombia 72 75
13766 5758 A. Olanare 23 Nigeria 69 73
12851 13103 D. Keita-Ruel 27 Germany 62 62
12457 2085 J. Schunke 30 Argentina 74 74
10829 6730 C. Duvall 25 United States 68 69
3716 10386 C. Chaplin 20 England 65 82
4232 1252 R. Yanbaev 33 Russia 76 76
801 11225 P. Zulu 24 South Africa 64 67
11038 7792 H. Olvera 27 Mexico 67 68
10662 9857 A. Nordvik 30 Norway 66 66
4476 13926 J. Stockley 23 England 61 68
5846 8021 L. Olum 32 Kenya 67 67
6052 3307 Rober Ibáñez 24 Spain 72 78
1261 17972 A. Conway 19 Republic of Ireland 47 63
14274 2400 L. Ulloa 30 Argentina 74 74
2731 6481 W. Larrondo 33 Chile 69 69
3484 9649 A. Barada 26 Japan 66 69
Club Value (M) Wage (M) Position
14307 Notts County 0.270 0.004 LWB
12057 NaN 0.000 0.000 LM
7264 Milton Keynes Dons 0.500 0.004 CM
4600 Asociacion Deportivo Cali 3.200 0.003 LB
13766 CSKA Moscow 1.400 0.023 ST
12851 SC Fortuna Köln 0.325 0.001 LW
12457 Estudiantes de La Plata 4.200 0.016 CB
10829 Montreal Impact 0.875 0.005 LB
3716 Portsmouth 1.200 0.003 ST
4232 FC Krasnodar 3.000 0.041 RM
801 Kaizer Chiefs 0.575 0.001 LW
11038 Lobos de la BUAP 0.700 0.005 LB
10662 Viking FK 0.475 0.002 LB
4476 Exeter City 0.350 0.003 ST
5846 Portland Timbers 0.450 0.005 CDM
6052 Valencia CF 4.300 0.017 LM
1261 Galway United 0.060 0.001 CB
14274 Leicester City 5.500 0.062 ST
2731 Santiago Wanderers 0.650 0.004 RM
3484 Omiya Ardija 0.775 0.002 CAM
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 14384 entries, 0 to 14383
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Id 14384 non-null int64
1 Name 14384 non-null object
2 Age 14384 non-null int64
3 Nationality 14384 non-null object
4 Overall 14384 non-null int64
5 Potential 14384 non-null int64
6 Club 14173 non-null object
7 Value (M) 14384 non-null float64
8 Wage (M) 14384 non-null float64
9 Position 14384 non-null object
dtypes: float64(2), int64(4), object(4)
memory usage: 1.1+ MB
158
647
15
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Code Starts here
# Pairwise scatter plots with regression fits across all numeric columns.
#sns.pairplot(fifa,corner=True,kind='reg')
sns.pairplot(fifa,kind='reg')
# Feature relation with Target
fifa_corr = fifa.corr()
plt.figure(figsize=(10,10))
sns.heatmap(fifa_corr,annot=True,cmap=plt.cm.Reds)
plt.plot()
# Code ends here
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
# Separate features and target; add squared Overall/Potential terms as
# hand-crafted polynomial features.
fifa['Overall-2'] = np.power(fifa['Overall'],2)
fifa['Potential-2'] = np.power(fifa['Potential'],2)
X = fifa[['Age','Overall', 'Potential', 'Wage (M)','Overall-2','Potential-2']]
y= fifa['Value (M)']
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Fit the train data
basemodel = LinearRegression()
basemodel.fit(X_train,y_train)
# Predict
y_pred = basemodel.predict(X_test)
# R-squared score
r2=r2_score(y_test, y_pred)
print('r2: ', round(r2,4))
# Correlation heatmap of the selected features.
# Fix: the original passed the raw feature matrix X to sns.heatmap, which
# tries to draw (and annotate) one cell per dataset row; the intended plot is
# the correlation matrix, as in the earlier exploration cell.
plt.figure(figsize=(10,10))
sns.heatmap(X.corr(),annot=True,cmap=plt.cm.Reds)
plt.plot()
# Code ends here
###Output
r2: 0.8601
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code starts here
#Load test data
test_data = pd.read_csv('test.csv')
# Store Id
results = pd.DataFrame(test_data['Id'])
# Recreate the same engineered squared features that the model was trained on.
test_data['Overall-2'] = np.power(test_data['Overall'],2)
test_data['Potential-2'] = np.power(test_data['Potential'],2)
test_data_features = test_data[['Age','Overall', 'Potential', 'Wage (M)','Overall-2','Potential-2']]
# Predict
test_data_pred = basemodel.predict(test_data_features)
# Add to results
results['Value (M)'] = test_data_pred.tolist()
# Write to CSV
results.to_csv('results.csv',index=False)
# Code ends here.
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# NOTE: this cell assumes the training DataFrame is already loaded as `df`
# earlier in the session — `df` is not defined in this section.
print('Skewness for the different features is as shown below: ')
print(df.skew())
# sns.heatmap(df.corr())
# Selecting upper and lower threshold
upper_threshold = 0.5
lower_threshold = -0.5
# List the correlation pairs
correlation = df.corr().unstack().sort_values(kind='quicksort')
# Select the highest correlation pairs having correlation greater than upper threshold and lower than lower threshold
corr_var_list = correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]
print(corr_var_list)
###Output
Id Overall -0.975595
Overall Id -0.975595
Id Potential -0.653503
Potential Id -0.653503
Id Value (M) -0.548213
Value (M) Id -0.548213
Id Wage (M) -0.519570
Wage (M) Id -0.519570
Potential Wage (M) 0.512910
Wage (M) Potential 0.512910
Overall Wage (M) 0.589736
Wage (M) Overall 0.589736
Potential Value (M) 0.595095
Value (M) Potential 0.595095
Overall Value (M) 0.635618
Value (M) Overall 0.635618
Overall Potential 0.678228
Potential Overall 0.678228
Wage (M) Value (M) 0.845124
Value (M) Wage (M) 0.845124
dtype: float64
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# print(df.columns)
# df_group = df.groupby(['Position']).sum()
# #Code starts here
# sns.countplot(x='Position', data=df)
# value_distribution_values = df.sort_values("Wage (M)", ascending=False).reset_index().head(100)[["Name", "Wage (M)"]]
# sns.countplot(x='Wage (M)', data=value_distribution_values)
# # value_distribution_values = df[]
# overall = df.sort_values("Overall")
# overall_value = overall.groupby(['Overall'])['Value (M)'].mean()#
# # Code ends here
# p_list_1= ['GK', 'LB', 'CB', 'CB', 'RB', 'LM', 'CDM', 'RM', 'LW', 'ST', 'RW']
# p_list_2 = ['GK', 'LWB', 'CB', 'RWB', 'LM', 'CDM', 'CAM', 'CM', 'RM', 'LW', 'RW']
# # p_list_1 stats
# df_copy = df.copy()
# store = []
# for i in p_list_1:
# store.append([i,
# df_copy.loc[[df_copy[df_copy['Position'] == i]['Overall'].idxmax()]]['Name'].to_string(
# index=False), df_copy[df_copy['Position'] == i]['Overall'].max()])
# df_copy.drop(df_copy[df_copy['Position'] == i]['Overall'].idxmax(), inplace=True)
# # return store
# df1= pd.DataFrame(np.array(store).reshape(11, 3), columns=['Position', 'Player', 'Overall'])
# # p_list_2 stats
# df_copy = df.copy()
# store = []
# for i in p_list_2:
# store.append([i,
# df_copy.loc[[df_copy[df_copy['Position'] == i]['Overall'].idxmax()]]['Name'].to_string(
# index=False), df_copy[df_copy['Position'] == i]['Overall'].max()])
# df_copy.drop(df_copy[df_copy['Position'] == i]['Overall'].idxmax(), inplace=True)
# # return store
# df2= pd.DataFrame(np.array(store).reshape(11, 3), columns=['Position', 'Player', 'Overall'])
# if df1['Overall'].mean() > df2['Overall'].mean():
# print(df1)
# print(p_list_1)
# else:
# print(df2)
# print(p_list_2)
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error,mean_squared_error, r2_score
from math import sqrt
from sklearn.model_selection import train_test_split
# Code starts here
# Baseline: plain linear regression on the three strongest features.
X = df[['Overall','Potential','Wage (M)']]
y = df['Value (M)']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
r2 = r2_score(y_test,y_pred)
print("r2", r2)
mae = mean_absolute_error(y_test, y_pred)
print("mae", mae)
# Code ends here
# --------------
from sklearn.preprocessing import PolynomialFeatures
# Code starts here
# Degree-3 polynomial expansion; `model` is rebound to the new fit, so later
# cells using `model`/`poly` operate in the polynomial feature space.
poly = PolynomialFeatures(3)
X_train_2 = poly.fit_transform(X_train)
X_test_2 = poly.transform(X_test)
model = LinearRegression()
model.fit(X_train_2, y_train)
y_pred_2 = model.predict(X_test_2)
r2 = r2_score(y_test,y_pred_2)
print("r2", r2)
mae = mean_absolute_error(y_test, y_pred_2)
print("mae", mae)
# Code ends here
# Code ends here
###Output
r2 0.7676309781948667
mae 1.3718341450247453
r2 0.9481242645946444
mae 0.5118790302908705
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
test3=pd.read_csv("./test.csv")
# Code Starts here
Id =test3['Id']
print(Id)
# Code ends here
# Keep only the columns the model was trained on, expand with the fitted
# polynomial transformer, then predict.
test3=test3[['Overall','Potential','Wage (M)']]
test3.head()
test_3 = poly.transform(test3)
pred = model.predict(test_3)
# NOTE(review): the submission column is named 'Value' here while the other
# solutions in this notebook use 'Value (M)' — verify the expected header.
submission_file1 = pd.DataFrame({'Id' : Id, 'Value' : pred})
submission_file1.to_csv('submission3.csv', index = False)
###Output
_____no_output_____
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
# NOTE(review): relative path with spaces, specific to the author's layout.
train = pd.read_csv('../file (2)/train.csv')
train.head()
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Code Starts here
# Position counts.
plt.figure(figsize=(16,8))
plt.title('Grouping players by Prefered Position', fontsize = 18, fontweight = 'bold', y = 1.05)
plt.xlabel('Number of players', fontsize=12)
plt.ylabel('Player age', fontsize=12)
sns.countplot(x='Position', data=train)
#Wage distribution of top 100 players
Wage_distribution = train.sort_values("Wage (M)", ascending = False).reset_index()[:101][['Name', 'Wage (M)']]
plt.figure(figsize=(16,8))
plt.title('Top 100 Players Wage Distribution', fontsize = 20, fontweight = 'bold')
plt.xlabel('Player Wage [M€]', fontsize=15)
sns.set_style('whitegrid')
plt.plot(Wage_distribution['Wage (M)'])
# Comparision graph of Overall vs values(M)
# Mean market value for each distinct Overall rating.
overall = train.sort_values('Overall')['Overall'].unique()
overall_value = train.groupby(['Overall'])['Value (M)'].mean()
plt.figure()
plt.figure(figsize=(16,8))
plt.title('Overall vs Value', fontsize=20, fontweight='bold')
plt.xlabel('Overall', fontsize=15)
plt.ylabel('Value', fontsize=15)
sns.set_style("whitegrid")
plt.plot(overall, overall_value, label="Values in [M€]")
plt.legend(loc=4, prop={'size': 15}, frameon=True,shadow=True, facecolor="white", edgecolor="black")
plt.show()
# Code ends here
###Output
_____no_output_____
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
#Split into feature and target
# NOTE(review): the positional axis argument to drop() is deprecated in newer
# pandas — prefer axis=1.
X = train.drop(['Value (M)'], 1)
y = train[['Value (M)']]
#Independent variables
X = X[['Overall', 'Potential', 'Wage (M)']]
#Separate train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)
print(X_train.head(2))
print(y_train.head(2))
# Instantiate linear regression
model = LinearRegression()
# fit the model on training data
model.fit(X_train, y_train)
# make prediction
y_pred = model.predict(X_test)
# calculate the mean_absolute_error
mae = mean_absolute_error(y_test, y_pred)
print(mae)
# calculate the r2 score
r2 = r2_score(y_test, y_pred)
print(r2)
# Code ends here
# Instantiate second degree polynomial features (the original comment said
# "third degree" but degree=2 is used).
poly = PolynomialFeatures(degree=2)
# fit and transform polynomial features on X_train
X_train_2 = poly.fit_transform(X_train)
# instantiate Linear regression model
model = LinearRegression()
# fit the model
model.fit(X_train_2, y_train)
# transform on x_test
X_test_2 = poly.transform(X_test)
# predict the model performance
y_pred_2 = model.predict(X_test_2)
# Calculate the mean absolute error
mae = mean_absolute_error(y_test, y_pred_2)
print(mae)
# calculate the r2 score
r2 = r2_score(y_test, y_pred_2)
print(r2)
###Output
0.7168874818344876
0.9285452751547129
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
# Prediction on test data
# Read the test data
test = pd.read_csv('test.csv')
# Storing the id from the test file
id_ = test['Id']
# Dropping the same columns from the test data
test = test[['Overall','Potential','Wage (M)']]
# Applying the polynomial feature transform (fitted on train) to the test data
test_poly = poly.transform(test)
# Predict on the test data
y_pred_test = model.predict(test_poly)
# y_train was a one-column DataFrame, so predictions come back 2-D; flatten
# to a 1-D array for the submission column.
y_pred_test = y_pred_test.flatten()
print(y_pred_test)
# Create a sample submission file
sample_submission = pd.DataFrame({'Id':id_,'Value (M)':y_pred_test})
# Convert the sample submission file into a csv file
sample_submission.to_csv('sample_submission.csv',index=False)
# Code ends here
###Output
[16.20453142 19.70373631 1.36044225 ... 1.14279384 12.01775448
4.61622169]
###Markdown
Load the dataset- Load the train data and using all your knowledge of pandas try to explore the different statistical properties of the dataset.
###Code
# read the dataset and extract the features and target separately
train = pd.read_csv('train.csv')
train
train.info()
# Fixed seed so the split is reproducible.
random_key = 6
X = train[['Age','Overall','Potential','Wage (M)']]
y = train['Value (M)']
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.3, random_state=random_key)
X_train
###Output
_____no_output_____
###Markdown
Visualize the data- Check for the categorical & continuous features. - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.- Check for the correlation between the features
###Code
# Code Starts here
def show_boxplot(col_data, x_label, y_label, title, fig_size=(7, 7), show_outliers=True):
    """
    Show a box-and-whisker plot of *col_data* with the mean marked.

    Params:
    -------
    col_data: list or numpy array
        Values to plot.
    x_label: str
        X-axis label.
    y_label: str
        Y-axis label.
    title: str
        Figure title.
    fig_size: tuple of (int, int)
        Figure size in inches.
    show_outliers: bool
        Whether to draw the outlier fliers.
    """
    # The Figure object is not used afterwards, so the unused `fig` local
    # from the original was dropped.
    plt.figure(figsize=fig_size)
    plt.boxplot(col_data, showmeans=True, showfliers=show_outliers)
    plt.title(title, fontsize=21, color='navy')
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()
# Draw a boxplot for every numeric feature in the training split.
for col in X_train.select_dtypes(include=np.number).columns:
    x_label = col
    y_label = 'Distribution'
    data = X_train[col]
    title = f'Distribution for {col}'
    show_boxplot(col_data=data, x_label=x_label, y_label=y_label, title=title)
# Code ends here
# Correlation heatmap of the selected features.
sns.heatmap(X_train.corr())
upper_threshold = 0.5
lower_threshold = -0.5
# List the correlation pairs
correlation = train.corr().unstack().sort_values(kind='quicksort')
# Select the highest correlation pairs having correlation greater than upper threshold and lower than lower threshold
corr_var_list = correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]
print(corr_var_list)
###Output
Id Overall -0.975595
Overall Id -0.975595
Id Potential -0.653503
Potential Id -0.653503
Id Value (M) -0.548213
Value (M) Id -0.548213
Id Wage (M) -0.519570
Wage (M) Id -0.519570
Potential Wage (M) 0.512910
Wage (M) Potential 0.512910
Overall Wage (M) 0.589736
Wage (M) Overall 0.589736
Potential Value (M) 0.595095
Value (M) Potential 0.595095
Overall Value (M) 0.635618
Value (M) Overall 0.635618
Overall Potential 0.678228
Potential Overall 0.678228
Wage (M) Value (M) 0.845124
Value (M) Wage (M) 0.845124
dtype: float64
###Markdown
Model building- Separate the features and target and then split the train data into train and validation set.- Now let's come to the actual task, using linear regression, predict the `Value (M)`. - Try improving upon the `r2_score` (R-Square) using different parameters that give the best score. You can use higher degree [Polynomial Features of sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html) to improve the model prediction.
###Code
# Code Starts here
# Baseline: plain linear regression on the four numeric features.
linreg = LinearRegression()
# Fix: removed the unused `logreg = LogisticRegression()` instance — this is
# a regression task and it was never fitted or referenced.
#y = np.log(y_train)
linreg.fit(X_train,y_train)
y_pred = linreg.predict(X_test)
# display predictions
print('Mean Absolute Error :',(mean_absolute_error(y_test,y_pred)))
print('R-Square :',r2_score(y_test,y_pred))
# Code ends here
print('-'*20)
#Polynomial Feature
from sklearn.preprocessing import PolynomialFeatures
# Degree-4 polynomial expansion of the features; fit on train, transform test.
poly = PolynomialFeatures(4)
X_train_2 = poly.fit_transform(X_train)
X_test_2 = poly.transform(X_test)
model = LinearRegression()
model.fit(X_train_2, y_train)
y_pred_2 = model.predict(X_test_2)
r2 = r2_score(y_test,y_pred_2)
print("R-Square :", r2)
mae = mean_absolute_error(y_test, y_pred_2)
print('Mean Absolute Error :', mae)
###Output
Mean Absolute Error : 1.36113034368551
R-Square : 0.7728182411379437
--------------------
R-Square : 0.9748469694258696
Mean Absolute Error : 0.3528900386871176
###Markdown
Prediction on the test data and creating the sample submission file.- Load the test data and store the `Id` column in a separate variable.- Perform the same operations on the test data that you have performed on the train data.- Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
###Code
# Code Starts here
# Load the held-out test data and keep its Id column for the submission file.
test = pd.read_csv('test.csv')
Id = test['Id']

# Apply the same preprocessing as on the train data.
test = test.drop(["Name","Nationality","Club","Position",'Id'],axis=1)

# Expand with the already-fitted polynomial features and predict.
test_poly = poly.transform(test)
y_pred_1 = model.predict(test_poly)
y_pred_1 = y_pred_1.flatten()

# BUG FIX: the original wrote {'Id': id, ...} — `id` is the Python builtin
# function, not the saved `Id` Series, so the Id column was garbage.
id_1 = pd.DataFrame({'Id': Id, 'Value (M)': y_pred_1})
id_1.to_csv("submission.csv", encoding='utf-8', index=False)
# Code ends here
###Output
_____no_output_____ |
docs/source/notebooks/Maze.ipynb | ###Markdown
ACS2 in MazeThis notebook presents how to integrate ACS2 algorithm with maze environment (using OpenAI Gym interface).Begin with attaching required dependencies. Because most of the work is by now done locally no PIP modules are used (just pure OS paths)
###Code
# General
from __future__ import unicode_literals
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# To avoid Type3 fonts in generated pdf file
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# Logger
import logging
logging.basicConfig(level=logging.WARN)
# ALCS + Custom environments
import sys, os
sys.path.append(os.path.abspath('../'))
# Enable automatic module reload
%load_ext autoreload
%autoreload 2
# Load PyALCS module
from lcs.agents.acs2 import ACS2, Configuration, ClassifiersList
# Load environments
import gym
import gym_maze
###Output
_____no_output_____
###Markdown
Environment - MazeWe are going to look at provided mazes. Their names starts with "_Maze..._" or "_Woods..._" so see what is possible to load:
###Code
# Collect every registered environment whose id marks it as a maze
# ("Maze*" or "Woods*") and print its basic properties.
def _is_maze_env(env_spec):
    return env_spec.id.startswith(("Maze", "Woods"))

maze_envs = [spec for spec in gym.envs.registry.all() if _is_maze_env(spec)]

for spec in maze_envs:
    print("Maze ID: [{}], non-deterministic: [{}], trials: [{}]".format(
        spec.id, spec.nondeterministic, spec.trials))
###Output
Maze ID: [MazeF1-v0], non-deterministic: [False], trials: [100]
Maze ID: [MazeF2-v0], non-deterministic: [False], trials: [100]
Maze ID: [MazeF3-v0], non-deterministic: [False], trials: [100]
Maze ID: [MazeF4-v0], non-deterministic: [True], trials: [100]
Maze ID: [Maze4-v0], non-deterministic: [False], trials: [100]
Maze ID: [Maze5-v0], non-deterministic: [False], trials: [100]
Maze ID: [Maze6-v0], non-deterministic: [True], trials: [100]
Maze ID: [Woods1-v0], non-deterministic: [False], trials: [100]
Maze ID: [Woods14-v0], non-deterministic: [False], trials: [100]
###Markdown
Let's see how it looks in action. First we are going to initialize new environment using `gym.make()` instruction from OpenAI Gym.
###Code
#MAZE = "Woods14-v0"
MAZE = "Maze5-v0"

# Build the chosen maze, drop the agent at a random start position
# (reset returns the agent's 8-direction perception), then show the
# board in ASCII.
maze = gym.make(MAZE)
situation = maze.reset()
maze.render()
###Output
[30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m
[30m■[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [33m$[0m [30m■[0m
[30m■[0m [37m□[0m [37m□[0m [30m■[0m [37m□[0m [30m■[0m [30m■[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [37m□[0m [37m□[0m [30m■[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [30m■[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m [30m■[0m
[30m■[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [31mA[0m [37m□[0m [37m□[0m [37m□[0m [30m■[0m [37m□[0m [30m■[0m
[30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m
###Markdown
The `reset()` function puts an agent into random position (on path inside maze) returning current perception.> The perception consists of 8 values representing N, NE, E, SE, S, SW, W, NW directions. It outputs 0 for the path, 1 for the wall and 9 for the reward.
###Code
# Show current agents perception
situation
###Output
_____no_output_____
###Markdown
We can interact with the environment by performing actions.> Agent can perform 8 actions - moving into different directions.To do so use `step(action)` function. It will return couple interesting information:- new state perception,- reward for executing move (ie. finding the reward)- is the trial finish,- debug data
###Code
ACTION = 0  # Move N

# Apply the action and unpack the (observation, reward, done, info) tuple.
state, reward, done, _ = maze.step(ACTION)
print(f"New state: {state}, reward: {reward}, is done: {done}")

# Render the environment again so the move is visible.
maze.render()
###Output
New state: ('1', '0', '0', '1', '1', '1', '0', '0'), reward: 0, is done: False
[30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m
[30m■[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [33m$[0m [30m■[0m
[30m■[0m [37m□[0m [37m□[0m [30m■[0m [37m□[0m [30m■[0m [30m■[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [37m□[0m [37m□[0m [30m■[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [30m■[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m [30m■[0m
[30m■[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m [37m□[0m [37m□[0m [30m■[0m
[30m■[0m [37m□[0m [31mA[0m [37m□[0m [37m□[0m [37m□[0m [30m■[0m [37m□[0m [30m■[0m
[30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m [30m■[0m
###Markdown
Agent - ACS2First provide a helper method for calculating obtained knowledge
###Code
def _maze_knowledge(population, environment) -> float:
    """Percentage of the maze's transitions that at least one reliable
    classifier in `population` anticipates correctly."""
    transitions = environment.env.get_all_possible_transitions()

    # Only reliable classifiers count towards knowledge.
    reliable = [cl for cl in population if cl.is_reliable()]

    correct = 0
    for start, action, end in transitions:
        p0 = environment.env.maze.perception(*start)
        p1 = environment.env.maze.perception(*end)
        if any(cl.predicts_successfully(p0, action, p1) for cl in reliable):
            correct += 1

    return correct / len(transitions) * 100.0
from lcs.metrics import population_metrics
def _maze_metrics(pop, env):
    """Per-trial metrics: custom maze knowledge plus the standard
    population metrics from lcs."""
    metrics = {'knowledge': _maze_knowledge(pop, env)}
    metrics.update(population_metrics(pop, env))
    return metrics
###Output
_____no_output_____
###Markdown
Exploration phase
###Code
CLASSIFIER_LENGTH = 8
NUMBER_OF_POSSIBLE_ACTIONS = 8

# Default ACS2 configuration: 8-value perception, 8 movement actions,
# metrics gathered every trial via the custom maze collector.
cfg = Configuration(
    classifier_length=CLASSIFIER_LENGTH,
    number_of_possible_actions=NUMBER_OF_POSSIBLE_ACTIONS,
    metrics_trial_frequency=1,
    user_metrics_collector_fcn=_maze_metrics,
)

# Define agent
agent = ACS2(cfg)
%%time
population, metrics = agent.explore(maze, 100)
###Output
CPU times: user 5.19 s, sys: 11.7 ms, total: 5.2 s
Wall time: 5.22 s
###Markdown
We can take a sneak peek into a created list of classifiers. Let's have a look at top 10:
###Code
# Show the ten fittest classifiers discovered during exploration.
population.sort(key=lambda cl: -cl.fitness)
for cl in population[:10]:
    print(f"{cl!r} \tq: {cl.q:.2f} \tr: {cl.r:.2f} \tir: {cl.ir:.2f}")
###Output
9####010 0 1####101 (empty) q: 0.963 r: 884.0 ir: 884.0 f: 851.7 exp: 41 tga: 645 talp: 2817 tav: 46.2 num: 1 q: 0.96 r: 884.02 ir: 884.02
9#1##010 0 1####101 (empty) q: 0.921 r: 809.5 ir: 806.3 f: 745.3 exp: 31 tga: 1682 talp: 2817 tav: 35.7 num: 1 q: 0.92 r: 809.54 ir: 806.29
##901### 2 ##110### (empty) q: 0.875 r: 762.2 ir: 762.2 f: 666.8 exp: 27 tga: 590 talp: 2875 tav: 79.1 num: 1 q: 0.87 r: 762.17 ir: 762.17
011###01 0 9#####10 (empty) q: 0.989 r: 563.2 ir: 0.0 f: 557.2 exp: 40 tga: 951 talp: 2817 tav: 43.4 num: 1 q: 0.99 r: 563.17 ir: 0.00
01##0#01 0 9#####10 (empty) q: 0.976 r: 563.2 ir: 0.0 f: 549.5 exp: 41 tga: 949 talp: 2817 tav: 42.7 num: 1 q: 0.98 r: 563.17 ir: 0.00
01110001 0 9#####10 (empty) q: 0.972 r: 563.0 ir: 0.0 f: 547.1 exp: 41 tga: 949 talp: 2817 tav: 41.3 num: 1 q: 0.97 r: 563.01 ir: 0.00
0#1##001 0 9#####10 (empty) q: 0.953 r: 553.8 ir: 0.0 f: 527.6 exp: 32 tga: 1769 talp: 2817 tav: 32.5 num: 1 q: 0.95 r: 553.76 ir: 0.00
1000#101 1 9111#010 (empty) q: 0.942 r: 347.8 ir: 0.0 f: 327.6 exp: 14 tga: 644 talp: 2795 tav: 1.46e+02 num: 1 q: 0.94 r: 347.78 ir: 0.00
1#0110## 2 ##90#1## (empty) q: 0.958 r: 290.4 ir: 0.0 f: 278.1 exp: 22 tga: 1168 talp: 2874 tav: 76.9 num: 1 q: 0.96 r: 290.39 ir: 0.00
11011001 2 ##90#1## (empty) q: 0.846 r: 290.3 ir: 0.0 f: 245.7 exp: 22 tga: 1168 talp: 2874 tav: 77.0 num: 1 q: 0.85 r: 290.33 ir: 0.00
###Markdown
Exploitation Now we can either reuse our previous agent or initialize it one more time passing the initial population of classifiers as *apriori* knowledge.
###Code
# Reinitialize agent using defined configuration and population
agent = ACS2(cfg, population)
%%time
population, metrics = agent.exploit(maze, 1)
metrics[-1]
###Output
_____no_output_____
###Markdown
Experiments
###Code
def parse_metrics_to_df(explore_metrics, exploit_metrics):
    """Combine explore/exploit metric dicts into one DataFrame indexed by trial.

    Each input is a list of per-trial metric dicts containing at least
    'trial' and 'steps_in_trial'.  The result gains a 'phase' column
    ('explore'/'exploit'), a 'steps' alias for 'steps_in_trial', and the
    exploit trial numbers are shifted to continue after the explore phase.
    """
    explore_df = pd.DataFrame(explore_metrics)
    exploit_df = pd.DataFrame(exploit_metrics)

    # Mark them with specific phase
    explore_df['phase'] = 'explore'
    exploit_df['phase'] = 'exploit'

    # 'steps' is a friendlier alias for 'steps_in_trial'.
    # (The original per-row apply re-assigned four other columns to
    # themselves; those no-ops are dropped, and the copy is vectorized.)
    explore_df['steps'] = explore_df['steps_in_trial']
    exploit_df['steps'] = exploit_df['steps_in_trial']

    # Adjust the exploit trial counter so it continues after exploration.
    exploit_df['trial'] = exploit_df['trial'] + len(explore_df)

    # Concatenate both dataframes
    df = pd.concat([explore_df, exploit_df])
    df.set_index('trial', inplace=True)

    return df
###Output
_____no_output_____
###Markdown
For various mazes visualize- classifiers / reliable classifiers for steps- optimal policy- steps (exploration | exploitation)- knowledge- parameters setting
###Code
def find_best_classifier(population, situation, cfg):
    """Return the fittest matching classifier that anticipates a change,
    or None when no such classifier exists."""
    match_set = population.form_match_set(situation)
    candidates = [cl for cl in match_set if cl.does_anticipate_change()]
    if not candidates:
        return None
    return max(candidates, key=lambda cl: cl.fitness)
def build_fitness_matrix(env, population, cfg):
    """Map every maze cell to the fitness of its best classifier.

    Walls become 0, path cells without a matching classifier -1, and the
    reward cell is pushed above the current maximum so it stands out.
    """
    original = env.env.maze.matrix
    fitness = original.copy()

    for (row, col), cell in np.ndenumerate(original):
        if cell == 0:
            # Path cell: fitness of the best matching classifier.
            perception = env.env.maze.perception(col, row)
            best_cl = find_best_classifier(population, perception, cfg)
            fitness[row, col] = best_cl.fitness if best_cl else -1
        elif cell == 1:
            # Wall cell.
            fitness[row, col] = 0
        elif cell == 9:
            # Reward cell: larger than everything seen so far.
            fitness[row, col] = fitness.max() + 500

    return fitness
def build_action_matrix(env, population, cfg):
    """Map every maze cell to a symbol for the best classifier's action:
    an arrow on path cells ('?' when nothing matches), an escaped hash on
    walls and 'R' on the reward cell."""
    ACTION_LOOKUP = {
        0: u'↑', 1: u'↗', 2: u'→', 3: u'↘',
        4: u'↓', 5: u'↙', 6: u'←', 7: u'↖'
    }
    original = env.env.maze.matrix
    action = original.copy().astype(str)

    for (row, col), cell in np.ndenumerate(original):
        if cell == 0:
            # Path cell: arrow for the best classifier's action.
            perception = env.env.maze.perception(col, row)
            best_cl = find_best_classifier(population, perception, cfg)
            action[row, col] = ACTION_LOOKUP[best_cl.action] if best_cl else '?'
        elif cell == 1:
            # Wall cell.
            action[row, col] = '\#'
        elif cell == 9:
            # Reward cell.
            action[row, col] = 'R'

    return action
###Output
_____no_output_____
###Markdown
Plotting functions and settings
###Code
# Plot constants
TITLE_TEXT_SIZE=24
AXIS_TEXT_SIZE=18
LEGEND_TEXT_SIZE=16
def plot_policy(env, agent, cfg, ax=None):
    """Render the learned policy: cell colour = best-classifier fitness,
    cell label = best-classifier action arrow.

    FIX: removed the unused `maze_countours` variable, which also read
    the notebook-global `maze` instead of the `env` parameter.
    """
    if ax is None:
        ax = plt.gca()

    ax.set_aspect("equal")

    # Handy variables
    max_x = env.env.maze.max_x
    max_y = env.env.maze.max_y

    fitness_matrix = build_fitness_matrix(env, agent.population, cfg)
    action_matrix = build_action_matrix(env, agent.population, cfg)

    # Render maze as image
    plt.imshow(fitness_matrix, interpolation='nearest', cmap='Reds', aspect='auto',
               extent=[0, max_x, max_y, 0])

    # Add labels to each cell
    for (y, x), val in np.ndenumerate(action_matrix):
        plt.text(x + 0.4, y + 0.5, "${}$".format(val))

    ax.set_title("Policy", fontsize=TITLE_TEXT_SIZE)
    ax.set_xlabel('x', fontsize=AXIS_TEXT_SIZE)
    ax.set_ylabel('y', fontsize=AXIS_TEXT_SIZE)
    ax.set_xlim(0, max_x)
    ax.set_ylim(max_y, 0)
    ax.set_xticks(range(0, max_x))
    ax.set_yticks(range(0, max_y))
    ax.grid(True)
def plot_knowledge(df, ax=None):
    """Plot knowledge [%] per trial; explore phase in blue, exploit in red."""
    if ax is None:
        ax = plt.gca()

    phase_explore = df.query("phase == 'explore'")
    phase_exploit = df.query("phase == 'exploit'")

    phase_explore['knowledge'].plot(ax=ax, c='blue')
    phase_exploit['knowledge'].plot(ax=ax, c='red')

    # Dashed vertical line marks the explore -> exploit boundary.
    ax.axvline(x=len(phase_explore), c='black', linestyle='dashed')

    ax.set_title("Achieved knowledge", fontsize=TITLE_TEXT_SIZE)
    ax.set_xlabel("Trial", fontsize=AXIS_TEXT_SIZE)
    ax.set_ylabel("Knowledge [%]", fontsize=AXIS_TEXT_SIZE)
    ax.set_ylim([0, 105])
def plot_steps(df, ax=None):
    """Plot steps per trial; explore phase in blue, exploit in red."""
    if ax is None:
        ax = plt.gca()

    phase_explore = df.query("phase == 'explore'")
    phase_exploit = df.query("phase == 'exploit'")

    phase_explore['steps'].plot(ax=ax, c='blue', linewidth=0.5)
    phase_exploit['steps'].plot(ax=ax, c='red', linewidth=0.5)

    # Dashed vertical line marks the explore -> exploit boundary.
    ax.axvline(x=len(phase_explore), c='black', linestyle='dashed')

    ax.set_title("Steps", fontsize=TITLE_TEXT_SIZE)
    ax.set_xlabel("Trial", fontsize=AXIS_TEXT_SIZE)
    ax.set_ylabel("Steps", fontsize=AXIS_TEXT_SIZE)
def plot_classifiers(df, ax=None):
    """Plot population numerosity (blue) and reliable-classifier count (red)
    across the whole run.

    FIX: removed the `exploit_df` query, which was computed but never used.
    """
    if ax is None:
        ax = plt.gca()

    explore_df = df.query("phase == 'explore'")

    df['numerosity'].plot(ax=ax, c='blue')
    df['reliable'].plot(ax=ax, c='red')

    # Dashed vertical line marks the explore -> exploit boundary.
    ax.axvline(x=len(explore_df), c='black', linestyle='dashed')

    ax.set_title("Classifiers", fontsize=TITLE_TEXT_SIZE)
    ax.set_xlabel("Trial", fontsize=AXIS_TEXT_SIZE)
    ax.set_ylabel("Classifiers", fontsize=AXIS_TEXT_SIZE)
    ax.legend(fontsize=LEGEND_TEXT_SIZE)
def plot_performance(agent, maze, metrics_df, cfg, env_name):
    """Draw the 2x2 summary figure: policy, knowledge, classifiers, steps."""
    plt.figure(figsize=(13, 10), dpi=100)
    plt.suptitle(f'ACS2 Performance in {env_name} environment', fontsize=32)

    plot_policy(maze, agent, cfg, plt.subplot(221))
    plot_knowledge(metrics_df, plt.subplot(222))
    plot_classifiers(metrics_df, plt.subplot(223))
    plot_steps(metrics_df, plt.subplot(224))

    plt.subplots_adjust(top=0.86, wspace=0.3, hspace=0.3)
###Output
_____no_output_____
###Markdown
Maze5
###Code
%%time
# define environment
maze5 = gym.make('Maze5-v0')
# explore
agent_maze5 = ACS2(cfg)
population_maze5_explore, metrics_maze5_explore = agent_maze5.explore(maze5, 3000)
# exploit
agent_maze5 = ACS2(cfg, population_maze5_explore)
_, metrics_maze5_exploit = agent_maze5.exploit(maze5, 400)
maze5_metrics_df = parse_metrics_to_df(metrics_maze5_explore, metrics_maze5_exploit)
plot_performance(agent_maze5, maze5, maze5_metrics_df, cfg, 'Maze5')
###Output
_____no_output_____
###Markdown
Woods14
###Code
%%time
# define environment
woods14 = gym.make('Woods14-v0')
# explore
agent_woods14 = ACS2(cfg)
population_woods14_explore, metrics_woods14_explore = agent_woods14.explore(woods14, 1000)
# exploit
agent_woods14 = ACS2(cfg, population_woods14_explore)
_, metrics_woods14_exploit = agent_woods14.exploit(woods14, 200)
woods14_metrics_df = parse_metrics_to_df(metrics_woods14_explore, metrics_woods14_exploit)
plot_performance(agent_woods14, woods14, woods14_metrics_df, cfg, 'Woods14')
###Output
_____no_output_____ |
siamese/sentence_bert_softmax.ipynb | ###Markdown
Siamese Network with BERT Pooling: Softmax Loss Function- We train our siamese network with the training data from SemEval 2014.- We use the **softmax loss function**.- We then run k-NN search with test queries (previously generated for BM25) to produce test query results. Google Colab setupsThis part only gets executed if this notebook is being run under Google Colab. **Please change the working path directory below in advance!**
###Code
# Use Google Colab
use_colab = True
# Is this notebook running on Colab?
# If so, then google.colab package (github.com/googlecolab/colabtools)
# should be available in this environment
# Previous version used importlib, but we could do the same thing with
# just attempting to import google.colab
try:
from google.colab import drive
colab_available = True
except:
colab_available = False
if use_colab and colab_available:
drive.mount('/content/drive')
# If there's a package I need to install separately, do it here
!pip install sentence-transformers==0.3.9 transformers==3.4.0 jsonlines==1.2.0
# cd to the appropriate working directory under my Google Drive
%cd '/content/drive/My Drive/CS646_Final_Project/siamese'
# List the directory contents
!ls
###Output
_____no_output_____
###Markdown
PyTorch GPU setup
###Code
# torch.device / CUDA Setup
import torch

use_cuda = True
use_colab_tpu = False

colab_tpu_available = False

if use_colab_tpu:
    try:
        assert os.environ['COLAB_TPU_ADDR']
        colab_tpu_available = True
    except (KeyError, AssertionError):
        # BUG FIX: the original (bare) except branch set this to True as
        # well, which would make the notebook try to use a missing TPU.
        colab_tpu_available = False

if use_cuda and torch.cuda.is_available():
    torch_device = torch.device('cuda:0')

    # Set this to True to make your output immediately reproducible
    # Note: https://pytorch.org/docs/stable/notes/randomness.html
    torch.backends.cudnn.deterministic = False

    # Disable 'benchmark' mode: Set this False if you want to measure running times more fairly
    # Note: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
    torch.backends.cudnn.benchmark = True

    # Faster Host to GPU copies with page-locked memory
    use_pin_memory = True

    # CUDA libraries version information
    print("CUDA Version: " + str(torch.version.cuda))
    print("cuDNN Version: " + str(torch.backends.cudnn.version()))
    print("CUDA Device Name: " + str(torch.cuda.get_device_name()))
    print("CUDA Capabilities: "+ str(torch.cuda.get_device_capability()))
elif use_colab_tpu and colab_tpu_available:
    # This needs to be installed separately
    # https://github.com/pytorch/xla/blob/master/contrib/colab/getting-started.ipynb
    import torch_xla
    import torch_xla.core.xla_model as xm

    torch_device = xm.xla_device()
else:
    torch_device = torch.device('cpu')
    use_pin_memory = False
###Output
CUDA Version: 11.0
cuDNN Version: 8004
CUDA Device Name: GeForce RTX 2080 Ti
CUDA Capabilities: (7, 5)
###Markdown
Import packages
###Code
import os
import random
import json
import pathlib
import sentence_transformers
from sentence_transformers import losses
import numpy as np
import jsonlines
# Random seed settings
random_seed = 646
random.seed(random_seed) # Python
np.random.seed(random_seed) # NumPy
torch.manual_seed(random_seed) # PyTorch
###Output
_____no_output_____
###Markdown
Load the dataset
###Code
# 4 labels (1: Relevant, 2: Aspect only, 3: Sentiment only, 4: Not Relevant): Softmax Loss
_train_dir = os.path.join('..', 'data', 'our_datasets_partially_correct_labels')

with open(os.path.join(_train_dir, 'laptop_train.json')) as laptop_train_file:
    laptop_train = json.load(laptop_train_file)

with open(os.path.join(_train_dir, 'restaurant_train.json')) as restaurants_train_file:
    restaurants_train = json.load(restaurants_train_file)
_____no_output_____
###Markdown
Training set: Joint = Laptop + Restaurants
###Code
# Build sentence-pair training examples: "<aspect>, <sentiment>" is the
# query text, the review sentence is the document, and the label is the
# 4-way relevance class.  Laptop examples come first, then restaurants.
train_combined_examples = [
    sentence_transformers.InputExample(
        texts=[row['query'][0] + ', ' + row['query'][1], row['doc']],
        label=row['label'])
    for row in laptop_train + restaurants_train
]

print(train_combined_examples[0])
###Output
<InputExample> label: 1, texts: charges, positive; It fires up in the morning in less than 30 seconds and I have never had any issues with it freezing.
###Markdown
Siamese Network with BERT Pooling (SBERT) Model- We use the pretrained weights released by the BERT-ADA authors.- Please download and extract them to the same directory as this notebook: https://github.com/deepopinion/domain-adapted-atscrelease-of-bert-language-models-finetuned-on-a-specific-domain - **NOTE**: Because BERT-ADA was trained with an older version of `transformers`, you need to add `"model_type": "bert"` to `config.json`.
###Code
# Load the pretrained BERT-ADA model
# Extract the tar.xz file
#!tar -xf laptops_and_restaurants_2mio_ep15.tar.xz
pretrained_model_name = 'laptops_and_restaurants_2mio_ep15'
sbert_new_model_name = 'sbert_bert_ada_joint_partially_correct_softmax'

# Transformer encoder (max 256 word pieces) followed by a pooling layer
# yields one fixed-size embedding per sentence.
word_embedding_model = sentence_transformers.models.Transformer(
    pretrained_model_name, max_seq_length=256)
pooling_model = sentence_transformers.models.Pooling(
    word_embedding_model.get_word_embedding_dimension())
model = sentence_transformers.SentenceTransformer(
    modules=[word_embedding_model, pooling_model])
###Output
_____no_output_____
###Markdown
Training
###Code
# PyTorch DataLoader over the combined training examples.
train_dataset = sentence_transformers.SentencesDataset(train_combined_examples, model)
train_dataloader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size=16)

# Softmax (4-class) loss head on top of the siamese embeddings.
# Training objectives are (DataLoader, LossFunction) tuples.
train_softmax_loss = (
    train_dataloader,
    losses.SoftmaxLoss(
        model,
        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
        num_labels=4))

# Fine-tune with mixed precision, linear warmup and weight decay,
# then persist the trained model.
model.fit(
    train_objectives=[train_softmax_loss],
    epochs=20,
    warmup_steps=1200,
    weight_decay=0.01,
    use_amp=True)

model.save(sbert_new_model_name)
###Output
_____no_output_____
###Markdown
Play with my own sentences
###Code
# Uncomment the following line to load the existing trained model.
# model = sentence_transformers.SentenceTransformer(sbert_new_model_name)
query_embedding = model.encode('Windows 8, Positive')
passage_embedding = model.encode("This laptop's design is amazing")
print("Similarity:", sentence_transformers.util.pytorch_cos_sim(query_embedding, passage_embedding))
###Output
_____no_output_____
###Markdown
k-NN Search
###Code
# Get the top k matches
top_k = 800
###Output
_____no_output_____
###Markdown
Generate query results file for `trec_eval` evaluation: Laptop
###Code
# Load the laptop test collection: document ids and raw sentence contents.
test_laptop_documents_path = os.path.join('..', 'bm25', 'collection', 'laptop_test', 'laptop_test.jsonl')
test_laptop_documents_file = jsonlines.open(test_laptop_documents_path)
test_laptop_documents_id = []
test_laptop_documents = []
for doc in test_laptop_documents_file:
    test_laptop_documents_id.append(doc['id'])
    test_laptop_documents.append(doc['contents'])

# Embed the whole collection once, up front.
test_laptop_embeddings = model.encode(test_laptop_documents, convert_to_tensor=True)

# Previously generated BM25 test queries, one per line.
test_laptop_queries_path = os.path.join('..', 'bm25', 'test_queries_laptop.txt')
test_laptop_queries = open(test_laptop_queries_path, 'r').readlines()

# Output directory and file name for the trec_eval run file.
test_laptop_result_path = os.path.join('.', 'query_results', sbert_new_model_name, 'top_' + str(top_k))
pathlib.Path(test_laptop_result_path).mkdir(parents=True, exist_ok=True)
test_laptop_result_file = 'test_results_laptop_' + sbert_new_model_name + '.txt'
!rm {os.path.join(test_laptop_result_path, test_laptop_result_file)}
# Rank the laptop test collection for every query and write a
# trec_eval-format run file: "qid Q0 docid rank score tag".
result_fp = os.path.join(test_laptop_result_path, test_laptop_result_file)
with open(result_fp, 'a') as result_f:
    for q_num, q in enumerate(test_laptop_queries):
        print("Processing query", q_num, ":", q)
        query_embedding = model.encode(q, convert_to_tensor=True)
        cos_scores = sentence_transformers.util.pytorch_cos_sim(query_embedding, test_laptop_embeddings)[0]

        # Never ask torch.topk for more results than there are documents.
        top_k_retrieved = min(top_k, len(cos_scores))
        top_results = torch.topk(cos_scores, k=top_k_retrieved)

        # PERF FIX: the original re-opened the output file in append mode
        # once per result line; keep one handle open for the whole run.
        for rank, (score, idx) in enumerate(zip(top_results[0], top_results[1]), start=1):
            line = (str(q_num + 1) + ' Q0 ' + test_laptop_documents_id[idx] + ' '
                    + str(rank) + ' ' + '%.8f' % score + ' ' + sbert_new_model_name)
            result_f.write("%s\n" % line)
###Output
_____no_output_____
###Markdown
Generate query results file for `trec_eval` evaluation: Restaurant
###Code
# Load the restaurant test collection: document ids and raw sentence contents.
test_restaurants_documents_path = os.path.join('..', 'bm25', 'collection', 'restaurant_test', 'restaurant_test.jsonl')
test_restaurants_documents_file = jsonlines.open(test_restaurants_documents_path)
test_restaurants_documents_id = []
test_restaurants_documents = []
for doc in test_restaurants_documents_file:
    test_restaurants_documents_id.append(doc['id'])
    test_restaurants_documents.append(doc['contents'])

# Embed the whole collection once, up front.
test_restaurants_embeddings = model.encode(test_restaurants_documents, convert_to_tensor=True)

# Previously generated BM25 test queries, one per line.
test_restaurants_queries_path = os.path.join('..', 'bm25', 'test_queries_restaurant.txt')
test_restaurants_queries = open(test_restaurants_queries_path, 'r').readlines()

# Output directory and file name for the trec_eval run file.
test_restaurants_result_path = os.path.join('.', 'query_results', sbert_new_model_name, 'top_' + str(top_k))
pathlib.Path(test_restaurants_result_path).mkdir(parents=True, exist_ok=True)
test_restaurants_result_file = 'test_results_restaurant_' + sbert_new_model_name + '.txt'
!rm {os.path.join(test_restaurants_result_path, test_restaurants_result_file)}
# Rank the restaurant test collection for every query and write a
# trec_eval-format run file: "qid Q0 docid rank score tag".
result_fp = os.path.join(test_restaurants_result_path, test_restaurants_result_file)
with open(result_fp, 'a') as result_f:
    for q_num, q in enumerate(test_restaurants_queries):
        print("Processing query", q_num, ":", q)
        query_embedding = model.encode(q, convert_to_tensor=True)
        cos_scores = sentence_transformers.util.pytorch_cos_sim(query_embedding, test_restaurants_embeddings)[0]

        # Never ask torch.topk for more results than there are documents.
        top_k_retrieved = min(top_k, len(cos_scores))
        top_results = torch.topk(cos_scores, k=top_k_retrieved)

        # PERF FIX: the original re-opened the output file in append mode
        # once per result line; keep one handle open for the whole run.
        for rank, (score, idx) in enumerate(zip(top_results[0], top_results[1]), start=1):
            line = (str(q_num + 1) + ' Q0 ' + test_restaurants_documents_id[idx] + ' '
                    + str(rank) + ' ' + '%.8f' % score + ' ' + sbert_new_model_name)
            result_f.write("%s\n" % line)
###Output
_____no_output_____ |
Covid_US_region_analysis.ipynb | ###Markdown
COVID Data Visualization for 5 regions
###Code
import os
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from custuntions import phase_mask, line_plot, scatter_plot
# Use white grid plot background from seaborn
sns.set(font_scale=1.5, style="whitegrid")
# Read every weather CSV in ./DATA into a dict keyed by state name
# (the file name without its extension).
dfs = {}
for file in os.listdir("./DATA"):
    state_name = file.split(".")[0]
    dfs[state_name] = pd.read_csv(f"./DATA/{file}")

# Build a proper DatetimeIndex from the YEAR/MO/DY columns, then drop
# the raw date parts and the static coordinates.
for city in dfs:
    dfs[city]["Date"] = pd.to_datetime(
        dfs[city]["YEAR"].astype(str) + "/" + dfs[city]["MO"].astype(str) + "/" + dfs[city]["DY"].astype(str))
    dfs[city].set_index('Date', inplace=True)
    dfs[city].drop(["LAT", "LON", "YEAR", "MO", "DY"], axis=1, inplace=True)
# US state-level case counts.
df_cases = pd.read_csv("US_state_cases.csv")
df_cases

# Index by date, drop the FIPS code, and restrict to the study window.
df_cases['date'] = pd.to_datetime(df_cases['date'])
df_cases.rename(columns={'date': 'Date'}, inplace=True)
df_cases.set_index('Date', inplace=True)
df_cases.drop(['fips'], axis=1, inplace=True)
df_cases = df_cases.loc["2020-03-01":"2021-03-16"]

# Join each state's weather frame with its case/death counts on Date.
for city in dfs:
    dfs[city] = pd.merge(left=dfs[city],
                         left_index=True,
                         right=df_cases.loc[(df_cases['state'] == city), ['cases', 'deaths']],
                         right_index=True,
                         how='inner')

# Group the states into US regions.
dfs_southern = [dfs['Arizona'], dfs['Louisiana'], dfs['Texas'], dfs['Florida']]
dfs_northern = [dfs['Minnesota'], dfs['Massachusetts']]
dfs_western = [dfs['Nevada'], dfs['California'], dfs['Oregon']]
dfs_eastern = [dfs['New York'], dfs['New Jersey']]
dfs_central = [dfs['Colorado']]
dfs['Nevada']
# Average each region's member states day-by-day.
def _region_mean(region_dfs):
    return pd.concat(region_dfs).groupby("Date").mean()

df_southern_mean = _region_mean(dfs_southern)
df_northern_mean = _region_mean(dfs_northern)
df_eastern_mean = _region_mean(dfs_eastern)
df_western_mean = _region_mean(dfs_western)
df_central_mean = _region_mean(dfs_central)

# Human-readable names for the weather variables.
columns = {'RH2M': 'Relative Humidity at 2 Meters (%)',
           'T2MDEW': 'Dew/Frost Point at 2 Meters (C)',
           'T2M_MAX': 'Maximum Temperature at 2 Meters (C)',
           'T2M_MIN': 'Minimum Temperature at 2 Meters (C)',
           'T2M_RANGE': 'Temperature Range at 2 Meters (C)',
           'WS50M_RANGE': 'Wind Speed Range at 50 Meters (m/s)',
           'WS10M_RANGE': 'Wind Speed Range at 10 Meters (m/s)'
           }
###Output
_____no_output_____
###Markdown
Southern Region
###Code
# Rename columns to readable labels and inspect the frame.
df_southern_mean.rename(columns=columns, inplace=True)
df_southern_mean

# Split into phase 1 (first wave) and phase 2 (second wave).
phase1_southern_mean, phase2_southern_mean = phase_mask(
    df_southern_mean, "2020-03-01", "2020-10-01", "2020-10-01", "2021-03-16")

# dropping columns here (Drop cases and deaths to better visualize the atmospheric data)
# phase1_southern_mean = phase1_southern_mean.drop(["cases", "deaths"], axis=1)
# phase2_southern_mean = phase2_southern_mean.drop(["cases", "deaths"], axis=1)

fig, ax = line_plot(phase1_southern_mean, "Southern region covid trend phase 1")
fig, ax = line_plot(phase2_southern_mean, "Southern region covid trend phase 2")

# Cases vs. individual weather variables.
scatter_plot(df_southern_mean, "cases", columns["T2MDEW"])
scatter_plot(df_southern_mean, "cases", columns["T2M_MAX"])
scatter_plot(df_southern_mean, "cases", columns["T2M_MIN"])
scatter_plot(df_southern_mean, "cases", columns["WS50M_RANGE"])
scatter_plot(df_southern_mean, "cases", columns["WS10M_RANGE"])
###Output
_____no_output_____
###Markdown
Nothern Region
###Code
df_northern_mean.rename(columns=columns, inplace=True)
df_northern_mean
# phase one and phase 2 seperation here
phase1_northern_mean, phase2_northern_mean = phase_mask(df_northern_mean, "2020-03-01", "2020-10-01", "2020-10-01", "2021-03-16")
# dropping columns here
# phase1_northern_mean = phase1_northern_mean.drop(["cases", "deaths"], axis=1)
# phase2_northern_mean = phase2_northern_mean.drop(["cases", "deaths"], axis=1)
fig, ax = line_plot(phase1_northern_mean, "Nothern region covid trend phase 1")
fig, ax = line_plot(phase2_northern_mean, "Nothern region covid trend phase 2")
scatter_plot(df_northern_mean, "cases", columns["T2MDEW"])
scatter_plot(df_northern_mean, "cases", columns["T2M_MAX"])
scatter_plot(df_northern_mean, "cases", columns["T2M_MIN"])
scatter_plot(df_northern_mean, "cases", columns["WS50M_RANGE"])
scatter_plot(df_northern_mean, "cases", columns["WS10M_RANGE"])
###Output
_____no_output_____
###Markdown
Eastern Region
###Code
df_eastern_mean.rename(columns=columns, inplace=True)
df_eastern_mean
# phase one and phase 2 seperation here
phase1_eastern_mean, phase2_eastern_mean = phase_mask(df_eastern_mean, "2020-03-01", "2020-10-01", "2020-10-01", "2021-03-16")
# dropping columns here (Drop cases and deaths to better visualize the atmospheric data)
# phase1_eastern_mean = phase1_eastern_mean.drop(["cases", "deaths"], axis=1)
# phase2_eastern_mean = phase2_eastern_mean.drop(["cases", "deaths"], axis=1)
fig, ax = line_plot(phase1_eastern_mean, "Eastern region covid trend phase 1")
fig, ax = line_plot(phase2_eastern_mean, "Eastern region covid trend phase 2")
scatter_plot(df_eastern_mean, "cases", columns["T2MDEW"])
scatter_plot(df_eastern_mean, "cases", columns["T2M_MAX"])
scatter_plot(df_eastern_mean, "cases", columns["T2M_MIN"])
scatter_plot(df_eastern_mean, "cases", columns["WS50M_RANGE"])
scatter_plot(df_eastern_mean, "cases", columns["WS10M_RANGE"])
###Output
_____no_output_____
###Markdown
Western Region
###Code
df_western_mean.rename(columns=columns, inplace=True)
df_western_mean
# phase one and phase 2 seperation here
phase1_western_mean, phase2_western_mean = phase_mask(df_western_mean, "2020-03-01", "2020-10-01", "2020-10-01", "2021-03-16")
# dropping columns here
# phase1_western_mean = phase1_western_mean.drop(["cases", "deaths"], axis=1)
# phase2_western_mean = phase2_western_mean.drop(["cases", "deaths"], axis=1)
fig, ax = line_plot(phase1_western_mean, "Western region covid trend phase 1")
fig, ax = line_plot(phase1_western_mean, "Western region covid trend phase 2")
scatter_plot(df_western_mean, "cases", columns["RH2M"])
scatter_plot(df_western_mean, "cases", columns["T2MDEW"])
scatter_plot(df_western_mean, "cases", columns["T2M_MAX"])
scatter_plot(df_western_mean, "cases", columns["T2M_MIN"])
scatter_plot(df_western_mean, "cases", columns["WS50M_RANGE"])
scatter_plot(df_western_mean, "cases", columns["WS10M_RANGE"])
###Output
_____no_output_____
###Markdown
Central Region
###Code
df_central_mean.rename(columns=columns, inplace=True)
df_central_mean
phase1_central_mean, phase2_central_mean = phase_mask(df_central_mean, "2020-03-01", "2020-10-01", "2020-10-01", "2021-03-16")
fig, ax = line_plot(phase1_central_mean, "Western region covid trend phase 1")
fig, ax = line_plot(phase2_central_mean, "Western region covid trend phase 1")
scatter_plot(df_central_mean, "cases", columns["RH2M"])
scatter_plot(df_central_mean, "cases", columns["T2MDEW"])
scatter_plot(df_central_mean, "cases", columns["T2M_MAX"])
scatter_plot(df_central_mean, "cases", columns["T2M_MIN"])
scatter_plot(df_central_mean, "cases", columns["WS50M_RANGE"])
scatter_plot(df_central_mean, "cases", columns["WS10M_RANGE"])
###Output
_____no_output_____
###Markdown
Converting region dataframes into excel files
###Code
# Persist the per-region daily means for downstream analysis in Excel.
for region_name, region_df in [("Southern", df_southern_mean),
                               ("northern", df_northern_mean),
                               ("eastern", df_eastern_mean),
                               ("western", df_western_mean)]:
    region_df.to_excel(f"{region_name}_weather_cases.xlsx")
###Output
_____no_output_____ |
experiments/29_different_IC_and_models/Check_prediction.ipynb | ###Markdown
Data Paths
###Code
# Project layout: raw data, trained models, plots and result dumps.
P = '/local/meliao/projects/fourier_neural_operator/'
DATA_DIR = os.path.join(P, 'data')
MODEL_DIR = os.path.join(P, 'experiments/29_different_IC_and_models/models')
PLOTS_DIR = os.path.join(P, 'experiments/29_different_IC_and_models/plots/')
RESULTS_DIR = os.path.join(P, 'experiments/29_different_IC_and_models/results')

if not os.path.isdir(PLOTS_DIR):
    os.mkdir(PLOTS_DIR)

# Dataset keys, model families and prediction-time indices to evaluate.
DSETS = ['00', '01', '02']
MODELS = ['FNO']
TIME_IDX = [1, 5, 10]
# Load every (dataset, prediction-time) FNO checkpoint onto the CPU.
model_lst = []
model_pattern = os.path.join(MODEL_DIR, 'dset_{}_model_{}_time_{}_ep_1000')
for dset_k in DSETS:
    for time_idx in TIME_IDX:
        model_lst.append({
            'dset': dset_k,
            'time': time_idx,
            'model': torch.load(model_pattern.format(dset_k, 'FNO', time_idx), map_location='cpu'),
        })

# Load the matching test .mat files and wrap them as one-step datasets.
dset_fp_dd = {k: os.path.join(DATA_DIR, '2021-09-29_NLS_data_00_{}_test.mat'.format(k)) for k in DSETS}
data_dd = {k: sio.loadmat(v) for k, v in dset_fp_dd.items()}
dset_dd = {k: OneStepDataSetComplex(v['output'], v['t'], v['x']) for k, v in data_dd.items()}
def prepare_input(X, x_grid=None):
    """Build the FNO input tensor from complex field samples.

    Splits X into real/imaginary channels and appends the spatial grid as a
    third channel, producing a float tensor of shape (n_batches, grid_size, 3).

    NOTE(review): at the call sites X is indexed as dset.X[:, 0], which looks
    like (n_batches, grid_size) rather than the (nbatch, 1, grid_size) the
    original comment claimed -- confirm.
    """
    grid_size = X.shape[-1]
    n_batches = X.shape[0]
    # Each complex sample becomes two real-valued channels.
    real_imag = torch.view_as_real(torch.tensor(X, dtype=torch.cfloat))
    if x_grid is None:
        # Default spatial grid used by the FNO code: uniform over [-pi, pi),
        # with the duplicate endpoint dropped.
        x_grid = np.linspace(-np.pi, np.pi, grid_size + 1)[:grid_size]
    grid_col = torch.tensor(x_grid, dtype=torch.float).view(-1, 1)
    # Tile the grid across the batch and append it as the last channel.
    return torch.cat((real_imag, grid_col.repeat(n_batches, 1, 1)), axis=2)
def l2_normalized_error(pred, actual):
    """Return the per-sample relative L2 error ||pred - actual|| / ||actual||.

    Norms are taken over the last axis, so the result has one entry per
    leading-dimension sample.
    """
    errors = pred - actual
    # torch.as_tensor avoids the copy (and the UserWarning) that
    # torch.tensor() emits when the argument is already a tensor.
    error_norms = torch.linalg.norm(torch.as_tensor(errors), dim=-1, ord=2)
    actual_norms = torch.linalg.norm(torch.as_tensor(actual), dim=-1, ord=2)
    normalized_errors = torch.divide(error_norms, actual_norms)
    return normalized_errors
# Run every loaded model on its test set (no gradients needed for inference)
# and record predictions and relative L2 errors keyed by "<dset>_<time>".
with torch.no_grad():
    preds_dd = {}
    errors_dd = {}
    for model_dd in model_lst:
        model_k = model_dd['dset'] + '_' + str(model_dd['time'])
        dset = dset_dd[model_dd['dset']]
        # Predict from the t=0 snapshot ...
        model_input = prepare_input(dset.X[:, 0])
        model_pred = model_dd['model'](model_input)
        # ... and score against the snapshot at the model's target time index.
        target = dset.X[:, model_dd['time']]
        preds_dd[model_k] = model_pred
        errors_dd[model_k] = l2_normalized_error(model_pred, target)
        print("Finished with", model_k)
def quick_boxplot(errors_dd, names_dd=None, xlab=None, ref_hline=None, fp=None, title=None):
    """Draw a log-scale boxplot of error distributions, one box per dict key.

    Args:
        errors_dd: dict mapping key -> 1-D torch tensor of errors.
        names_dd: optional mapping from key to a display name for the x axis.
        xlab: optional x-axis label.
        ref_hline: optional y value for a dashed horizontal reference line.
        fp: if given, the figure is saved to this path instead of shown.
        title: optional figure title.
    """
    error_lst = []
    key_lst = []
    for k, errors in errors_dd.items():
        error_lst.append(errors.numpy())
        key_lst.append(k)
    if names_dd is not None:
        key_lst = [names_dd.get(k, k) for k in key_lst]
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.set_ylabel('L2 Normalized Error')
    ax.set_xlabel(xlab)
    ax.set_title(title)
    if ref_hline is not None:
        ax.hlines(ref_hline, xmin=0.5, xmax=len(key_lst) + 0.5, linestyles='dashed')
    fig.patch.set_facecolor('white')
    ax.boxplot(error_lst)
    # BUG FIX: set the tick locations explicitly before assigning labels.
    # Calling set_xticklabels alone triggered matplotlib's "FixedFormatter
    # should only be used together with FixedLocator" warning (visible in
    # this notebook's output). Boxplot positions default to 1..n.
    ax.set_xticks(range(1, len(key_lst) + 1))
    ax.set_xticklabels(labels=key_lst, rotation=45, ha='right')
    fig.tight_layout()
    if fp is not None:
        plt.savefig(fp)
    else:
        plt.show()
    plt.close(fig)
# Compare the three datasets' time-1 FNO error distributions side by side.
dd_for_plt = {k: errors_dd[k] for k in ['00_1', '01_1', '02_1']}
# Human-readable names for the x-axis labels.
names_dd = {'00_1': 'Dataset 1',
            '01_1': 'Dataset 2',
            '02_1': 'Dataset 3'}
# Save path is prepared but saving is disabled (fp kwarg commented out).
fp = os.path.join(PLOTS_DIR, 'FNO_time_1_errors.png')
quick_boxplot(dd_for_plt, names_dd=names_dd, title='FNO time-1 prediction performance on different datasets') #, fp=fp)
###Output
/local/meliao/conda_envs/fourier_neural_operator/lib/python3.7/site-packages/ipykernel_launcher.py:18: UserWarning: FixedFormatter should only be used together with FixedLocator
|
02_deep_learning/intro-to-pytorch/.ipynb_checkpoints/Part 1 - Tensors in PyTorch (Exercises)-checkpoint.ipynb | ###Markdown
Introduction to Deep Learning with PyTorchIn this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks. Neural NetworksDeep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply "neurons." Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.Mathematically this looks like: $$\begin{align}y &= f(w_1 x_1 + w_2 x_2 + b) \\y &= f\left(\sum_i w_i x_i +b \right)\end{align}$$With vectors this is the dot/inner product of two vectors:$$h = \begin{bmatrix}x_1 \, x_2 \cdots x_n\end{bmatrix}\cdot \begin{bmatrix} w_1 \\ w_2 \\ \vdots \\ w_n\end{bmatrix}$$ TensorsIt turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.With the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.
###Code
# First, import PyTorch
import torch
def activation(x):
    """Sigmoid activation: maps each element of x into (0, 1).

    Arguments
    ---------
    x: torch.Tensor
    """
    neg_exp = torch.exp(-x)
    return 1 / (1 + neg_exp)
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable

# Features: a single sample of 5 standard-normal values, shape (1, 5).
features = torch.randn((1, 5))
# True weights for our data -- same shape as features, random normal again.
weights = torch.randn_like(features)
# and a true bias term, shape (1, 1).
bias = torch.randn((1, 1))
###Output
_____no_output_____
###Markdown
Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one. `weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.Finally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.PyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network. > **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.htmltorch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.
###Code
## Calculate the output of this network using the weights and bias tensors
# Inspect the generated tensors before computing the network output.
print(features)
print(weights)
print(bias)
###Output
tensor([[-0.1468, 0.7861, 0.9468, -1.1143, 1.6908]])
tensor([[-0.8948, -0.3556, 1.2324, 0.1382, -1.6822]])
tensor([[0.3177]])
###Markdown
You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.htmltorch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.htmltorch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error```python>> torch.mm(features, weights)---------------------------------------------------------------------------RuntimeError Traceback (most recent call last) in ()----> 1 torch.mm(features, weights)RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033```As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal to the number of rows in the second column. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. 
If you're building neural networks, you'll be using this method often.There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.htmltorch.Tensor.view).* `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory.* `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.> **Exercise**: Calculate the output of our little network using matrix multiplication.
###Code
## Calculate the output of this network using matrix multiplication
# The exercise asks for matrix multiplication, but the original used an
# elementwise multiply-and-sum. A (1x5) @ (5x1) product is equivalent;
# weights must be reshaped so the inner dimensions match.
y = activation(torch.mm(features, weights.view(5, 1)) + bias)
print(y)
# torch.matmul is the broadcasting-aware equivalent of torch.mm here.
y = activation(torch.matmul(features, weights.view(5, 1)) + bias)
print(y)
###Output
tensor([[0.1595]])
tensor([[0.1595]])
###Markdown
Stack them up!That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated $$\vec{h} = [h_1 \, h_2] = \begin{bmatrix}x_1 \, x_2 \cdots \, x_n\end{bmatrix}\cdot \begin{bmatrix} w_{11} & w_{12} \\ w_{21} &w_{22} \\ \vdots &\vdots \\ w_{n1} &w_{n2}\end{bmatrix}$$The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply$$y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right)$$
###Code
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable

# Features are 3 random normal variables, shape (1, 3).
features = torch.randn((1, 3))

# Define the size of each layer in our network
n_input = features.shape[1]     # Number of input units, must match number of input features
n_hidden = 2                    # Number of hidden units
n_output = 1                    # Number of output units

# Weights for inputs to hidden layer, shape (n_input, n_hidden).
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer, shape (n_hidden, n_output).
W2 = torch.randn(n_hidden, n_output)

# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
###Output
_____no_output_____
###Markdown
> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`.
###Code
## Your solution here
###Output
_____no_output_____
###Markdown
If you did this correctly, you should see the output `tensor([[ 0.3171]])`. The number of hidden units is a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions. Numpy to Torch and backSpecial bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.
###Code
import numpy as np
# Start from a random Numpy array ...
a = np.random.rand(4,3)
a
# ... wrap it as a Torch tensor (torch.from_numpy shares the same memory) ...
b = torch.from_numpy(a)
b
# ... and convert back to a Numpy array.
b.numpy()
###Output
_____no_output_____
###Markdown
The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.
###Code
# Multiply PyTorch Tensor by 2, in place (trailing underscore = in-place op)
b.mul_(2)
# Numpy array matches new values from Tensor because the memory is shared
a
###Output
_____no_output_____ |
spec_creation/Validate_spec_before_upload.ipynb | ###Markdown
Setup some basic stuff
###Code
import logging
logging.getLogger().setLevel(logging.DEBUG)
import folium
import folium.features as fof
import folium.utilities as ful
import branca.element as bre
import json
import geojson as gj
import arrow
import shapely.geometry as shpg
import pandas as pd
import geopandas as gpd
def lonlat_swap(lon_lat):
    """Swap a [lon, lat] pair into [lat, lon] (folium wants lat-first)."""
    return list(lon_lat)[::-1]
def get_row_count(n_maps, cols):
    """Return the number of grid rows needed to show n_maps maps in `cols` columns.

    BUG FIX: the original used true division, so a partially filled last row
    produced a fractional count (e.g. 7 maps / 4 cols -> 1.75 + 1 = 2.75).
    Callers feed this to subplot indexing, which needs the integer ceiling.
    """
    rows = n_maps // cols
    if n_maps % cols != 0:
        rows = rows + 1
    return rows
def get_one_marker(loc, disp_color):
    """Build a folium overlay for a single GeoJSON feature.

    Point features become a folium.Marker; single-ring Polygon features become
    a folium.PolyLine outline. Any other geometry type falls through and
    implicitly returns None.
    """
    if loc["geometry"]["type"] == "Point":
        # GeoJSON stores [lon, lat]; folium expects [lat, lon].
        curr_latlng = lonlat_swap(loc["geometry"]["coordinates"])
        return folium.Marker(curr_latlng, icon=folium.Icon(color=disp_color),
                             popup="%s" % loc["properties"]["name"])
    elif loc["geometry"]["type"] == "Polygon":
        assert len(loc["geometry"]["coordinates"]) == 1,\
            "Only simple polygons supported!"
        curr_latlng = [lonlat_swap(c) for c in loc["geometry"]["coordinates"][0]]
        # print("Returning polygon for %s" % curr_latlng)
        return folium.PolyLine(curr_latlng, color=disp_color, fill=disp_color,
                               popup="%s" % loc["properties"]["name"])
def get_marker(loc, disp_color):
    """Return a list of folium overlays for `loc`, which may be one GeoJSON
    feature or a list of features."""
    if type(loc) != list:
        print("Found single entry, is this expected?")
        loc = [loc]
    return [get_one_marker(entry, disp_color) for entry in loc]
###Output
_____no_output_____
###Markdown
Read the data
###Code
# The filled-in spec under validation plus the canonical sensing regimes.
# NOTE(review): the open() handles are never closed -- acceptable in a
# notebook, but a `with` block would be cleaner.
spec_to_validate = json.load(open("evaluation.spec.filled.json"))
sensing_configs = json.load(open("sensing_regimes.all.specs.json"))
###Output
_____no_output_____
###Markdown
Validating the time range
###Code
# The formatted dates stored in the spec must round-trip to the same calendar
# dates as the raw timestamps; report any disagreement.
print("Experiment runs from %s -> %s" % (arrow.get(spec_to_validate["start_ts"]), arrow.get(spec_to_validate["end_ts"])))
start_fmt_time_to_validate = arrow.get(spec_to_validate["start_ts"]).format("YYYY-MM-DD")
end_fmt_time_to_validate = arrow.get(spec_to_validate["end_ts"]).format("YYYY-MM-DD")
if (start_fmt_time_to_validate != spec_to_validate["start_fmt_date"]):
    print("VALIDATION FAILED, got start %s, expected %s" % (start_fmt_time_to_validate, spec_to_validate["start_fmt_date"]))
if (end_fmt_time_to_validate != spec_to_validate["end_fmt_date"]):
    print("VALIDATION FAILED, got end %s, expected %s" % (end_fmt_time_to_validate, spec_to_validate["end_fmt_date"]))
###Output
_____no_output_____
###Markdown
Validating calibration trips
###Code
def get_map_for_calibration_test(trip):
    """Render a calibration trip: green start marker, red end marker, and a
    straight line between them (popup = trip id).

    Returns an empty map when either endpoint is missing.
    """
    curr_map = folium.Map()
    if trip["start_loc"] is None or trip["end_loc"] is None:
        return curr_map
    # GeoJSON is [lon, lat]; folium wants [lat, lon].
    curr_start = lonlat_swap(trip["start_loc"]["geometry"]["coordinates"])
    curr_end = lonlat_swap(trip["end_loc"]["geometry"]["coordinates"])
    folium.Marker(curr_start, icon=folium.Icon(color="green"),
                  popup="Start: %s" % trip["start_loc"]["properties"]["name"]).add_to(curr_map)
    folium.Marker(curr_end, icon=folium.Icon(color="red"),
                  popup="End: %s" % trip["end_loc"]["properties"]["name"]).add_to(curr_map)
    folium.PolyLine([curr_start, curr_end], popup=trip["id"]).add_to(curr_map)
    curr_map.fit_bounds([curr_start, curr_end])
    return curr_map
# Check every calibration test's sensing config against the canonical
# definition for its id, and lay the trip maps out in a 4-column grid.
calibration_tests = spec_to_validate["calibration_tests"]
rows = get_row_count(len(calibration_tests), 4)
calibration_maps = bre.Figure((rows,4))
for i, t in enumerate(calibration_tests):
    if t["config"]["sensing_config"] != sensing_configs[t["config"]["id"]]["sensing_config"]:
        # BUG FIX: the format string had no placeholder, so applying % to the
        # test dict silently dropped it from the message.
        print("Mismatch in config for test %s" % t)
    curr_map = get_map_for_calibration_test(t)
    calibration_maps.add_subplot(rows, 4, i+1).add_child(curr_map)
calibration_maps
###Output
_____no_output_____
###Markdown
Validating evaluation trips
###Code
def add_waypoint_markers(waypoint_coords, curr_map):
    """Add one numbered div-icon marker per waypoint coordinate to curr_map."""
    for i, wpc in enumerate(waypoint_coords["geometry"]["coordinates"]):
        folium.map.Marker(
            lonlat_swap(wpc), popup="%d" % i,
            icon=fof.DivIcon(class_name='leaflet-div-icon')).add_to(curr_map)

def get_map_for_travel_leg(trip):
    """Render a TRAVEL leg: all valid start/end locations plus every reroute's
    polyline with numbered circle markers at each route point."""
    curr_map = folium.Map()
    [get_one_marker(loc, "green").add_to(curr_map) for loc in trip["start_loc"]]
    [get_one_marker(loc, "red").add_to(curr_map) for loc in trip["end_loc"]]
    # iterate over all reroutes
    for rc in trip["route_coords"]:
        coords = rc["geometry"]["coordinates"]
        print("Found %d coordinates for the route" % (len(coords)))
        latlng_coords = [lonlat_swap(c) for c in coords]
        folium.PolyLine(latlng_coords, popup="%s: %s" % (trip["mode"], trip["name"])).add_to(curr_map)
        for i, c in enumerate(latlng_coords):
            folium.CircleMarker(c, radius=5, popup="%d: %s" % (i, c)).add_to(curr_map)
        # Note: only the last reroute's bounds end up framing the map.
        curr_map.fit_bounds(ful.get_bounds(latlng_coords))
    return curr_map

def get_map_for_shim_leg(trip):
    """Render a SHIM (non-travel) leg as purple markers at its locations."""
    curr_map = folium.Map()
    for loc in trip["loc"]:
        mkr = get_one_marker(loc, "purple")
        mkr.add_to(curr_map)
        # Note: bounds follow the last location only.
        curr_map.fit_bounds(mkr.get_bounds())
    return curr_map
# Build a map per evaluation leg (travel legs and shim legs are rendered
# differently) and lay them out in a 2-column grid.
evaluation_trips = spec_to_validate["evaluation_trips"]
map_list = []
for t in evaluation_trips:
    for l in t["legs"]:
        if l["type"] == "TRAVEL":
            curr_map = get_map_for_travel_leg(l)
            map_list.append(curr_map)
        else:
            curr_map = get_map_for_shim_leg(l)
            map_list.append(curr_map)
rows = get_row_count(len(map_list), 2)
evaluation_maps = bre.Figure(ratio="{}%".format((rows/2) * 100))
for i, curr_map in enumerate(map_list):
    evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map)
evaluation_maps
###Output
_____no_output_____
###Markdown
Validating start and end polygons
###Code
def check_start_end_contains(leg):
    """Validate that each timed route of `leg` begins inside a start polygon
    and finishes inside an end polygon.

    For every route geometry (one per reroute time window):
      * at least one start and one end polygon must be valid for the route's
        [valid_start_ts, valid_end_ts] window,
      * the first route point must lie inside each matching start polygon and
        the last point inside each matching end polygon,
      * the points inside a polygon must form a single contiguous run -- the
        route must not leave a polygon and later re-enter it.

    Raises AssertionError on the first violation.
    """
    for rc in leg["route_coords"]:
        points = gpd.GeoSeries([shpg.Point(p) for p in rc["geometry"]["coordinates"]])
        route_start_ts = rc["properties"]["valid_start_ts"]
        route_end_ts = rc["properties"]["valid_end_ts"]
        # query all start_locs and end_locs where [route_start_ts, route_end_ts] ∈ [loc_start_ts, loc_end_ts]
        start_locs = [shpg.shape(sl["geometry"]) for sl in leg["start_loc"]
                      if route_start_ts >= sl["properties"]["valid_start_ts"]\
                      and route_end_ts <= sl["properties"]["valid_end_ts"]]
        end_locs = [shpg.shape(el["geometry"]) for el in leg["end_loc"]
                    if route_start_ts >= el["properties"]["valid_start_ts"]\
                    and route_end_ts <= el["properties"]["valid_end_ts"]]
        assert len(start_locs) >= 1
        assert len(end_locs) >= 1
        for sl in start_locs:
            start_contains = points.apply(lambda p: sl.contains(p))
            print(points[start_contains])
            # some of the points are within the start polygon
            assert start_contains.any(), leg
            # the first point is within the start polygon
            assert start_contains.iloc[0], points.head()
            # points within polygons are contiguous: consecutive contained
            # indices must differ by exactly 1 (NaN means a single point)
            max_index_diff_start = pd.Series(start_contains[start_contains == True].index).diff().max()
            assert pd.isnull(max_index_diff_start) or max_index_diff_start == 1, "Max diff in index = %s for points %s" % (gpd.GeoSeries(start_contains[start_contains == True].index).diff().max(), points.head())
        for el in end_locs:
            end_contains = points.apply(lambda p: el.contains(p))
            print(points[end_contains])
            # some of the points are within the end polygon
            assert end_contains.any(), leg
            # the last point is within the end polygon
            assert end_contains.iloc[-1], points.tail()
            # points within polygons are contiguous (see above)
            max_index_diff_end = pd.Series(end_contains[end_contains == True].index).diff().max()
            assert pd.isnull(max_index_diff_end) or max_index_diff_end == 1, "Max diff in index = %s for points %s" % (gpd.GeoSeries(end_contains[end_contains == True].index).diff().max(), points.tail())
# Run the start/end polygon containment check on every TRAVEL leg.
# invalid_legs can be populated manually to skip known-bad legs.
invalid_legs = []
for t in evaluation_trips:
    for l in t["legs"]:
        if l["type"] == "TRAVEL" and l["id"] not in invalid_legs:
            print("Checking leg %s, %s" % (t["id"], l["id"]))
            check_start_end_contains(l)
###Output
_____no_output_____
###Markdown
Validating sensing settings
###Code
# Each phone-OS entry's expanded sensing_configs must line up, in order,
# with the ids listed in its "compare" list.
for ss in spec_to_validate["sensing_settings"]:
    for phoneOS, compare_map in ss.items():
        compare_list = compare_map["compare"]
        for i, ssc in enumerate(compare_map["sensing_configs"]):
            if ssc["id"] != compare_list[i]:
                print("Mismatch in sensing configurations for %s" % ss)
###Output
_____no_output_____
###Markdown
Validating routes for no duplicate coordinates
###Code
# Tolerance (in coordinate units) within which two points count as identical.
REL_TOL = 1e-5
def is_coords_equal(c1, c2):
    """True when both components of the [lon, lat] pairs differ by < REL_TOL."""
    d_lon = abs(c2[0] - c1[0])
    d_lat = abs(c2[1] - c1[1])
    return d_lon < REL_TOL and d_lat < REL_TOL
# Validate that no route has *separated* duplicate coordinates: repeated
# points are tolerated only when they are contiguous (e.g. a pause), but a
# duplicate with a different point in between indicates a bad trajectory.
for t in evaluation_trips:
    for l in t["legs"]:
        if l["type"] == "TRAVEL":
            for rc in l["route_coords"]:
                print("Checking leg %s, %s between dates %s, %s" % (t["id"], l["id"], rc["properties"]["valid_start_fmt_date"], rc["properties"]["valid_end_fmt_date"]))
                # O(n^2) pairwise scan over route points.
                for i in range(len(rc["geometry"]["coordinates"])):
                    c1 = rc["geometry"]["coordinates"][i]
                    for j in range(i + 1, len(rc["geometry"]["coordinates"])):
                        c2 = rc["geometry"]["coordinates"][j]
                        if is_coords_equal(c1, c2):
                            # print(f"Found duplicate entry, checking entries {i}...{j}")
                            # Look for any non-duplicate point between the pair;
                            # if one exists, the duplicates are not contiguous.
                            not_matched_index = -1
                            for k in range(i, j+1):
                                c3 = rc["geometry"]["coordinates"][k]
                                if not is_coords_equal(c1, c3):
                                    not_matched_index = k
                            if not_matched_index != -1:
                                assert False, (f"\tDuplicates {c1}, {c2} found @ indices {i}, {j} with non-duplicate {not_matched_index} in between")
_____no_output_____
###Markdown
Validating overlapping time ranges Representative test case (should break):
###Code
def check_overlaps(x):
    """Assert that the features' [valid_start_ts, valid_end_ts] windows do not
    overlap once sorted by start time (touching endpoints are allowed)."""
    spans = sorted(((l["properties"]["valid_start_ts"], l["properties"]["valid_end_ts"]) for l in x),
                   key=lambda s: s[0])
    for (start0, end0), (start1, end1) in zip(spans, spans[1:]):
        assert end0 <= start1, f"Overlapping timestamps: {arrow.get(end0)}, {arrow.get(start1)}"
# Two windows where the second starts before the first ends -- check_overlaps
# must raise, and we print the assertion message to demonstrate it.
# NOTE(review): `.timestamp` is accessed as a property, which assumes
# arrow < 1.0 (it became a method in 1.0) -- confirm pinned version.
breaking_example = [
    {
        "properties": {
            "valid_start_ts": arrow.get("2020-01-01").timestamp,
            "valid_end_ts": arrow.get("2020-03-30").timestamp
        }
    },
    {
        "properties": {
            "valid_start_ts": arrow.get("2019-07-16").timestamp,
            "valid_end_ts": arrow.get("2020-04-30").timestamp
        }
    }
]
try:
    check_overlaps(breaking_example)
except AssertionError as e:
    print(e)
###Output
_____no_output_____
###Markdown
Actual check of spec:
###Code
# Run the overlap check on every time-windowed collection in each leg:
# shim locations, start/end polygons, and (for travel legs) trajectories.
for t in evaluation_trips:
    for l in t["legs"]:
        print("Checking leg %s, %s" % (t["id"], l["id"]))
        # check locs for shim legs
        if "loc" in l:
            print("\tChecking shim locs...")
            check_overlaps(l["loc"])
        # check start locs
        if "start_loc" in l:
            print("\tChecking start locs...")
            check_overlaps(l["start_loc"])
        # check end locs
        if "end_loc" in l:
            print("\tChecking end locs...")
            check_overlaps(l["end_loc"])
        # check trajectories
        if l["type"] == "TRAVEL":
            print("\tChecking trajectories...")
            check_overlaps(l["route_coords"])
###Output
_____no_output_____
###Markdown
Setup some basic stuff
###Code
import logging
logging.getLogger().setLevel(logging.DEBUG)
import folium
import folium.features as fof
import folium.utilities as ful
import branca.element as bre
import json
import geojson as gj
import arrow
def lonlat_swap(lon_lat):
    """Swap a [lon, lat] pair into [lat, lon] (folium wants lat-first)."""
    return list(lon_lat)[::-1]
def get_row_count(n_maps, cols):
    """Return the number of grid rows needed to show n_maps maps in `cols` columns.

    BUG FIX: the original used true division, so a partially filled last row
    produced a fractional count (e.g. 7 maps / 4 cols -> 2.75); callers feed
    this to subplot indexing, which needs the integer ceiling.
    """
    rows = n_maps // cols
    if n_maps % cols != 0:
        rows = rows + 1
    return rows
def get_marker(loc, disp_color):
    """Build a folium overlay for a single GeoJSON feature.

    Point features become a folium.Marker; single-ring Polygon features become
    a folium.PolyLine outline. Any other geometry type implicitly returns None.
    """
    if loc["geometry"]["type"] == "Point":
        # GeoJSON stores [lon, lat]; folium expects [lat, lon].
        curr_latlng = lonlat_swap(loc["geometry"]["coordinates"])
        return folium.Marker(curr_latlng, icon=folium.Icon(color=disp_color),
                             popup="%s" % loc["properties"]["name"])
    elif loc["geometry"]["type"] == "Polygon":
        assert len(loc["geometry"]["coordinates"]) == 1,\
            "Only simple polygons supported!"
        curr_latlng = [lonlat_swap(c) for c in loc["geometry"]["coordinates"][0]]
        # print("Returning polygon for %s" % curr_latlng)
        return folium.PolyLine(curr_latlng, color=disp_color, fill=disp_color,
                               popup="%s" % loc["properties"]["name"])
###Output
_____no_output_____
###Markdown
Read the data
###Code
# The filled-in trip spec under validation plus the canonical sensing regimes.
# NOTE(review): open() handles are never closed -- fine in a notebook.
spec_to_validate = json.load(open("train_bus_ebike_mtv_ucb.filled.json"))
sensing_configs = json.load(open("sensing_regimes.all.specs.json"))
###Output
_____no_output_____
###Markdown
Validating the time range
###Code
# The formatted dates stored in the spec must round-trip to the same calendar
# dates as the raw timestamps; report any disagreement.
print("Experiment runs from %s -> %s" % (arrow.get(spec_to_validate["start_ts"]), arrow.get(spec_to_validate["end_ts"])))
start_fmt_time_to_validate = arrow.get(spec_to_validate["start_ts"]).format("YYYY-MM-DD")
end_fmt_time_to_validate = arrow.get(spec_to_validate["end_ts"]).format("YYYY-MM-DD")
if (start_fmt_time_to_validate != spec_to_validate["start_fmt_date"]):
    print("VALIDATION FAILED, got start %s, expected %s" % (start_fmt_time_to_validate, spec_to_validate["start_fmt_date"]))
if (end_fmt_time_to_validate != spec_to_validate["end_fmt_date"]):
    print("VALIDATION FAILED, got end %s, expected %s" % (end_fmt_time_to_validate, spec_to_validate["end_fmt_date"]))
###Output
_____no_output_____
###Markdown
Validating calibration trips
###Code
def get_map_for_calibration_test(trip):
    """Render a calibration trip: green start marker, red end marker, and a
    straight line between them (popup = trip id).

    Note: in this spec version start_loc/end_loc are bare geometry dicts
    ("coordinates"/"name" at top level), not GeoJSON Features.
    Returns an empty map when either endpoint is missing.
    """
    curr_map = folium.Map()
    if trip["start_loc"] is None or trip["end_loc"] is None:
        return curr_map
    # GeoJSON is [lon, lat]; folium wants [lat, lon].
    curr_start = lonlat_swap(trip["start_loc"]["coordinates"])
    curr_end = lonlat_swap(trip["end_loc"]["coordinates"])
    folium.Marker(curr_start, icon=folium.Icon(color="green"),
                  popup="Start: %s" % trip["start_loc"]["name"]).add_to(curr_map)
    folium.Marker(curr_end, icon=folium.Icon(color="red"),
                  popup="End: %s" % trip["end_loc"]["name"]).add_to(curr_map)
    folium.PolyLine([curr_start, curr_end], popup=trip["id"]).add_to(curr_map)
    curr_map.fit_bounds([curr_start, curr_end])
    return curr_map
# Check every calibration test's sensing config against the canonical
# definition for its id, and lay the trip maps out in a 4-column grid.
calibration_tests = spec_to_validate["calibration_tests"]
rows = get_row_count(len(calibration_tests), 4)
calibration_maps = bre.Figure((rows,4))
for i, t in enumerate(calibration_tests):
    if t["config"]["sensing_config"] != sensing_configs[t["config"]["id"]]["sensing_config"]:
        # BUG FIX: the format string had no placeholder, so applying % to the
        # test dict silently dropped it from the message.
        print("Mismatch in config for test %s" % t)
    curr_map = get_map_for_calibration_test(t)
    calibration_maps.add_subplot(rows, 4, i+1).add_child(curr_map)
calibration_maps
###Output
_____no_output_____
###Markdown
Validating evaluation trips
###Code
def get_map_for_travel_leg(trip):
    """Render a TRAVEL leg: start/end markers, optional numbered waypoint
    icons, and the single route polyline with a circle at every point."""
    curr_map = folium.Map()
    get_marker(trip["start_loc"], "green").add_to(curr_map)
    get_marker(trip["end_loc"], "red").add_to(curr_map)
    # trips from relations won't have waypoints
    if "waypoint_coords" in trip:
        for i, wpc in enumerate(trip["waypoint_coords"]["geometry"]["coordinates"]):
            folium.map.Marker(
                lonlat_swap(wpc), popup="%d" % i,
                icon=fof.DivIcon(class_name='leaflet-div-icon')).add_to(curr_map)
    print("Found %d coordinates for the route" % (len(trip["route_coords"]["geometry"]["coordinates"])))
    # GeoJSON is [lon, lat]; folium wants [lat, lon].
    latlng_route_coords = [lonlat_swap(rc) for rc in trip["route_coords"]["geometry"]["coordinates"]]
    folium.PolyLine(latlng_route_coords,
                    popup="%s: %s" % (trip["mode"], trip["name"])).add_to(curr_map)
    for i, c in enumerate(latlng_route_coords):
        folium.CircleMarker(c, radius=5, popup="%d: %s" % (i, c)).add_to(curr_map)
    curr_map.fit_bounds(ful.get_bounds(trip["route_coords"]["geometry"]["coordinates"], lonlat=True))
    return curr_map

def get_map_for_shim_leg(trip):
    """Render a SHIM (non-travel) leg as a single purple marker."""
    curr_map = folium.Map()
    mkr = get_marker(trip["loc"], "purple")
    mkr.add_to(curr_map)
    curr_map.fit_bounds(mkr.get_bounds())
    return curr_map
# Build a map per evaluation leg (travel legs and shim legs are rendered
# differently) and lay them out in a 2-column grid.
evaluation_trips = spec_to_validate["evaluation_trips"]
map_list = []
for t in evaluation_trips:
    for l in t["legs"]:
        if l["type"] == "TRAVEL":
            curr_map = get_map_for_travel_leg(l)
            map_list.append(curr_map)
        else:
            curr_map = get_map_for_shim_leg(l)
            map_list.append(curr_map)
rows = get_row_count(len(map_list), 2)
evaluation_maps = bre.Figure(ratio="{}%".format((rows/2) * 100))
for i, curr_map in enumerate(map_list):
    evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map)
evaluation_maps
###Output
_____no_output_____
###Markdown
Validating sensing settings
###Code
# Each phone-OS entry's expanded sensing_configs must line up, in order,
# with the ids listed in its "compare" list.
for ss in spec_to_validate["sensing_settings"]:
    for phoneOS, compare_map in ss.items():
        compare_list = compare_map["compare"]
        for i, ssc in enumerate(compare_map["sensing_configs"]):
            if ssc["id"] != compare_list[i]:
                print("Mismatch in sensing configurations for %s" % ss)
###Output
_____no_output_____
###Markdown
Setup some basic stuff
###Code
import logging
logging.getLogger().setLevel(logging.DEBUG)
import folium
import folium.features as fof
import folium.utilities as ful
import branca.element as bre
import json
import geojson as gj
import arrow
import shapely.geometry as shpg
import pandas as pd
import geopandas as gpd
def lonlat_swap(lon_lat):
    """Swap a [lon, lat] pair into [lat, lon] (folium wants lat-first)."""
    return list(lon_lat)[::-1]
def get_row_count(n_maps, cols):
    """Return the number of grid rows needed to show n_maps maps in `cols` columns.

    BUG FIX: the original used true division, so a partially filled last row
    produced a fractional count (e.g. 7 maps / 4 cols -> 2.75); callers feed
    this to subplot indexing, which needs the integer ceiling.
    """
    rows = n_maps // cols
    if n_maps % cols != 0:
        rows = rows + 1
    return rows
def get_one_marker(loc, disp_color):
    """Build a folium overlay for a single GeoJSON feature.

    Point features become a folium.Marker; single-ring Polygon features become
    a folium.PolyLine outline. Any other geometry type implicitly returns None.
    """
    if loc["geometry"]["type"] == "Point":
        # GeoJSON stores [lon, lat]; folium expects [lat, lon].
        curr_latlng = lonlat_swap(loc["geometry"]["coordinates"])
        return folium.Marker(curr_latlng, icon=folium.Icon(color=disp_color),
                             popup="%s" % loc["properties"]["name"])
    elif loc["geometry"]["type"] == "Polygon":
        assert len(loc["geometry"]["coordinates"]) == 1,\
            "Only simple polygons supported!"
        curr_latlng = [lonlat_swap(c) for c in loc["geometry"]["coordinates"][0]]
        # print("Returning polygon for %s" % curr_latlng)
        return folium.PolyLine(curr_latlng, color=disp_color, fill=disp_color,
                               popup="%s" % loc["properties"]["name"])

def get_marker(loc, disp_color):
    """Return a list of folium overlays for `loc`, which may be one GeoJSON
    feature or a list of features."""
    if type(loc) == list:
        return [get_one_marker(l, disp_color) for l in loc]
    else:
        print("Found single entry, is this expected?")
        return [get_one_marker(loc, disp_color)]
###Output
_____no_output_____
###Markdown
Read the data
###Code
# The filled-in, rerouted spec under validation plus the canonical regimes.
# NOTE(review): open() handles are never closed -- fine in a notebook.
spec_to_validate = json.load(open("final_sfbayarea_filled_reroutes/train_bus_ebike_mtv_ucb.filled.reroute.json"))
sensing_configs = json.load(open("sensing_regimes.all.specs.json"))
###Output
_____no_output_____
###Markdown
Validating the time range
###Code
# The formatted dates stored in the spec must round-trip to the same calendar
# dates as the raw timestamps; report any disagreement.
print("Experiment runs from %s -> %s" % (arrow.get(spec_to_validate["start_ts"]), arrow.get(spec_to_validate["end_ts"])))
start_fmt_time_to_validate = arrow.get(spec_to_validate["start_ts"]).format("YYYY-MM-DD")
end_fmt_time_to_validate = arrow.get(spec_to_validate["end_ts"]).format("YYYY-MM-DD")
if (start_fmt_time_to_validate != spec_to_validate["start_fmt_date"]):
    print("VALIDATION FAILED, got start %s, expected %s" % (start_fmt_time_to_validate, spec_to_validate["start_fmt_date"]))
if (end_fmt_time_to_validate != spec_to_validate["end_fmt_date"]):
    print("VALIDATION FAILED, got end %s, expected %s" % (end_fmt_time_to_validate, spec_to_validate["end_fmt_date"]))
###Output
_____no_output_____
###Markdown
Validating calibration trips
###Code
def get_map_for_calibration_test(trip):
    """Render a calibration trip: green start marker, red end marker, and a
    straight line between them (popup = trip id).

    Returns an empty map when either endpoint is missing.
    """
    curr_map = folium.Map()
    if trip["start_loc"] is None or trip["end_loc"] is None:
        return curr_map
    # GeoJSON is [lon, lat]; folium wants [lat, lon].
    curr_start = lonlat_swap(trip["start_loc"]["geometry"]["coordinates"])
    curr_end = lonlat_swap(trip["end_loc"]["geometry"]["coordinates"])
    folium.Marker(curr_start, icon=folium.Icon(color="green"),
                  popup="Start: %s" % trip["start_loc"]["properties"]["name"]).add_to(curr_map)
    folium.Marker(curr_end, icon=folium.Icon(color="red"),
                  popup="End: %s" % trip["end_loc"]["properties"]["name"]).add_to(curr_map)
    folium.PolyLine([curr_start, curr_end], popup=trip["id"]).add_to(curr_map)
    curr_map.fit_bounds([curr_start, curr_end])
    return curr_map
# Check every calibration test's sensing config against the canonical
# definition for its id, and lay the trip maps out in a 4-column grid.
calibration_tests = spec_to_validate["calibration_tests"]
rows = get_row_count(len(calibration_tests), 4)
calibration_maps = bre.Figure((rows,4))
for i, t in enumerate(calibration_tests):
    if t["config"]["sensing_config"] != sensing_configs[t["config"]["id"]]["sensing_config"]:
        # BUG FIX: the format string had no placeholder, so applying % to the
        # test dict silently dropped it from the message.
        print("Mismatch in config for test %s" % t)
    curr_map = get_map_for_calibration_test(t)
    calibration_maps.add_subplot(rows, 4, i+1).add_child(curr_map)
calibration_maps
###Output
_____no_output_____
###Markdown
Validating evaluation trips
###Code
def add_waypoint_markers(waypoint_coords, curr_map):
    """Drop a numbered div-icon marker at every waypoint of the leg."""
    coords = waypoint_coords["geometry"]["coordinates"]
    for idx, coord in enumerate(coords):
        marker = folium.map.Marker(
            lonlat_swap(coord), popup="%d" % idx,
            icon=fof.DivIcon(class_name='leaflet-div-icon'))
        marker.add_to(curr_map)
def get_map_for_travel_leg(trip):
    """Build a folium map for a travel leg: all time-windowed start/end
    markers plus every (possibly rerouted) trajectory drawn as a polyline
    with numbered circle markers.
    """
    curr_map = folium.Map()
    # A leg can carry several time-windowed start/end locations (reroutes).
    # get_one_marker is presumably defined in another notebook cell — confirm.
    [get_one_marker(loc, "green").add_to(curr_map) for loc in trip["start_loc"]]
    [get_one_marker(loc, "red").add_to(curr_map) for loc in trip["end_loc"]]
    # iterate over all reroutes
    for rc in trip["route_coords"]:
        coords = rc["geometry"]["coordinates"]
        print("Found %d coordinates for the route" % (len(coords)))
        latlng_coords = [lonlat_swap(c) for c in coords]
        folium.PolyLine(latlng_coords, popup="%s: %s" % (trip["mode"], trip["name"])).add_to(curr_map)
        # circle markers expose the index of each fix in the popup
        for i, c in enumerate(latlng_coords):
            folium.CircleMarker(c, radius=5, popup="%d: %s" % (i, c)).add_to(curr_map)
        # NOTE(review): fit_bounds runs once per reroute, so the final zoom
        # reflects only the last route in the list.
        curr_map.fit_bounds(ful.get_bounds(latlng_coords))
    return curr_map
def get_map_for_shim_leg(trip):
    """Map a shim (non-travel) leg as purple markers, one per location."""
    shim_map = folium.Map()
    markers = [get_one_marker(loc, "purple") for loc in trip["loc"]]
    for mkr in markers:
        mkr.add_to(shim_map)
        shim_map.fit_bounds(mkr.get_bounds())
    return shim_map
# Build one map per evaluation leg and lay them out on a two-column figure.
evaluation_trips = spec_to_validate["evaluation_trips"]
map_list = []
for t in evaluation_trips:
    for l in t["legs"]:
        builder = get_map_for_travel_leg if l["type"] == "TRAVEL" else get_map_for_shim_leg
        map_list.append(builder(l))
rows = get_row_count(len(map_list), 2)
evaluation_maps = bre.Figure(ratio="{}%".format((rows/2) * 100))
for i, curr_map in enumerate(map_list):
    evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map)
evaluation_maps
###Output
_____no_output_____
###Markdown
Validating start and end polygons
###Code
def check_start_end_contains(leg):
    """Validate each time-windowed route of a travel leg against its polygons.

    For every route, selects the start/end location polygons whose validity
    window covers the route's window, then asserts the route starts inside a
    start polygon, ends inside an end polygon, and that the in-polygon points
    form a contiguous run (no leave-and-re-enter).
    """
    for rc in leg["route_coords"]:
        points = gpd.GeoSeries([shpg.Point(p) for p in rc["geometry"]["coordinates"]])
        route_start_ts = rc["properties"]["valid_start_ts"]
        route_end_ts = rc["properties"]["valid_end_ts"]
        # query all start_locs and end_locs where [route_start_ts, route_end_ts] ∈ [loc_start_ts, loc_end_ts]
        start_locs = [shpg.shape(sl["geometry"]) for sl in leg["start_loc"]
                      if route_start_ts >= sl["properties"]["valid_start_ts"]\
                      and route_end_ts <= sl["properties"]["valid_end_ts"]]
        end_locs = [shpg.shape(el["geometry"]) for el in leg["end_loc"]
                    if route_start_ts >= el["properties"]["valid_start_ts"]\
                    and route_end_ts <= el["properties"]["valid_end_ts"]]
        # at least one location window must cover the route's window
        assert len(start_locs) >= 1
        assert len(end_locs) >= 1
        for sl in start_locs:
            start_contains = points.apply(lambda p: sl.contains(p))
            print(points[start_contains])
            # some of the points are within the start polygon
            assert start_contains.any(), leg
            # the first point is within the start polygon
            assert start_contains.iloc[0], points.head()
            # points within polygons are contiguous
            max_index_diff_start = pd.Series(start_contains[start_contains == True].index).diff().max()
            assert pd.isnull(max_index_diff_start) or max_index_diff_start == 1, "Max diff in index = %s for points %s" % (gpd.GeoSeries(start_contains[start_contains == True].index).diff().max(), points.head())
        for el in end_locs:
            end_contains = points.apply(lambda p: el.contains(p))
            print(points[end_contains])
            # some of the points are within the end polygon
            assert end_contains.any(), leg
            # the last point is within the end polygon
            assert end_contains.iloc[-1], points.tail()
            # points within polygons are contiguous
            max_index_diff_end = pd.Series(end_contains[end_contains == True].index).diff().max()
            assert pd.isnull(max_index_diff_end) or max_index_diff_end == 1, "Max diff in index = %s for points %s" % (gpd.GeoSeries(end_contains[end_contains == True].index).diff().max(), points.tail())
# Run the polygon containment checks on every travel leg, skipping any leg
# ids explicitly listed as invalid.
invalid_legs = []
for trip in evaluation_trips:
    for leg in trip["legs"]:
        if leg["type"] != "TRAVEL" or leg["id"] in invalid_legs:
            continue
        print("Checking leg %s, %s" % (trip["id"], leg["id"]))
        check_start_end_contains(leg)
###Output
_____no_output_____
###Markdown
Validating sensing settings
###Code
# Each sensing_settings entry maps a phone OS to the configs it compares;
# every config id must line up with the declared "compare" list.
for ss in spec_to_validate["sensing_settings"]:
    for phoneOS, compare_map in ss.items():
        expected_ids = compare_map["compare"]
        for idx, cfg in enumerate(compare_map["sensing_configs"]):
            if cfg["id"] != expected_ids[idx]:
                print("Mismatch in sensing configurations for %s" % ss)
###Output
_____no_output_____
###Markdown
Validating routes for no duplicate coordinates
###Code
REL_TOL = 1e-5
def is_coords_equal(c1, c2):
    """True when the lon and lat components of c1 and c2 each differ by < REL_TOL."""
    d_lon = abs(c2[0] - c1[0])
    d_lat = abs(c2[1] - c1[1])
    return max(d_lon, d_lat) < REL_TOL
# For every route, verify that near-duplicate coordinates (within REL_TOL)
# only appear in contiguous runs: two duplicates with a *different* point
# between them indicate a corrupted trace, so we assert.
for t in evaluation_trips:
    for l in t["legs"]:
        if l["type"] == "TRAVEL":
            for rc in l["route_coords"]:
                print("Checking leg %s, %s between dates %s, %s" % (t["id"], l["id"], rc["properties"]["valid_start_fmt_date"], rc["properties"]["valid_end_fmt_date"]))
                # O(n^2) pairwise scan — acceptable for spec-sized routes
                for i in range(len(rc["geometry"]["coordinates"])):
                    c1 = rc["geometry"]["coordinates"][i]
                    for j in range(i + 1, len(rc["geometry"]["coordinates"])):
                        c2 = rc["geometry"]["coordinates"][j]
                        if is_coords_equal(c1, c2):
                            # print(f"Found duplicate entry, checking entries {i}...{j}")
                            # scan the in-between points for any non-duplicate
                            not_matched_index = -1
                            for k in range(i, j+1):
                                c3 = rc["geometry"]["coordinates"][k]
                                if not is_coords_equal(c1, c3):
                                    not_matched_index = k
                            if not_matched_index != -1:
                                assert False, (f"\tDuplicates {c1}, {c2} found @ indices {i}, {j} with non-duplicate {not_matched_index} in between")
###Output
_____no_output_____
###Markdown
Validating overlapping time ranges. Representative test case (should break):
###Code
def check_overlaps(x):
    """Assert that the valid-time windows of the entries in x never overlap.

    Entries are GeoJSON-like dicts with properties.valid_start_ts /
    properties.valid_end_ts; windows are sorted by start and each window
    must end no later than the next one begins.
    """
    spans = [(e["properties"]["valid_start_ts"], e["properties"]["valid_end_ts"]) for e in x]
    spans.sort(key=lambda span: span[0])
    for curr, nxt in zip(spans, spans[1:]):
        assert (ts1 := curr[1]) <= (ts2 := nxt[0]), f"Overlapping timestamps: {arrow.get(ts1)}, {arrow.get(ts2)}"
# Two deliberately-overlapping windows: 2020-01-01..2020-03-30 lies inside
# 2019-07-16..2020-04-30, so check_overlaps must raise.
invalid_ranges = [
    {
        "properties": {
            # NOTE(review): `.timestamp` as an attribute is arrow < 1.0 API;
            # arrow >= 1.0 requires the method call `.timestamp()` — confirm
            # the pinned arrow version.
            "valid_start_ts": arrow.get("2020-01-01").timestamp,
            "valid_end_ts": arrow.get("2020-03-30").timestamp
        }
    },
    {
        "properties": {
            "valid_start_ts": arrow.get("2019-07-16").timestamp,
            "valid_end_ts": arrow.get("2020-04-30").timestamp
        }
    }
]
try:
    check_overlaps(invalid_ranges)
except AssertionError as e:
    # expected: show the overlap message instead of crashing the notebook
    print(e)
###Output
_____no_output_____
###Markdown
Actual check of spec:
###Code
# Check every leg's time-windowed artifacts for overlapping validity ranges.
for t in evaluation_trips:
    for l in t["legs"]:
        print("Checking leg %s, %s" % (t["id"], l["id"]))
        for label, key in (("shim locs", "loc"), ("start locs", "start_loc"), ("end locs", "end_loc")):
            if key in l:
                print("\tChecking %s..." % label)
                check_overlaps(l[key])
        # trajectories only exist on travel legs
        if l["type"] == "TRAVEL":
            print("\tChecking trajectories...")
            check_overlaps(l["route_coords"])
###Output
_____no_output_____
###Markdown
Setup some basic stuff
###Code
# Notebook-wide imports plus verbose logging for the validation helpers below.
import logging
logging.getLogger().setLevel(logging.DEBUG)
import folium
import folium.features as fof
import folium.utilities as ful
import branca.element as bre
import json
import geojson as gj
import arrow
import shapely.geometry as shpg
import pandas as pd
import geopandas as gpd
def lonlat_swap(lon_lat):
    """Swap a GeoJSON [lon, lat] pair into the [lat, lon] order folium expects."""
    return list(lon_lat)[::-1]
def get_row_count(n_maps, cols):
    """Return the number of grid rows needed for n_maps maps at cols per row.

    Bug fix: under Python 3, `n_maps / cols` is true (float) division, so the
    original returned e.g. 2.25 rows — not a valid argument for
    Figure.add_subplot. Use ceiling integer division instead.
    """
    return -(-n_maps // cols)
def get_marker(loc, disp_color):
    """Turn a GeoJSON Point into a folium Marker, or a simple Polygon into a
    colored PolyLine, labelled with the location's name."""
    geom = loc["geometry"]
    label = "%s" % loc["properties"]["name"]
    if geom["type"] == "Point":
        latlng = lonlat_swap(geom["coordinates"])
        return folium.Marker(latlng, icon=folium.Icon(color=disp_color),
                             popup=label)
    elif geom["type"] == "Polygon":
        # GeoJSON polygons are lists of rings; only a single outer ring is handled
        assert len(geom["coordinates"]) == 1,\
            "Only simple polygons supported!"
        ring = [lonlat_swap(c) for c in geom["coordinates"][0]]
        # print("Returning polygon for %s" % ring)
        return folium.PolyLine(ring, color=disp_color, fill=disp_color,
                               popup=label)
###Output
_____no_output_____
###Markdown
Read the data
###Code
# Load the filled spec to validate and the canonical sensing regimes.
# Use context managers so the file handles are closed promptly — the
# original json.load(open(...)) leaked both handles.
with open("train_bus_ebike_mtv_ucb.filled.json") as spec_file:
    spec_to_validate = json.load(spec_file)
with open("sensing_regimes.all.specs.json") as regimes_file:
    sensing_configs = json.load(regimes_file)
###Output
_____no_output_____
###Markdown
Validating the time range
###Code
# Report the experiment's overall time window and verify that the
# human-readable start/end dates stored in the spec match the dates derived
# from the raw timestamps.
print("Experiment runs from %s -> %s" % (arrow.get(spec_to_validate["start_ts"]), arrow.get(spec_to_validate["end_ts"])))
# NOTE(review): arrow formats in UTC by default — confirm the spec's
# *_fmt_date fields were also generated in UTC.
start_fmt_time_to_validate = arrow.get(spec_to_validate["start_ts"]).format("YYYY-MM-DD")
end_fmt_time_to_validate = arrow.get(spec_to_validate["end_ts"]).format("YYYY-MM-DD")
if (start_fmt_time_to_validate != spec_to_validate["start_fmt_date"]):
    print("VALIDATION FAILED, got start %s, expected %s" % (start_fmt_time_to_validate, spec_to_validate["start_fmt_date"]))
if (end_fmt_time_to_validate != spec_to_validate["end_fmt_date"]):
    print("VALIDATION FAILED, got end %s, expected %s" % (end_fmt_time_to_validate, spec_to_validate["end_fmt_date"]))
###Output
_____no_output_____
###Markdown
Validating calibration trips
###Code
def get_map_for_calibration_test(trip):
    """Map a calibration trip: start/end markers joined by a straight line.

    This spec variant stores the location name and coordinates directly on
    start_loc/end_loc (no nested geometry/properties objects).
    """
    cmap = folium.Map()
    # Stationary calibration tests have no locations: render an empty map.
    if trip["start_loc"] is None or trip["end_loc"] is None:
        return cmap
    start_latlng = lonlat_swap(trip["start_loc"]["coordinates"])
    end_latlng = lonlat_swap(trip["end_loc"]["coordinates"])
    endpoints = [
        (start_latlng, "green", "Start: %s" % trip["start_loc"]["name"]),
        (end_latlng, "red", "End: %s" % trip["end_loc"]["name"]),
    ]
    for latlng, color, label in endpoints:
        folium.Marker(latlng, icon=folium.Icon(color=color), popup=label).add_to(cmap)
    folium.PolyLine([start_latlng, end_latlng], popup=trip["id"]).add_to(cmap)
    cmap.fit_bounds([start_latlng, end_latlng])
    return cmap
# Render every calibration trip on a 4-column map grid and cross-check each
# test's sensing config against the canonical sensing_configs registry.
calibration_tests = spec_to_validate["calibration_tests"]
rows = get_row_count(len(calibration_tests), 4)
calibration_maps = bre.Figure((rows,4))
for i, t in enumerate(calibration_tests):
    if t["config"]["sensing_config"] != sensing_configs[t["config"]["id"]]["sensing_config"]:
        # Bug fix: the original format string had no %s placeholder, so the
        # offending test was never interpolated into the warning.
        print("Mismatch in config for test %s" % t)
    curr_map = get_map_for_calibration_test(t)
    calibration_maps.add_subplot(rows, 4, i+1).add_child(curr_map)
calibration_maps
###Output
_____no_output_____
###Markdown
Validating evaluation trips
###Code
def get_map_for_travel_leg(trip):
    """Build a folium map for a travel leg: start/end markers, optional
    numbered waypoint icons, and the route polyline with per-point circles.
    """
    curr_map = folium.Map()
    get_marker(trip["start_loc"], "green").add_to(curr_map)
    get_marker(trip["end_loc"], "red").add_to(curr_map)
    # trips from relations won't have waypoints
    if "waypoint_coords" in trip:
        for i, wpc in enumerate(trip["waypoint_coords"]["geometry"]["coordinates"]):
            folium.map.Marker(
                lonlat_swap(wpc), popup="%d" % i,
                icon=fof.DivIcon(class_name='leaflet-div-icon')).add_to(curr_map)
    print("Found %d coordinates for the route" % (len(trip["route_coords"]["geometry"]["coordinates"])))
    latlng_route_coords = [lonlat_swap(rc) for rc in trip["route_coords"]["geometry"]["coordinates"]]
    folium.PolyLine(latlng_route_coords,
                    popup="%s: %s" % (trip["mode"], trip["name"])).add_to(curr_map)
    # circle markers expose the index of each fix in the popup
    for i, c in enumerate(latlng_route_coords):
        folium.CircleMarker(c, radius=5, popup="%d: %s" % (i, c)).add_to(curr_map)
    # lonlat=True lets folium swap the GeoJSON coordinate order itself
    curr_map.fit_bounds(ful.get_bounds(trip["route_coords"]["geometry"]["coordinates"], lonlat=True))
    return curr_map
def get_map_for_shim_leg(trip):
    """Map a shim leg as a single purple marker/polygon, zoomed to its bounds."""
    shim_map = folium.Map()
    marker = get_marker(trip["loc"], "purple")
    marker.add_to(shim_map)
    shim_map.fit_bounds(marker.get_bounds())
    return shim_map
# Build one map per evaluation leg and lay them out on a two-column figure.
evaluation_trips = spec_to_validate["evaluation_trips"]
map_list = []
for t in evaluation_trips:
    for l in t["legs"]:
        builder = get_map_for_travel_leg if l["type"] == "TRAVEL" else get_map_for_shim_leg
        map_list.append(builder(l))
rows = get_row_count(len(map_list), 2)
evaluation_maps = bre.Figure(ratio="{}%".format((rows/2) * 100))
for i, curr_map in enumerate(map_list):
    evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map)
evaluation_maps
###Output
_____no_output_____
###Markdown
Validating start and end polygons
###Code
def check_start_end_contains(leg):
    """Validate a travel leg's route against its start/end polygons.

    Asserts that: some route points fall inside each polygon, the first/last
    point lies in the start/end polygon respectively, and the in-polygon
    points form a contiguous run (no leave-and-re-enter).
    """
    points = gpd.GeoSeries([shpg.Point(p) for p in leg["route_coords"]["geometry"]["coordinates"]])
    start_loc = shpg.shape(leg["start_loc"]["geometry"])
    end_loc = shpg.shape(leg["end_loc"]["geometry"])
    start_contains = points.apply(lambda p: start_loc.contains(p))
    print(points[start_contains])
    end_contains = points.apply(lambda p: end_loc.contains(p))
    print(points[end_contains])
    # Some of the points are within the start and end polygons
    assert start_contains.any()
    assert end_contains.any()
    # The first and last point are within the start and end polygons
    assert start_contains.iloc[0], points.head()
    assert end_contains.iloc[-1], points.tail()
    # The points within the polygons are contiguous: consecutive in-polygon
    # indices must differ by exactly 1 (NaN means a single in-polygon point)
    max_index_diff_start = pd.Series(start_contains[start_contains == True].index).diff().max()
    max_index_diff_end = pd.Series(end_contains[end_contains == True].index).diff().max()
    # Bug fix: the start assertion message previously indexed with the *end*
    # mask and recomputed the value, reporting a misleading number on
    # failure; reuse the values computed above instead.
    assert pd.isnull(max_index_diff_start) or max_index_diff_start == 1, "Max diff in index = %s for points %s" % (max_index_diff_start, points.head())
    assert pd.isnull(max_index_diff_end) or max_index_diff_end == 1, "Max diff in index = %s for points %s" % (max_index_diff_end, points.tail())
# Run the polygon containment checks on every travel leg, skipping any leg
# ids explicitly listed as invalid.
invalid_legs = []
for trip in evaluation_trips:
    for leg in trip["legs"]:
        if leg["type"] != "TRAVEL" or leg["id"] in invalid_legs:
            continue
        print("Checking leg %s, %s" % (trip["id"], leg["id"]))
        check_start_end_contains(leg)
###Output
_____no_output_____
###Markdown
Validating sensing settings
###Code
# Each sensing_settings entry maps a phone OS to the configs it compares;
# every config id must line up with the declared "compare" list.
for ss in spec_to_validate["sensing_settings"]:
    for phoneOS, compare_map in ss.items():
        expected_ids = compare_map["compare"]
        for idx, cfg in enumerate(compare_map["sensing_configs"]):
            if cfg["id"] != expected_ids[idx]:
                print("Mismatch in sensing configurations for %s" % ss)
###Output
_____no_output_____
###Markdown
Setup some basic stuff
###Code
# Notebook-wide imports plus verbose logging for the validation helpers below.
import logging
logging.getLogger().setLevel(logging.DEBUG)
import folium
import folium.features as fof
import folium.utilities as ful
import branca.element as bre
import json
import geojson as gj
import arrow
import shapely.geometry as shpg
import pandas as pd
import geopandas as gpd
def lonlat_swap(lon_lat):
    """Swap a GeoJSON [lon, lat] pair into the [lat, lon] order folium expects."""
    return list(lon_lat)[::-1]
def get_row_count(n_maps, cols):
    """Return the number of grid rows needed for n_maps maps at cols per row.

    Bug fix: under Python 3, `n_maps / cols` is true (float) division, so the
    original returned e.g. 2.25 rows — not a valid argument for
    Figure.add_subplot. Use ceiling integer division instead.
    """
    return -(-n_maps // cols)
def get_marker(loc, disp_color):
    """Turn a GeoJSON Point into a folium Marker, or a simple Polygon into a
    colored PolyLine, labelled with the location's name."""
    geom = loc["geometry"]
    label = "%s" % loc["properties"]["name"]
    if geom["type"] == "Point":
        latlng = lonlat_swap(geom["coordinates"])
        return folium.Marker(latlng, icon=folium.Icon(color=disp_color),
                             popup=label)
    elif geom["type"] == "Polygon":
        # GeoJSON polygons are lists of rings; only a single outer ring is handled
        assert len(geom["coordinates"]) == 1,\
            "Only simple polygons supported!"
        ring = [lonlat_swap(c) for c in geom["coordinates"][0]]
        # print("Returning polygon for %s" % ring)
        return folium.PolyLine(ring, color=disp_color, fill=disp_color,
                               popup=label)
###Output
_____no_output_____
###Markdown
Read the data
###Code
# Load the filled spec to validate and the canonical sensing regimes.
# Use context managers so the file handles are closed promptly — the
# original json.load(open(...)) leaked both handles.
with open("final_sfbayarea_filled/train_bus_ebike_mtv_ucb.filled.json") as spec_file:
    spec_to_validate = json.load(spec_file)
with open("sensing_regimes.all.specs.json") as regimes_file:
    sensing_configs = json.load(regimes_file)
###Output
_____no_output_____
###Markdown
Validating the time range
###Code
# Report the experiment's overall time window and verify that the
# human-readable start/end dates stored in the spec match the dates derived
# from the raw timestamps.
print("Experiment runs from %s -> %s" % (arrow.get(spec_to_validate["start_ts"]), arrow.get(spec_to_validate["end_ts"])))
# NOTE(review): arrow formats in UTC by default — confirm the spec's
# *_fmt_date fields were also generated in UTC.
start_fmt_time_to_validate = arrow.get(spec_to_validate["start_ts"]).format("YYYY-MM-DD")
end_fmt_time_to_validate = arrow.get(spec_to_validate["end_ts"]).format("YYYY-MM-DD")
if (start_fmt_time_to_validate != spec_to_validate["start_fmt_date"]):
    print("VALIDATION FAILED, got start %s, expected %s" % (start_fmt_time_to_validate, spec_to_validate["start_fmt_date"]))
if (end_fmt_time_to_validate != spec_to_validate["end_fmt_date"]):
    print("VALIDATION FAILED, got end %s, expected %s" % (end_fmt_time_to_validate, spec_to_validate["end_fmt_date"]))
###Output
_____no_output_____
###Markdown
Validating calibration trips
###Code
def get_map_for_calibration_test(trip):
    """Map a calibration trip: start/end markers joined by a straight line.

    This spec variant stores the location name and coordinates directly on
    start_loc/end_loc (no nested geometry/properties objects).
    """
    cmap = folium.Map()
    # Stationary calibration tests have no locations: render an empty map.
    if trip["start_loc"] is None or trip["end_loc"] is None:
        return cmap
    start_latlng = lonlat_swap(trip["start_loc"]["coordinates"])
    end_latlng = lonlat_swap(trip["end_loc"]["coordinates"])
    endpoints = [
        (start_latlng, "green", "Start: %s" % trip["start_loc"]["name"]),
        (end_latlng, "red", "End: %s" % trip["end_loc"]["name"]),
    ]
    for latlng, color, label in endpoints:
        folium.Marker(latlng, icon=folium.Icon(color=color), popup=label).add_to(cmap)
    folium.PolyLine([start_latlng, end_latlng], popup=trip["id"]).add_to(cmap)
    cmap.fit_bounds([start_latlng, end_latlng])
    return cmap
# Render every calibration trip on a 4-column map grid and cross-check each
# test's sensing config against the canonical sensing_configs registry.
calibration_tests = spec_to_validate["calibration_tests"]
rows = get_row_count(len(calibration_tests), 4)
calibration_maps = bre.Figure((rows,4))
for i, t in enumerate(calibration_tests):
    if t["config"]["sensing_config"] != sensing_configs[t["config"]["id"]]["sensing_config"]:
        # Bug fix: the original format string had no %s placeholder, so the
        # offending test was never interpolated into the warning.
        print("Mismatch in config for test %s" % t)
    curr_map = get_map_for_calibration_test(t)
    calibration_maps.add_subplot(rows, 4, i+1).add_child(curr_map)
calibration_maps
###Output
_____no_output_____
###Markdown
Validating evaluation trips
###Code
def get_map_for_travel_leg(trip):
    """Build a folium map for a travel leg: start/end markers, optional
    numbered waypoint icons, and the route polyline with per-point circles.
    """
    curr_map = folium.Map()
    get_marker(trip["start_loc"], "green").add_to(curr_map)
    get_marker(trip["end_loc"], "red").add_to(curr_map)
    # trips from relations won't have waypoints
    if "waypoint_coords" in trip:
        for i, wpc in enumerate(trip["waypoint_coords"]["geometry"]["coordinates"]):
            folium.map.Marker(
                lonlat_swap(wpc), popup="%d" % i,
                icon=fof.DivIcon(class_name='leaflet-div-icon')).add_to(curr_map)
    print("Found %d coordinates for the route" % (len(trip["route_coords"]["geometry"]["coordinates"])))
    latlng_route_coords = [lonlat_swap(rc) for rc in trip["route_coords"]["geometry"]["coordinates"]]
    folium.PolyLine(latlng_route_coords,
                    popup="%s: %s" % (trip["mode"], trip["name"])).add_to(curr_map)
    # circle markers expose the index of each fix in the popup
    for i, c in enumerate(latlng_route_coords):
        folium.CircleMarker(c, radius=5, popup="%d: %s" % (i, c)).add_to(curr_map)
    # lonlat=True lets folium swap the GeoJSON coordinate order itself
    curr_map.fit_bounds(ful.get_bounds(trip["route_coords"]["geometry"]["coordinates"], lonlat=True))
    return curr_map
def get_map_for_shim_leg(trip):
    """Map a shim leg as a single purple marker/polygon, zoomed to its bounds."""
    shim_map = folium.Map()
    marker = get_marker(trip["loc"], "purple")
    marker.add_to(shim_map)
    shim_map.fit_bounds(marker.get_bounds())
    return shim_map
# Build one map per evaluation leg and lay them out on a two-column figure.
evaluation_trips = spec_to_validate["evaluation_trips"]
map_list = []
for t in evaluation_trips:
    for l in t["legs"]:
        builder = get_map_for_travel_leg if l["type"] == "TRAVEL" else get_map_for_shim_leg
        map_list.append(builder(l))
rows = get_row_count(len(map_list), 2)
evaluation_maps = bre.Figure(ratio="{}%".format((rows/2) * 100))
for i, curr_map in enumerate(map_list):
    evaluation_maps.add_subplot(rows, 2, i+1).add_child(curr_map)
evaluation_maps
###Output
_____no_output_____
###Markdown
Validating start and end polygons
###Code
def check_start_end_contains(leg):
    """Validate a travel leg's route against its start/end polygons.

    Asserts that: some route points fall inside each polygon, the first/last
    point lies in the start/end polygon respectively, and the in-polygon
    points form a contiguous run (no leave-and-re-enter).
    """
    points = gpd.GeoSeries([shpg.Point(p) for p in leg["route_coords"]["geometry"]["coordinates"]])
    start_loc = shpg.shape(leg["start_loc"]["geometry"])
    end_loc = shpg.shape(leg["end_loc"]["geometry"])
    start_contains = points.apply(lambda p: start_loc.contains(p))
    print(points[start_contains])
    end_contains = points.apply(lambda p: end_loc.contains(p))
    print(points[end_contains])
    # Some of the points are within the start and end polygons
    assert start_contains.any()
    assert end_contains.any()
    # The first and last point are within the start and end polygons
    assert start_contains.iloc[0], points.head()
    assert end_contains.iloc[-1], points.tail()
    # The points within the polygons are contiguous: consecutive in-polygon
    # indices must differ by exactly 1 (NaN means a single in-polygon point)
    max_index_diff_start = pd.Series(start_contains[start_contains == True].index).diff().max()
    max_index_diff_end = pd.Series(end_contains[end_contains == True].index).diff().max()
    # Bug fix: the start assertion message previously indexed with the *end*
    # mask and recomputed the value, reporting a misleading number on
    # failure; reuse the values computed above instead.
    assert pd.isnull(max_index_diff_start) or max_index_diff_start == 1, "Max diff in index = %s for points %s" % (max_index_diff_start, points.head())
    assert pd.isnull(max_index_diff_end) or max_index_diff_end == 1, "Max diff in index = %s for points %s" % (max_index_diff_end, points.tail())
# Run the polygon containment checks on every travel leg, skipping any leg
# ids explicitly listed as invalid.
invalid_legs = []
for trip in evaluation_trips:
    for leg in trip["legs"]:
        if leg["type"] != "TRAVEL" or leg["id"] in invalid_legs:
            continue
        print("Checking leg %s, %s" % (trip["id"], leg["id"]))
        check_start_end_contains(leg)
###Output
_____no_output_____
###Markdown
Validating sensing settings
###Code
# Each sensing_settings entry maps a phone OS to the configs it compares;
# every config id must line up with the declared "compare" list.
for ss in spec_to_validate["sensing_settings"]:
    for phoneOS, compare_map in ss.items():
        expected_ids = compare_map["compare"]
        for idx, cfg in enumerate(compare_map["sensing_configs"]):
            if cfg["id"] != expected_ids[idx]:
                print("Mismatch in sensing configurations for %s" % ss)
###Output
_____no_output_____ |
images/reading/vague-janvier-2022/Pruning_SNIP_FORCE/Force_pruning.ipynb | ###Markdown
Pruning in Deep Neural Networks Authors: Yanis Chaigneau, Nicolas Tirel Institution : GreenAI UPPA This notebook spans the state-of-the-art pruning methods in deep neural networks. Pruning is used to remove weights in a neural network, in order to reduce the number of parameters, leading to a faster training without loss in accuracy. Furthermore, it can be used to reduce the energetic consumption of ML algorithms. Indeed, the growing number of parameters in deep learning networks has an impact on the CO2 emissions of the sector, which these methods aim to reduce.In the introduction,the historic of the field is drawn, with the presentation of the skeletonization (Mozer et al, 1989) and the Optimal Brain Damage (Lecun, 1990) pruning methods. A focus is then made on the pruning methods at initialization, with a comparison of three state-of-the-art algorithms: SNIP, GRASP and FORCE. Pruning:- Cut connexions in deep neural networks- Reducing the size of a network- Less energy consumptionI) Introduction and historicsII) Pruning at initialization: SNIP, GRaSP and FORCEIII) Comparison Introduction Different methods exist to prune a model. - Pruning trained models - Induce sparsity during training - Pruning at initialization Evolution of the number of publications containing "pruning" in the artificial intelligence field. Source: dimensions.ai Pruning after trainingMozer and Smolensky in 1989: trimming the fat from a network via relevance assessment The method: Skeletonization A method designed by Mozer and Smolensky in 1989 for trimming the fat from a network via relevance assessment. To do so, the relevance of each units is defined and computed, so as to reduce its size and improving its performance.The procedure is the following: Algorithm (Mozer et al) 1) Train the network 2) Compute the relevance of the units, so as to find which units have the more weight for the accuracy. 
3) Trim the least relevant units This type of algorithms is useful for many reasons: - More generalization - Reduce the energy consumption by removing hidden units - Enhance interpretability First approach: the relevance of a unit is determined by looking at the activity of each cells. The more the unit of the layer $l$ has many large-weighted connections, the more its activity should influence the other layers: $\rho_i = \sum_j w_{ij}$. However, if the effects of different connections cancel out, this is not a great metric. Second approach: The author defines the relevance of unit $i$ following the statement: "what will happen to the performance of the network when a unit is removed ?"$$\rho_i = \mathcal{L}_{without~unit~i} - \mathcal{L}_{with~unit~i}$$ where $\mathcal{L}$ is the training loss. QuestionWhat is the complexity of computing $\rho$ for all the units if we want to try all the patterns $p$ with $n$ units? Ready to see the answer? (click to expand)$$\mathcal{O}(n p)$$ where $n$ is the total number of units in the network and $p$ the number of patterns in the training set. The complexity is too large, we need to approximate the computation of $\rho$. To do so, we define $\alpha_i$ as the attentional strength of the unit, based on the attention mechanism. The idea of this coefficient is to represent whether a unit has an influence on the rest of the network. It simply weights the activation of a neuron j:$$y_j = f(\sum_i w_{ij} \alpha_i x_{ij})$$where $x_{ij}$ is the input from neuron $i$ to this neuron, $f$ the activation function, and $w_{ij}$ the input weights. Thus:- if $\alpha_i = 0$, unit $i$ does not have any influence on the rest of the network- if $\alpha_i = 1$, unit $i$ is a conventional unit. 
The following figure depicts the attentional strength coefficients on a simple feed-forward neural network: We then obtain a new definition for $\rho$:$$\rho_i = \mathcal{L}_{\alpha_i = 0} - \mathcal{L}_{\alpha_i = 1}$$With this definition, we can approximate the relevance of the units. To do so, let's use the derivative of the error with respect to $\alpha$:$$\frac{\partial{\mathcal{L}}}{\partial \alpha_i} \Bigr|_{\substack{\alpha_i = 1}}= \lim_{\gamma \rightarrow 1} \frac{\mathcal{L}_{\alpha_i = \gamma} - \mathcal{L}_{\alpha_i = 1}}{\gamma - 1}$$The approximation is made assuming it holds approximately for $\gamma = 0$:$$\frac{\partial{\mathcal{L}}}{\partial \alpha_i} \Bigr|_{\substack{\alpha_i = 1}}= \frac{\mathcal{L}_{\alpha_i = 0} - \mathcal{L}_{\alpha_i = 1}}{- 1} = - \rho_i$$Thus, we define $$\boxed{\hat{\rho}_i = - \frac{\partial{\mathcal{L}}}{\partial \alpha_i}} $$ with $\alpha_i$ supposed to be constant to $1$ and thus not being part of the trainable parameters of the systems. Empirically, the authors defines the estimator of the relevance with the weighted average:$$\boxed{\hat \rho_i (t+1) = 0.8 \hat \rho_i (t) + 0.2 \frac{\partial \mathcal{L} (t)}{\partial \alpha_i}}$$The relevancy parameters can then be learned in a similar way as backpropagation.The loss used to compute the relevancy parameters is the linear loss:$$\mathcal{L} = \sum |\hat{y} - y|$$ QuestionDoes you see why the quadratic loss is not a good choice ? Ready to see the answer? (click to expand) Because the derivative of the loss goes to zero as the total error decreases, which will make the relevance of all the units tend to zero as the error decreases. Then, it grossly underestimates the relevance of the outputs that are close to the target!. 
The algorithm For $t \in [0, \dots, T]$: Train the network until all output unit activities are within some specified margin around the target value Compute $\hat \rho_i$ for each unit $i$; Remove the unit with the smallest relevance In python, a possible implementation can be the following (taken from Sébastien Lousteau)
###Code
import torch
def forward(model, alpha, x):
    """Forward pass with per-unit attentional gates (Mozer & Smolensky).

    Applies the conv layer and its activation, flattens to one row of
    model.nb_filters * 8 * 8 hidden units per sample, scales every row by
    the gate vector `alpha` (one coefficient per hidden unit), then runs
    the fully-connected head.
    """
    hidden = model.activations[0](model.conv1(x))
    hidden = hidden.view(-1, model.nb_filters * 8 * 8)
    gated = torch.stack([torch.mul(sample, alpha) for sample in hidden])
    return model.fc1(gated)
def relevance(model,test_dataloader):
    """Estimate per-hidden-unit relevance (Mozer & Smolensky 1989 style).

    Relevance is approximated as minus the gradient of the loss w.r.t. a
    per-unit attentional gate alpha held at 1, averaged over up to 100000
    samples drawn from test_dataloader. Returns a 1-D tensor with one score
    per flattened hidden unit (model.nb_filters * 8 * 8 entries).
    """
    # alpha = 1 everywhere; we only need its gradient, it is never updated
    autograd_tensor = torch.ones((model.nb_filters * 8 * 8), requires_grad=True)
    loss_fn = torch.nn.CrossEntropyLoss()
    num_items_read = 0
    device = next(model.parameters()).device
    gg = []        # per-batch gradients of the loss w.r.t. alpha
    lengths = []   # NOTE(review): batch lengths are collected but never used
    for _, (X, y) in enumerate(test_dataloader):
        # cap the total number of samples at 100000
        if 100000 <= num_items_read:
            break
        X = X[:min(100000 - num_items_read, X.shape[0])]
        y = y[:min(100000 - num_items_read, X.shape[0])]
        num_items_read = min(100000, num_items_read + X.shape[0])
        X = X.to(device)
        y = y.to(device)
        pred = forward(model,autograd_tensor,X)
        loss = loss_fn(pred, y)
        gg.append(torch.autograd.grad(loss, autograd_tensor, retain_graph=True))
        lengths.append(X.shape[0])
    tensor_gg = torch.tensor([list(gg[k][0]) for k in range(len(gg))])
    # unweighted mean over batches — NOTE(review): the paper uses a running
    # exponential average (0.8/0.2); confirm the plain mean is intended here
    result = torch.mean(tensor_gg,0)
    return(-result)
def skeletonization(model,size,dataloader):
    """Return a copy of `model` keeping only the `size` most relevant hidden
    units; the fc1 columns of every other unit are zeroed out.

    NOTE(review): the hard-coded 4096 units and 10-dim zero column assume a
    specific ConvNet configuration — confirm they match
    model.nb_filters * 8 * 8 and the number of output classes.
    """
    relevance_ = relevance(model,dataloader)
    # indices of the `size` highest-relevance units (descending sort)
    keep_indices = np.argsort(-np.array(relevance_))[:size]
    # ConvNet is defined in another notebook cell
    skeletone = ConvNet(model.nb_filters,model.channels)
    skeletone.conv1.weight.data = copy.deepcopy(model.conv1.weight.data)
    skeletone.fc1.weight.data = copy.deepcopy(model.fc1.weight.data)
    # zero out the fc1 column of every pruned hidden unit
    for index in set(range(4096))-set(keep_indices):
        skeletone.fc1.weight.data[:,index] = torch.zeros(10)
    return(skeletone)
###Output
_____no_output_____
###Markdown
An example Let's consider the following example taken from the official paper of Mozer and Smolensky:
###Code
## Generate the problem data
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

N = 500


def _target(bits):
    """Label rule from Mozer & Smolensky: +1 iff (A and B) or all inputs are 0."""
    A, B, C, D = bits
    return 1 if (A and B) or not (A or B or C or D) else -1


# N random 4-bit samples labelled by the rule above
X = [[random.randint(0, 1) for _ in range(4)] for _ in range(N)]
y = [_target(bits) for bits in X]
X = np.array(X)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
###Output
_____no_output_____
###Markdown
Optimal Brain Damage Optimal Brain Damage (OBD) has been developed in 1990 by Le Cun et al and is also used to reduce the size of a learning network by deleting weights. It consists in a trade off between the training error and the network complexity.This technique uses the second derivative of the objective function to compute the "saliency" of the parameters. Here, saliency is not equal to the magnitude of the weights, and a theoretical framework is developed. The saliency of a parameter in this method is computed accordingly to the objective function which changes with the deletion of the parameters. The pruning is considered as a perturbation on the weight matrix. For that, we consider a perturbation of the parameter vector $\delta U$. This perturbation will affect the loss as:$$\delta \mathcal{L} = \sum \limits_i g_i \delta u_i + \frac{1}{2} \sum \limits_i h_{ii} \delta u_i^2 + \frac{1}{2} \sum \limits_{i \not j} h_{ij} \delta u_i \delta u_j + \mathcal(O)(||\delta U||^3)$$where $\delta u_i$ are the components of $\delta U$, $g_i$ the components of $\frac{\partial \mathcal{L}}{\partial U}$ and $h_i$ the components of the hessian matrix $h_{ij} = \frac{\partial^2 \mathcal{L}}{\partial u_i \partial u_j}$After some assumptions, we reduce the equation to:$$\delta \mathcal{L} = \frac{1}{2} \sum \limits_i h_{ii} \delta u_i^2$$The second derivatives are then calculated with a backpropagation procedure. 
The AlgorithmFor $t \in [0, \dots, T]$: Train the network until convergence Compute the second derivatives for each parameters $h_{kk}$ Compute the saliencies for each parameter $s_k = h_{kk} u_k^2/2$ Delete the lowest saliency parameters Implementation: https://github.com/shekkizh/TensorflowProjects/blob/master/Model_Pruning/OptimalBrainDamage.py Sparsify during training Example: A BACK-PROPAGATION ALGORITHM WITH OPTIMAL USE OF HIDDEN UNITS (Chauvin, 1988) Pruning at initialization Another class of pruning algorithms prune at initialization.The three main algorithms that will be presented here are:- SNIP- GRASP- FORCEThis kind of algorithms can be seen as a form of Neural Architecture Search. SNIP : SINGLE-SHOT NETWORK PRUNING BASED ON CONNECTION SENSITIVITY The idea Prune a neural network with the solution provided by Namhoon Lee et al. (2019) aimed the same purpose as the orginal paper from Mozer & Smolensky (1989). Indeed, the big advantages of a pruned network is a network simpler, more versatile and more interpretable. At the end, they achieve a good results with a pruned network with the same accuracy as existing baselines across all tested architectures. The maths behind From a dataset $\mathcal{D} = {(x_i,y_i)}^n_{i=1}$ and a desired sparsity level $\mathcal{K}$ (the number of non-zero weights), we write the network pruning as the constrained optimization problem :$$\min_wL(w;\mathcal{D}) = \min_w\frac{1}{n}\sum^n_{i=1}l(w;(x_i,y_i)),$$ $${s.t.~} w\in\mathbb{R}^m, ||w||_0 \leq \mathcal{K}.$$l(.) -> standard loss function (e.g. 
cross-entropy)w -> set of parameters of the NNm-> total number of parameters$||.||_0$ the standard $L_0$ norm As we shown earlier, we can optimize this problem by adding sparsity enforcing penalty terms, but those solutions involve hyperparameter settings heavily tuned, and turn out to be inferior to saliency based methods.The latter selectively remove redundant parameters with the magnitude of the weights (below certain threshold and redundant) and Hessian of the loss with respect to the weights (high value of Hessian means high importance for the parameter)$$ s_j =\left\{ \begin{array}{ll} |w_j|, \text{ for magnitude based},\\ \frac{w_j^2 H_{jj}}{2}, \text{ for Hessian based.} \end{array} \right. $$We have for connection j:$s_j$ the saliency score$w_j$ the weight$H_{jj}$ the value of the Hessian matrix where the Hessian $H = \frac{∂^2L}{∂w^2} \in \mathbb{R}^{m*m}$ The problem of optimization can be written as :$$ \min_{c,w} L(c ⊙ w; \mathcal{D}) = \min_{c,w} \frac{1}{n} \sum^n_{i=1} l(c ⊙ w; (x_i, y_i)), $$$$ {s.t.~} w\in \mathbb{R^m},~ c\in \{0,1\}^m,~ ||c||_0 \leq \mathcal{K} $$ The main idea is to separate the weight of the connection (w) from whether the connection is present or not (c). The value of $c_j$ indactes if the connection is active (=1) or pruned (=0). Therefore we can measure the effect of connection on the loss when $c_j=1$ and $c_j=0$ keeping everything else constant. We measure the effect then with :$$\Delta L_j(\text{w};\mathcal{D}) = L(1\odot \text{w};\mathcal{D}) - L((1-e_j)\odot \text{w}; \mathcal{D}),$$where $e_j$ indicates the element j (zeros everywhere except at the index j) and 1 the vector of dimension m (positive is improving, negative the opposite).Because c is binary, to compute each $\Delta L_j$ is expensive and requires $m+1$ forward passes over the dataset, so by relaxing the constraint, we can approximate $\Delta L_j$ by the derivate of $L$ with respect to $c_j$, denote $g_j(\text{w};\mathcal{D})$. 
We obtain the following effect :$$ \Delta L_j(\text{w}; \mathcal{D}) ≈ g_j(\text{w};\mathcal{D}) = \frac{∂L(c \odot \text{w}; \mathcal{D})}{∂c_j}|_{c=1} = \lim_{δ→0} \frac{L(c\odot \text{w}; \mathcal{D}) - L((c-\delta e_j) \odot \text{w}; \mathcal{D})}{δ} |_{c=1}$$This formulation can be viewed as perturbing the weight $w_j$ by a multilplicative factor $δ$ and measuring the change in loss. To this end, they take the magnitude of the derivatives $g_j$ as the saliency criterion, and define connection sensitivity as the normalized magnitude of the derivatives :$$s_j = \frac{|g_j(\text{w};\mathcal{D})|}{∑^m_{k=1}|g_j(\text{w};\mathcal{D})|}$$After the computing, only the top-K ocnnections are retained, where k denotes the desired number of non-zero weights :$$c_j = \mathbf{1}[s_j - s_k ≥ 0 ], ∀j∈\{1~...m\},$$where $s_k$ is the $k$-th largest element in the vector s and $1[.]$ is the indicator function (for precision, we can broke ties aribtrarily )The criteria depend on the loss value before pruning, require pre-training and iterative optimization cycles for a minimal loss in performance (+ magnitude and Hessian method very sensitive to the architectural choices) Algorithm From the loss function $L$, trianing dataset $D$, sparsity level $k$Ensure that $||w*||_0≤k$  The Algorithm (1) Sample a mini-bacth of training data (2) compute saliency criterion (connection sensitivity) (3) pruning: choose top-K connections (4) regular training Implementation in Python Sample mini-batches
###Code
def prune(args, model, sess, dataset):
    """Run the SNIP pruning step: score connections on one mini-batch.

    Args:
        args: namespace exposing at least ``batch_size``.
        model: constructed model exposing ``inputs`` (dict of placeholders),
            ``compress``, ``is_train``, ``pruned``, ``outputs`` and
            ``sparsity`` graph nodes.
        sess: active TensorFlow session.
        dataset: dataset object with ``get_next_batch(split, batch_size)``.
    """
    print('|========= START PRUNING =========|')
    # SNIP only needs a single mini-batch (by default 100 examples) to
    # compute the connection-sensitivity scores.
    batch = dataset.get_next_batch('train', args.batch_size)
    feed_dict = {}
    feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
    # compress=True triggers the mask computation inside the graph;
    # is_train=False keeps normalization layers in inference mode.
    feed_dict.update({model.compress: True, model.is_train: False, model.pruned: False})
    result = sess.run([model.outputs, model.sparsity], feed_dict)
    # Bug fix: the original line was missing its closing parenthesis.
    print('Pruning: {:.3f} global sparsity'.format(result[-1]))
###Output
_____no_output_____
###Markdown
Pruning
###Code
import tensorflow as tf
# Project-local modules (not shown in this notebook).
from model import Model
from dataset import Dataset
# `args` is assumed to be defined in an earlier cell — TODO confirm.
dataset = Dataset(**vars(args))
model = Model(num_classes=dataset.num_classes, **vars(args))
model.construct_model()
# TF1-style session with eager initialization of all variables.
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
# Prune
# NOTE(review): `prune.prune(...)` treats `prune` as a module, but in this
# notebook `prune` is defined above as a function — confirm whether the
# original repo's `prune` module is meant here (then it would be `prune(...)`).
prune.prune(args, model, sess, dataset)
###Output
_____no_output_____
###Markdown
Sort the top k-values
###Code
# Keep only the highest-scoring fraction of connections (by default the top 10%).
def create_sparse_mask(mask, target_sparsity):
    """Build a binary keep-mask marking the top (1 - target_sparsity) scores.

    ``mask`` may be a single score vector or a dict of per-layer score
    tensors; dicts are flattened, thresholded globally, then restored to
    their original per-layer structure.
    """
    def threshold_vec(scores, sparsity):
        # Number of connections that survive the pruning.
        total = scores.shape.as_list()[0]
        keep = int(round(total * (1. - sparsity)))
        _, keep_idx = tf.nn.top_k(scores, k=keep, sorted=True)
        # Scatter ones at the surviving indices, zeros everywhere else.
        return tf.sparse_to_dense(keep_idx, tf.shape(scores),
                                  tf.ones_like(keep_idx, dtype=tf.float32),
                                  validate_indices=False)
    if not isinstance(mask, dict):
        return threshold_vec(mask, target_sparsity)
    flat_scores, restore_fn = vectorize_dict(mask)
    return restore_fn(threshold_vec(flat_scores, target_sparsity))
###Output
_____no_output_____
###Markdown
Compute saliencies
###Code
# In the construction of the model
# Free variables (weights, mask_init, net, prn_keys, compute_loss,
# apply_mask, normalize_dict, mask_prev) come from the surrounding model
# construction code, not shown here.
def get_sparse_mask():
    # Forward pass through the masked network (weights frozen).
    w_mask = apply_mask(weights, mask_init)
    logits = net.forward_pass(w_mask, self.inputs['input'],
                              self.is_train, trainable=False)
    loss = tf.reduce_mean(compute_loss(self.inputs['label'], logits))
    # dL/dc for every prunable mask variable — the SNIP sensitivity signal.
    grads = tf.gradients(loss, [mask_init[k] for k in prn_keys])
    gradients = dict(zip(prn_keys, grads))
    # saliency score :
    # normalized |g| across all layers, then keep only the top fraction.
    cs = normalize_dict({k: tf.abs(v) for k, v in gradients.items()})
    return create_sparse_mask(cs, self.target_sparsity)
# Recompute the mask only when compress is requested; otherwise reuse it.
mask = tf.cond(self.compress, lambda: get_sparse_mask(), lambda: mask_prev)
###Output
_____no_output_____
###Markdown
Repo public tensorflow : https://github.com/namhoonlee/snip-publicRepo non officiel pytorch : https://github.com/mil-ad/snipArticle : https://arxiv.org/pdf/1810.02340.pdf Results  Hyper-parameters (click to expand)Datasets : MNIST, CIFAR-10 and Tiny-ImageNetThey use the $\bar{k} = \frac{(m-k)}{m}.100(\%)$ $m$ the total number of parameters and $k$ the desired number of non-zero weightsThe sensitivity scores are computed using a batch of 100 and 128 examples for MNIST and CIFARThey train the models using SGD with momentum of .9, batch size of 100 and 128 and the weight decay rate of .0005 unless stated otherwiseInitial learning rate is .1, decayed by .1 at every 25k or 30k iterations 90-10 training-validation LeNet-300-100 consists of three fc layers with 267k parametersLeNet-5-Caffe 2 fc with 431k param https://arxiv.org/pdf/2002.07376.pdfhttps://github.com/mil-ad/snipBuild on the same saliency criterion as in Mozer & Smolensky, but for training. GRASP (Gradient Signal Preservation) Another method Gradient Signal Preservation (GraSP) When H is approximated as the identity matrix, the above criterion recovers SNIP up to the absolute value(recall the SNIP criterion is |δ>g|)if S(δ) is negative, then removing the corresponding weightswill reduce the gradient flow, while if it is positive, it will increase the gradient flow.For a given pruning ratio p, we obtain the resulting pruning mask by computing the score of everyweight, and removing the top p fraction of the weights (see Algorithm 1). 
Hence, GraSP takes thegradient flow into account for pruninghttps://github.com/alecwangcq/GraSP $$\max_c G (\mathbf{\theta}, c) = \sum \limits_{i: c_i = 0} - \theta_i [\mathbf{H} \mathbf{g}]_i ~~ , c \in \{0,1\}^m, ~||c_0|| = k$$ with $\mathbf{H} = \nabla^2 (\mathcal{L}(\theta_0))$ the Hessian Matrix of the loss and $\mathbf{g} = \nabla (\mathcal{L} (\theta_0))$ the gradient Force In 2020, following the recent discoveries in the field of pruning, de Jorge et al published a paper describing a novel pruning algorithm: FORCE (foresight connection sensitivity).In this work, we discovered that existing methods mostly perform below random pruning atextreme sparsity regimeThis algorithm applies the pruning before the training. Hence, not only the inference times drop, as the training times are also reduced.It is based on the saliency criteria introduced by Mozer et Smolensky in (1989), skeletonization. The difference lies in the fact that the saliency that is optimized is the one lying after pruning, rather than before. Let's recall the definition of the optimization process:A sequence of iterates of parameters $\{\theta_i\}_{i=0}^{T}$ is produced during the training, with $\theta_0$ the initial set of parameters and $\theta_T$ the final one (leading to the minimal loss). In the classical pruning methods, one constraints the problem to reach a target sparsity level of $k<m$, to have $||\theta_T|| \leq k$.When the pruning is done at initialisation, the goal is to find an initialization $\theta_0$ such that $||\theta_0|| \leq k$, the sequence following a specific topology during training. We set $\mathbf{\bar{\theta}} = \mathbf{\theta} ⊙ \mathbf{c}$ where $\mathbf{c}$ is a binary mask (whether to remove the weight or not). 
We define the connection sensitivity $\mathbf{g}$ at $\mathbf{\bar{\theta}}$ for a given mask $\mathbf{\hat{c}}$ as:$$\mathbf{g} (\mathbf{\bar{\theta}}) = \frac{\partial \mathcal{L} (\bar{\theta})}{\partial \mathbf{c}} \Bigr|_{\substack{\mathbf{c} = \mathbf{\hat{c}}}} = \frac{\partial \mathcal{L} (\bar{\theta})}{\partial \mathbf{\bar{\theta}}} \Bigr|_{\substack{\mathbf{c} = \mathbf{\hat{c}}}} ⊙ \frac{\partial \bar{\theta}}{\partial \mathbf{c}} \Bigr|_{\substack{\mathbf{c} = \mathbf{\hat{c}}}}= \frac{\partial \mathcal{L} (\bar{\theta})}{\partial \mathbf{\bar{\theta}}} \Bigr|_{\substack{\mathbf{c} = \mathbf{\hat{c}}}} ⊙ \mathbf{\theta}$$ QuestionWhat happens when $\hat{c} = 1$ ? Ready to see the answer? (click to expand) We retrieve the SNIP formulation ! "It assumes that all the parameters are active in the network and they are removed one by one with replacement, therefore, it fails to capture the impact of removing a group of parameters" Differences with SNIP and GRaSP- The formulation of the connection sensitivity depends directly on the pruned weights $\mathbf{\bar{\theta}}$, when GRaSP and SNIP depend on the weights only.- In the case of extreme pruning ($||\mathbf{\hat{c}}||_0 << ||\mathbf{1}||_0$), we have $||\mathbf{\theta} ⊙ \mathbf{\hat{c}}||_2 << ||\mathbf{\theta}||_2$ giving highly different gradient values Objective functionFind the best sub-network$$ \max_c S(\mathbf{\theta}, \mathbf{c}) = \sum \limits_{i \in supp(\mathbf{c})} | \theta_i \nabla \mathcal{L} (\mathbf{\theta} ⊙ \mathbf{c})_i | ~~ , c \in \{0,1\}^m, ~||c_0|| = k$$ Finding the optimal solution requires to compute all the gradients of all the sub-networks. 
We have to approximate the solution --> Difference with SNIP Progressive PruningThe first solution is to use progressive pruning (also called iterative SNIP) with a homemade schedule $\{k_t\}_{t=1}^T,~k_T=k, ~k_t > k_{t+1}$ giving the pruning level:$$c_{t+1} = \argmax_c S(\mathbf{\bar{\theta}}, \textbf{c}) ~~,c \in \{0,1\}^m, ~||c_0|| = k_{t+1}, \mathbf{c} ⊙ \mathbf{c_t} = \mathbf{c} $$ with $\mathbf{\bar{\theta}} = \theta ⊙ \mathbf{c}_t$"The second constraint ensures that no parameter that had been pruned ealier is activated again". Indeed, let's consider this simple case:$\mathbf{c} = \begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix}$ and $\mathbf{c_t} = \begin{pmatrix} 0 & 0 \\ 0 & 1 \end{pmatrix}$. This case is not possible, as it would mean that three parameters would be reactivated.The gradient approximation is then made: $\frac{\partial \mathcal{L} (\bar{\theta})}{\partial \bar{\theta}} \Bigr|_{\substack{\mathbf{c}_t}} \simeq \frac{\partial \mathcal{L} (\bar{\theta})}{\partial \bar{\theta}} \Bigr|_{\substack{\mathbf{c}_{t+1}}}$, which lies if the pruning schedule is smooth, ie $||\mathbf{c}_t||_0 \simeq ||\mathbf{c}_{t+1}||_0$ $\DeclareMathOperator*{\argmax}{arg\,max}$ Progressive sparsification (FORCE):In this method, the constraint $\mathbf{c} ⊙ \mathbf{c}_t = \mathbf{c}$ is removed. It allows for parameters to resurrect, if they were removed at the beginning of the pruning. Thus, the weights are not removed but simply set to zero. They can have a non-zero gradient.The network is pruned afterward. Sparsity ScheduleWe need to choose a schedule to iteratively prune the parameters, ie to choose a sequence $(k_t)_{t=1,\dots,T}$ with $k_T = k$ and $\forall t, ~k_t > k_{t+1}$. Futhermore, to respect the gradient approximation for the iterative SNIP case, the schedule needs to be smooth. The authors uses a simple exponential decay schedule:$$\forall t,~k_t = \exp(\alpha \log k + (1- \alpha) \log m), \alpha = \frac{t}{T}$$
###Code
import numpy as np
import matplotlib.pyplot as plt

# Exponential-decay pruning schedule: interpolate log(m) -> log(k) over T steps.
T = 100
k = 10
m = 10000
tv = list(range(T))
kt = []
for t in tv:
    alpha = t / T
    kt.append(np.exp(alpha * np.log(k) + (1 - alpha) * np.log(m)))
plt.scatter(tv, kt)
plt.xlabel('Iteration')
plt.ylabel('k')
plt.title('Numbers of parameters of the network given the iteration')
###Output
_____no_output_____ |
Natural Language Processing in Tensorflow/Week 2-4 Imdb Using Pre tokenized dataset and Subwords/Week 2 - 4 Pre-Tokenized Datasets and Sub-words encoding.ipynb | ###Markdown
IMDB subwords dataset : https://github.com/tensorflow/datasets/blob/master/docs/catalog/imdb_reviews.md
###Code
# If the import fails, run this
# !pip install -q tensorflow-datasets
# Bug fix: `tf` is used below (tf.compat.v1...) but was never imported
# anywhere in this notebook.
import tensorflow as tf
import tensorflow_datasets as tfds

# Load the pre-tokenized subwords8k variant of the IMDB reviews dataset.
imdb, info = tfds.load("imdb_reviews/subwords8k", with_info=True, as_supervised=True)
info
imdb
train_data, test_data = imdb['train'], imdb['test']
# The dataset ships with its own subword encoder.
tokenizer = info.features['text'].encoder
print(tokenizer.subwords)
info.features

# Round-trip demo: encode a sentence to subword ids, then decode it back.
sample_string = 'TensorFlow, from basics to mastery'
tokenized_string = tokenizer.encode(sample_string)
print ('Tokenized string is {}'.format(tokenized_string))
original_string = tokenizer.decode(tokenized_string)
print ('The original string: {}'.format(original_string))
for ts in tokenized_string:
    print ('{} ----> {}'.format(ts, tokenizer.decode([ts])))

#Code to avoid some error
BUFFER_SIZE = 10000
BATCH_SIZE = 64
# Shuffle, then pad sequences to a common length within each batch.
train_dataset = train_data.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.padded_batch(BATCH_SIZE, tf.compat.v1.data.get_output_shapes(train_dataset))
test_dataset = test_data.padded_batch(BATCH_SIZE, tf.compat.v1.data.get_output_shapes(test_data))
# Dimension of the learned subword embedding vectors.
embedding_dim = 64
# Simple classifier: embed subwords, average over the sequence, two dense layers.
# NOTE(review): relies on `tf` being imported earlier in the session — this
# notebook's first cell only imports tensorflow_datasets; confirm
# `import tensorflow as tf` has run.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(tokenizer.vocab_size, embedding_dim),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
num_epochs = 10
# Binary sentiment target -> binary cross-entropy loss.
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
history = model.fit(train_dataset, epochs=num_epochs, validation_data=test_dataset)
import matplotlib.pyplot as plt

def plot_graphs(history, string):
    """Plot a training metric and its validation counterpart over epochs."""
    metric_key = string
    val_key = 'val_' + string
    for key in (metric_key, val_key):
        plt.plot(history.history[key])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([metric_key, val_key])
    plt.show()

plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
# Grab the learned embedding matrix from the first (Embedding) layer.
embedding_layer = model.layers[0]
weights = embedding_layer.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)

import io
# Export vectors and their subwords as TSV files for the embedding projector.
out_v = io.open('imdb_vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('imdb_meta.tsv', 'w', encoding='utf-8')
# Index 0 is reserved for padding, so start at 1.
for word_num in range(1, tokenizer.vocab_size):
    out_m.write(tokenizer.decode([word_num]) + "\n")
    row = weights[word_num]
    out_v.write('\t'.join(str(x) for x in row) + "\n")
out_v.close()
out_m.close()

# Trigger browser downloads only when running inside Google Colab.
try:
    from google.colab import files
except ImportError:
    pass
else:
    files.download('imdb_vecs.tsv')
    files.download('imdb_meta.tsv')
###Output
_____no_output_____
###Markdown
- The key is in the fact that we're using sub-words and not full words: sub-word meanings are often nonsensical, and it's only when we put them together in sequences that they have meaningful semantics. - Thus, some way of learning from sequences would be a great way forward, and that's exactly what you're going to do next week with recurrent neural networks (RNNs). - Now, the reason why this is happening is of course just because we're working on subwords: we're training on things that it's very hard to pull semantics and meaning out of, and the results that we're getting are little better than 50 percent. But if you think about it, in a binary classifier a random guess would be 50 percent. - So this leads us to a situation where we've taken a little bit of a step back, but that's okay. Sometimes you take one step back to take two steps forward, and that's what we'll be learning with RNNs next week.
###Code
###Output
_____no_output_____ |
notebooks/running-model-from-python.ipynb | ###Markdown
A Naive Bayes classifier IntroductionIn this notebook, we will train parameters of a Naive Bayes classifier using *online* learning. The class conditional density is a product of one dimensional densities: $p(\mathbb{x}|y=c,\mathbb{\theta}) = \prod_{d=1}^{D} p(x_{d}|y=c,\mathbb{\theta_{d,c}})$, where $D$ is the number of features. We assume that the features $\mathbb{x}$ are independent. In our example, we will use real-valued features and use Gaussian distributions, $p(\mathbb{x}|y=c,\mathbb{\theta}) = \prod_{j=1}^{D}\mathcal{N}(x_{d}|\mu_{d,c},\sigma_{d,c}^{2})$, where $\mu_{d,c}$ is the mean of feature $d$ in components of class $c$, and $\sigma_{d,c}^{2}$ is its variance. The idea is to create dependence between instances of $\mu$ and $\sigma^2$ for each training step $n$.Please see resources below for more information:- Bishop, Christopher M. Pattern recognition and machine learning. springer, 2006.- Murphy, Kevin P. Machine learning: a probabilistic perspective. MIT press, 2012. Setup
###Code
import os
import pandas as pd
import numpy as np
from pathlib import Path
import matplotlib.animation
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from matplotlib.animation import FuncAnimation
###Output
_____no_output_____
###Markdown
Config
###Code
# Locations of the Infer.NET model project and the data exchange directory.
model_dir = Path('../models/model/')
data_dir = Path('../data/')
###Output
_____no_output_____
###Markdown
Generate data
###Code
def sample_component(component, means, covars):
    """Draw one sample from the Gaussian mixture component ``component``.

    Generalized from the original hard-coded 0/1 branches: any valid index
    into ``means``/``covars`` now works, and an invalid index raises
    IndexError instead of silently returning None.

    Args:
        component: index of the mixture component to sample from.
        means: sequence of mean vectors, one per component.
        covars: sequence of covariance matrices, one per component.

    Returns:
        Column vector of shape ``(d, 1)`` with a single sample.
    """
    return np.random.multivariate_normal(means[component], covars[component], 1).T
# Build N points arranged as num_dist Gaussian-mixture "sites" spaced evenly
# on a circle of radius sqrt(200); each site contributes two overlapping
# classes offset by (+2, +2).
N = 1000
num_dist = 10
num_samples = int(N / num_dist)
theta = np.linspace(0, 2*np.pi, num_dist)
r = np.sqrt(200)
x0 = r * np.cos(theta)
x1 = r * np.sin(theta)
df_temp = []
for i in range(num_dist):
    # specify class distributions
    class0_weight = 0.5
    class1_weight = 0.5
    class0_means = [x0[i], x1[i]]
    class0_covar = [[1, 0],
                    [0, 1]]
    # Class 1 sits diagonally offset from class 0, same unit covariance.
    class1_means = [x0[i]+2, x1[i]+2]
    class1_covar = [[1, 0],
                    [0, 1]]
    means = [class0_means, class1_means]
    covars = [class0_covar, class1_covar]
    # Randomly assign each sample to class 0 or 1 with equal probability.
    mask = np.random.choice([0, 1], num_samples, p=[class0_weight, class1_weight])
    # NOTE: the comprehension variable `i` shadows the loop index, but only
    # inside the comprehension's own scope, so the outer `i` is unaffected.
    data = [sample_component(i, means, covars) for i in mask]
    data = np.array(data).reshape(num_samples, 2)
    df_data = pd.DataFrame(data, columns=['x0', 'x1'])
    df_data['class'] = mask
    df_temp.append(df_data)
# Stack all sites into one frame with a fresh 0..N-1 index.
df_data = pd.concat(df_temp).reset_index().drop(columns=['index'], axis=1)
# store dataset
# Pipe-separated, no header/index — the C# side expects this exact layout.
df_data.to_csv(data_dir/'data.csv', sep='|', header=False, index=False)
# peak of our data set
# plt.scatter(df_data['x0'],
#             df_data['x1'],
#             c=df_data['class'])
# plt.title("Data")
# plt.xlabel(r"$x_0$")
# plt.ylabel(r"$x_1$")
# plt.grid()
# plt.show()
###Output
_____no_output_____
###Markdown
Running model
###Code
# run c# Infer.NET code
# Build the CLI invocation; `!{cmd}` below is IPython magic that runs the
# string in a shell (not valid plain Python).
cmd = f'dotnet run --project {model_dir} {data_dir}/ data.csv'
cmd
!{cmd}
###Output
[?1h=[?1h=[?1h=[?1h=[?1h=[?1h=[?1h=
###Markdown
Results
###Code
# load results from file
df_result = pd.read_csv(data_dir/'results.csv', sep='|')
# Posterior means for each (feature, class) pair written by the C# model.
plt.plot(df_result['meanPost0'], label='m0c0')
plt.plot(df_result['meanPost1'], label='m1c0')
plt.plot(df_result['meanPost2'], label='m0c1')
plt.plot(df_result['meanPost3'], label='m1c1')
# Overlay the true piecewise-constant means; N, num_samples, x0, x1 come
# from the data-generation cell above.
plt.step(np.arange(0, N, num_samples), x0, where='post', label='true mean0')
plt.step(np.arange(0, N, num_samples), x1, where='post', label='true mean1')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Running inferDotNet model from Python Introduction Setup Imports
###Code
import pandas as pd
import matplotlib.pyplot as plt
import os
import seaborn as sns
from time import time
from pathlib import Path
###Output
_____no_output_____
###Markdown
Config
###Code
# Locations of the Infer.NET model project and the data exchange directory.
model_dir = Path('../models/model/')
data_dir = Path('../data/')
###Output
_____no_output_____
###Markdown
Running Model
###Code
# Single run of the Infer.NET model ("second" observation mode, EP inference);
# `!{cmd}` is IPython shell magic.
cmd = f'dotnet run --project {model_dir} {data_dir} second EP'
cmd
!{cmd}
# The C# program writes its posterior summaries to results.csv.
df_result = pd.read_csv(data_dir/'results.csv', sep=';')
df_result
###Output
_____no_output_____
###Markdown
Results
###Code
# Run every (inference method, observation mode) combination and collect the
# posterior summaries plus wall-clock runtime into one frame.
df_results = pd.DataFrame()
for inference_method in ['EP', 'VMP', 'Gibbs']:
    for observe in ['first', 'second', 'both']:
        # 'both' is only supported under EP.
        if (inference_method in ['VMP', 'Gibbs']) and (observe == 'both'):
            continue
        cmd = f'dotnet run --project {model_dir} {data_dir} {observe} {inference_method}'
        print(cmd)
        start = time()
        stream = os.popen(cmd)
        # Bug fix: os.popen returns immediately; the subprocess only runs to
        # completion while its output is consumed, so the timing must span
        # the read() — the original measured elapsed before reading and thus
        # excluded essentially all of the actual runtime.
        output = stream.read()
        elapsed = time() - start
        print(output)
        df_result = pd.read_csv(data_dir/'results.csv', sep=';')
        df_result['observed'] = observe
        df_result['inference'] = inference_method
        df_result['time'] = elapsed
        df_results = pd.concat([df_results, df_result])
df_results.head(10)
# Ensure the plot output directory exists before saving figures into it.
output_dir = Path('./output')
if not output_dir.exists():
    os.mkdir(output_dir)
# One faceted figure per inference method: probability columns x observation
# modes, each panel scattering the posterior mean per variable.
for inference_method in ['EP', 'VMP', 'Gibbs']:
    g = sns.FacetGrid(data=df_results[df_results.inference == inference_method],
                      height=2, aspect=2,
                      margin_titles=True, despine=False,
                      col='probability',
                      row='observed')
    g.map_dataframe(sns.scatterplot,
                    x='variable',
                    hue='variable',
                    s=300,
                    y='mean')
    g.fig.subplots_adjust(wspace=0.3, hspace=0.1)
    # Posterior means are probabilities, so fix the y-range just beyond [0, 1].
    g.set(ylim=(-0.2, 1.2))
    g.add_legend()
    g.fig.savefig(output_dir/f'{inference_method}.png')
###Output
_____no_output_____ |
_jupyter/.ipynb_checkpoints/2020-04-22-Multi-Armed Bandits-checkpoint.ipynb | ###Markdown
Multi-Armed Bandits
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____ |
collegepredict.ipynb | ###Markdown
###Code
# scikit-learn tooling for scaling, model selection, SVMs and metrics.
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, ShuffleSplit, GridSearchCV
from sklearn import svm
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
# 1.Load the data from “college.csv” that has attributes collected about private and public colleges
# for a particular year. We will try to predict the private/public status of the college from other attributes.
# Colab-only upload widget; expects the user to select 'College.csv'.
from google.colab import files
uploaded = files.upload()
import io
df2 = pd.read_csv(io.BytesIO(uploaded['College.csv']))
# Dataset is now stored in a Pandas Dataframe
df2.head()
# Encode the 'Private' Yes/No target into integer labels.
labelencoder = LabelEncoder()
df2["Private"] = labelencoder.fit_transform(df2["Private"])
df2.head()
# 2.Use LabelEncoder to encode the target variable in to numerical form and split the data such that 20% of the data is set aside fortesting.
# NOTE(review): the task above asks for a 20% test split but test_size is
# 0.30 here (and in the scaled rerun below) — confirm which is intended;
# both cells must use the same split for the accuracy comparison to hold.
X = df2.iloc[:, 1:]
Y = df2["Private"]
train_x, test_x, train_y, test_y = train_test_split(
    X, Y, test_size=0.30, random_state=10)
# 3.Fit a linear svm from scikit learn and observe the accuracy.[Hint:Use Linear SVC
model_svm = svm.LinearSVC()
model_svm.fit(train_x, train_y)
predicted_values = model_svm.predict(test_x)
print("\nAccuracy Score\n")
print(metrics.accuracy_score(predicted_values, test_y))
# 4.Preprocess the data using StandardScalar and fit the same model again and observe the change in accuracy.
# [Hint: Refer to scikitlearn’s preprocessing methods]
# http://benalexkeen.com/feature-scaling-with-scikit-learn/
# Standardize features to zero mean / unit variance before refitting the SVM.
scaled_values = StandardScaler().fit_transform(X)
# Bug fix: the original rebuilt the DataFrame from the *unscaled* X
# (pd.DataFrame(X, ...)), silently discarding the scaling; wrap the
# transformed array instead.
scaler_df = pd.DataFrame(scaled_values, columns=X.columns)
X = scaler_df
Y = df2["Private"]
# Same split parameters as above so the accuracies are comparable.
train_x, test_x, train_y, test_y = train_test_split(
    X, Y, test_size=0.30, random_state=10)
model_svm = svm.LinearSVC()
model_svm.fit(train_x, train_y)
predicted_values = model_svm.predict(test_x)
metrics.accuracy_score(predicted_values, test_y)
#5.Use scikit learn's gridsearch to select the best hyperparameter for a non-linear SVM,identify the model with
# best score and its parameters.
# [Hint: Refer to model_selection module of Scikit learn]
# https://chrisalbon.com/machine_learning/model_evaluation/cross_validation_parameter_tuning_grid_search/
# Candidate grids: one dict per kernel family, searched independently.
parameter_candidates = [
    {'C': [1, 10, 100, 1000], 'kernel': ['poly']},
    {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
    {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
# Create a classifier object with the classifier and parameter candidates
# ShuffleSplit CV with default settings; max_iter caps each SVC fit.
cv = ShuffleSplit()
clf = GridSearchCV(estimator=svm.SVC(max_iter=1000),
                   param_grid=parameter_candidates, n_jobs=-1, cv=cv)
# Train the classifier on data1's feature and target data
clf.fit(train_x, train_y)
# View the accuracy score
print('Best score for data1:', clf.best_score_)
# View the best parameters for the model found using grid search
print('Best C:', clf.best_estimator_.C)
print('Best Kernel:', clf.best_estimator_.kernel)
print('Best Gamma:', clf.best_estimator_.gamma)
###Output
Best score for data1: 0.9254545454545454
Best C: 1
Best Kernel: poly
Best Gamma: scale
|
data_infra/notebooks/notebook_data_infra_functions.ipynb | ###Markdown
Notebook: Testing Our data_infra Package data_infra Package: PostgreSQL DB; AWS SQS, S3, etc.
###Code
# NOTE: First make sure you are in the TeamReel DS parent directory
# (not the 'notebooks_modeling' directory):
# `ls` is IPython automagic shell shorthand (lists the cwd), not Python.
ls
###Output
LICENSE [34mdummy-test-api[m[m/
README.md notebook_data_infra_functions.ipynb
[34mdata_infra[m[m/ [34mnotebooks_modeling[m[m/
###Markdown
------------------------ get_next_video(): Database Info & Raw Video File Gets the next newly uploaded video from our queue, downloads it to the project folder, queries our database for info about that video, prompt and user, and returns that info in a Python dictionary.
###Code
# Import:
# (Note: Make sure you are in the TeamReel DS parent directory, not 'notebooks_modeling'.)
from data_infra import get_next_video
# Show the function's docstring for reference.
help(get_next_video)
# Per the markdown above: pops the next uploaded video off the queue,
# downloads the raw file locally, and returns DB info as a dict.
video_info = get_next_video()
video_info
# And now the raw video file is in our project folder too:
ls
###Output
ALPACAVID-wHgVXLxaK.webm [34mdummy-test-api[m[m/
LICENSE notebook_data_infra_functions.ipynb
README.md [34mnotebooks_modeling[m[m/
[34mdata_infra[m[m/
###Markdown
------------------ get_video_info(video_s3_key=______): Database Info Only Gets the video, prompt and user info for the specified video (using the input video_s3_key string) from our DB, and returns it in a Python dictionary. Does NOT download the video file.
###Code
# Import:
# (Note: Make sure you are in the TeamReel DS parent directory, not 'notebooks_modeling'.)
from data_infra import get_video_info
help(get_video_info)
# DB metadata only for the given S3 key — no file download (see markdown above).
video_info = get_video_info(video_s3_key='videos/ALPACAVID-i7swK-Wzc.webm')
video_info
###Output
_____no_output_____
###Markdown
--------------------------- get_feedback_for_video(video_id=_____): All feedback on that video
###Code
# Import:
# (Note: Make sure you are in the TeamReel DS parent directory, not 'notebooks_modeling'.)
from data_infra import get_feedback_for_video
help(get_feedback_for_video)
# All feedback rows recorded for one video id.
get_feedback_for_video(video_id=134)
###Output
_____no_output_____
###Markdown
---------------------------- get_feedback_for_user(user_id=int): All feedback on all of that user's videos
###Code
# Import:
# (Note: Make sure you are in the TeamReel DS parent directory, not 'notebooks_modeling'.)
from data_infra import get_feedback_for_user
help(get_feedback_for_user)
# All feedback across every video belonging to one user id.
get_feedback_for_user(user_id=185)
###Output
_____no_output_____ |
Recomendation_system/content_based_filtering/contecnt_based_filtering.ipynb | ###Markdown
CONTENT-BASED FILTERINGRecommendation systems are a collection of algorithms used to recommend items to users based on information taken from the user. __These systems have become ubiquitous, and can be commonly seen in online stores, movies databases and job finders.__ In this notebook, we will explore Content-based recommendation systems and implement a simple version of one using Python and the Pandas library. Acquiring the DataDownload Data From __[HERE](https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/moviedataset.zip)__
###Code
# IPython shell magics (`!`): download the MovieLens archive, then extract it
# flat (-j) into the working directory, overwriting (-o) existing files.
!wget -O moviedataset.zip https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/moviedataset.zip
print('unziping ...')
!unzip -o -j moviedataset.zip
###Output
--2020-02-28 22:54:30-- https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/moviedataset.zip
Resolving s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)... 67.228.254.196
Connecting to s3-api.us-geo.objectstorage.softlayer.net (s3-api.us-geo.objectstorage.softlayer.net)|67.228.254.196|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 160301210 (153M) [application/zip]
Saving to: ‘moviedataset.zip’
moviedataset.zip 100%[===================>] 152.88M 772KB/s in 3m 52s
2020-02-28 22:58:24 (676 KB/s) - ‘moviedataset.zip’ saved [160301210/160301210]
unziping ...
Archive: moviedataset.zip
inflating: links.csv
inflating: movies.csv
inflating: ratings.csv
###Markdown
PreprocessingFirst, let's get all of the imports out of the way:
###Code
#Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load Ratings Data
###Code
# Load the MovieLens ratings (userId, movieId, rating, timestamp).
ratings_df = pd.read_csv('ratings.csv')
ratings_df.head()
###Output
_____no_output_____
###Markdown
Load Movie Data
###Code
#Storing the movie information into a pandas dataframe
# Columns: movieId, title (with year suffix), pipe-separated genres.
movies_df = pd.read_csv('movies.csv')
movies_df.head()
###Output
_____no_output_____
###Markdown
Let's also remove the year from the __title__ column by using pandas' replace function and store in a new __year__ column.
###Code
#Using regular expressions to find a year stored between parentheses
#We specify the parentheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract(r'(\(\d\d\d\d\))', expand=False)
#Removing the parentheses
movies_df['year'] = movies_df.year.str.extract(r'(\d\d\d\d)', expand=False)
#Removing the years from the 'title' column
# Bug fix: raw strings for the regex patterns, and regex=True on str.replace —
# pandas >= 2.0 defaults to literal replacement, which would leave the year
# in every title.
movies_df['title'] = movies_df.title.str.replace(r'(\(\d\d\d\d\))', '', regex=True)
#Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
movies_df.head()
###Output
_____no_output_____
###Markdown
With that, let's also split the values in the Genres column into a list of Genres to simplify future use. This can be achieved by applying __Python's split__ string function on the correct column.
###Code
# Genres come as a single pipe-delimited string; turn each into a list.
movies_df['genres'] = movies_df['genres'].str.split('|')
movies_df.head()
###Output
_____no_output_____
###Markdown
Since keeping genres in a list format isn't optimal for the content-based recommendation system technique, we will use the One Hot Encoding technique to convert the list of genres to a vector where each column corresponds to one possible value of the feature. This encoding is needed for feeding categorical data. In this case, we store every different genre in columns that contain either 1 or 0. 1 shows that a movie has that genre and 0 shows that it doesn't. Let's also store this dataframe in another variable since genres won't be important for our first recommendation system.
###Code
# One-hot encode the genre lists: start from a copy of the movie table and
# add one indicator column per genre encountered (first-seen order).
moviesWithGenres_df = movies_df.copy()
# Walk each movie's genre list and set a 1 in the matching column.
for index, genre_list in movies_df['genres'].items():
    for genre in genre_list:
        moviesWithGenres_df.at[index, genre] = 1
# Movies lacking a given genre column end up NaN; make those explicit zeros.
moviesWithGenres_df = moviesWithGenres_df.fillna(0)
moviesWithGenres_df.head()
###Output
_____no_output_____
###Markdown
Next, let's look at the ratings dataframe.
###Code
# Quick look at the ratings table.
ratings_df.head()
###Output
_____no_output_____
###Markdown
Every row in the ratings dataframe has a user id associated with at least one movie, a rating and a timestamp showing when they reviewed it. We won't be needing the timestamp column, so let's drop it to save on memory.
###Code
#Drop removes a specified row or column from a dataframe
# Bug fix: positional axis arguments to DataFrame.drop were removed in
# pandas 2.0; use the explicit columns= keyword instead of drop('timestamp', 1).
ratings_df = ratings_df.drop(columns='timestamp')
ratings_df.head()
###Output
_____no_output_____
###Markdown
Content-Based recommendation system¶Now, let's take a look at how to __implement Content-Based or Item-Item recommendation systems.__ This technique attempts to figure out what a user's favourite aspects of an item is, and then recommends items that present those aspects. In our case, we're going to try to figure out the input's favorite genres from the movies and ratings given.Let's begin by creating an input user to recommend movies to:Notice: To add more movies, simply increase the amount of elements in the __userInput.__ Feel free to add more in! Just be sure to write it in with capital letters and if a movie starts with a "The", like "The Matrix" then write it in like this: 'Matrix, The' .
###Code
# Ratings supplied by the user we want recommendations for.
_titles = ['Breakfast Club, The', 'Toy Story', 'Jumanji', 'Pulp Fiction', 'Akira']
_ratings = [5, 3.5, 2, 5, 4.5]
userInput = [{'title': t, 'rating': r} for t, r in zip(_titles, _ratings)]
inputMovies = pd.DataFrame(userInput)
inputMovies
###Output
_____no_output_____
###Markdown
Add movieId to input userWith the input complete, let's extract the input movie's ID's from the movies dataframe and add them into it.We can achieve this by first filtering out the rows that contain the input movie's title and then merging this subset with the input dataframe. We also drop unnecessary columns for the input to save memory space.
###Code
#Filtering out the movies by title
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
inputId.head()
# Then merging it so we can get the movieId. It's implicitly merging it by title.
inputMovies = pd.merge(inputId, inputMovies)
inputMovies.head()
# Dropping information we won't use from the input dataframe.
# Bug fix: positional axis arguments to DataFrame.drop were removed in
# pandas 2.0; use the columns= keyword (and one call instead of two).
inputMovies = inputMovies.drop(columns=['genres', 'year'])
inputMovies
#Final input dataframe
#If a movie you added in above isn't here, then it might not be in the original
#dataframe or it might spelled differently, please check capitalisation.
inputMovies
###Output
_____no_output_____
###Markdown
We're going to start by learning the input's preferences, so let's get the subset of movies that the input has watched from the Dataframe containing genres defined with binary values.
###Code
# Restrict the genre-encoded movie table to the titles the user rated.
_rated_ids = inputMovies['movieId'].tolist()
userMovies = moviesWithGenres_df[moviesWithGenres_df['movieId'].isin(_rated_ids)]
userMovies
###Output
_____no_output_____
###Markdown
We'll only need the actual genre table, so let's clean this up a bit by resetting the index and dropping the movieId, title, genres and year columns.
###Code
#Resetting the index to avoid future issues
userMovies = userMovies.reset_index(drop=True)
# Keep only the genre indicator columns.
# Bug fix: positional axis arguments to DataFrame.drop were removed in
# pandas 2.0; use the columns= keyword (one call instead of four).
userGenreTable = userMovies.drop(columns=['movieId', 'title', 'genres', 'year'])
userGenreTable
###Output
_____no_output_____
###Markdown
Now we're ready to start learning the input's preferences!To do this, we're going to turn each genre into weights. We can do this by using the input's reviews and multiplying them into the input's genre table and then summing up the resulting table by column. This operation is actually a dot product between a matrix and a vector, so we can simply accomplish by calling Pandas's "dot" function.
###Code
# Peek at the user's rating vector (row-aligned with userGenreTable).
inputMovies['rating']
# Dot product to get the genre weights: (genres x movies) . (movies,) -> (genres,)
userProfile = userGenreTable.transpose().dot(inputMovies['rating'])
# The user profile: one weight per genre; higher means stronger preference.
userProfile
###Output
_____no_output_____
###Markdown
Now we have the weights for each of the user's preferences. This is known as the User Profile. Using this, we can recommend movies that satisfy the user's preferences. Let's start by extracting the genre table from the original dataframe:
###Code
# Now let's get the genres of every movie in our original dataframe,
# indexed by movieId so recommendation scores can be mapped back to movies.
genreTable = moviesWithGenres_df.set_index(moviesWithGenres_df['movieId'])
# And drop the unnecessary information.
# FIX: drop(label, 1) used the positional `axis` argument, removed in
# pandas 2.0 -- drop all four columns in one `columns=` call instead.
genreTable = genreTable.drop(columns=['movieId', 'title', 'genres', 'year'])
genreTable.head()
genreTable.shape
###Output
_____no_output_____
###Markdown
With the input's profile and the complete list of movies and their genres in hand, we're going to take the weighted average of every movie based on the input profile and recommend the top twenty movies that most satisfy it.
###Code
# Score every movie: weighted sum of its genre flags against the user profile,
# normalised by the total profile weight (a weighted average per movie).
weighted_scores = genreTable.dot(userProfile)
recommendationTable_df = weighted_scores / userProfile.sum()
recommendationTable_df.head()
# Rank the candidates from best match to worst.
recommendationTable_df = recommendationTable_df.sort_values(ascending=False)
# Just a peek at the top values.
recommendationTable_df.head()
###Output
_____no_output_____
###Markdown
Now here's the recommendation table!
###Code
# The final recommendation table: look up the full movie records for the
# top-20 highest-scoring movieIds (recommendationTable_df is indexed by movieId).
movies_df.loc[movies_df['movieId'].isin(recommendationTable_df.head(20).keys())]
###Output
_____no_output_____ |
captcha.ipynb | ###Markdown
CREATING IMAGE CAPTCHA 1. IMPORT CAPTCHA.IMAGE 2. GENERATE A RANDOM TEXT FOR THE CAPTCHA 3. GIVE A RANDOM FILE NAME TO WRITE THE CAPTCHA
###Code
# FIX: ImageCaptcha was used without being imported anywhere in this cell
# (the audio cell below imports its own class), so running this cell on its
# own raised a NameError.
from captcha.image import ImageCaptcha

# Render the text 'kavyashree' as a distorted 240x86 CAPTCHA image.
image = ImageCaptcha(width=240, height=86)
data = image.generate('kavyashree')  # in-memory PNG stream of the captcha
image.write('kavyashree', 'myimagecaptcha.png')  # also save it to disk
###Output
_____no_output_____
###Markdown
CREATING AUDIO CAPTCHA 1. IMPORT CAPTCHA.AUDIO 2.GENERATE AUDIO 3.GIVE A RANDOM FILE NAME TO WRITE THE AUDIO
###Code
# Build an audio CAPTCHA that speaks the digits '205' with added noise.
from captcha.audio import AudioCaptcha
audio=AudioCaptcha()
data1=audio.generate('205')  # in-memory WAV data for the spoken digits
audio.write('205','myaudiocaptcha.wav')  # save the same audio to disk
###Output
_____no_output_____ |
coco/upload_coco.ipynb | ###Markdown
Install Hub, Coco API
###Code
!pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
!pip install hub
###Output
_____no_output_____
###Markdown
Download and Unzip COCO Data
###Code
# ONLY RUN ONCE
!mkdir ./Datasets/coco
!mkdir ./Datasets/coco/annotations
!wget -P ./Datasets/coco http://images.cocodataset.org/zips/train2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/zips/val2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/zips/test2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/zips/unlabeled2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/annotations/annotations_trainval2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/annotations/image_info_test2017.zip
!wget -P ./Datasets/coco http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip
!unzip -q ./Datasets/coco/train2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/val2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/test2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/unlabeled2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/annotations_trainval2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/stuff_annotations_trainval2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/image_info_test2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/image_info_unlabeled2017.zip -d ./Datasets/coco
!unzip -q ./Datasets/coco/annotations/stuff_val2017_pixelmaps.zip -d ./Datasets/coco/annotations
!unzip -q ./Datasets/coco/annotations/stuff_train2017_pixelmaps.zip -d ./Datasets/coco/annotations
!rm -r ./Datasets/coco/train2017.zip
!rm -r ./Datasets/coco/val2017.zip
!rm -r ./Datasets/coco/test2017.zip
!rm -r ./Datasets/coco/unlabeled2017.zip
!rm -r ./Datasets/coco/stuff_annotations_trainval2017.zip
!rm -r ./Datasets/coco/image_info_unlabeled2017.zip
!rm -r ./Datasets/coco/image_info_test2017.zip
!rm -r ./Datasets/coco/annotations_trainval2017.zip
###Output
_____no_output_____
###Markdown
Import Dataset To Hub
###Code
%env BUGGER_OFF=true
!activeloop reporting --off
import hub
import numpy as np
import os
from pycocotools.coco import COCO
from PIL import Image
import time
from tqdm import tqdm
###Output
_____no_output_____
###Markdown
User Inputs
###Code
data_dir='./Datasets/coco'
data_type='val' # Valid choices are 'train' and 'val'. Testing is a special case at the end of the notebook
hub_path = './Datasets/coco_local_{}'.format(data_type) # 'hub://my_worksace/coco_{}'.format(data_type)
limit = 1e10 # Limit the number of images
###Output
_____no_output_____
###Markdown
Load Annotations
###Code
ann_file='{}/annotations/instances_{}2017.json'.format(data_dir,data_type)
ann_file_kp = '{}/annotations/person_keypoints_{}2017.json'.format(data_dir,data_type)
ann_file_stuff = '{}/annotations/stuff_{}2017.json'.format(data_dir,data_type)
img_root='{}/{}2017/'.format(data_dir,data_type)
coco = COCO(ann_file)
coco_kp=COCO(ann_file_kp)
coco_stuff=COCO(ann_file_stuff)
category_info = coco.loadCats(coco.getCatIds())
category_info_kp = coco_kp.loadCats(coco_kp.getCatIds())
category_info_stuff = coco_stuff.loadCats(coco_stuff.getCatIds())
###Output
_____no_output_____
###Markdown
Create hub dataset
###Code
# Login to activeloop if using Activeloop Storage (hub://.....)
!activeloop login -u 'username' -p 'password'
#Specify dataset path
ds = hub.empty(hub_path, overwrite = True) # Set overwrite = True if you need to start over
###Output
_____no_output_____
###Markdown
Create lists for all the class_names
###Code
cat_names = [category['name'] for category in category_info]
super_cat_names = list(set([category['supercategory'] for category in category_info]))
cat_names_kp = [category['name'] for category in category_info_kp]
super_cat_names_kp = list(set([category['supercategory'] for category in category_info_kp]))
cat_names_stuff = [category['name'] for category in category_info_stuff]
super_cat_names_stuff = list(set([category['supercategory'] for category in category_info_stuff]))
###Output
_____no_output_____
###Markdown
Upload data to Hub dataset
###Code
img_ids = sorted(coco.getImgIds()) # Image ids for uploading
count = 1
start_time = time.time()
with ds:
## ---- Create Tensors ----- ##
#Primary Data
ds.create_tensor('images', htype = 'image', sample_compression = 'jpg')
ds.create_tensor('images_meta', htype = 'json')
ds.create_tensor('masks', htype = 'binary_mask', sample_compression = 'lz4')
ds.create_tensor('boxes', htype = 'bbox')
ds.create_tensor('categories', htype = 'class_label', class_names = cat_names)
ds.create_tensor('super_categories', htype = 'class_label', class_names = super_cat_names)
ds.create_tensor('areas', dtype = 'uint32')
ds.create_tensor('iscrowds', dtype = 'bool')
#Pose
ds.create_group('pose')
ds.pose.create_tensor('categories', htype = 'class_label', class_names = cat_names_kp)
ds.pose.create_tensor('super_categories', htype = 'class_label', class_names = super_cat_names_kp)
ds.pose.create_tensor('boxes', htype = 'bbox')
ds.pose.create_tensor('keypoints', htype = 'keypoints_coco', dtype = 'int32')
ds.pose.create_tensor('masks', htype = 'binary_mask', sample_compression = 'lz4')
#Stuff Segmentation
ds.create_group('stuff')
ds.stuff.create_tensor('masks', htype = 'binary_mask', sample_compression = 'lz4')
ds.stuff.create_tensor('boxes', htype = 'bbox')
ds.stuff.create_tensor('categories', htype = 'class_label', class_names = cat_names_stuff)
ds.stuff.create_tensor('super_categories', htype = 'class_label', class_names = super_cat_names_stuff)
ds.stuff.create_tensor('areas', dtype = 'uint32')
ds.stuff.create_tensor('iscrowds', dtype = 'bool')
#Further updates to meta information
ds.categories.info.update(category_info = category_info, notes = 'Numeric labels for categories represent the position of the class in the ds.categories.info.class_names list, and not the COCO category id.')
ds.super_categories.info.update(category_info = category_info, notes = 'Numeric labels for super_categories represent the position of the class in the ds.super_categories.info.class_names list, and not the COCO category id.')
ds.masks.info.update(notes = 'All segmentation polygons and RLEs were converted to stacked binary masks')
ds.pose.masks.info.update(category_info = category_info_kp, notes = 'All segmentation polygons and RLEs were converted to stacked binary masks')
ds.pose.keypoints.info.update(keypoints = [category['keypoints'] for category in category_info_kp][0], connections = [category['skeleton'] for category in category_info_kp][0])
ds.stuff.masks.info.update(category_info = category_info_stuff, notes = 'All segmentation polygons and RLEs were converted to stacked binary masks')
## ---- Iterate through each image and upload data ----- ##
for img_id in img_ids:
ann_ids = coco.getAnnIds(img_id)
ann_ids_kp = coco_kp.getAnnIds(img_id)
ann_ids_stuff = coco_stuff.getAnnIds(img_id)
anns = coco.loadAnns(ann_ids)
anns_kp = coco_kp.loadAnns(ann_ids_kp)
anns_stuff = coco_stuff.loadAnns(ann_ids_stuff)
img_coco = coco.loadImgs(img_id)[0]
img_fn = os.path.join(img_root, img_coco['file_name'])
img = Image.open(img_fn)
dims = img.size
#Iterate through annotations and parse each
#First Create empty arrays for all annotations
masks = np.zeros((dims[1], dims[0], len(anns)))
boxes = np.zeros((len(anns),4))
categories = np.zeros((len(anns)))
supercats = np.zeros((len(anns)))
areas = np.zeros((len(anns)))
iscrowds = np.zeros((len(anns)))
supercats = np.zeros((len(anns)))
#Then populate the arrays with the annotations data
for i, ann in enumerate(anns):
mask = coco.annToMask(ann) #Convert annotation to mask
masks[:,:,i] = mask
boxes[i,:] = ann['bbox']
# Do a brute force search and make no assumptions between order of relationship of category ids
categories[i] = cat_names.index([category_info[i]['name'] for i in range(len(category_info)) if category_info[i]['id']==ann['category_id']][0])
supercats[i] = super_cat_names.index([category_info[i]['supercategory'] for i in range(len(category_info)) if category_info[i]['id']==ann['category_id']][0])
areas[i] = ann['area']
iscrowds[i] = ann['iscrowd']
if 'segmentation' not in ann:
print('--- No segmentation found in annotations. ---')
print('Annotation length: {}'.format(len(anns)))
print('--- image id: {} ---'.format(img_id))
#Iterate through keypoints and parse each
categories_kp = np.zeros((len(anns_kp)))
supercats_kp = np.zeros((len(anns_kp)))
masks_kp = np.zeros((dims[1], dims[0], len(anns_kp)))
boxes_kp = np.zeros((len(anns_kp),4))
keypoints_kp = np.zeros((51,len(anns_kp)))
for j, ann_kp in enumerate(anns_kp):
categories_kp[j] = cat_names_kp.index([category_info_kp[i]['name'] for i in range(len(category_info_kp)) if category_info_kp[i]['id']==ann_kp['category_id']][0])
supercats_kp[j] = super_cat_names_kp.index([category_info_kp[i]['supercategory'] for i in range(len(category_info_kp)) if category_info_kp[i]['id']==ann_kp['category_id']][0])
mask_kp = coco.annToMask(ann_kp) #Convert annotation to mask
masks_kp[:,:,j] = mask_kp
boxes_kp[j,:] = ann_kp['bbox']
keypoints_kp[:,j] = np.array(ann_kp['keypoints'])
#Iterate through stuff and parse each
masks_stuff = np.zeros((dims[1], dims[0], len(anns_stuff)))
boxes_stuff = np.zeros((len(anns_stuff),4))
categories_stuff = np.zeros((len(anns_stuff)))
supercats_stuff = np.zeros((len(anns_stuff)))
areas_stuff = np.zeros((len(anns_stuff)))
iscrowds_stuff = np.zeros((len(anns_stuff)))
supercats_stuff = np.zeros((len(anns_stuff)))
for k, ann_stuff in enumerate(anns_stuff):
mask_stuff = coco.annToMask(ann_stuff) #Convert annotation to mask
masks_stuff[:,:,k] = mask_stuff
boxes_stuff[k,:] = ann['bbox']
# Do a brute force search and make no assumptions between order of relationship of category ids
categories_stuff[k] = cat_names_stuff.index([category_info_stuff[i]['name'] for i in range(len(category_info_stuff)) if category_info_stuff[i]['id']==ann_stuff['category_id']][0])
supercats_stuff[k] = super_cat_names_stuff.index([category_info_stuff[i]['supercategory'] for i in range(len(category_info_stuff)) if category_info_stuff[i]['id']==ann_stuff['category_id']][0])
areas_stuff[k] = ann_stuff['area']
iscrowds_stuff[k] = ann_stuff['iscrowd']
if 'segmentation' not in ann_stuff:
print('--- No segmentation found in stuff annotations. ---')
print('Annotation length: {}'.format(len(anns)))
print('--- image id: {} ---'.format(img_id))
#Append data to hub. Only do this after all annotations have been parsed.
try:
ds.images.append(hub.read(img_fn, verify = True))
ds.images_meta.append(img_coco)
ds.masks.append(masks.astype('bool'))
ds.boxes.append(boxes.astype('float32'))
ds.categories.append(categories.astype('uint32'))
ds.super_categories.append(supercats.astype('uint32'))
ds.areas.append(areas.astype('uint32'))
ds.iscrowds.append(iscrowds.astype('bool'))
ds.pose.categories.append(categories_kp.astype('uint32'))
ds.pose.super_categories.append(supercats_kp.astype('uint32'))
ds.pose.boxes.append(boxes_kp.astype('float32'))
ds.pose.masks.append(masks_kp.astype('bool'))
ds.pose.keypoints.append(keypoints_kp.astype('int32'))
ds.stuff.masks.append(masks_stuff.astype('bool'))
ds.stuff.boxes.append(boxes_stuff.astype('float32'))
ds.stuff.categories.append(categories_stuff.astype('uint32'))
ds.stuff.super_categories.append(supercats_stuff.astype('uint32'))
ds.stuff.areas.append(areas_stuff.astype('uint32'))
ds.stuff.iscrowds.append(iscrowds_stuff.astype('bool'))
except Exception as e:
print(e)
if count%100==0:
print('Uploaded {} images'.format(count))
if count>=limit:
break
count+=1
print('Finished')
end_time = time.time()
print('Upload took {} seconds'.format(end_time-start_time))
###Output
_____no_output_____
###Markdown
Special case - COCO Test dataset without annotations
###Code
data_dir='./Datasets/coco'
data_type='test'
hub_path = './Datasets/coco_local_{}'.format(data_type) # 'hub://my_worksace/coco_{}'.format(data_type)
limit = 1e10 # Limit the number of images
ann_file='{}/annotations/image_info_{}2017.json'.format(data_dir,data_type) #There are no actual annotations, just images
img_root='{}/{}2017/'.format(data_dir,data_type)
coco = COCO(ann_file)
#Specify dataset path
ds = hub.empty(hub_path) # Set overwrite = True if you need to start over
img_ids = sorted(coco.getImgIds()) # Image ids for uploading
count = 1
start_time = time.time()
with ds:
## ---- Create Tensors ----- ##
ds.create_tensor('images', htype = 'image', sample_compression = 'jpg')
ds.create_tensor('images_meta', htype = 'json')
## ---- Iterate through each image and upload data ----- ##
for img_id in img_ids:
img_coco = coco.loadImgs(img_id)[0]
img_fn = os.path.join(img_root, img_coco['file_name'])
img = Image.open(img_fn)
dims = img.size
#Append data to hub
try:
ds.images.append(hub.read(img_fn, verify = True))
ds.images_meta.append(img_coco)
except Exception as e:
print(e)
if count%100==0:
print('Uploaded {} images'.format(count))
if count>=limit:
break
count+=1
print('Finished')
end_time = time.time()
print('Upload took {} seconds'.format(end_time-start_time))
###Output
_____no_output_____ |
content/06. Where in the world are we/06.4 Keeping track of direction.ipynb | ###Markdown
4 Keeping track of direction – which way are we heading? As well as keeping track of how much the wheels have turned, and estimating location on that basis, we can also use the robot’s gyroscope – often referred to as a ‘gyro’ – sensor to tell us which direction it is facing. In the following activities, you will see how the gyroscope and position sensors can be used to keep track of where the robot has been, as well as helping it get to where it needs to go. So let’s get the simulator loaded in the normal way and then find out where we’re heading next...
###Code
from nbev3devsim.load_nbev3devwidget import roboSim, eds
%load_ext nbev3devsim
###Output
_____no_output_____
###Markdown
4.1 Activity – Detecting orientationThe following program defines a simple edge follower that allows the robot to navigate its way around the shape described in the *Two_shapes* background, logging the gyro sensor as it does so.Show the chart, enable the gyro trace, and download and run the program. Purely by observation of the chart view of the gyro data, do you think you would be able to determine the shape corresponding to the path followed by the robot?*Stop the downloaded program executing either from the _Simulator controls_ or the simulator keyboard shortcut (`S`).*
###Code
%%sim_magic_preloaded -c -b Two_shapes -x 400 -y 700 -a -90
# Edge follower: steer along the shape's boundary while logging the gyro.
colorRight = ColorSensor(INPUT_3)
gyro = GyroSensor(INPUT_4)
while True:
    # Log the gyro value (a cumulative turn count in degrees, not a compass heading)
    print('Gyro: '+str(gyro.angle))
    intensity_right = colorRight.reflected_light_intensity_pc
    if intensity_right > 70:
        # Bright reading: drive the right wheel only, pivoting back towards the edge.
        left_motor_speed = SpeedPercent(0)
        right_motor_speed = SpeedPercent(20)
    else:
        # Dark reading: drive the left wheel only, pivoting away from the edge.
        left_motor_speed = SpeedPercent(20)
        right_motor_speed = SpeedPercent(0)
    tank_drive.on(left_motor_speed, right_motor_speed)
###Output
_____no_output_____
###Markdown
*Add your observations about the gyro data trace as the robot follows the boundary of the provided shape. To what extent can you use the data to identify the shape of the route taken by the robot? How might you identify the path more exactly?* Example observations*Click on the arrow in the sidebar or run this cell to reveal some example observations.* The gyro sensor values follow a stepped trace in the chart, dropping by 90 or so every time the robot turns a corner, corresponding to a right-angled turn anticlockwise. The values oscillate as the robot proceeds, wiggling as it follows the edge of the line. The width (as measured along the *x*-axis) of each step is roughly the same, so the robot is describing a square.I also noticed that the angle count is not a direction: it seems to be an accumulated count of degrees turned in a particular direction. If the robot were to turn the other way then I would expect the count to go down. I even did a little experiment to check that.
###Code
%%sim_magic_preloaded -c
# Demonstrates that gyro.angle is a signed cumulative count: it rises while
# turning one way and falls back towards zero while turning the other.
gyro = GyroSensor(INPUT_4)
say('Turn one way')
# Left wheel only: pivot until 90 degrees have accumulated.
tank_drive.on(SpeedPercent(20), SpeedPercent(0))
while gyro.angle < 90:
    print('Gyro: '+str(gyro.angle))
say('and the other')
# Turn the other way: the angle count should decrease back to zero.
tank_drive.on(SpeedPercent(0), SpeedPercent(20))
while gyro.angle > 0:
    print('Gyro: '+str(gyro.angle))
say('all done')
###Output
_____no_output_____
###Markdown
4.2 Challenge – Navigating to a specified locationThe *WRO_2018_Regular_Junior* challenge background has several coloured areas marked on it at (350, 580), (1180, 960) and (2000, 580).__You should not spend more than 30–45 minutes on this challenge.__From the starting location of the robot at (1180, 150, 90), see if you can write a program that drives the robot using dead reckoning – that is, using just the motor `position` and the gyro `angle` values – to drive the robot to one of those locations. Then see if you can get it to drive to one of the other locations.The background coordinates give locations in millimetres relative to a fixed origin.Once you have got your program to work reasonably reliably, try adding some noise to the motors using the *Wheel noise* slider in the simulator. Does this have any effect on the performance of your control program? *You may find it helpful to do some sums to calculate how far the robot has to travel. Make some notes on that here.*
###Code
# Maybe try out some sums here?
%%sim_magic_preloaded -p -x -1180 -y 150 -a 90 -b WRO_2018_Regular_Junior
# YOUR CODE HERE
###Output
_____no_output_____
###Markdown
4 Keeping track of direction — which way are we heading?As well as keeping track of how much the wheels have turned, and estimating location on that basis, we can also use the robot’s gyroscope – often referred to as a ‘gyro’ – sensor to tell us which direction it is facing.In the following activities, you will see how the gyroscope and position sensors can be used to keep track of where the robot has been, as well as helping it get to where it needs to go.So let's get the simulated loaded in the normal way and then find out where we're heading next...
###Code
from nbev3devsim.load_nbev3devwidget import roboSim, eds
%load_ext nbev3devsim
###Output
_____no_output_____
###Markdown
4.1 Activity — Detecting orientationThe following program defines a simple edge follower that allows the robot to navigate its way around the shape described in the *Two_shapes* background, logging the gyro sensor as it does so.Show the chart, enable the gyro trace, and download and run the program. Purely by observation of the chart view of the gyro data, do you think you would be able to determine the shape corresponding to the path followed by the robot?*Stop the downloaded program executing either from the _Simulator controls_ or the simulator keyboard shortcut (`S`).*
###Code
%%sim_magic_preloaded -c -b Two_shapes -x 400 -y 700 -a -90
colorRight = ColorSensor(INPUT_3)
gyro = GyroSensor(INPUT_4)
while True:
#Get the gyro value
print('Gyro: '+str(gyro.angle))
intensity_right = colorRight.reflected_light_intensity_pc
if intensity_right > 70:
left_motor_speed = SpeedPercent(0)
right_motor_speed = SpeedPercent(20)
else:
left_motor_speed = SpeedPercent(20)
right_motor_speed = SpeedPercent(0)
tank_drive.on(left_motor_speed, right_motor_speed)
###Output
_____no_output_____
###Markdown
*Add your observations about the gyro data trace as the robot follows the boundary of the provided shape. To what extent can you use the data to identify the shape of the route taken by the robot? How might you identify the path more exactly?* Example observations*Click on the arrow in the sidebar or run this cell to reveal some example observations.* The gyro sensor values follow a stepped trace in the chart, dropping by 90 or so every time the robot turns a corner, corresponding to a right-angled turn anticlockwise. The values oscillate as the robot proceeds, wiggling as it follows the edge of the line. The width (as measured along the x-axis) of each step is roughly the same, so the robot is describing a square.I also noticed that the angle count is not a direction: it seems to be an accumulated count of degrees turned in a particular direction. If the robot were to turn the other way then I would expect the count to go down. I even did a little experiment to check that.
###Code
%%sim_magic_preloaded -c
gyro = GyroSensor(INPUT_4)
say('Turn one way')
tank_drive.on(SpeedPercent(20), SpeedPercent(0))
while gyro.angle < 90:
print('Gyro: '+str(gyro.angle))
say('and the other')
# Turn the other way
tank_drive.on(SpeedPercent(0), SpeedPercent(20))
while gyro.angle > 0:
print('Gyro: '+str(gyro.angle))
say('all done')
###Output
_____no_output_____
###Markdown
4.2 Challenge — Navigating to a specified locationThe *WRO_2018_Regular_Junior* challenge background has several coloured areas marked on it at (350, 580), (1180, 960) and (2000, 580).__You should not spend more than 30-45 minutes on this challenge.__From the starting location of the robot at (1180, 150, 90), see if you can write a program that drives the robot using dead reckoning – that is, using just the motor `position` and the gyro `angle` values – to drive the robot to one of those locations. Then see if you can get it to drive to one of the other locations.The background co-ordinates give locations in millimeters relative to a fixed origin.Once you have got your program to work reasonably reliably, try adding some noise to the motors using the *Wheel noise* slider in the simulator. Does this have any effect on the performance of your control program? *You may find it helpful to do some sums to calculate how far the robot has to travel. Make some notes on that here.*
###Code
# Maybe try out some sums here?
%%sim_magic_preloaded -p -x -1180 -y 150 -a 90 -b WRO_2018_Regular_Junior
# YOUR CODE HERE
###Output
_____no_output_____ |
Cycle_GAN.ipynb | ###Markdown
Parameter Setting* You can adjust the parameters to yourself
###Code
print('STEP 0: PARAMETER SETTING')
# Data root directory
train_X_root = 'dataset/lab14/mnist/'
train_Y_root = 'dataset/lab14/svhn/'
# Weight save directory
vis_num = 2
save_dir = 'cyclegan'
if not osp.exists(save_dir):
os.makedirs(save_dir)
# Batch size during training
bs = 64
# Size of image
img_height = 32
img_width = 32
img_size = 32
img_channel = 3
# Channels of generator feature
gfc = 32
# Channels of discriminator feature
dfc = 32
# Number of training epochs
num_epochs = 5
# Learning rate for optimizing
lr = 0.0001
# Beta1 hyperparameter for Adam optimizers
beta1 = 0.5
# Real or Fake label
real_label = 0.97
fake_label = 0.03
print('STEP 1: LOADING DATASET')
transform_1ch = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
transform_3ch = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_X_set = dset.MNIST(root=train_X_root,
train=True,
transform=transform_1ch,
download=False)
train_Y_set = dset.SVHN(root=train_Y_root,
split='train',
transform=transform_3ch,
download=False)
test_X_set = dset.MNIST(root=train_X_root,
train=False,
transform=transform_1ch,
download=False)
test_Y_set = dset.SVHN(root=train_Y_root,
split='test',
transform=transform_3ch,
download=False)
print('STEP 2: MAKING DATASET ITERABLE')
train_X_loader = torch.utils.data.DataLoader(train_X_set,
batch_size=bs,
shuffle=True,
drop_last=True)
train_Y_loader = torch.utils.data.DataLoader(train_Y_set,
batch_size=bs,
shuffle=True,
drop_last=True)
test_X_loader = torch.utils.data.DataLoader(test_X_set,
batch_size=bs,
shuffle=False,
drop_last=True)
test_Y_loader = torch.utils.data.DataLoader(test_Y_set,
batch_size=bs,
shuffle=False,
drop_last=True)
###Output
STEP 2: MAKING DATASET ITERABLE
###Markdown
Visualize a few images
###Code
def imshow(inp, title=None):
    """Display a normalised (C, H, W) image tensor with matplotlib.

    Undoes the Normalize((0.5, ...), (0.5, ...)) transform applied by the
    dataset pipeline, clips to the displayable [0, 1] range, and shows the
    result; `title`, if given, becomes the plot title.
    """
    img = inp.numpy().transpose((1, 2, 0))  # CHW tensor -> HWC array
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([0.5, 0.5, 0.5])
    img = np.clip(std * img + mean, 0, 1)  # de-normalise, then clamp
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
data_loader_X_sample = torch.utils.data.DataLoader(train_X_set,
batch_size=4,
shuffle=True)
data_loader_Y_sample = torch.utils.data.DataLoader(train_Y_set,
batch_size=4,
shuffle=True)
# Get a batch of training data
X = next(iter(data_loader_X_sample))
Y = next(iter(data_loader_Y_sample))
# Make a grid from batch
out_X = torchvision.utils.make_grid(X[0])
out_Y = torchvision.utils.make_grid(Y[0])
imshow(out_X)
imshow(out_Y)
def conv2d(params_list, batch_norm = True):
    """Build a conv block: Conv2d (xavier init, no bias) [+ BatchNorm2d] [+ activation].

    params_list is [in_ch, out_ch, kernel_size, stride, padding, activation],
    where activation is one of 'relu', 'leakyrelu', 'tanh', 'sigmoid'
    (case-insensitive); any other string (e.g. '') adds no activation.
    Returns an nn.Sequential of the assembled layers.
    """
    c_in, c_out, k, s, p, act = params_list
    conv = nn.Conv2d(c_in, c_out, k, s, p, bias=False)
    nn.init.xavier_uniform_(conv.weight)
    layers = [conv]
    if batch_norm:
        layers.append(nn.BatchNorm2d(c_out))
    # Map activation names to fresh layer instances (LeakyReLU slope 0.2).
    activations = {
        'relu': nn.ReLU(inplace=True),
        'leakyrelu': nn.LeakyReLU(0.2, inplace=True),
        'tanh': nn.Tanh(),
        'sigmoid': nn.Sigmoid(),
    }
    key = act.lower()
    if key in activations:
        layers.append(activations[key])
    return nn.Sequential(*layers)
def upconv2d(params_list, batch_norm = True):
    """Build an upsampling block: ConvTranspose2d (xavier init, no bias) [+ BatchNorm2d] [+ activation].

    Same params_list layout as conv2d():
    [in_ch, out_ch, kernel_size, stride, padding, activation], activation one
    of 'relu', 'leakyrelu', 'tanh', 'sigmoid' (case-insensitive) or anything
    else for none. Returns an nn.Sequential.
    """
    c_in, c_out, k, s, p, act = params_list
    deconv = nn.ConvTranspose2d(c_in, c_out, k, s, p, bias=False)
    nn.init.xavier_uniform_(deconv.weight)
    layers = [deconv]
    if batch_norm:
        layers.append(nn.BatchNorm2d(c_out))
    # Map activation names to fresh layer instances (LeakyReLU slope 0.2).
    activations = {
        'relu': nn.ReLU(inplace=True),
        'leakyrelu': nn.LeakyReLU(0.2, inplace=True),
        'tanh': nn.Tanh(),
        'sigmoid': nn.Sigmoid(),
    }
    key = act.lower()
    if key in activations:
        layers.append(activations[key])
    return nn.Sequential(*layers)
def transpose(ndarray):
    """Move the channel axis last: (N, C, H, W) -> (N, H, W, C)."""
    nhwc_order = (0, 2, 3, 1)
    return np.transpose(ndarray, nhwc_order)
def gray2rgb(ndarray):
    """Fake an RGB image by stacking three copies of a grayscale (H, W, 1) array along axis 2."""
    return np.concatenate([ndarray] * 3, axis=2)
print('STEP 3-1: CREATE MODEL CLASS (Generator)')
class ResidualBlock(nn.Module):
    """Skip-connection wrapper: forward(x) = x + conv_block(x).

    The wrapped block is built by conv2d() from the same params_list format;
    its input and output channel counts must match for the addition to work.
    """

    def __init__(self, params_list):
        super().__init__()
        # Attribute kept as 'block' so state_dict keys stay stable.
        self.block = conv2d(params_list)

    def forward(self, x):
        residual = self.block(x)
        return x + residual
# Layer configs, each: [input channels, output channels, kernel_size, stride, padding, activation]
cfg_g_enc_X = [[1, gfc, 7, 2, 3, 'leakyrelu'], [gfc, gfc*2, 3, 2, 1, 'leakyrelu']]
cfg_g_enc_Y = [[3, gfc, 7, 2, 3, 'leakyrelu'], [gfc, gfc*2, 3, 2, 1, 'leakyrelu']]
cfg_g_trans = [[gfc*2, gfc*2, 3, 1, 1, 'leakyrelu'], [gfc*2, gfc*2, 3, 1, 1, 'leakyrelu'], [gfc*2, gfc*2, 3, 1, 1, 'leakyrelu']]
cfg_g_dec_X = [[gfc*2, gfc, 4, 2, 1, 'leakyrelu'],[gfc, 3, 4, 2, 1, 'tanh']]
cfg_g_dec_Y = [[gfc*2, gfc, 4, 2, 1, 'leakyrelu'],[gfc, 1, 4, 2, 1, 'tanh']]
class Generator_X(nn.Module):
    """Generator mapping 1-channel inputs to 3-channel outputs (domain X -> Y).

    Encoder: two strided convolutions (cfg_g_enc_X) downsample the input.
    Transformer: three residual blocks (cfg_g_trans) at constant resolution.
    Decoder: two transposed convolutions (cfg_g_dec_X) upsample back,
    ending in tanh so outputs lie in [-1, 1] like the normalised inputs.
    """

    def __init__(self):
        super().__init__()
        # Attribute names (Encoder/Trans/Decoder) kept for state_dict stability.
        self.Encoder = nn.Sequential(
            conv2d(cfg_g_enc_X[0], batch_norm=False),
            conv2d(cfg_g_enc_X[1]),
        )
        self.Trans = nn.Sequential(
            ResidualBlock(cfg_g_trans[0]),
            ResidualBlock(cfg_g_trans[1]),
            ResidualBlock(cfg_g_trans[2]),
        )
        self.Decoder = nn.Sequential(
            upconv2d(cfg_g_dec_X[0]),
            upconv2d(cfg_g_dec_X[1], batch_norm=False),
        )

    def forward(self, x):
        features = self.Encoder(x)
        features = self.Trans(features)
        return self.Decoder(features)
class Generator_Y(nn.Module):
    """Generator mapping 3-channel inputs to 1-channel outputs (domain Y -> X).

    Mirror of Generator_X: strided-conv encoder (cfg_g_enc_Y), three residual
    blocks (cfg_g_trans), and a transposed-conv decoder (cfg_g_dec_Y) ending
    in tanh so outputs lie in [-1, 1] like the normalised inputs.
    """

    def __init__(self):
        super().__init__()
        # Attribute names (Encoder/Trans/Decoder) kept for state_dict stability.
        self.Encoder = nn.Sequential(
            conv2d(cfg_g_enc_Y[0], batch_norm=False),
            conv2d(cfg_g_enc_Y[1]),
        )
        self.Trans = nn.Sequential(
            ResidualBlock(cfg_g_trans[0]),
            ResidualBlock(cfg_g_trans[1]),
            ResidualBlock(cfg_g_trans[2]),
        )
        self.Decoder = nn.Sequential(
            upconv2d(cfg_g_dec_Y[0]),
            upconv2d(cfg_g_dec_Y[1], batch_norm=False),
        )

    def forward(self, x):
        features = self.Encoder(x)
        features = self.Trans(features)
        return self.Decoder(features)
###Output
STEP 3-1: CREATE MODEL CLASS (Generator)
###Markdown
2.2 Write the code (Discriminator) [3 points]* You need to set the hyperparameters for implementing the convolutions (params_list)* There are 'ReLU', 'LeakyReLU', 'Tanh', and 'Sigmoid' for the activation functions* If you do not want to use the activation function, just put '' in the position of the activation function* Other parameters, such as paddings, can be determined by calculating the formulation of convolutional process (See in https://pytorch.org/docs/stable/nn.html)* You have to use the functions **conv2d()** or **upconv2d()** which are defined from above
###Code
print('STEP 3-2: CREATE MODEL CLASS (Discriminator)')
# Layer configs, each: [input channels, output channels, kernel_size, stride, padding, activation]
cfg_d_X = [[1, dfc, 4, 2, 1, 'leakyrelu'], [dfc, dfc*2, 4, 2, 2, 'leakyrelu'], [dfc*2, dfc*4, 4, 1, 1, 'leakyrelu'], [dfc*4, 1, 1, 1, 0, 'sigmoid']]
cfg_d_Y = [[3, dfc, 4, 2, 1, 'leakyrelu'], [dfc, dfc*2, 4, 2, 2, 'leakyrelu'], [dfc*2, dfc*4, 4, 1, 1, 'leakyrelu'], [dfc*4, 1, 1, 1, 0, 'sigmoid']]
class Discriminator_X(nn.Module):
    """Convolutional discriminator for domain-X images.

    Emits a per-patch score map via the final sigmoid layer; batch norm
    is skipped on the first and last layers (see cfg_d_X).
    """

    def __init__(self):
        super(Discriminator_X, self).__init__()
        layers = [
            conv2d(cfg_d_X[0], batch_norm=False),
            conv2d(cfg_d_X[1]),
            conv2d(cfg_d_X[2]),
            conv2d(cfg_d_X[3], batch_norm=False),
        ]
        self.Conv_X = nn.Sequential(*layers)

    def forward(self, x):
        """Return the realness score map for input batch x."""
        return self.Conv_X(x)
class Discriminator_Y(nn.Module):
    """Convolutional discriminator for domain-Y images.

    Mirrors Discriminator_X but is configured by cfg_d_Y (3-channel
    input); batch norm is skipped on the first and last layers.
    """

    def __init__(self):
        super(Discriminator_Y, self).__init__()
        layers = [
            conv2d(cfg_d_Y[0], batch_norm=False),
            conv2d(cfg_d_Y[1]),
            conv2d(cfg_d_Y[2]),
            conv2d(cfg_d_Y[3], batch_norm=False),
        ]
        self.Conv_Y = nn.Sequential(*layers)

    def forward(self, x):
        """Return the realness score map for input batch x."""
        return self.Conv_Y(x)
print('STEP 4: INSTANTIATE MODEL CLASS')
# Two generators (X<->Y) and one discriminator per domain.
model_G_X = Generator_X()
model_G_Y = Generator_Y()
model_D_X = Discriminator_X()
model_D_Y = Discriminator_Y()
#######################
#  USE GPU FOR MODEL  #
#######################
# Fall back to CPU when no CUDA device is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_G_X.to(device)
print(model_G_X)
model_G_Y.to(device)
print(model_G_Y)
model_D_X.to(device)
print(model_D_X)
model_D_Y.to(device)
# NOTE(review): model_D_Y is the only model whose summary is not printed
# here -- looks like an oversight; it does not affect training.
print('STEP 5: INSTANTIATE LOSS CLASS')
criterion_GAN = nn.BCELoss()  # adversarial real/fake loss
criterion_L1 = nn.L1Loss()    # cycle-consistency reconstruction loss
print('STEP 6: INSTANTIATE OPTIMIZER CLASS')
# One Adam optimizer per network, all sharing lr/beta1 hyperparameters.
optimizer_G_X = torch.optim.Adam(model_G_X.parameters(), lr=lr, betas=(beta1, 0.999))
optimizer_G_Y = torch.optim.Adam(model_G_Y.parameters(), lr=lr, betas=(beta1, 0.999))
optimizer_D_X = torch.optim.Adam(model_D_X.parameters(), lr=lr, betas=(beta1, 0.999))
optimizer_D_Y = torch.optim.Adam(model_D_Y.parameters(), lr=lr, betas=(beta1, 0.999))
print('STEP 7: TRAIN THE MODEL')
# Constant target tensors matching the discriminator output shape
# (bs, 1, 8, 8).
label_real = torch.full((bs, 1, 8, 8), real_label, dtype=torch.float32, device=device)
label_fake = torch.full((bs, 1, 8, 8), fake_label, dtype=torch.float32, device=device)
num_iter = 0
max_iter = num_epochs*len(train_X_loader)
train_start_time = time.time()
# Main CycleGAN training loop: per X-domain batch, update both
# discriminators first, then both generators jointly.
for epoch in range(1, num_epochs+1):
    for batch_index, data_X in enumerate(train_X_loader):
        model_G_X.train()
        model_G_Y.train()
        model_D_X.train()
        model_D_Y.train()
        data_X = data_X[0].to(device)
        # NOTE(review): next(iter(...)) rebuilds the Y iterator every
        # step; if train_Y_loader does not shuffle, this keeps
        # resampling the same leading batch -- confirm it shuffles.
        data_Y = next(iter(train_Y_loader))[0].to(device)
        ### D
        optimizer_D_X.zero_grad()
        optimizer_D_Y.zero_grad()
        ### D_X: real X images vs. images translated from Y.
        output_D_X_real = model_D_X(data_X)
        err_D_X_real = criterion_GAN(output_D_X_real, label_real)
        # NOTE(review): the generator output is not detach()-ed, so this
        # backward also backprops through G_Y; those grads are discarded
        # by the zero_grad() calls before the G step, but detaching
        # would save the extra work.
        output_D_X_fake = model_D_X(model_G_Y(data_Y))
        err_D_X_fake = criterion_GAN(output_D_X_fake, label_fake)
        err_D_X = err_D_X_real + err_D_X_fake
        err_D_X.backward()
        optimizer_D_X.step()
        ### D_Y: real Y images vs. images translated from X.
        output_D_Y_real = model_D_Y(data_Y)
        err_D_Y_real = criterion_GAN(output_D_Y_real, label_real)
        output_D_Y_fake = model_D_Y(model_G_X(data_X))
        err_D_Y_fake = criterion_GAN(output_D_Y_fake, label_fake)
        err_D_Y = err_D_Y_real + err_D_Y_fake
        err_D_Y.backward()
        optimizer_D_Y.step()
        err_D = err_D_Y + err_D_X  # combined D loss, used for logging only
        ### G: adversarial + cycle-consistency losses, one joint backward.
        optimizer_G_X.zero_grad()
        optimizer_G_Y.zero_grad()
        ### G_Y: Y -> X translation should fool D_X ...
        fake_MNIST = model_G_Y(data_Y)
        out1 = model_D_X(fake_MNIST)
        err_G1 = criterion_GAN(out1, label_real)
        # ... and mapping back through G_X should reconstruct data_Y.
        Cycle_fake_SVHN = model_G_X(fake_MNIST)
        err_C1 = criterion_L1(Cycle_fake_SVHN, data_Y)
        ### G_X: X -> Y translation should fool D_Y ...
        fake_SVHN = model_G_X(data_X)
        out2 = model_D_Y(fake_SVHN)
        err_G2 = criterion_GAN(out2, label_real)
        # ... and mapping back through G_Y should reconstruct data_X.
        Cycle_fake_MNIST = model_G_Y(fake_SVHN)
        err_C2 = criterion_L1(Cycle_fake_MNIST, data_X)
        err_C = err_C1 + err_C2
        err_G = err_G1 + err_G2 + err_C
        err_G.backward()
        optimizer_G_X.step()
        optimizer_G_Y.step()
        num_iter += 1
        # Output training stats
        if num_iter%100 == 0:
            print('it[{:04d}/{:04d}] \tLoss_D:{:.4f} \tLoss_G:{:.4f} \tLoss_C:{:.4f} \telapsed_time:{:.2f}mins'.format(
                num_iter, max_iter, err_D.item(), err_G.item(), err_C.item(), (time.time()-train_start_time)/60
            ))
        # Periodic checkpoint + qualitative visualization on one test batch.
        if num_iter%1000==0 or num_iter==max_iter:
            save_name = osp.join(save_dir, 'it{:04d}.pt'.format(num_iter))
            torch.save({
                'model_G_X': model_G_X.state_dict(),
                'model_G_Y': model_G_Y.state_dict()
            }, save_name)
            with torch.no_grad():
                model_G_X.eval()
                model_G_Y.eval()
                for test_index, data_X in enumerate(test_X_loader):
                    if test_index == 0:
                        data_X = data_X[0].to(device)
                        data_Y = next(iter(test_Y_loader))[0].to(device)
                        output_X = model_G_X(data_X)
                        output_Y = model_G_Y(data_Y)
                        # Map from [-1, 1] back to [0, 1] for display.
                        data_X = ((data_X+1)/2).cpu().data.numpy()
                        data_Y = ((data_Y+1)/2).cpu().data.numpy()
                        output_X = ((output_X + 1)/2).cpu().data.numpy()
                        output_Y = ((output_Y + 1)/2).cpu().data.numpy()
                        for vis_idx in range(vis_num):
                            data_X_, data_Y_ = gray2rgb(transpose(data_X)[vis_idx]), transpose(data_Y)[vis_idx]
                            output_X_, output_Y_ = transpose(output_X)[vis_idx], gray2rgb(transpose(output_Y)[vis_idx])
                            outputs = np.concatenate((data_X_, output_X_, data_Y_, output_Y_), axis=1)
                            plt.imshow(outputs)
                            plt.pause(0.001)
###Output
STEP 7: TRAIN THE MODEL
it[0100/4685] Loss_D:2.5846 Loss_G:2.1097 Loss_C:0.6558 elapsed_time:0.24mins
it[0200/4685] Loss_D:2.5356 Loss_G:1.9450 Loss_C:0.4312 elapsed_time:0.47mins
it[0300/4685] Loss_D:2.5181 Loss_G:1.9181 Loss_C:0.3512 elapsed_time:0.69mins
it[0400/4685] Loss_D:2.5458 Loss_G:1.8516 Loss_C:0.3066 elapsed_time:0.91mins
it[0500/4685] Loss_D:2.5344 Loss_G:1.8421 Loss_C:0.2801 elapsed_time:1.13mins
it[0600/4685] Loss_D:2.5419 Loss_G:1.8222 Loss_C:0.2671 elapsed_time:1.35mins
it[0700/4685] Loss_D:2.5466 Loss_G:1.8052 Loss_C:0.2440 elapsed_time:1.57mins
it[0800/4685] Loss_D:2.5518 Loss_G:1.7987 Loss_C:0.2357 elapsed_time:1.80mins
it[0900/4685] Loss_D:2.5491 Loss_G:1.8335 Loss_C:0.2491 elapsed_time:2.02mins
it[1000/4685] Loss_D:2.5427 Loss_G:1.8250 Loss_C:0.2500 elapsed_time:2.24mins
|
tutorials/tutorial-3.customize-the-module.ipynb | ###Markdown
Customize the module before install Let's suppose we have identified two backends that we like (one for 2D plots, the other for 3D plots). Then, instead of passing in the keyword `backend=SOMETHING` each time we need to create a plot, we can customize the module to make the plotting functions use our backends.Let's import the necessary tools:
###Code
# Inspect the current plotting defaults and the setter's documentation.
from spb.defaults import cfg, set_defaults
display(cfg)
help(set_defaults)
###Output
_____no_output_____
###Markdown
We need to change the values in the `cfg` dictionary and then use the `set_defaults` function to apply the new configurations.Let's say we would like to:* use Bokeh for 2D plots and Plotly for 3D plots;* use `"seaborn"` theme in Plotly.Then:
###Code
# Select the plotting library by name.
# Available options: bokeh, matplotlib, mayavi, k3d, plotly.
cfg["backend_2D"] = "bokeh"
cfg["backend_3D"] = "plotly"
# Backend-specific options live in a sub-dict keyed by the library name.
cfg["plotly"]["theme"] = "seaborn"
# Persist the new configuration so future plot calls pick it up.
set_defaults(cfg)
###Output
_____no_output_____
###Markdown
We can test our changes right away.
###Code
from sympy import *
from spb import *
from spb.backends.plotly import PB
var("u, v, x, y")
# 2D plot: should now use the Bokeh backend configured above.
plot(sin(x), cos(x), log(x), legend=True)
# 3D parametric surface ("bumpy sphere"); n is the sampling resolution.
n = 125
r = 2 + sin(7 * u + 5 * v)
expr = (
    r * cos(u) * sin(v),
    r * sin(u) * sin(v),
    r * cos(v)
)
# Explicit backend override: render this one with Plotly (PB).
plot3d_parametric_surface((*expr, (u, 0, 2 * pi), (v, 0, pi), "expr"), n=n, backend=PB)
###Output
_____no_output_____ |
src/nn/nn-sequential-conv2.ipynb | ###Markdown
Prepare dataset
###Code
import data  # src/data.py
# Load the project dataset (labels plus train/test file lists).
dataset = data.init_dataset()
###Output
_____no_output_____
###Markdown
Select the amount of classes that will be used
###Code
# Pick the n classes with the most occurring instances.
amt = 3
classes = data.top_classes(dataset.labels, amt)
classes
# For each selected class, collect the training-set file names and
# remember how many there are per class.
name_list = []
n_per_class = []
tail = '.jpg'
for cls in classes:
    names = data.items_with_label(dataset.labels, cls)
    train_names = [f for f in names if (f + tail) in dataset.train]
    name_list.append(train_names)
    n_per_class.append(len(train_names))
# Balance the classes: keep at most min(count) samples per class.
n = min(n_per_class)
# (optional) reduce n to check whether the model can remember its input
reduced_n = 50
if n > reduced_n: n = reduced_n
# Flatten to a single shuffled list of n samples per class.
x = []
for ls in name_list:
    for name in ls[:n]:
        x.append(name)
random.shuffle(x)
len(x)
# TODO rmv faces
###Output
_____no_output_____
###Markdown
Load & convert imagesAll input images should have the same size
###Code
# TODO use crop
# TODO first
# Load the selected images and their labels into arrays.
x_train, y_train, n = data.extract_all(dataset, x)
n
data.show_info(x_train)
# Quick visual sanity check on the first ten images.
plot.multiple(x_train[:10])
###Output
_____no_output_____
###Markdown
Prepare the labelsEncode the labels to one-hot vectors
###Code
# TODO y_test
# NOTE(review): y_test is temporarily aliased to y_train (no separate
# test labels yet) -- replace once a real test split exists.
y_test = y_train
# One-hot encode the labels.
y_train, y_test = data.labels_to_vectors(dataset, y_train, y_test)
y_train[0]
###Output
_____no_output_____
###Markdown
Train a Sequential model (keras)
###Code
n_samples = x_train.shape[0]  # = length of the list of images (matrices)
input_shape = x_train.shape[1:]  # = shape of an individual image (matrix)
output_length = (y_train[0]).shape[0]  # = length of an individual one-hot label
print(n_samples, input_shape)
print('output length', output_length)
x_train.shape
input_shape
def sequential_conv(input_shape, output_length, dropout=0.10):
    """Build a small convolutional classifier.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input image, e.g. (height, width, channels).
    output_length : int
        Number of output classes (width of the softmax layer).
    dropout : float, optional
        Dropout rate applied after the conv stack and the dense layer.

    Returns
    -------
    tuple
        ``(model, summary)`` -- the un-compiled Keras model and its
        bound ``summary`` method (call ``summary()`` to print it).
    """
    net = Sequential()
    for layer in (
        # Convolutional feature extractor.
        Conv2D(4, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(8, (3, 3), activation='relu'),
        MaxPool2D(pool_size=(2, 2)),  # strides default to pool_size - watch dims
        Conv2D(32, (9, 9), activation='relu'),
        MaxPool2D(pool_size=(2, 2)),  # reduce the spatial dimensions again
        Conv2D(8, (6, 6), activation='relu'),
        Dropout(dropout),
        # Dense classifier head.
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(dropout),
        Dense(output_length, activation='softmax'),
    ):
        net.add(layer)
    return net, net.summary
dropout = 0.10
# Build the network and print its layer summary.
model, summary = sequential_conv(input_shape, output_length, dropout)
summary()
# model, summary = models.sequential_conv(input_shape, output_length)
# summary()
###Output
_____no_output_____
###Markdown
Loss function- Categorical cross-entropy loss
###Code
learning_rate = 0.001
# Optimizer choices considered: Adam, SGD.
# sgd = Keras.optimizers.SGD(lr=0.01, clipnorm=1.)
optimizer = Adam(lr=learning_rate)
# top_k_categorical_accuracy(y_true, y_pred, k=5)
# https://keras.io/metrics/
# Categorical cross-entropy loss with several extra metrics tracked.
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy',
    'mean_squared_error','categorical_crossentropy','top_k_categorical_accuracy'])
# model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Train the model
###Code
batch_size = 8
# n epochs = n iterations over all the training data
epochs = 32
config.tmp_model_dir
# model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=1/6)
# Train with a 20% validation split; log metrics for TensorBoard.
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,
    validation_split=1/5, callbacks=[TensorBoard(log_dir=config.tmp_model_dir)])
# Per-class probability vectors for the first ten training images.
result = model.predict(x_train[:10])
# result
result[0]
###Output
_____no_output_____
###Markdown
`i = result[0].argmax()`
`result[0][i]`
###Code
np.mean(np.array(result[0]))
###Output
_____no_output_____ |
0.15/_downloads/plot_ems_filtering.ipynb | ###Markdown
==============================================Compute effect-matched-spatial filtering (EMS)==============================================This example computes the EMS to reconstruct the time course of theexperimental effect as described in [1]_.This technique is used to create spatial filters based on the differencebetween two conditions. By projecting the trial onto the corresponding spatialfilters, surrogate single trials are created in which multi-sensor activity isreduced to one time series which exposes experimental effects, if present.We will first plot a trials x times image of the single trials and order thetrials by condition. A second plot shows the average time series for eachcondition. Finally a topographic plot is created which exhibits the temporalevolution of the spatial filters.References----------.. [1] Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing multi-sensor data to a single time course that reveals experimental effects", BMC Neuroscience 2013, 14:122.
###Code
# Author: Denis Engemann <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import EMS, compute_ems
from sklearn.model_selection import StratifiedKFold
print(__doc__)
data_path = sample.data_path()
# Preprocess the data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Compare left-auditory vs. left-visual conditions.
event_ids = {'AudL': 1, 'VisL': 3}
# Read data and create epochs
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(0.5, 45, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
    exclude='bads')
epochs = mne.Epochs(raw, events, event_ids, tmin=-0.2, tmax=0.5, picks=picks,
    baseline=None, reject=dict(grad=4000e-13, eog=150e-6),
    preload=True)
epochs.drop_bad()
epochs.pick_types(meg='grad')
# Setup the data to use it a scikit-learn way:
X = epochs.get_data()  # The MEG data
y = epochs.events[:, 2]  # The conditions indices
n_epochs, n_channels, n_times = X.shape
# Initialize EMS transformer
ems = EMS()
# Initialize the variables of interest
X_transform = np.zeros((n_epochs, n_times))  # Data after EMS transformation
filters = list()  # Spatial filters at each time point
# In the original paper, the cross-validation is a leave-one-out. However,
# we recommend using a Stratified KFold, because leave-one-out tends
# to overfit and cannot be used to estimate the variance of the
# prediction within a given fold.
for train, test in StratifiedKFold().split(X, y):
    # In the original paper, the z-scoring is applied outside the CV.
    # However, we recommend to apply this preprocessing inside the CV.
    # Note that such scaling should be done separately for each channels if the
    # data contains multiple channel types.
    X_scaled = X / np.std(X[train])
    # Fit and store the spatial filters
    ems.fit(X_scaled[train], y[train])
    # Store filters for future plotting
    filters.append(ems.filters_)
    # Generate the transformed data
    X_transform[test] = ems.transform(X_scaled[test])
# Average the spatial filters across folds
filters = np.mean(filters, axis=0)
# Plot individual trials
plt.figure()
plt.title('single trial surrogates')
plt.imshow(X_transform[y.argsort()], origin='lower', aspect='auto',
    extent=[epochs.times[0], epochs.times[-1], 1, len(X_transform)],
    cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Trials (reordered by condition)')
# Plot average response
plt.figure()
plt.title('Average EMS signal')
mappings = [(key, value) for key, value in event_ids.items()]
for key, value in mappings:
    ems_ave = X_transform[y == value]
    plt.plot(epochs.times, ems_ave.mean(0), label=key)
plt.xlabel('Time (ms)')
plt.ylabel('a.u.')
plt.legend(loc='best')
plt.show()
# Visualize spatial filters across time
evoked = EvokedArray(filters, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap()
###Output
_____no_output_____
###Markdown
Note that a similar transformation can be applied with `compute_ems`However, this function replicates Schurger et al's original paper, and thusapplies the normalization outside a leave-one-out cross-validation, which werecommend not to do.
###Code
# Replicate Schurger et al.'s original pipeline (normalization outside
# the CV) -- kept for comparison, not recommended.
epochs.equalize_event_counts(event_ids)
X_transform, filters, classes = compute_ems(epochs)
###Output
_____no_output_____ |
module4-logistic-regression/Jake_Dennis_LS_DS_214_assignment.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 4*--- Logistic Regression Assignment 🌯You'll use a [**dataset of 400+ burrito reviews**](https://srcole.github.io/100burritos/). How accurately can you predict whether a burrito is rated 'Great'?> We have developed a 10-dimensional system for rating the burritos in San Diego. ... Generate models for what makes a burrito great and investigate correlations in its dimensions.- [ ] Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.- [ ] Begin with baselines for classification.- [ ] Use scikit-learn for logistic regression.- [ ] Get your model's validation accuracy. (Multiple times if you try multiple iterations.)- [ ] Get your model's test accuracy. (One time, at the end.)- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals- [ ] Add your own stretch goal(s) !- [ ] Make exploratory visualizations.- [ ] Do one-hot encoding.- [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).- [ ] Get and plot your coefficients.- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Linear-Models/master/data/'
    !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# Load data downloaded from https://srcole.github.io/100burritos/
import pandas as pd
df = pd.read_csv(DATA_PATH+'burritos/burritos.csv')
# Derive binary classification target:
# We define a 'Great' burrito as having an
# overall rating of 4 or higher, on a 5 point scale.
# Drop unrated burritos.
df = df.dropna(subset=['overall'])
df['Great'] = df['overall'] >= 4
# Clean/combine the Burrito categories
df['Burrito'] = df['Burrito'].str.lower()
california = df['Burrito'].str.contains('california')
asada = df['Burrito'].str.contains('asada')
surf = df['Burrito'].str.contains('surf')
carnitas = df['Burrito'].str.contains('carnitas')
# Later assignments win, so a name matching several keywords ends up in
# the last matching bucket; everything unmatched becomes 'Other'.
df.loc[california, 'Burrito'] = 'California'
df.loc[asada, 'Burrito'] = 'Asada'
df.loc[surf, 'Burrito'] = 'Surf & Turf'
df.loc[carnitas, 'Burrito'] = 'Carnitas'
df.loc[~california & ~asada & ~surf & ~carnitas, 'Burrito'] = 'Other'
# Drop some high cardinality categoricals
df = df.drop(columns=['Notes', 'Location', 'Reviewer', 'Address', 'URL', 'Neighborhood'])
# Drop some columns to prevent "leakage"
df = df.drop(columns=['Rec', 'overall'])
###Output
_____no_output_____
###Markdown
Do train/validate/test split. Train on reviews from 2016 & earlier. Validate on 2017. Test on 2018 & later.
###Code
df.head()
df['Date'] = pd.to_datetime(df['Date'], infer_datetime_format=True)
# Encode booleans (e.g. the 'Great' target) as 0/1 integers.
df = df.replace({False:0, True:1})
# Time-based split: train on 2016 & earlier, validate on 2017,
# test on 2018 & later.
train = df[df['Date'].dt.year < 2017]
validate = df[df['Date'].dt.year == 2017]
test = df[df['Date'].dt.year > 2017]
df['Great'].value_counts()
###Output
_____no_output_____
###Markdown
Begin with baselines for classification
###Code
target = 'Great'
y_train = train[target]
# Class balance of the training target.
y_train.value_counts(normalize=True)
# Majority-class baseline: always predict the most frequent class.
majority_class = y_train.mode()[0]
y_pred = [majority_class] * len(y_train)
y_pred
from sklearn.metrics import accuracy_score
# Baseline accuracy on train equals the majority-class proportion.
accuracy_score(y_train, y_pred)
# Same baseline evaluated on the validation split.
y_val = validate[target]
y_pred = [majority_class] * len(y_val)
accuracy_score(y_val, y_pred)
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Use scikit-learn for logistic regression.
###Code
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
features = ['Synergy', 'Uniformity', 'Tortilla']
X_train = train[features]
X_val = validate[features]
# Fit the mean-imputer on train only, then apply it to validation.
imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train)
X_val_imputed = imputer.transform(X_val)
# Same pattern for standardization: fit on train, transform validation.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_imputed)
X_val_scaled = scaler.transform(X_val_imputed)
log_reg = LogisticRegression()
log_reg.fit(X_train_scaled, y_train)
print('Validation Accuracy', log_reg.score(X_val_scaled, y_val))
###Output
Validation Accuracy 0.7764705882352941
###Markdown
Get your model's validation accuracy. (Multiple times if you try multiple iterations.)
###Code
y_test = test[target]
X_test = test[features]
# Bug fix: use transform, not fit_transform, on the test split. The
# imputer was already fitted on the training data; refitting here would
# impute with test-set means and leak test statistics into preprocessing
# (the scaler below already follows the correct fit-on-train pattern).
X_test_imputed = imputer.transform(X_test)
X_test_scaled = scaler.transform(X_test_imputed)
# Final, one-time test accuracy of the fitted logistic regression.
print('Test Accuracy', log_reg.score(X_test_scaled, y_test))
###Output
Test Accuracy 0.8421052631578947
|
Telco_Churn.ipynb | ###Markdown
Q1a. Demographics
###Code
# More non-senior citizens patronize the brand.
df.SeniorCitizen.value_counts()
df.gender.value_counts()
# Counts broken down by senior status and gender.
df.groupby(['SeniorCitizen','gender'])['gender'].count()
# Finer breakdown including partner and dependent status.
df.groupby(['SeniorCitizen','gender','Partner','Dependents']).agg({'gender':'count'}).sort_values(by='SeniorCitizen',ascending=True)
###Output
_____no_output_____
###Markdown
Q1a. Ans:Young Citizens (Male and Female) with no dependents nor partners Q1b: Customer Retention
###Code
df.query('Churn=="Yes"').groupby(['SeniorCitizen','gender','Partner','Dependents']).agg({'Churn':'count'}).sort_values(by='SeniorCitizen',ascending=True)
###Output
_____no_output_____
###Markdown
Q1b. Ans: Target especially Young Singles with no dependents or partners, as well as people without dependents. Q2a. Monthly Service Charges & higher Churning
###Code
# For each service type: total monthly charges among churned customers,
# shown as a table and then as a bar chart. The same table/figure
# pattern repeats for every service column below.
df.query('Churn=="Yes"').groupby(['PhoneService'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs1=df.query('Churn=="Yes"').groupby(['PhoneService'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "Phone Service Charges" , size = 14 )
x=dfsvs1.PhoneService;y=dfsvs1.MonthlyCharges
sns.barplot(x,y)
df.query('Churn=="Yes"').groupby(['MultipleLines'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs2=df.query('Churn=="Yes"').groupby(['MultipleLines'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "MultipleLine Service Charges" , size = 14 )
sns.barplot(dfsvs2.MultipleLines,dfsvs2.MonthlyCharges)
df.query('Churn=="Yes"').groupby(['InternetService'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs3=df.query('Churn=="Yes"').groupby(['InternetService'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "Internet Service Charges" , size = 14 )
sns.barplot(dfsvs3.InternetService,dfsvs3.MonthlyCharges)
df.query('Churn=="Yes"').groupby(['OnlineSecurity'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs4=df.query('Churn=="Yes"').groupby(['OnlineSecurity'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "Online Security Charges" , size = 14 )
sns.barplot(dfsvs4.OnlineSecurity,dfsvs4.MonthlyCharges)
# NOTE(review): dfsvs5 is reused for every remaining service; this
# whole cell could be factored into a small plotting helper.
df.query('Churn=="Yes"').groupby(['OnlineBackup'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs5=df.query('Churn=="Yes"').groupby(['OnlineBackup'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "Online Backup Charges" , size = 14 )
sns.barplot(dfsvs5.OnlineBackup,dfsvs5.MonthlyCharges)
df.query('Churn=="Yes"').groupby(['DeviceProtection'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs5=df.query('Churn=="Yes"').groupby(['DeviceProtection'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title("Device Protection Charges" , size = 14 )
sns.barplot(dfsvs5.DeviceProtection,dfsvs5.MonthlyCharges)
df.query('Churn=="Yes"').groupby(['TechSupport'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs5=df.query('Churn=="Yes"').groupby(['TechSupport'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "Tech Support Service Charges" , size = 14 )
sns.barplot(dfsvs5.TechSupport,dfsvs5.MonthlyCharges)
df.query('Churn=="Yes"').groupby(['StreamingTV'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs5=df.query('Churn=="Yes"').groupby(['StreamingTV'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "StreamingTV Service Charges" , size = 14 )
sns.barplot(dfsvs5.StreamingTV,dfsvs5.MonthlyCharges)
df.query('Churn=="Yes"').groupby(['StreamingMovies'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
dfsvs5=df.query('Churn=="Yes"').groupby(['StreamingMovies'],as_index=False).agg({'MonthlyCharges':'sum'}).sort_values(by='MonthlyCharges',ascending=False)
fig, ax = plt.subplots(figsize=(6, 6))
ax.set_title( "Streaming Movies Service Charges" , size = 14 )
sns.barplot(dfsvs5.StreamingMovies,dfsvs5.MonthlyCharges)
###Output
C:\Users\Puffs\anaconda3\lib\site-packages\seaborn\_decorators.py:36: FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
warnings.warn(
###Markdown
Q2a. Ans: Top 3 Services with increasing charges resulting in Churn:Internet Services (Fibre Optic), Phone Services and Multiple Lines Service Q2b. Phone Service Contract
###Code
df.columns
# Contract-type counts among retained phone-service customers.
df.query('Churn=="No"').groupby(['PhoneService','Contract']).agg({'Contract':'count'}).sort_values(by='PhoneService',ascending=False)
###Output
_____no_output_____
###Markdown
Q2b. Ans: Month-to-Month Contract Type is needed here Q3a.Customer Services
###Code
# Total monthly charges per service option among retained customers.
#df_bouquet=df.query('Churn=="No"').groupby(['PhoneService']).agg({'MonthlyCharges':'sum'})
df.query('Churn=="No"').groupby(['PhoneService']).agg({'MonthlyCharges':'sum'}).sort_values(by='PhoneService',ascending=False)
df.query('Churn=="No"').groupby(['InternetService']).agg({'MonthlyCharges':'sum'}).sort_values(by='InternetService',ascending=False)
df.query('Churn=="No"').groupby(['MultipleLines']).agg({'MonthlyCharges':'sum'}).sort_values(by='MultipleLines',ascending=False)
df.query('Churn=="No"').groupby(['OnlineSecurity']).agg({'MonthlyCharges':'sum'}).sort_values(by='OnlineSecurity',ascending=False)
df.query('Churn=="No"').groupby(['OnlineBackup']).agg({'MonthlyCharges':'sum'}).sort_values(by='OnlineBackup',ascending=False)
df.query('Churn=="No"').groupby(['DeviceProtection']).agg({'MonthlyCharges':'sum'}).sort_values(by='DeviceProtection',ascending=False)
df.query('Churn=="No"').groupby(['TechSupport']).agg({'MonthlyCharges':'sum'}).sort_values(by='TechSupport',ascending=False)
df.query('Churn=="No"').groupby(['StreamingTV']).agg({'MonthlyCharges':'sum'}).sort_values(by='StreamingTV',ascending=False)
df.query('Churn=="No"').groupby(['StreamingMovies']).agg({'MonthlyCharges':'sum'}).sort_values(by='StreamingMovies',ascending=False)
# Mean monthly charge per phone/multi-line combination, as a boxplot.
df_Service=df.query('Churn=="No"').groupby(['PhoneService','MultipleLines']).agg({'MonthlyCharges':'mean'}).sort_values(by='MonthlyCharges',ascending=False)
boxplot=df_Service.boxplot(column=['MonthlyCharges'])
###Output
_____no_output_____
###Markdown
Q3a. Ans: Basic 47.5, Plus=55, Premium=65 Q3b.Customer Services
###Code
# Payment-method counts for retained customers, split by paperless
# billing, then plotted as a bar chart.
df_bill=df.query('Churn=="No"').groupby(['PaperlessBilling']).agg({'PaymentMethod':'count'})
df_bill.reset_index(inplace=True)
df_bill
plt.figure(figsize=(6,6))
sns.barplot(x="PaperlessBilling", y="PaymentMethod", hue="PaperlessBilling", data=df_bill)
plt.xlabel("Paperless Billing")
plt.ylabel("Payment Method")
###Output
_____no_output_____
###Markdown
1. Business Understanding

Customer churn is one of the most challenging aspects of subscription-based businesses such as the telco industry. This dataset contains churn data for a fictional telco company with realistic data.

The business questions this notebook aims to address are the following:
1. How many customers churn?
2. What is the impact on expected revenue?
3. Which features are correlated with churn?
4. How well can a model predict customer churn?
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, preprocessing, svm
from sklearn.preprocessing import StandardScaler, Normalizer
import math
import matplotlib as mpl
import seaborn as sns
%matplotlib inline
# Load the Telco customer-churn dataset from the working directory.
df = pd.read_csv('WA_Fn-UseC_-Telco-Customer-Churn.csv')
df.head()
###Output
_____no_output_____
###Markdown
2.1 Overview of the Data
###Code
# How many rows and how many columns are in the dataset?
df.shape
# How do the continuous variables look?
df.describe()
# Overview of all columns.
df.info()
# Convert TotalCharges to numeric; unparseable entries become NaN.
df['TotalCharges'] = pd.to_numeric(df['TotalCharges'],errors='coerce')
# Are all customer ids unique?
df['customerID'].nunique() / df['customerID'].count() == 1
###Output
_____no_output_____
###Markdown
How big is the customer loss and what effects does this have on turnover?
###Code
# How many churners are in the dataset?
plt.figure(figsize=(20, 20))
plt.subplot(3, 2, 2)
sns.countplot('Churn',data=df,)
print("The Churn-Rate in percent is:" )
print(round((df['Churn'].value_counts()[1] / df['Churn'].count() * 100),2))
# Churners' share of monthly and total charges.
(df[df['Churn'] == 'Yes'][['MonthlyCharges','TotalCharges']].sum()) / (df[['MonthlyCharges','TotalCharges']].sum() )
# Monthly revenue retained from non-churning customers.
keep = df[df['Churn'] == 'No']['MonthlyCharges'].sum()
keep
###Output
_____no_output_____
###Markdown
How much money will the company lose in monthly income?
###Code
# Monthly revenue lost to churning customers.
loss = df[df['Churn'] == 'Yes']['MonthlyCharges'].sum()
loss
# What proportion of monthly revenue does the company lose?
loss/(keep+loss)*100
###Output
_____no_output_____
###Markdown
Visualization of churning custumers compared to their spendigs
###Code
# Charges distributions and categorical counts, split by churn status.
plt.figure(figsize=(20, 20))
plt.subplot(3, 2, 2)
df[df['Churn'] == 'No']['MonthlyCharges'].hist(bins=35, alpha=0.6, label='Churn=No')
df[df['Churn'] == 'Yes']['MonthlyCharges'].hist(bins=35, alpha=0.6, label='Churn=Yes')
plt.legend()
plt.xlabel('Monthly Charges')  # fixed typo: was 'Mothly Charges'
plt.subplot(3, 2, 1)
df[df['Churn'] == 'No']['TotalCharges'].hist(bins=35, alpha=0.6, label='Churn=No')
df[df['Churn'] == 'Yes']['TotalCharges'].hist(bins=35, alpha=0.6, label='Churn=Yes')
plt.legend()
plt.xlabel('Total Charges')
# Looking at some more variables.
plt.figure(figsize=(15, 15))
plt.subplot(3, 2, 1)
sns.countplot('MultipleLines', data=df, hue='Churn')
plt.subplot(3, 2, 2)
sns.countplot('TechSupport', data=df, hue='Churn')
plt.subplot(3, 2, 3)
sns.countplot('Partner', data=df, hue='Churn')
plt.subplot(3, 2, 4)
sns.countplot('Dependents', data=df, hue='Churn')
plt.subplot(3, 2, 5)
sns.countplot('PhoneService', data=df, hue='Churn')
plt.subplot(3, 2, 6)
sns.countplot('PaperlessBilling', data=df, hue='Churn')
# Removed an accidental duplicate of the subplot(3, 2, 6) +
# countplot('PaperlessBilling', ...) pair: it redrew the same axes and
# triggered the MatplotlibDeprecationWarning about reusing axes args.
###Output
/Applications/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:21: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
###Markdown
How well can we predict customer churn, in order to derive a campaign to retain our most valuable customers?

3. Data Preprocessing

Feature Engineering

Missing Values
###Code
# Checking for missing values.
df.isna().sum()
df.isnull()['TotalCharges'].mean() * 100  # Proportion of missings in percent
# Because of the small proportion of missings, rows with missing
# TotalCharges are deleted from the dataset (a mean imputation would
# also work).
df1 = df.dropna(subset =['TotalCharges'], axis=0 )
###Output
_____no_output_____
###Markdown
Handle Continuous Variables
###Code
# The three continuous features of the dataset.
cont_vars = df1[['tenure','TotalCharges','MonthlyCharges']]
cont_vars.head()
#Function that groups the tenure column into half years
def tenure_bin (t):
    """Bucket a tenure (in months) into half-year bins 1..7.

    Bin k covers months in (6*(k-1), 6*k] for k = 1..6; anything above
    36 months falls into bin 7, and values <= 0 fall into bin 1.
    """
    # ceil(t / 6) is the half-year index; clamp it into [1, 7].
    return min(max(math.ceil(t / 6), 1), 7)
###Output
_____no_output_____
###Markdown
Handle Discrete Variables
###Code
# Keep the modeling columns, then one-hot encode the categoricals
# (drop_first avoids the redundant dummy per category).
df1 = df1[['gender','SeniorCitizen','Partner','Dependents','PhoneService','PaperlessBilling','MultipleLines', 'InternetService', 'Contract', 'PaymentMethod', 'tenure','Churn','TotalCharges','MonthlyCharges']]
df1
df1_enc = pd.get_dummies(df1,drop_first = True)
df1_enc
###Output
_____no_output_____
###Markdown
Correlation Analysis
###Code
# Correlation heatmap over the full encoded feature set.
plt.figure(figsize=(15,12))
sns.heatmap(df1_enc.corr(),annot=True)
###Output
_____no_output_____
###Markdown
Variables that correlate with churn
###Code
# Correlation of every remaining feature with the churn indicator, as a horizontal bar chart.
df1_enc.drop('Churn_Yes', axis=1).corrwith(df1_enc['Churn_Yes']).plot(kind='barh', figsize=(9,7))
###Output
_____no_output_____
###Markdown
Model building (Logistic Regression)
###Code
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
# NOTE(review): StandardScaler is imported but never used below.
X = df1_enc.drop('Churn_Yes', axis=1)
y = df1_enc['Churn_Yes']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Fit the scaler on the training split only, then apply it to the other sets
# (avoids leaking test-set statistics into training).
scaler = MinMaxScaler()
X_train_std = scaler.fit_transform(X_train)
X_test_std = scaler.transform(X_test)
X_std = scaler.transform(X)
###Output
_____no_output_____
###Markdown
Model evaluation
###Code
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
    """Print accuracy, precision, recall, F1 and the confusion matrix for a fitted classifier.

    Parameters
    ----------
    clf : fitted estimator exposing ``predict``.
    X_train, y_train : training features / labels.
    X_test, y_test : test features / labels.
    train : if True, report on the training split; otherwise on the test split.
    """
    # Select the split to score; the report body is identical for both splits,
    # so compute it once instead of duplicating every print statement.
    if train:
        X, y = X_train, y_train
    else:
        X, y = X_test, y_test
    pred = clf.predict(X)
    print(f"Accuracy Score: {accuracy_score(y, pred) * 100:.2f}%")
    print("_______________________________________________")
    print("Classification Report:", end='')
    print(f"\tPrecision Score: {precision_score(y, pred) * 100:.2f}%")
    print(f"\t\t\tRecall Score: {recall_score(y, pred) * 100:.2f}%")
    print(f"\t\t\tF1 score: {f1_score(y, pred) * 100:.2f}%")
    print("_______________________________________________")
    print(f"Confusion Matrix: \n {confusion_matrix(y, pred)}\n")
from sklearn.linear_model import LogisticRegression
# L1-penalised logistic regression; liblinear supports the l1 penalty.
lr_classifier = LogisticRegression(solver='liblinear', penalty='l1')
lr_classifier.fit(X_train_std, y_train)
# Report metrics on both splits to gauge over/under-fitting.
print_score(lr_classifier, X_train_std, y_train, X_test_std, y_test, train=True)
print_score(lr_classifier, X_train_std, y_train, X_test_std, y_test, train=False)
# NOTE(review): plot_confusion_matrix / plot_roc_curve were removed in
# scikit-learn 1.2; newer versions use ConfusionMatrixDisplay.from_estimator
# and RocCurveDisplay.from_estimator instead.
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve
disp = plot_confusion_matrix(lr_classifier, X_test_std, y_test,
                             cmap='Reds', values_format='d',
                             display_labels=['Retention', 'Churn'])
disp = plot_roc_curve(lr_classifier, X_test_std, y_test)
###Output
_____no_output_____ |
Names.ipynb | ###Markdown
NamesThis project looks at the popularity of names from 1880 to 2017, showing naming trends through history. After exploring some of the features of pandas, I begin to ask questions about specific names, just out of curiosity. Next steps planned are at the end of the document.This project uses Python 3.5, numPy, matplotlib, pandas, and seaborn.
###Code
import numpy as np
import matplotlib.pyplot as pp
import pandas as pd
import seaborn
###Output
_____no_output_____
###Markdown
Have a look at the file to see the data format
###Code
# Preview the first lines of the raw 2017 file to see the data format.
# NOTE(review): the file handle is never closed; fine for a quick peek in a notebook.
file_path='data/yob2017.txt'
open(file_path,'r').readlines()[:5]
###Output
_____no_output_____
###Markdown
Load into dataframe
###Code
# The raw file has no header row, so supply the column names explicitly.
names2017 = pd.read_csv(file_path, names = ['Name', 'Gender', 'Num_Babies'])
names2017.head()
###Output
_____no_output_____
###Markdown
Read all years into a single array then concatenate into single dataframe
###Code
all_years = []
# Strip the trailing '2017.txt' so the year can be substituted per file.
loop_file_path = file_path[:-8]
for year in range(1880, 2017 + 1): # we have data files from 1880 to 2017
    all_years.append(pd.read_csv(loop_file_path + '{}.txt'.format(year),
                                 names = ['Name', 'Gender', 'Num_Babies']))
    # Tag each per-year frame with its year before concatenating.
    all_years[-1]['Year'] = year
all_names = pd.concat(all_years)
all_names.tail()
###Output
_____no_output_____
###Markdown
Groupby will segment the data into meaningful groups. For example, here we look at the number of Female & Male Names by Year
###Code
# Number of distinct name rows per (Gender, Year); unstack pivots Year into columns.
group_name = all_names.groupby(['Gender', 'Year'])
group_name.size().unstack()
###Output
_____no_output_____
###Markdown
This gives a sum of all the babies born in 2017 (by using our first dataset created just from 2017 data), but only for babies with names used at least 5 times. The table above counts only names, one count per name. The numbers below will count babies, not names.
###Code
# Total babies per gender in 2017 (sums Num_Babies, so this counts babies, not distinct names).
names2017.groupby(['Gender']).sum()
###Output
_____no_output_____
###Markdown
Pivot tables summarize data (sort, count, total, average). The pd.pivot_table() function needs the name of the dataframe, the data field to be grouped, and a field for each dimension that it will be grouped by.
###Code
# Pivot: values=Num_Babies, index=Name, columns=Year (default aggfunc is mean,
# so counts are averaged across genders for unisex names).
pd.pivot_table(all_names, 'Num_Babies', 'Name', 'Year')
###Output
_____no_output_____
###Markdown
The many missing values is to be expected based on the great increase we saw earlier in the number of names used over the years (from around 1,000 per gender in 1880 to over 14,000 in 2015).It is possible to have two fields in a column rather than one as columns and one as rows:
###Code
# Same data in long form: a (Name, Year) multi-index instead of Year columns.
pd.pivot_table(all_names, 'Num_Babies', ['Name', 'Year'])
###Output
_____no_output_____
###Markdown
To see the changing popularity of a given name, indexes need to be set and sorted. In pandas, data can be manipulated in multiple dimensions.
###Code
# Index by (Gender, Name, Year) and sort so .loc lookups are fast and unambiguous.
all_names_index = all_names.set_index(['Gender', 'Name', 'Year']).sort_index()
all_names_index
def name_plot(gender, name):
    ''' Plot the popularity of a name over time for one gender ('F' or 'M'). '''
    data = all_names_index.loc[gender, name] #rows for this gender/name; index is Year
    pp.plot(data.index, data.values) #plot baby counts against year
name_plot('F', 'Danica')
###Output
_____no_output_____
###Markdown
Look at trends over time across different names.
###Code
pp.figure(figsize = (18, 8)) # make the plot a bit bigger
names = ['Sammy', 'Jesse', 'Drew', 'Jamie']
for name in names:
    name_plot('F', name) # try first as female name
pp.legend(names) # add a legend
# NOTE(review): the male curves are drawn on the same axes, and the second
# legend call replaces the first, so the legend only labels four of the
# eight plotted lines.
for name in names:
    name_plot('M', name) # now try as male name
pp.legend(names) # add a legend
###Output
_____no_output_____
###Markdown
Who, after 1945, would name their child Adolf?
###Code
# Popularity of 'Adolf' as a male name over the full time range.
name_plot('M', 'Adolf')
pp.legend(['Adolf'])
###Output
_____no_output_____
###Markdown
Madonna went solo as a pop singer and became famous in 1981. This chart shows a spike in babies named "Madonna" soon after.
###Code
# Popularity of 'Madonna' as a female name over time.
name_plot('F', 'Madonna')
pp.legend(['Madonna'])
# Export all names starting with 'A' to CSV.
#data = all_names[all_names.Name.str.startswith('A')]
all_names[all_names.Name.str.startswith('A')].to_csv('NamesExport.csv', sep=',')
###Output
_____no_output_____
###Markdown
Visualize DataBy using pandas with other packages like matplotlib we can visualize data within our notebook.
###Code
# Index the data by Gender, then Name, then Year, and sort the index.
# NOTE: the column is named 'Gender' in this dataset (see the read_csv calls
# above) — indexing on 'Sex' raised a KeyError.
all_names_index = all_names.set_index(['Gender','Name','Year']).sort_index()
# multi - index
all_names_index
def name_plot(sex, name):
    """Plot the popularity over time of *name* for the given gender code ('F'/'M')."""
    data = all_names_index.loc[sex, name]
    pp.plot(data.index, data.values)
###Output
_____no_output_____
###Markdown
Type ALT + ENTER to run and move into the next cell. We can now call the function with the sex and name of our choice, such as F for female name with the given name Danica.
###Code
name_plot('F', 'Danica')
# Compare several names as female names on one figure.
pp.figure(figsize = (18, 8))
names = ['Sammy', 'Jesse', 'Drew', 'Jamie', 'Tyler']
for name in names:
    name_plot('F', name)
pp.legend(names)
# Same comparison as male names on a fresh figure.
# NOTE(review): 'Tylor' here vs 'Tyler' above — presumably a typo in one of
# the two lists; confirm which spelling was intended.
pp.figure(figsize = (18, 8))
names = ['Sammy', 'Jesse', 'Drew', 'Jamie', 'Tylor']
for name in names:
    name_plot('M', name)
pp.legend(names)
###Output
_____no_output_____ |
archived/Mega_Detector.ipynb | ###Markdown
Setup
###Code
# Colab environment setup for MegaDetector v4.1 (requires TensorFlow 1.x).
!yes | pip uninstall tensorflow
!pip install tensorflow-gpu==1.13.1 humanfriendly jsonpickle
import tensorflow as tf
print(tf.__version__)
# Download the frozen MegaDetector model and the two supporting repos.
!wget -O /content/megadetector_v4_1_0.pb https://lilablobssc.blob.core.windows.net/models/camera_traps/megadetector/md_v4.1.0/md_v4.1.0.pb
!git clone https://github.com/microsoft/CameraTraps
!git clone https://github.com/microsoft/ai4eutils
# Copy the batch-inference and visualization entry points into the working dir.
!cp /content/CameraTraps/detection/run_tf_detector_batch.py .
!cp /content/CameraTraps/visualization/visualize_detector_output.py .
import json
import os
import shutil
from pathlib import Path
from tqdm import tqdm
# Both repos must be importable by the scripts invoked below.
os.environ['PYTHONPATH'] += ":/content/ai4eutils"
os.environ['PYTHONPATH'] += ":/content/CameraTraps"
!echo "PYTHONPATH: $PYTHONPATH"
###Output
_____no_output_____
###Markdown
Get Raw Data
###Code
#@title Connect to Google Drive
from google.colab import drive
drive.mount('/content/drive')
google_drive_folder_name = 'sample' #@param {type: "string"}
images_dir = '/content/drive/My Drive/' + google_drive_folder_name
!ls "$images_dir"
# Create the output folder tree on Drive (no-ops if it already exists).
Path(f'{images_dir}/output').mkdir(exist_ok=True)
Path(f'{images_dir}/output/no_detections').mkdir(exist_ok=True)
Path(f'{images_dir}/output/with_detections_and_bb').mkdir(exist_ok=True)
Path(f'{images_dir}/output/with_detections').mkdir(exist_ok=True)
###Output
_____no_output_____
###Markdown
Run The Model
###Code
# Choose a location for the detector's output JSON file.
output_file_path = f'{images_dir}/output' + '/data.json'
# Run MegaDetector over every image under images_dir (recursively).
!python run_tf_detector_batch.py megadetector_v4_1_0.pb "$images_dir" "$output_file_path" --recursive
###Output
_____no_output_____
###Markdown
Get The Results
###Code
visualization_dir = '/content/viz' # local folder for the annotated (bounding-box) images
# Render every detection above the (deliberately low) 0.01 confidence threshold.
!python visualize_detector_output.py "$output_file_path" "$visualization_dir" --confidence 0.01 --images_dir "$images_dir"
def categorize(string):
    """Translate MegaDetector category codes in *string* to readable labels.

    '1' -> 'animal', '2' -> 'person', '3' -> 'vehicle'; every occurrence is
    replaced, other characters pass through unchanged.
    """
    for code, label in (('1', 'animal'), ('2', 'person'), ('3', 'vehicle')):
        string = string.replace(code, label)
    return string
# Load the detector's JSON results produced by run_tf_detector_batch.py.
with open(output_file_path) as j:
    data = json.load(j)
# The CameraTraps helpers use repo-relative imports, so cd in, import, cd back.
%cd CameraTraps
from data_management.annotations.annotation_constants import (
    detector_bbox_category_id_to_name)
from visualization import visualization_utils as vis_utils
%cd ..
# Walk all images with NO detections; optionally display them and/or copy
# them into the Drive output folder.
Path(f'{images_dir}/output/no_detections').mkdir(exist_ok=True)
display_images_here = False #@param {type: "boolean"}
if display_images_here:
    # Avoid flooding the notebook output with inline images.
    if len(data['images']) > 20:
        print('There are too many images to display! View the images on Google Drive.')
        display_images_here = False
copy_images_to_drive = False #@param {type: "boolean"}
for image in tqdm(data['images']):
    if not image['detections']:
        # NOTE(review): the resize is computed even when neither display nor
        # copy is enabled; it is only needed for display.
        im = vis_utils.resize_image(
            vis_utils.open_image(image['file']), 700)
        if display_images_here:
            display(im)
        if copy_images_to_drive:
            out_path = f'{images_dir}/output/no_detections/{Path(image["file"]).name}'
            if not Path(out_path).exists():
                shutil.copy2(image['file'], out_path)
# Report every image whose best detection clears the confidence threshold,
# and optionally copy both the annotated and original files to Drive.
Path(f'{images_dir}/output/with_detections_and_bb').mkdir(exist_ok=True)
Path(f'{images_dir}/output/with_detections').mkdir(exist_ok=True)
min_detection_conf_to_save = "0.5" #@param {type: "string"}
display_images_here = False #@param {type: "boolean"}
if display_images_here:
    if len(data['images']) > 20:
        print('There are too many images to display! View the images on Google Drive.')
        display_images_here = False
copy_images_to_drive = False #@param {type: "boolean"}
for image in data['images']:
    if image['detections']:
        if image['max_detection_conf'] >= float(min_detection_conf_to_save):
            print('-' * 79)
            print(image['file'])
            # Human-readable (category label, confidence) pairs for this image.
            res = [(categorize(x['category']), x['conf']) for x in image['detections']]
            for n, x in enumerate(res):
                print(f'{n + 1}. {x[0]} (conf: {x[1]})')
            # visualize_detector_output.py names annotated files by flattening
            # the source path ('/' -> '~') and prefixing 'anno_'.
            img_file = visualization_dir + '/anno_' + images_dir.replace('/', '~') + '~' + Path(image['file']).name
            im = vis_utils.resize_image(vis_utils.open_image(img_file), 700)
            if display_images_here:
                display(im)
            if copy_images_to_drive:
                # Copy the bounding-box image and the untouched original.
                out_path_with_bb = f'{images_dir}/output/with_detections_and_bb/{Path(img_file).name}'
                if not Path(out_path_with_bb).exists():
                    shutil.copy2(img_file, out_path_with_bb)
                out_path = f'{images_dir}/output/with_detections/{Path(image["file"]).name}'
                if not Path(out_path).exists():
                    shutil.copy2(image["file"], out_path)
###Output
_____no_output_____ |
Classifiers/Kernal_SVM/Kernal_SVM.ipynb | ###Markdown
**Kernel** Support Vector Machine Importing Libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing Dataset
###Code
# Upload the dataset CSV into the Colab runtime.
from google.colab import files
files.upload()
###Output
_____no_output_____
###Markdown
Splitting Dataset into X & Y
###Code
dataset = pd.read_csv('Social_Network_Ads.csv')
# All columns except the last are features; the last column is the label.
X = dataset.iloc[: , :-1].values
Y = dataset.iloc[: , -1].values
###Output
_____no_output_____
###Markdown
Splitting Dataset into Training & Test Set
###Code
from sklearn.model_selection import train_test_split
# 75/25 split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
###Output
_____no_output_____
###Markdown
Feature Scaling
###Code
from sklearn.preprocessing import StandardScaler
# Standardize features; fit only on the training split to avoid data leakage.
feat_scale = StandardScaler()
X_train = feat_scale.fit_transform(X_train)
X_test = feat_scale.transform(X_test)
###Output
_____no_output_____
###Markdown
Training the SVM model on Training Set
###Code
from sklearn.svm import SVC
# RBF (Gaussian) kernel SVM with a fixed seed.
classifier = SVC(kernel = 'rbf', random_state= 0)
classifier.fit(X_train, Y_train)
###Output
_____no_output_____
###Markdown
Predicting the Test Set Result
###Code
y_pred = classifier.predict(X_test)
# Show predicted vs. true labels side by side, one row per test sample.
print(np.concatenate((y_pred.reshape(len(y_pred), 1), Y_test.reshape(len(Y_test), 1)), 1))
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score
confusionMatrix = confusion_matrix(Y_test,y_pred)
dis = ConfusionMatrixDisplay(confusionMatrix, display_labels=classifier.classes_)
print(confusionMatrix)
print(accuracy_score(Y_test, y_pred))
dis.plot()
plt.show()
###Output
[[64 4]
[ 3 29]]
0.93
###Markdown
Visualization of Training Set Results
###Code
# Decision-boundary plot over the training set, in the original (unscaled)
# feature space; the classifier is applied to a dense grid of points.
from matplotlib.colors import ListedColormap
X_set, y_set = feat_scale.inverse_transform(X_train), Y_train
# NOTE(review): step 0.25 on the salary axis (range ~tens of thousands)
# produces a very large grid — this cell can be slow and memory-hungry.
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25),
                     np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))
# Grid points must be re-scaled before prediction, since the SVM was fit on scaled data.
plt.contourf(X1, X2, classifier.predict(feat_scale.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Support Vector Machine (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
###Markdown
Visualization of Test Set Results
###Code
# Same decision-boundary plot as above, but over the held-out test set.
from matplotlib.colors import ListedColormap
X_set, y_set = feat_scale.inverse_transform(X_test), Y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 0.25),
                     np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 0.25))
# Grid points are re-scaled before prediction (SVM was fit on scaled data).
plt.contourf(X1, X2, classifier.predict(feat_scale.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Support Vector Machine (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
###Output
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
|
Network_Flows_LP.ipynb | ###Markdown
Exploring network constraints on concentration dynamics Semidán Robaina EstévezReaction networks are models of (bio)chemical systems in which chemical species are interconverted through the action of chemical reactions. Typically, a reaction network is represented by its stoichiometrix matrix $S$, in which each entry $s_{ij}$ denotes the stoichiometric coefficients with which species $i$ participates in reaction $j$ — negative coefficients are assigned to substrates of the reaction, while positive to products. Since most species only participate in a small set of reactions, $S$ is typically sparse.Suppose that we are interested in evaluating the concentration dynamics of the chemical species in a homogeneous chemical system. We can model that with a system of ordinary differential equations:\begin{equation} \frac{d}{dt} x_i = \sum_j s_{ij} v_j,\label{eq:1}\tag{1}\end{equation}with $x_i$ representing the concentration of species $i$ and $v_j$ the flux through reaction $j$. However, to this end, we need to assign a reaction kinetic to the system. The simplest reaction kinetic is the [Mass Action Kinetic](https://en.wikipedia.org/wiki/Law_of_mass_action), which models a homogeneous, no enzymatically catalyzed system:\begin{equation} v_j = k_j \prod_{i\in Subs} x_i^{s_{ij}}. \label{eq:2} \tag{2}\end{equation}That is, the flux $v_j$ through a reaction is modelled as a monomial on the concentrations of the reaction substrates. Parameter $k_j$, the reaction rate constant, captures the speed at which the reaction takes place given fixed substrate concentatrions. There are, however, other reaction kinetics, such as the [Michaelis-Menten formalism](https://en.wikipedia.org/wiki/Michaelis%E2%80%93Menten_kinetics) and its variants, which model enzyme-catalyzed reactions.Reaction kinetics are crucial to investigate chemical dynamics. Unfortunately, the parameterization of such systems is not straighforward. 
Oftentimes, a large fraction of parameter values are not accessible, which makes their investigation complicated. However, in such escenarios, it may still be possible to evaluate how network and stoichiometric constraints affect concentration dynamics. Specifically, we can apply [constraint-based modeling techniques](https://en.wikipedia.org/wiki/Metabolic_network_modelling) to the linear system in eq \ref{eq:1}.In constraint-based metabolic modeling, a steady-state scenario is typically assumed, i.e, we have $Sv=0$. Thus, we can only interrogate the space of steady-state flux values for all reactions in a system. For instance, the most popular constraint-based metabolic modeling method is the so-called Flux Balance Analysis, which typically solves the following Linear Program (LP):\begin{align} \begin{aligned} &\max_v c^T v \\ &\mathrm{s.t.} \\ &Sv=0 \\ &v_{min} \le v \le v_{max}. \end{aligned}\label{eq:3}\tag{3}\end{align}In eq \ref{eq:3} a linear function of the reaction fluxes $v$ is optimized over the feasible space corresponding to the system at steady state with some boundary conditions on the reaction fluxes. However, there is nothing preventing us from exploring the feasible space around the steady state condition, i.e., using the constraint $\dot x_{min} \le Sv \le \dot x_{max}$ in which the dot notation represents the first derivative. Moreover, we can discretize time and define $x_{t+1} = x_t + \dot x_t$ for each fixed time period $t$. Hence, we could define variables $x_t$ representing the concentrations of chemical species at each time step, $t \in {0,\dots,n}$. 
This way, we would be able to explore the feasible space of concentration values of our system in eq \ref{eq:1}, provided suitable bounds are given for each variable and, of course, only for a time-discretized system.Putting together the previous ideas, we arrive at the LP:\begin{align} \begin{aligned} &Z = \max_{v_t,\dot x_t,x_t} \sum_{t\,=\,0}^{t_f}\phi(x_t) \\ &\mathrm{s.t.} \\ &1.\;Sv_t = \dot x_t \\ &2.\;x_{t+1} = x_t + \dot x_t \\ &3.\;v^{lb}_t \leq v_t \leq v^{ub}_t \\ &4.\;\dot x^{lb}_t \leq \dot x_t \leq \dot x^{ub}_t \\ &5.\;x_t \geq 0 \\ &t = \{0,\dots,t_f\}. \end{aligned} \label{eq:4} \tag{4}\end{align}In LP \ref{eq:4}, we maximize the sum of linear function of the concentrations, $x_t$, over all time steps $t$. For instance, we could maximize the total concentration of chemical species $x_{it}$ over all time steps, i.e., the discrete equivalent of the integral over the time period. However, we can modify the objective function at our convinience. Note that we impose bounds on the derivatives at each time step, $\dot x_t$. These constraints are crucial to render the feasible space of LP \ref{eq:4} realistic, i.e., constraining the increase or decrease in concentration that the system can maintain in each time step. To further render the feasible space more realistic, we can add a constraint to control the change in flux values between time steps: $v_{t + 1} = v_t + \delta$, with $\delta_{min} \leq \delta \leq \delta_{max}$. In this manner, we impose a notion of continuity between time steps, avoiding large jumps in flux values between time steps. Exploring the optimal space of concentration dynamicsSolving LP \ref{eq:4} will render a single optimal solution. However, the system will most likely be proned to host a space of alternative optimal solutions, a situaltion that is common in constraint-based metabolic modeling setups. We can explore the space of alternative optimal concentration trajectories in two ways. 
On the one hand, we can compute the minimum and maximum concentration values for each chemical species along the trajectory. On the other hand, we can randomly sample the space of alternative optimal concentration trajectories, e.g, to conduct statistical analyses on them.First, let's adapt LP \ref{eq:4} to compute the concentration bounds along the trajectory. Specifically, we need to solve the following two LPs for each $x_{it},\;i\in \{1,\dots,m\},\;t\in\{t_0,\dots,t_f\}$ to compute the maximum and minimum concentrations for each time step:\begin{align} \begin{aligned} & x^{\mathrm{min}}_{it},\; x^{\mathrm{max}}_{it} = \min_{v_t,\dot x_t,x_t} x_{it}, \;\max_{v_t,\dot x_t,x_t} x_{it} \\ &\mathrm{s.t.} \\ &1.\;Sv_t = \dot x_t \\ &2.\;x_{t+1} = x_t + \dot x_t \\ &3.\;v^{lb}_t \leq v_t \leq v^{ub}_t \\ &4.\;\dot x^{lb}_t \leq \dot x_t \leq \dot x^{ub}_t \\ &5.\;x_t \geq 0 \\ &6.\;\sum_{t\,=\,0}^{t_f}\phi(x_t) = Z \\ &t = \{0,...,t_f\}, \end{aligned} \label{eq:5} \tag{5}\end{align}where $Z$ corresponds to the optimal value of the objective function in LP \ref{eq:4}. Now that we can compute the concentration bounds in the optimal solution space, we can proceed to generate a random sample of optimal concentration trajectories. To this end, we first generate a random vector of concentration trajectories, $x_{\mathrm{rand}}$, and then find the closest point in the optimal soluction space. To this end, we can employ the first norm: $\epsilon = ||x - x_{\mathrm{rand}}||_1 = \sum_k |x_k - x_{\mathrm{rand}k}|$. However, to facilitate the computation, we will employ the transformation: $\epsilon^+ - \epsilon^- = ||x - x_{\mathrm{rand}}||_1$, with $\epsilon^+, \epsilon^- \ge 0$. 
The solution to the following LP generates a random trajectories which achieve the same optimal value, $Z$, of LP \ref{eq:4}:\begin{align} \begin{aligned} &\min_{\substack{v_t,\dot x_t,x_t,\\ \epsilon_t^+,\epsilon_t^-}} \sum_{i=1}^{m} \sum_{t=0}^{t_f+1} (\epsilon_{it}^+ + \epsilon_{it}^-) \\ &\mathrm{s.t.} \\ &1.\;Sv_t = \dot x_t \\ &2.\;x_{t+1} = x_t + \dot x_t \\ &3.\;v^{lb}_t \leq v_t \leq v^{ub}_t \\ &4.\;\dot x^{lb}_t \leq \dot x_t \leq \dot x^{ub}_t \\ &5.\;x_t \geq 0 \\ &6.\;\sum_{t\,=\,0}^{t_f}\phi(x_t) = Z \\ &7.\;x_{t} - x_{\mathrm{rand}_t} = \epsilon_{t}^+ - \epsilon_{t}^- \\ &8.\;\epsilon_t^+,\;\epsilon_{t}^+ \geq 0 \\ &t = \{0,\dots,t_f\}. \end{aligned} \label{eq:6} \tag{6}\end{align}We just need to repeat the process of generating $x_{\mathrm{rand}}$ and solving LP \ref{eq:6} $n$ times to collect a sample of size $n$ of alternative optimal concentration trajectories for our chemical system. An illustrationLet's exemplify the methods presented in the previous section with the following chemical network:with chemical species, $A,B,C,D,E,F$ and reactions $v_1,v_2,v_3,v_4,v_5$, which has stoichiometric matrix:$$S = \begin{pmatrix} -1 & 0 & 0 & -1 & 0\\ -1 & 0 & 0 & 0 & 0\\ 2 & -1 & 1 & 0 & -1\\ 0 & 1 & 0 & 0 & 0\\ 0 & 0 & -1 & 1 & 1\\ 0 & 1 & 0 & 0 & 0\\end{pmatrix}.$$We will use LPs \ref{eq:4},\ref{eq:5},\ref{eq:6} to explore the alternative optimal space resulting from maximizing the total concentration, i.e., sum over all time steps, of species $C$.
###Code
import numpy as np
from trajectoryLP import NetworkFlow
# Stoichiometric matrix: rows are species A-F, columns are reactions v1-v5.
S = np.array([
    [-1,0,0,-1,0],
    [-1,0,0,0,0],
    [2,-1,1,0,-1],
    [0,1,0,0,0],
    [0,0,-1,1,1],
    [0,1,0,0,0]
])
var_names = ['A', 'B', 'C', 'D', 'E', 'F']
flux_names=['v1', 'v2', 'v3', 'v4', 'v5']
# Define initial conditions (one concentration per species, in var_names order).
var_x0 = [10, 5, 5, 1, 2, 2]
# Maximize total concentration of C over 100 time steps; derivative bounds
# +/-10 and a max flux change of 0.1 between steps (continuity constraint).
Network = NetworkFlow(S, obj_x='C', n_steps=100,
                      x_names=var_names, x_0=var_x0,
                      xp_min=-10, xp_max=10, v_names=flux_names,
                      v_delta_max=0.1)
Network.solve(verbose=False)
# Bound, then sample, the alternative-optima space of trajectories.
Network.findAlternativeOptimaBounds()
Network.sampleAlternativeOptimaSpace(n_samples=500)
Network.plotXSolution('A')
Network.plotXSolution('B')
Network.plotXSolution('C')
Network.plotXSolution('D')
Network.plotXSolution('E')
Network.plotXSolution('F')
###Output
_____no_output_____ |
_build/html/_sources/materials/CL-Answers/CL1-Tooling.ipynb | ###Markdown
Coding Lab 1: Tech Setup & ToolingWelcome to the first coding lab!CodingLab labs are meant to be interactive - so, feel free to find another person to work together with on your CodingLab notebooks. For this lab, it's really important that you're comfortable with all of the tools we introduce here, as we'll be using them throughout the quarter. So, while you should feel free to consult with your classmates, you'll want to be sure you carry out each part on your own. If you have a question about how something works / what something does - try it out, and see what happens! If you get stuck, consult your classmates. If you're still stuck, your instructional staff are there to help! Reminders: - **PLEASE DO NOT CHANGE THE NAME OF THIS FILE.**- **PLEASE DO NOT COPY & PASTE OR DELETE CELLS INLCUDED IN THE ASSIGNMENT.** (Adding new cells is allowed!) Part 1: JupyterThis is a Jupyter Notebook! They are a very helpful learning tool because they allow plain text (like this!) and code (coming up!) to be combined in a single document.The notes presented in lecture are Jupyter Notebooks. Your CodingLabs will be in Jupyter Notebooks. And, your assignments will be completed in Jupyter Notebooks. So, you'll want to get very comfortable with working within a notebook. Cells The operational unit of the notebook is the cell.Cells are primarily either text (Markdown) or code. If you click on this cell, you should see in the menu above that it says "Markdown" YOUR TURN: Add a new cellSingle click on this cell. Then, click the '+' icon on the toolbar at the top to add a new cell below this one. The cell you just added above is a code cell by default. You can tell by the `In [ ]:` to the left of the cell and the fact that the drop-down box above says "Code"Use that drop-down menu to change the type of that cell you just created to be a text cell. Type the following in that cell "Learning to program in Python makes me ..." 
and finish the sentence with how you feel about learning to program in python. YOUR TURN: Editing Text CellsTo edit the text in this cell, double-click on it.Add information below about yourself to get practice with editing text cells.Name: Professor PID: A1234567 College: ERC Major: All things Python! MarkdownAs discussed in lecture, these cells are formatted using [Markdown syntax](https://www.markdownguide.org/basic-syntax/). Edit the text in the cell below so that it has the formatting specified by the text. For example, if the text said:This sentence is bold.You would edit the text so that it was bold:**This sentence is bold.**Note that to see the formatting changes, you'll have to run the cell using the Run icon above (or more simply use 'Shift + Enter' on your keyboard. YOUR TURN: Edit this text This is a heading level 4 (H4)*This sentence is italicized.*Below is a bulleted list:- bullet item 1 - bullet item 2- bullet item 3Below is a numbered list:1. list item 11. list item 21. list item 3***This sentence is bold AND italic.*** Code CellsBelow this cell, you see a code cell. In it you will see ```python YOUR CODE HEREraise NotImplementedError()```Any time you see this in a coding lab, assignment, or exam, you'll replace it with your answer.Code is added to cells, but they have to be Run (executed) for the code to be processed.Type `x = 3` in the code cell below. Then, click "Run" from the menu above of press 'Shift + Enter' on your keybood to execute that code.
###Code
### BEGIN SOLUTION
x = 3
### END SOLUTION
# This should not give any output if you did the above.
# It checks that x exists and is truthy; it does NOT check the exact value.
# NOTE(review): `assert x` would also fail for x = 0, even though x exists.
assert x
###Output
_____no_output_____
###Markdown
After running the code cell above, there will be a number between the brackets to the left of the cell. Each time you run a code cell, this number will increase by 1. Run the code cell below to see what we mean.
###Code
# Each run of a code cell increments the bracketed execution counter.
y = 29
###Output
_____no_output_____
###Markdown
One thing that sometimes trips users up is the fact that cells do NOT have to be run in order from top to bottom. Python remembers whatever was executed most recently. To see what we mean, run the cell below and see what it returns:
###Code
# Displays the most recently assigned value of x — cells need not run top-to-bottom.
x
###Output
_____no_output_____ |
01a - Example of Phase-Plane Analysis.ipynb | ###Markdown
Introduction Consider the following 2nd order system (Mass-Damper-Spring):$\ddot{x} + \dot{x} = 3x + x^2 = 0 \Longleftrightarrow \ddot{x} + c \dot{x} + f(x) = 0$$c$ is the dampening force and $f(x)$ is the non-linear spring. The system can be placed into the $\dot{\underline{x}} = \underline{f}(\underline{x})$ form:$\begin{cases} \dot{x}_1 = x_2 \\ \dot{x}_2 = -3x_1 - x_1^2 - x_2\end{cases}$ Analysis of the system: ManualAnalysis of the system $\Rightarrow$ Phase Plane $(x_1, x_2)$: Step 1: Find the Equillibrium Points:$\begin{cases} f_1(x_1, x_2) = 0 \\ f_2(x_2, x_2) = 0\end{cases} \Rightarrow\begin{cases} x_2 && = 0 \\ -3x_1 - x_1^2 - x_2 && = 0 \, \Rightarrow \, -3x_1 - x_1^2 = 0\end{cases}$$\begin{cases} x_2 = 0 \\ x_1(-3 - x_1) = 0\end{cases} \Rightarrow$ 2 equillibrium points $\Rightarrow\begin{cases} \underline{x}_1 = [0, 0]^T \\ \underline{x}_2 = [-3, 0]^T\end{cases}$ Step 2: Linearize the System Around the Equillibrium Points:$\displaystyle\dot{\underline{x}} = \underline{f}(\underline{x}) \Rightarrow \approx \underline{f}(\underline{x}^*) + \left . \frac{\partial \underline{f}}{\partial \underline{x}} \right |_{\underline{x}^*} (\underline{x} - \underline{x}^*)$$\displaystyle A = \left . \frac{\partial\underline{f}}{\partial\underline{x}} \right |_{\underline{x}_{1, \, 2}^*} = \begin{bmatrix} 0 && 1 \\ -3 - 2x_1 && -1\end{bmatrix}_{\underline{x}_1^*, \, \underline{x}_2^*}$ For $\underline{x} = \underline{x}_1$ We have the following system:$\dot{\underline{x}} = A_1 \underline{x} \Rightarrow A_1 = \begin{bmatrix} 0 && 1 \\ -3 && -1\end{bmatrix}$Look for the behavior of the system near $\underline{x}_1 = \underline{0}$. 
e-value analysis:$\det (A_1 - \lambda I) = \begin{vmatrix} -\lambda && 1 \\ -3 && -1 - \lambda\end{vmatrix}= \lambda \big(\lambda + 1\big) + 3 = 0$$\Rightarrow \lambda^2 + \lambda + 3 = 0 \Rightarrow \lambda_{12} = -\frac{1}{2} \left( 1 \pm \sqrt{1 - 12}\right) = -\frac{1}{2} \left( 1 \pm \sqrt{11} j\right)$ For $\underline{x} = \underline{x}_2$We have the following system:$\dot{\underline{x}} = A_2 \underline{x} \Rightarrow A_2 = \begin{bmatrix} 0 && 1 \\ 3 && -1\end{bmatrix}$Look for the behavior of the system near $\underline{x}_2 = \underline{0}$. e-value analysis:$\left|(A - \lambda I ) \right| = \begin{vmatrix} -\lambda && 1 \\ 3 && -1 - \lambda\end{vmatrix} = 0$$\Rightarrow \lambda \left(\lambda + 1 \right) - 3 = 0 \Rightarrow \, \lambda^2 + \lambda - 3 = 0$$\lambda = -\frac{1}{2} \left ( 1 \pm \sqrt{1 + 12} \right) = - \frac{1}{2} \pm \frac{1}{2} \sqrt{13}$Note that both solutions of $\lambda$ are REAL! Therefore:$\begin{matrix} \lambda_1 = -\frac{1}{2} \left( 1 - \sqrt{13} \right ) > 0 \\ \lambda_2 = -\frac{1}{2} \left( 1 + \sqrt{13} \right ) < 0\end{matrix} \Rightarrow \text{ Unstable } \Rightarrow \underline{\text{Saddle Point}}$Compute the e-vectors $\underline{v}_1,\, \underline{v}_2$ to determine the direction of convergence Step 3: Put Everything Together: !!!!! TODO: Insert Graphic !!!!! Analysis of the System: Python Given that:$\begin{cases} \dot{x}_1 = f_1(x_1, x_2) = x_2 \\ \dot{x}_2 = f_2(x_2, x_2) = -3x_1 - x_1^2 - x_2\end{cases}$We can define two variables to these functions:
###Code
# Symbolic right-hand sides of the system: x1' = f_1, x2' = f_2.
f_1 = x_2
f_2 = -3 * x_1 - x_1 ** 2 - x_2
###Output
_____no_output_____
###Markdown
We can solve the system of equations using the `solve` function.
###Code
# Equilibrium points: solve f_1 = f_2 = 0 simultaneously.
slns = sp.solve([f_1, f_2])
slns
###Output
_____no_output_____
###Markdown
We get the same solutions that we found previously. To linearize the system, we need to take the derivatives of each function with respect to $x_1$ and $x_2$. We'll compile this into a matrix A_1 like so:
###Code
# Jacobian of (f_1, f_2) with respect to (x_1, x_2), for linearization.
A = sp.Matrix([[sp.diff(f_1, x_1), sp.diff(f_1, x_2)], [sp.diff(f_2, x_1), sp.diff(f_2, x_2)]])
A
###Output
_____no_output_____
###Markdown
Now we let $\dot{\underline{x}} = A_1 \, \underline{x}$. Therefore `A_1` would be:
###Code
# Jacobian evaluated at the first equilibrium point (x_1, x_2) = (0, 0).
A_1 = sp.Matrix([[0, 1], [-3, -1]])
A_1
###Output
_____no_output_____
###Markdown
Next we find the determinate of $A_1 - \lambda I$
###Code
# Characteristic polynomial det(A_1 - λI); its roots are the eigenvalues.
λ_fun = sp.det(A_1 - λ * sp.eye(2))
λ_fun
sp.solve(λ_fun)
###Output
_____no_output_____
###Markdown
Analysis of the System: Numpy Meshgrid
###Code
import numpy as np
from matplotlib import pyplot as plt

# Direction-field (quiver) plot of the nonlinear system
#   x1' = x2,  x2' = -3*x1 - x1**2 - x2
# sampled on a 10x10 grid over [-0.5, 0.5] x [-0.5, 0.5].
axis_samples = np.linspace(-.5, .5, 10)
x1, x2 = np.meshgrid(axis_samples, axis_samples)
x1dot = x2
x2dot = -3 * x1 - x1 ** 2 - x2
plt.figure()
plt.quiver(x1, x2, x1dot, x2dot)
plt.show()
# Interactive version of the quiver plot: ipywidgets sliders control the
# plotting window and grid density. `widgets` is presumably ipywidgets,
# imported in a cell outside this excerpt -- TODO confirm upstream.
@widgets.interact (
    x_start=(-10.0, 10.0, 0.1),
    x_stop=(-10.0, 10.0, 0.1),
    y_start=(-10.0, 10.0, 0.1),
    y_stop=(-10.0, 10.0, 0.1),
    space=(10, 50)
)
def inter_plot(x_start, x_stop, y_start, y_stop, space):
    """Draw the direction field of x1' = x2, x2' = -3*x1 - x1**2 - x2
    on a `space` x `space` grid over [x_start, x_stop] x [y_start, y_stop]."""
    x1, x2 = np.meshgrid(np.linspace(x_start, x_stop, space), np.linspace(y_start, y_stop, space))
    x1dot = x2
    x2dot = -3 * x1 - x1 ** 2 - x2
    plt.figure()
    plt.quiver(x1, x2, x1dot, x2dot)
    plt.show()
###Output
_____no_output_____ |
Counter_Current_Heat_Exchanger.ipynb | ###Markdown
Counter-Current Heat Exchanger at Steady State — Rajas Mehendale (18CHE160, TY B Chem Engg)
###Code
import scipy
import numpy as np
from scipy.integrate import quad
import scipy.optimize
import scipy.interpolate
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
from IPython.display import display
# Notebook display setup: render figures as SVG and use a bright color style.
%config InlineBackend.figure_format = 'svg'
style.use("seaborn-bright")
###Output
_____no_output_____
###Markdown
Hot side$$ \frac{dH^H}{dz} = - \frac{UP}{m_H} (T_H - T_C)$$Cold side$$ \frac{dH^C}{dz} = - \frac{UP}{m_C} (T_H - T_C)$$Boundary Conditions$$ T_H|_{(z=0)} = T_H^{in}$$$$ T_C|_{(z=L)} = T_C^{in}$$ Hot Fluid - 1-nonanolCold Fluid - Water
###Code
def integrand(T,cp):
    """Evaluate a polynomial heat-capacity correlation at temperature T.

    Computes cp[0] + cp[1]*T + cp[2]*T**2 + ... for any number of
    coefficients (the original hard-coded exactly five terms; for a
    five-element cp the result is unchanged). The (T, args) signature makes
    it directly usable with scipy.integrate.quad -- presumably the intent,
    given the `quad` import above.

    Parameters
    ----------
    T : float or numpy array
        Temperature(s) at which to evaluate the polynomial.
    cp : sequence of float
        Polynomial coefficients, lowest order first.

    Returns
    -------
    Polynomial value(s), same shape as T.
    """
    I = sum(c * T ** k for k, c in enumerate(cp))
    return I
def H_H(T):  # T in K
    """Specific enthalpy of the hot fluid (1-nonanol) relative to 273.16 K.

    Analytically integrates the 5-term polynomial heat-capacity correlation
    from 273.16 K to T, then divides by 144.225 (the 1-nonanol molar mass)
    to put the result on a per-mass basis -- the plot below labels it J/kg.
    """
    # Polynomial heat-capacity coefficients for 1-nonanol; the integral of
    # cp[i]*x**i from 0 to t is cp[i]/(i+1) * t**(i+1).
    cp = [10483000, -115220, 476.87, -0.85381, 0.00056246]
    t = T - 273.16
    terms = [
        (cp[0] / 1) * np.power(t, 1),
        (cp[1] / 2) * np.power(t, 2),
        (cp[2] / 3) * np.power(t, 3),
        (cp[3] / 4) * np.power(t, 4),
        (cp[4] / 5) * np.power(t, 5),
    ]
    return (terms[0] + terms[1] + terms[2] + terms[3] + terms[4]) / 144.225
def H_C(T):  # T in K
    """Specific enthalpy of the cold fluid (water) relative to 273.16 K.

    Same construction as H_H: term-by-term analytic integral of the
    polynomial heat-capacity correlation, divided by 18.015 (water molar
    mass) so the result is per unit mass (J/kg per the plot below).
    """
    # Polynomial heat-capacity coefficients for water.
    cp = [276370, -2090.1, 8.125, -0.014116, 9.3701E-06]
    dT = T - 273.16
    H = 0.0
    for i, c in enumerate(cp):
        # Integral of c*x**i from 0 to dT is c/(i+1) * dT**(i+1).
        H = H + (c / (i + 1)) * np.power(dT, i + 1)
    return H / 18.015
# Plot both enthalpy curves over 275-425 K to compare the fluids' H(T).
T = np.linspace(275, 425, 2000);
plt.figure()
plt.plot(T, H_H(T), 'r', label="Hot Fluid");
plt.plot(T, H_C(T), 'b', label="Cold Fluid");
plt.legend()
plt.xlabel("Temperature (K)", fontsize=10);
plt.ylabel(r"$H \ (\frac{J}{kg})$", fontsize=10);
plt.xlim([275,425]);
plt.grid();
#plt.ylim([1400,5000]);
# Inverse maps T(H) for each fluid: linear (k=1), exact-fit (s=0) splines
# through the sampled (H, T) pairs. Note HeatX.initialize() later rebuilds
# these on the instance over the actual operating range.
T_H = scipy.interpolate.UnivariateSpline(H_H(T), T, k=1, s=0)
T_C = scipy.interpolate.UnivariateSpline(H_C(T), T, k=1, s=0)
def model(SV, z, heatx):
    """ODE right-hand side for the counter-current exchanger (odeint form).

    SV holds the hot- and cold-stream enthalpies at axial position z
    (z itself is unused but required by odeint's callback signature);
    heatx supplies U, P, the mass flows, and the inverse enthalpy->
    temperature splines. Returns [dH_H/dz, dH_C/dz].
    """
    hot_enthalpy, cold_enthalpy = SV
    # Local driving force: temperature difference recovered from enthalpies.
    delta_T = heatx.T_H(hot_enthalpy) - heatx.T_C(cold_enthalpy)
    dhot = -heatx.U * heatx.P / heatx.mH * delta_T
    dcold = -heatx.U * heatx.P / heatx.mC * delta_T
    return [dhot, dcold]
def shoot(T_Cout, heatx):
    """Residual for the shooting method on the cold-side boundary condition.

    Takes a guess for the cold stream temperature at z = 0 (its outlet),
    integrates the exchanger ODEs from z = 0 to z = L, and returns the
    mismatch between the resulting cold-side temperature at z = L and the
    specified inlet heatx.T_Cin.

    NOTE: stores the guess on heatx.T_Cout as a side effect; HeatX.solve()
    relies on this to read back the converged value.
    """
    heatx.T_Cout = T_Cout
    initial_state = [H_H(heatx.T_Hin), H_C(heatx.T_Cout)]
    trajectory = scipy.integrate.odeint(
        model,
        initial_state,
        [0, heatx.L],
        args=(heatx,),
    )
    # Cold-stream enthalpy at z = L, converted back to temperature.
    T_Cin_shot = heatx.T_C(trajectory[-1, 1])
    return [T_Cin_shot - heatx.T_Cin]
class HeatX:
    """Steady-state counter-current heat exchanger solved by shooting:
    guess the cold outlet temperature at z = 0, integrate the enthalpy
    ODEs to z = L, and adjust the guess until the cold inlet boundary
    condition is satisfied (via the module-level shoot/model functions)."""
    def __init__(self):
        # Default design/operating parameters; the scripts below override
        # these attributes before calling solve().
        self.U = 400.0 #W/m2-K
        self.P = 0.2 #m2/m
        self.L = 4 #m
        self.mH = 8.0 #kg/s
        self.mC = 8.0 #kg/s
        self.T_Hin = 50.0+273.16 #K
        self.T_Cin = 5.0+273.16#K
    def initialize(self):
        """Build inverse enthalpy->temperature splines (linear, exact fit)
        for both streams over the operating temperature range."""
        T = np.linspace(self.T_Cin, self.T_Hin, 1000)
        self.T_H = scipy.interpolate.UnivariateSpline(H_H(T), T, k=1, s=0)
        self.T_C = scipy.interpolate.UnivariateSpline(H_C(T), T, k=1, s=0)
    def solve(self, n = 2000):
        """Solve the boundary-value problem; store the temperature profiles
        in self.solutiondf and the delT_in/delT_out/lmtd diagnostics.
        n is the number of axial grid points for the final profile."""
        self.initialize()
        guess = [self.T_Cin + 0.0]
        # The return value is deliberately unused: shoot() stores the
        # converged cold outlet temperature on self.T_Cout as a side effect.
        lsq = scipy.optimize.least_squares(shoot, guess, args = (self,))
        # Re-integrate once on a fine grid using the converged T_Cout.
        SV0 = [H_H(self.T_Hin), H_C(self.T_Cout)]
        z = np.linspace(0,self.L, n)
        solution = scipy.integrate.odeint(
            model,
            SV0,
            z,
            args = (self,)
        )
        H_Hsol = solution[:,0]
        H_Csol = solution[:,1]
        # Terminal temperature differences and log-mean temperature difference.
        # NOTE(review): lmtd divides by log(delT_in/delT_out) and will fail
        # when the two terminal differences are equal -- confirm the chosen
        # operating points avoid that case.
        self.delT_in = self.T_H(H_Hsol[0]) - self.T_C(H_Csol[0])
        self.delT_out = self.T_H(H_Hsol[-1]) - self.T_C(H_Csol[-1])
        self.lmtd = (self.delT_in-self.delT_out)/np.log(self.delT_in/self.delT_out)
        self.solutiondf = pd.DataFrame({
            "z":z,
            "T_H":self.T_H(H_Hsol),
            "T_C":self.T_C(H_Csol)
        })
    def heatx_plots(self):
        """Plot both temperature profiles along the exchanger and annotate
        the terminal temperature differences and the LMTD."""
        solutiondf = self.solutiondf
        # NOTE: plt.figure() returns a Figure, not an Axes; Figure.text is
        # what gets called below, so the name `ax` is misleading.
        ax = plt.figure()
        plt.plot(solutiondf.z, solutiondf.T_H, 'r', label=r"Hot fluid side $\rightarrow$")
        plt.plot(solutiondf.z, solutiondf.T_C, 'b', label=r"Cold fluid side $\leftarrow$")
        plt.legend(fontsize=10)
        plt.xlabel("Length axis (z=0 to z= %.1f m)" %(self.L))
        plt.ylabel("Temperature (K)")
        plt.grid()
        plt.xlim([0, self.L])
        plt.ylim([270, 330])
        textstr =("Temp Difference (K)\n \n"+
            "@(z=0): %.1f\n" %(self.delT_in)+
            "@(z=%.1f): %.1f\n" %(self.L, self.delT_out)+
            "LMTD: %.1f" %(self.lmtd))
        props = dict(boxstyle='round', facecolor='gold', alpha=0.5)
        ax.text(0.95, 0.5, textstr, fontsize=10,
            verticalalignment='top', bbox=props);
# Scenario 1: balanced, very low flow rates on both sides.
heatx = HeatX()
heatx.mH = 0.01 #kg/s
heatx.mC = 0.01 #kg/s
heatx.T_Hin = 50.0+273.16 #K
heatx.T_Cin = 10.0+273.16#K
heatx.solve()
heatx.heatx_plots()
# Scenario 2: hot flow dominates (locbox is set but never read by HeatX).
heatx = HeatX()
heatx.locbox = [0.5,0.5]
heatx.mH = 0.6 #kg/s
heatx.mC = 0.2 #kg/s
heatx.T_Hin = 50.0+273.16 #K
heatx.T_Cin = 5.0+273.16#K
heatx.solve()
heatx.heatx_plots()
# Scenario 3: cold flow dominates.
heatx = HeatX()
heatx.mH = 0.1 #kg/s
heatx.mC = 5.0 #kg/s
heatx.T_Hin = 50.0+273.16 #K
heatx.T_Cin = 5.0+273.16#K
heatx.solve()
heatx.heatx_plots()
# Scenario 4: hot flow dominates with a cooler hot inlet.
heatx = HeatX()
heatx.mH = 4.0 #kg/s
heatx.mC = 0.1 #kg/s
heatx.T_Hin = 30.0+273.16 #K
heatx.T_Cin = 5.0+273.16#K
heatx.solve()
heatx.heatx_plots()
###Output
_____no_output_____ |
waf_admin/backend/SQLi Classification.ipynb | ###Markdown
END
###Code
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# X is presumably the vectorized feature matrix built in an earlier cell
# (e.g. from `vectorizer`, used below) -- confirm upstream.
input_dim = X.shape[1] # Number of features
cvscores = []
# Binary classifier: dense layers ending in a single sigmoid unit, trained
# with binary cross-entropy below.
model = Sequential()
model.add(layers.Dense(20, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(10, activation='tanh'))
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# Train for 10 epochs; the held-out split doubles as validation data here.
classifier_nn = model.fit(X_train,y_train,
                          epochs=10,
                          verbose=True,
                          validation_data=(X_test, y_test),
                          batch_size=15)
# Save the model, then reload it to verify the round-trip before scoring.
filepath = './saved_model'
save_model(model, filepath)
test_model = load_model(filepath, compile=True)
yhat = test_model.predict(X_test)
# np.rint rounds the sigmoid outputs to hard 0/1 labels (0.5 threshold).
print("Accuracy of Neural network: %.3f "%accuracy_score(np.rint(yhat),y_test))
print("F1_score of Neural network: %.3f "%f1_score(np.rint(yhat),y_test))
print("auc_roc of Neural network: %.3f "%roc_auc_score(np.rint(yhat),y_test))
# Sanity check against an external file of known-malicious payloads,
# restricted to rows labelled SQLI.
test = pd.read_csv('../data/Malicious_data.csv')
test_cv = vectorizer.transform(test.Payload[test['Classification']=='SQLI'])
pred= model.predict(test_cv)
# NOTE: the rounded predictions below are displayed by the notebook but
# not assigned to anything.
np.rint(pred)
yhat = test_model.predict(X_test)
# Round the sigmoid probabilities to hard 0/1 labels before scoring:
# sklearn's f1_score rejects continuous-valued inputs, and the metrics cell
# above already applies np.rint for exactly this reason.
print("F1_score of neural network: %.3f "%f1_score(np.rint(yhat),y_test))
# Inspect training sentences mentioning 'Password' (`df` is presumably the
# raw dataframe loaded earlier -- confirm upstream; result is displayed by
# the notebook, not assigned).
df[df['Sentence'].str.contains('Password')]
# Exploratory cell: list the pretrained models available from gensim-data,
# then download 50-dimensional GloVe vectors (requires network access) and
# look at the nearest neighbours of 'username'.
import gensim.downloader
# Show all available models in gensim-data
print(list(gensim.downloader.info()['models'].keys()))
glove_vectors = gensim.downloader.load('glove-wiki-gigaword-50')
glove_vectors.most_similar('username')
###Output
_____no_output_____ |
modules/04-machine-learning-in-python/04-case-study.ipynb | ###Markdown
Case Study - Facial Recognition with Machine Learning Using SVM and PCA The purpose of this case study is to show you a practical application of Machine Learning with SVM (Support Vector Machines) and PCA (Principal Component Analysis) algorithms for dimensionality reduction. Problem Definition We will create a model for facial recognition, using SVM and PCA. This approach treats face recognition as a 2-dimensional recognition problem, taking advantage of the fact that faces are usually in an upright position and therefore can be described by a small set of 2D features. Face images are projected in a resource space(face space) that best encodes the variation between known face images. The PCA is applied to reduce the dimensionality of the data and then train the SVM model for a classification task. Loading Packages
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn import svm
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Loading the Dataset We will use the **Dataset Labeled Faces in the Wild Home**, which gives us a image faces set prepared for Computer Vision tasks. The dataset can be downloaded from http://vis-www.cs.umass.edu/lfw/, but it is already available on Keras, the most widely used Deep Learning framework today. The download is performed when the below cell is executed for the first time. We will download at least 70 images per person, with a scaling factor of 0.4.
###Code
# Download the Labeled Faces in the Wild dataset via scikit-learn, keeping
# only people with at least 70 images; resize=0.4 scales each image down.
dataset_faces = datasets.fetch_lfw_people(min_faces_per_person = 70, resize = 0.4)
dataset_faces.data.shape
print(dataset_faces.data)
###Output
[[254. 254. 251.66667 ... 87.333336 88.666664 86.666664]
[ 39.666668 50.333332 47. ... 117.666664 115. 133.66667 ]
[ 89.333336 104. 126. ... 175.33333 183.33333 183. ]
...
[ 86. 80.333336 74.666664 ... 44. 49.666668 44.666668]
[ 50.333332 65.666664 88. ... 197. 179.33333 166.33333 ]
[ 30. 27. 32.666668 ... 35. 35.333332 61. ]]
###Markdown
Preparing the Dataset **Let's extract the image shapes details**
###Code
# .images is the 3-D (n_samples, height, width) view of the same data.
n_samples, height, width = dataset_faces.images.shape
print(f'Samples Number: {n_samples}')
print(f'Images Height: {height}')
print(f'Images Width: {width}')
###Output
Samples Number: 1288
Images Height: 50
Images Width: 37
###Markdown
We have 1288 images, each one with dimensions of 50x37 pixels. When we load the data with scikit-learn, the dataset bundle exposes two attributes: `data` and `target`. We'll store the data in **x** (input variables) and the target in **y** (output variable).
###Code
# Flattened pixel matrix (one row per image) and integer class labels.
x = dataset_faces.data
y = dataset_faces.target
n_attributes = x.shape[1]
# target_names maps each integer label to a person's name.
target_names = dataset_faces.target_names
n_classes = target_names.shape[0]
print(f'Number of Attributes: {n_attributes}')
print(f'Number of Classes: {n_classes}')
###Output
Number of Attributes: 1850
Number of Classes: 7
###Markdown
The value of 1850 represents the number of pixels that we will be working with to train the model. There are 1850 variables in the dataset, each one representing a pixel. The value of 7 represents the number of people that our model can classify. That is, we have pictures of 7 different people.
###Code
# Quick look at the raw pixel values and the integer labels.
print(x)
print(y)
###Output
[5 6 3 ... 5 3 5]
###Markdown
Dataset Summary
###Code
# Recap of the dataset dimensions gathered above.
print('Total Dataset Size\n')
print(f'Samples Number: {n_samples}')
print(f'Number of Attributes: {n_attributes}')
print(f'Number of Classes: {n_classes}')
###Output
Total Dataset Size
Samples Number: 1288
Number of Attributes: 1850
Number of Classes: 7
###Markdown
Data Visualization
###Code
# Show the first 15 faces in a 3x5 grid with axis ticks removed.
fig = plt.figure(figsize = (12, 8))
for i in range(15):
    ax = fig.add_subplot(3, 5, i + 1, xticks = [], yticks = [])
    ax.imshow(dataset_faces.images[i], cmap = plt.cm.bone)
###Output
_____no_output_____
###Markdown
Dataset Distribution Visualization
###Code
# Bar chart of how many images each person contributes (class imbalance).
plt.figure(figsize = (10, 2))
unique_targets = np.unique(dataset_faces.target)
counts = [(dataset_faces.target == i).sum() for i in unique_targets]
plt.xticks(unique_targets, dataset_faces.target_names[unique_targets])
locs, labels = plt.xticks()
plt.setp(labels, rotation = 90, size = 14)
_ = plt.bar(unique_targets, counts)
###Output
_____no_output_____
###Markdown
These faces have already been located and resized to a common size. This is an important pre-processing factor for facial recognition, and it's a process that may require a large collection of training data. This can be done with Scikit-Learn, but the challenge is to gather enough training data for the algorithm to work. We need to split the data into training and testing, as in any Machine Learning model.
###Code
# Hold out a test set (default 25% split) with a fixed seed for
# reproducibility.
x_training, \
x_test, \
y_training, \
y_test = train_test_split(
    dataset_faces.data,
    dataset_faces.target,
    random_state = 0)
print(x_training.shape, x_test.shape)
###Output
(966, 1850) (322, 1850)
###Markdown
For Training: 966 images and 1850 attributes (images pixels).For Test: 322 images and 1850 attributes (images pixels). Pre-Processing: Principal Component Analysis (PCA) The 1850 attributes represent 1850 dimensions, which is a lot for SVM models. We can use the PCA to reduce these 1850 resources to a manageable level while keeping most of the information in the dataset. Here it is useful to use a variant of the PCA called **RandomizedPCA**, which is an approximation of the PCA that can be much faster for large datasets. Let's create the PCA model with 150 components, each one will have the same information as a group of variables. In this way, we will reduce the dimensions from 1850 to 150.
###Code
# Reduce the 1850 pixel features to 150 principal components using the
# randomized SVD solver; whiten=True rescales components to unit variance.
pca = decomposition.PCA(
    n_components = 150,
    whiten = True,
    random_state = 1999,
    svd_solver = 'randomized'
)
# Fit on the training split only, then project both splits into PCA space.
pca.fit(x_training)
x_training_pca = pca.transform(x_training)
x_test_pca = pca.transform(x_test)
print(f'Training Shape: {x_training_pca.shape}')
print(f'Test Shape: {x_test_pca.shape}')
###Output
Training Shape: (966, 150)
Test Shape: (322, 150)
###Markdown
These 150 components correspond to factors in a linear combination of images, so that the combination approaches to the original face. In general, PCA can be a powerful pre-processing technique that can significantly improve classification performance. Machine Learning Model Construction with SVM
###Code
# Train an RBF-kernel SVM on the PCA-projected features (C and gamma are
# hand-picked here, not tuned by search).
model_svm = svm.SVC(C = 5., gamma = 0.001)
model_svm.fit(x_training_pca, y_training)
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
print(x_test.shape)
# Show the first 15 test faces with the predicted name as the title.
fig = plt.figure(figsize = (12, 8))
for i in range(15):
    ax = fig.add_subplot(3, 5, i + 1, xticks = [], yticks = [])
    # Dataset real image (reshaped back to the 50x37 pixel grid)
    ax.imshow(x_test[i].reshape((50, 37)), cmap = plt.cm.bone)
    # Class prediction with the trained model
    y_prediction = model_svm.predict(x_test_pca[i].reshape(1, -1))[0]
    # Set black labels for correct predictions (prediction classes equal to real classes),
    # and red labels for the opposite
    color = 'black' if y_prediction == y_test[i] else 'red'
    ax.set_title(dataset_faces.target_names[y_prediction], fontsize = 'small', color = color)
# Overall accuracy on the full test split.
print(model_svm.score(x_test_pca, y_test))
###Output
0.8416149068322981
|
Course4_CNN/ww3/Autonomous_driving_application_Car_detection_v3a.ipynb | ###Markdown
Autonomous driving - Car detectionWelcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242). **You will learn to**:- Use object detection on a car detection dataset- Deal with bounding boxes Updates If you were working on the notebook before this update...* The current notebook is version "3a".* You can find your original work saved in the notebook with the previous version name ("v3") * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory. List of updates* Clarified "YOLO" instructions preceding the code. * Added details about anchor boxes.* Added explanation of how score is calculated.* `yolo_filter_boxes`: added additional hints. Clarify syntax for argmax and max.* `iou`: clarify instructions for finding the intersection.* `iou`: give variable names for all 8 box vertices, for clarity. Adds `width` and `height` variables for clarity.* `iou`: add test cases to check handling of non-intersecting boxes, intersection at vertices, or intersection at edges.* `yolo_non_max_suppression`: clarify syntax for tf.image.non_max_suppression and keras.gather.* "convert output of the model to usable bounding box tensors": Provides a link to the definition of `yolo_head`.* `predict`: hint on calling sess.run.* Spelling, grammar, wording and formatting updates to improve clarity. Import librariesRun the following cell to load the packages and dependencies that you will find useful as you build the object detector!
###Code
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
**Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`. 1 - Problem StatementYou are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around. Pictures taken from a car-mounted camera while driving around Silicon Valley. We thank [drive.ai](htps://www.drive.ai/) for providing this dataset.You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like. **Figure 1** : **Definition of a box** If you have 80 classes that you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step. In this exercise, you will learn how "You Only Look Once" (YOLO) performs object detection, and then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use. 2 - YOLO "You Only Look Once" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes. 
2.1 - Model details Inputs and outputs- The **input** is a batch of images, and each image has the shape (m, 608, 608, 3)- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers. Anchor Boxes* Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt'* The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$.* The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85). EncodingLet's look in greater detail at what this encoding represents. **Figure 2** : **Encoding architecture for YOLO** If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object. Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.For simplicity, we will flatten the last two last dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425). **Figure 3** : **Flattening the last two last dimensions** Class scoreNow, for each box (of each cell) we will compute the following element-wise product and extract a probability that the box contains a certain class. The class score is $score_{c,i} = p_{c} \times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$. **Figure 4** : **Find the class detected by each box** Example of figure 4* In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. 
So there's a 60% chance that an object exists in box 1 (cell 1). * The probability that the object is the class "category 3 (a car)" is $c_{3}=0.73$. * The score for box 1 and for category "3" is $score_{1,3}=0.60 \times 0.73 = 0.44$. * Let's say we calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. So we'll assign the score 0.44 and class "3" to this box "1". Visualizing classesHere's one way to visualize what YOLO is predicting on an image:- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes).- Color that grid cell according to what object that grid cell considers the most likely.Doing this results in this picture: **Figure 5** : Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell. Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm. Visualizing bounding boxesAnother way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this: **Figure 6** : Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. Non-Max suppressionIn the figure above, we plotted only boxes for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects. To do so, you'll use **non-max suppression**. 
Specifically, you'll carry out these steps: - Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class; either due to the low probability of any object, or low probability of this particular class).- Select only one box when several boxes overlap with each other and detect the same object. 2.2 - Filtering with a threshold on class scoresYou are going to first apply a filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold. The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It is convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables: - `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.- `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell.- `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the "class probabilities" $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell. **Exercise**: Implement `yolo_filter_boxes()`.1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \times c$). The following code may help you choose the right operator: ```pythona = np.random.randn(19*19, 5, 1)b = np.random.randn(19*19, 5, 80)c = a * b shape of c will be (19*19, 5, 80)```This is an example of **broadcasting** (multiplying vectors of different sizes).2. 
For each box, find: - the index of the class with the maximum box score - the corresponding box score **Useful references** * [Keras argmax](https://keras.io/backend/argmax) * [Keras max](https://keras.io/backend/max) **Additional Hints** * For the `axis` parameter of `argmax` and `max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`. * Applying `max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. We don't need to keep the last dimension after applying the maximum here. * Even though the documentation shows `keras.backend.argmax`, use `keras.argmax`. Similarly, use `keras.max`.3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep. 4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. **Useful reference**: * [boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask) **Additional Hints**: * For the `tf.boolean_mask`, we can keep the default `axis=None`.**Reminder**: to call a Keras function, you should use `K.function(...)`.
###Code
# GRADED FUNCTION: yolo_filter_boxes

def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Filters YOLO boxes by thresholding on object and class confidence.

    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1)
    boxes -- tensor of shape (19, 19, 5, 4)
    box_class_probs -- tensor of shape (19, 19, 5, 80)
    threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box

    Returns:
    scores -- tensor of shape (None,), containing the class probability score for selected boxes
    boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
    classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes

    Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
    For example, the actual output size of scores would be (10,) if there are 10 boxes.
    """

    # Step 1: Compute box scores. Broadcasting multiplies the (19,19,5,1)
    # objectness against the (19,19,5,80) class probabilities, giving
    # per-class scores p_c * c_i of shape (19,19,5,80).
    ### START CODE HERE ### (≈ 1 line)
    box_scores = box_confidence*box_class_probs
    ### END CODE HERE ###

    # Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score
    ### START CODE HERE ### (≈ 2 lines)
    box_classes = K.argmax(box_scores,axis=-1) #return the index value, shape is (19,19,5)
    box_class_scores = K.max(box_scores,axis=-1) #return the max value, shape is (19,19,5)
    ### END CODE HERE ###

    # Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
    # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
    ### START CODE HERE ### (≈ 1 line)
    filtering_mask = box_class_scores>=threshold
    ### END CODE HERE ###

    # Step 4: Apply the mask to box_class_scores, boxes and box_classes.
    # tf.boolean_mask flattens the kept entries, so the outputs are 1-D
    # (or (None, 4) for the box coordinates) with dynamic length.
    ### START CODE HERE ### (≈ 3 lines)
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
    ### END CODE HERE ###

    return scores, boxes, classes
# Smoke test with random tensors (TF1 graph mode, hence the Session).
# The values are arbitrary -- this only exercises shapes and wiring.
with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.shape))
    print("boxes.shape = " + str(boxes.shape))
    print("classes.shape = " + str(classes.shape))
###Output
scores[2] = 10.7506
boxes[2] = [ 8.42653275 3.27136683 -0.5313437 -4.94137383]
classes[2] = 7
scores.shape = (?,)
boxes.shape = (?, 4)
classes.shape = (?,)
###Markdown
**Expected Output**: **scores[2]** 10.7506 **boxes[2]** [ 8.42653275 3.27136683 -0.5313437 -4.94137383] **classes[2]** 7 **scores.shape** (?,) **boxes.shape** (?, 4) **classes.shape** (?,) **Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative. 2.3 - Non-max suppression Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS). **Figure 7** : In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU. **Figure 8** : Definition of "Intersection over Union". **Exercise**: Implement iou(). Some hints:- In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. In other words, the (0,0) origin starts at the top left corner of the image. As x increases, we move to the right. As y increases, we move down.- For this exercise, we define a box using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. (This makes it a bit easier to calculate the intersection).- To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. 
(Since $(x_1,y_1)$ is the top left and $x_2,y_2$ are the bottom right, these differences should be non-negative.- To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$: - Feel free to draw some examples on paper to clarify this conceptually. - The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom. - The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top. - The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero). - The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. This happens when either the height or width (or both) of the calculated intersection is zero.**Additional Hints**- `xi1` = **max**imum of the x1 coordinates of the two boxes- `yi1` = **max**imum of the y1 coordinates of the two boxes- `xi2` = **min**imum of the x2 coordinates of the two boxes- `yi2` = **min**imum of the y2 coordinates of the two boxes- `inter_area` = You can use `max(height, 0)` and `max(width, 0)`
###Code
# GRADED FUNCTION: iou

def iou(box1, box2):
    """Implement the intersection over union (IoU) between box1 and box2

    Arguments:
    box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)
    box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)

    Returns:
    iou -- float in [0, 1]: intersection area divided by union area.
           Returns 0.0 when the union area is zero (both boxes degenerate),
           instead of raising ZeroDivisionError.
    """

    # Assign variable names to coordinates for clarity
    (box1_x1, box1_y1, box1_x2, box1_y2) = box1
    (box2_x1, box2_y1, box2_x2, box2_y2) = box2

    # Intersection rectangle: right-most left edge, bottom-most top edge,
    # left-most right edge, top-most bottom edge.
    xi1 = max(box1_x1, box2_x1)
    yi1 = max(box1_y1, box2_y1)
    xi2 = min(box1_x2, box2_x2)
    yi2 = min(box1_y2, box2_y2)
    inter_width = xi2 - xi1
    inter_height = yi2 - yi1
    # A negative width/height means the boxes do not overlap; clamp to 0 so
    # touching at an edge or vertex also yields zero intersection area.
    inter_area = max(inter_width, 0) * max(inter_height, 0)

    # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)
    box1_area = (box1_y2 - box1_y1) * (box1_x2 - box1_x1)
    box2_area = (box2_y2 - box2_y1) * (box2_x2 - box2_x1)
    union_area = box1_area + box2_area - inter_area

    # Guard against division by zero when both boxes have zero area.
    if union_area == 0:
        return 0.0
    return inter_area / union_area
## Exercise iou() on four representative configurations:
## overlap, disjoint, vertex contact, and edge contact.
for label, box1, box2 in [
    ("intersecting boxes", (2, 1, 4, 3), (1, 2, 3, 4)),
    ("non-intersecting boxes", (1, 2, 3, 4), (5, 6, 7, 8)),
    ("boxes that only touch at vertices", (1, 1, 2, 2), (2, 2, 3, 3)),
    ("boxes that only touch at edges", (1, 1, 3, 3), (2, 3, 3, 4)),
]:
    print("iou for " + label + " = " + str(iou(box1, box2)))
###Output
iou for intersecting boxes = 0.14285714285714285
iou for non-intersecting boxes = 0.0
iou for boxes that only touch at vertices = 0.0
iou for boxes that only touch at edges = 0.0
###Markdown
**Expected Output**:```iou for intersecting boxes = 0.14285714285714285iou for non-intersecting boxes = 0.0iou for boxes that only touch at vertices = 0.0iou for boxes that only touch at edges = 0.0``` YOLO non-max suppressionYou are now ready to implement non-max suppression. The key steps are: 1. Select the box that has the highest score.2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`).3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):** Reference documentation ** - [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)```tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold=0.5, name=None)```Note that in the version of tensorflow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument 'score_threshold.*- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/keras/backend/gather) Even though the documentation shows `tf.keras.backend.gather()`, you can use `keras.gather()`. ```keras.gather( reference, indices)```
###Code
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to a set of boxes.

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have
             been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each kept box
    boxes -- tensor of shape (None, 4), predicted box coordinates for each kept box
    classes -- tensor of shape (None,), predicted class for each kept box

    Note: the output "None" dimension is at most max_boxes.
    """
    # tf.image.non_max_suppression() expects the box budget as an int32 tensor.
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    # A Keras variable must be explicitly initialized before the graph reads it.
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))

    # Indices of the detections that survive NMS, ordered by decreasing score.
    keep = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)

    # Restrict each of the three tensors to the surviving indices.
    classes = K.gather(classes, keep)
    boxes = K.gather(boxes, keep)
    scores = K.gather(scores, keep)

    return scores, boxes, classes
# Smoke-test yolo_non_max_suppression() on 54 random "detections".
# NOTE(review): these are synthetic values, not real box coordinates; the
# printed numbers only need to match the expected output below.
with tf.Session() as test_b:
    scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
    classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    # Default arguments: keep at most 10 boxes with IoU threshold 0.5.
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
###Output
scores[2] = 6.9384
boxes[2] = [-5.299932 3.13798141 4.45036697 0.95942086]
classes[2] = -2.24527
scores.shape = (10,)
boxes.shape = (10, 4)
classes.shape = (10,)
###Markdown
**Expected Output**: **scores[2]** 6.9384 **boxes[2]** [-5.299932 3.13798141 4.45036697 0.95942086] **classes[2]** -2.24527 **scores.shape** (10,) **boxes.shape** (10, 4) **classes.shape** (10,) 2.4 Wrapping up the filteringIt's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented. **Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided): ```pythonboxes = yolo_boxes_to_corners(box_xy, box_wh) ```which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes````pythonboxes = scale_boxes(boxes, image_shape)```YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image. Don't worry about these two functions; we'll show you where they need to be called.
###Code
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted
    boxes along with their scores, box coordinates and classes.

    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape (float32)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- real value; boxes whose highest class probability score
                       is below this threshold are discarded
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Unpack the four tensors produced by yolo_head().
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs

    # Convert (x, y, w, h) midpoint format into (x1, y1, x2, y2) corners,
    # which is what the filtering functions expect.
    corner_boxes = yolo_boxes_to_corners(box_xy, box_wh)

    # Score filtering: drop boxes whose best class score is below the threshold.
    scores, corner_boxes, classes = yolo_filter_boxes(
        box_confidence, corner_boxes, box_class_probs, score_threshold)

    # Rescale boxes from the 608x608 network input to the original image size.
    corner_boxes = scale_boxes(corner_boxes, image_shape)

    # Non-max suppression: remove highly-overlapping boxes, keep at most max_boxes.
    scores, corner_boxes, classes = yolo_non_max_suppression(
        scores, corner_boxes, classes, max_boxes, iou_threshold)

    return scores, corner_boxes, classes
# Smoke-test yolo_eval() on random tensors shaped like the yolo_head() outputs
# (19x19 grid, 5 anchors, 80 classes). Values are synthetic; only the printed
# numbers need to match the expected output below.
with tf.Session() as test_b:
    yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
    scores, boxes, classes = yolo_eval(yolo_outputs)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
###Output
scores[2] = 138.791
boxes[2] = [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]
classes[2] = 54
scores.shape = (10,)
boxes.shape = (10, 4)
classes.shape = (10,)
###Markdown
**Expected Output**: **scores[2]** 138.791 **boxes[2]** [ 1292.32971191 -278.52166748 3876.98925781 -835.56494141] **classes[2]** 54 **scores.shape** (10,) **boxes.shape** (10, 4) **classes.shape** (10,) Summary for YOLO:- Input image (608, 608, 3)- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output. - After flattening the last two dimensions, the output is a volume of shape (19, 19, 425): - Each cell in a 19x19 grid over the input image gives 425 numbers. - 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture. - 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect- You then select only few boxes based on: - Score-thresholding: throw away boxes that have detected a class with a score less than the threshold - Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes- This gives you YOLO's final output. 3 - Test YOLO pre-trained model on images In this part, you are going to use a pre-trained model and test it on the car detection dataset. We'll need a session to execute the computation graph and evaluate the tensors.
###Code
# Grab the TensorFlow session Keras is using, so later cells can run the graph in it.
sess = K.get_session()
###Output
_____no_output_____
###Markdown
3.1 - Defining classes, anchors and image shape.* Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. * We have gathered the information on the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". * We'll read class names and anchors from text files.* The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
###Code
# The 80 COCO class names, one per line in the text file.
class_names = read_classes("model_data/coco_classes.txt")
# The 5 anchor boxes the model was trained with, read from a text file.
anchors = read_anchors("model_data/yolo_anchors.txt")
# Original (height, width) of the car-detection images, as floats for scale_boxes().
image_shape = (720., 1280.)
###Output
_____no_output_____
###Markdown
3.2 - Loading a pre-trained model* Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. * You are going to load an existing pre-trained Keras YOLO model stored in "yolo.h5". * These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will simply refer to it as "YOLO" in this notebook.Run the cell below to load the model from this file.
###Code
# Load the pre-trained YOLO (YOLOv2) Keras model converted from the official weights.
yolo_model = load_model("model_data/yolo.h5")
###Output
/opt/conda/lib/python3.6/site-packages/keras/models.py:251: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually.
warnings.warn('No training configuration found in save file: '
###Markdown
This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
###Code
# Print a layer-by-layer summary of the loaded network.
yolo_model.summary()
###Output
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 608, 608, 3) 0
____________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 608, 608, 32) 864 input_1[0][0]
____________________________________________________________________________________________________
batch_normalization_1 (BatchNorm (None, 608, 608, 32) 128 conv2d_1[0][0]
____________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, 608, 608, 32) 0 batch_normalization_1[0][0]
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 304, 304, 32) 0 leaky_re_lu_1[0][0]
____________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 304, 304, 64) 18432 max_pooling2d_1[0][0]
____________________________________________________________________________________________________
batch_normalization_2 (BatchNorm (None, 304, 304, 64) 256 conv2d_2[0][0]
____________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, 304, 304, 64) 0 batch_normalization_2[0][0]
____________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 152, 152, 64) 0 leaky_re_lu_2[0][0]
____________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 152, 152, 128) 73728 max_pooling2d_2[0][0]
____________________________________________________________________________________________________
batch_normalization_3 (BatchNorm (None, 152, 152, 128) 512 conv2d_3[0][0]
____________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_3[0][0]
____________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 152, 152, 64) 8192 leaky_re_lu_3[0][0]
____________________________________________________________________________________________________
batch_normalization_4 (BatchNorm (None, 152, 152, 64) 256 conv2d_4[0][0]
____________________________________________________________________________________________________
leaky_re_lu_4 (LeakyReLU) (None, 152, 152, 64) 0 batch_normalization_4[0][0]
____________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 152, 152, 128) 73728 leaky_re_lu_4[0][0]
____________________________________________________________________________________________________
batch_normalization_5 (BatchNorm (None, 152, 152, 128) 512 conv2d_5[0][0]
____________________________________________________________________________________________________
leaky_re_lu_5 (LeakyReLU) (None, 152, 152, 128) 0 batch_normalization_5[0][0]
____________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 76, 76, 128) 0 leaky_re_lu_5[0][0]
____________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 76, 76, 256) 294912 max_pooling2d_3[0][0]
____________________________________________________________________________________________________
batch_normalization_6 (BatchNorm (None, 76, 76, 256) 1024 conv2d_6[0][0]
____________________________________________________________________________________________________
leaky_re_lu_6 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_6[0][0]
____________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 76, 76, 128) 32768 leaky_re_lu_6[0][0]
____________________________________________________________________________________________________
batch_normalization_7 (BatchNorm (None, 76, 76, 128) 512 conv2d_7[0][0]
____________________________________________________________________________________________________
leaky_re_lu_7 (LeakyReLU) (None, 76, 76, 128) 0 batch_normalization_7[0][0]
____________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 76, 76, 256) 294912 leaky_re_lu_7[0][0]
____________________________________________________________________________________________________
batch_normalization_8 (BatchNorm (None, 76, 76, 256) 1024 conv2d_8[0][0]
____________________________________________________________________________________________________
leaky_re_lu_8 (LeakyReLU) (None, 76, 76, 256) 0 batch_normalization_8[0][0]
____________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 38, 38, 256) 0 leaky_re_lu_8[0][0]
____________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 38, 38, 512) 1179648 max_pooling2d_4[0][0]
____________________________________________________________________________________________________
batch_normalization_9 (BatchNorm (None, 38, 38, 512) 2048 conv2d_9[0][0]
____________________________________________________________________________________________________
leaky_re_lu_9 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_9[0][0]
____________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_9[0][0]
____________________________________________________________________________________________________
batch_normalization_10 (BatchNor (None, 38, 38, 256) 1024 conv2d_10[0][0]
____________________________________________________________________________________________________
leaky_re_lu_10 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_10[0][0]
____________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_10[0][0]
____________________________________________________________________________________________________
batch_normalization_11 (BatchNor (None, 38, 38, 512) 2048 conv2d_11[0][0]
____________________________________________________________________________________________________
leaky_re_lu_11 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_11[0][0]
____________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 38, 38, 256) 131072 leaky_re_lu_11[0][0]
____________________________________________________________________________________________________
batch_normalization_12 (BatchNor (None, 38, 38, 256) 1024 conv2d_12[0][0]
____________________________________________________________________________________________________
leaky_re_lu_12 (LeakyReLU) (None, 38, 38, 256) 0 batch_normalization_12[0][0]
____________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 38, 38, 512) 1179648 leaky_re_lu_12[0][0]
____________________________________________________________________________________________________
batch_normalization_13 (BatchNor (None, 38, 38, 512) 2048 conv2d_13[0][0]
____________________________________________________________________________________________________
leaky_re_lu_13 (LeakyReLU) (None, 38, 38, 512) 0 batch_normalization_13[0][0]
____________________________________________________________________________________________________
max_pooling2d_5 (MaxPooling2D) (None, 19, 19, 512) 0 leaky_re_lu_13[0][0]
____________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 19, 19, 1024) 4718592 max_pooling2d_5[0][0]
____________________________________________________________________________________________________
batch_normalization_14 (BatchNor (None, 19, 19, 1024) 4096 conv2d_14[0][0]
____________________________________________________________________________________________________
leaky_re_lu_14 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_14[0][0]
____________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_14[0][0]
____________________________________________________________________________________________________
batch_normalization_15 (BatchNor (None, 19, 19, 512) 2048 conv2d_15[0][0]
____________________________________________________________________________________________________
leaky_re_lu_15 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_15[0][0]
____________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_15[0][0]
____________________________________________________________________________________________________
batch_normalization_16 (BatchNor (None, 19, 19, 1024) 4096 conv2d_16[0][0]
____________________________________________________________________________________________________
leaky_re_lu_16 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_16[0][0]
____________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 19, 19, 512) 524288 leaky_re_lu_16[0][0]
____________________________________________________________________________________________________
batch_normalization_17 (BatchNor (None, 19, 19, 512) 2048 conv2d_17[0][0]
____________________________________________________________________________________________________
leaky_re_lu_17 (LeakyReLU) (None, 19, 19, 512) 0 batch_normalization_17[0][0]
____________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 19, 19, 1024) 4718592 leaky_re_lu_17[0][0]
____________________________________________________________________________________________________
batch_normalization_18 (BatchNor (None, 19, 19, 1024) 4096 conv2d_18[0][0]
____________________________________________________________________________________________________
leaky_re_lu_18 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_18[0][0]
____________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_18[0][0]
____________________________________________________________________________________________________
batch_normalization_19 (BatchNor (None, 19, 19, 1024) 4096 conv2d_19[0][0]
____________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 38, 38, 64) 32768 leaky_re_lu_13[0][0]
____________________________________________________________________________________________________
leaky_re_lu_19 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_19[0][0]
____________________________________________________________________________________________________
batch_normalization_21 (BatchNor (None, 38, 38, 64) 256 conv2d_21[0][0]
____________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 19, 19, 1024) 9437184 leaky_re_lu_19[0][0]
____________________________________________________________________________________________________
leaky_re_lu_21 (LeakyReLU) (None, 38, 38, 64) 0 batch_normalization_21[0][0]
____________________________________________________________________________________________________
batch_normalization_20 (BatchNor (None, 19, 19, 1024) 4096 conv2d_20[0][0]
____________________________________________________________________________________________________
space_to_depth_x2 (Lambda) (None, 19, 19, 256) 0 leaky_re_lu_21[0][0]
____________________________________________________________________________________________________
leaky_re_lu_20 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_20[0][0]
____________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 19, 19, 1280) 0 space_to_depth_x2[0][0]
leaky_re_lu_20[0][0]
____________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 19, 19, 1024) 11796480 concatenate_1[0][0]
____________________________________________________________________________________________________
batch_normalization_22 (BatchNor (None, 19, 19, 1024) 4096 conv2d_22[0][0]
____________________________________________________________________________________________________
leaky_re_lu_22 (LeakyReLU) (None, 19, 19, 1024) 0 batch_normalization_22[0][0]
____________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 19, 19, 425) 435625 leaky_re_lu_22[0][0]
====================================================================================================
Total params: 50,983,561
Trainable params: 50,962,889
Non-trainable params: 20,672
____________________________________________________________________________________________________
###Markdown
**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2). 3.3 - Convert output of the model to usable bounding box tensorsThe output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.If you are curious about how `yolo_head` is implemented, you can find the function definition in the file ['keras_yolo.py'](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py). The file is located in your workspace in this path 'yad2k/models/keras_yolo.py'.
###Code
# Convert the raw (m, 19, 19, 5, 85) model output into the 4 tensors yolo_eval() expects.
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
###Output
_____no_output_____
###Markdown
You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function. 3.4 - Filtering boxes`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this.
###Code
# Build the score-filtering + NMS graph nodes; they are evaluated later inside predict().
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
###Output
_____no_output_____
###Markdown
3.5 - Run the graph on an imageLet the fun begin. You have created a graph that can be summarized as follows:1. yolo_model.input is given to `yolo_model`. The model is used to compute the output yolo_model.output 2. yolo_model.output is processed by `yolo_head`. It gives you yolo_outputs 3. yolo_outputs goes through a filtering function, `yolo_eval`. It outputs your predictions: scores, boxes, classes **Exercise**: Implement predict() which runs the graph to test YOLO on an image.You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.The code below also uses the following function:```pythonimage, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))```which outputs:- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.- image_data: a numpy-array representing the image. This will be the input to the CNN.**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}. Hint: Using the TensorFlow Session object* Recall that above, we called `K.get_Session()` and saved the Session object in `sess`.* To evaluate a list of tensors, we call `sess.run()` like this:```sess.run(fetches=[tensor1,tensor2,tensor3], feed_dict={yolo_model.input: the_input_variable, K.learning_phase():0 }```* Notice that the variables `scores, boxes, classes` are not passed into the `predict` function, but these are global variables that you will use within the `predict` function.
###Code
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file".
    Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None,), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None,), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes; it varies
    between 0 and max_boxes. Relies on the module-level tensors
    scores/boxes/classes built by yolo_eval().
    """
    # Resize/normalize the image to the 608x608 input the network was trained on.
    pil_image, model_input = preprocess_image("images/" + image_file, model_image_size = (608, 608))

    # Evaluate the filtered detections. learning_phase 0 puts BatchNorm layers
    # in inference mode, which this model requires.
    fetches = [scores, boxes, classes]
    feed = {yolo_model.input: model_input, K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run(fetches, feed_dict=feed)

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Draw one colored rectangle per detected class on the PIL image.
    colors = generate_colors(class_names)
    draw_boxes(pil_image, out_scores, out_boxes, out_classes, class_names, colors)

    # Persist the annotated image, then display it inline in the notebook.
    out_path = os.path.join("out", image_file)
    pil_image.save(out_path, quality=90)
    output_image = scipy.misc.imread(out_path)
    plt.figure(figsize = (15,12))
    plt.imshow(output_image)

    return out_scores, out_boxes, out_classes
###Output
_____no_output_____
###Markdown
Run the following cell on the "test.jpg" image to verify that your function is correct.
###Code
# Run the full detection pipeline on one sample image from the "images" folder.
out_scores, out_boxes, out_classes = predict(sess, "0005.jpg")
###Output
Found 5 boxes for 0005.jpg
car 0.64 (207, 297) (338, 340)
car 0.65 (741, 266) (918, 313)
car 0.67 (15, 313) (128, 362)
car 0.72 (883, 260) (1026, 303)
car 0.75 (517, 282) (689, 336)
|
tv-script-generation/.ipynb_checkpoints/dlnd_tv_script_generation-checkpoint.ipynb | ###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
_____no_output_____
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # A set collapses repeated words to one entry each; enumerate assigns
    # each unique word a stable integer id for this process.
    unique_words = set(text)
    int_to_vocab = dict(enumerate(unique_words))
    vocab_to_int = {word: index for index, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
_____no_output_____
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are wrapped in '||' so they cannot be confused with real words
    # after the script is split on whitespace.
    return {
        '.': '||period||',
        ',': '||comma||',
        '"': '||quotation_mark||',
        ';': '||semicolon||',
        '!': '||exclamation_mark||',
        '?': '||question_mark||',
        '(': '||left_parentheses||',
        ')': '||right_parentheses||',
        '--': '||dash||',
        '\n': '||return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
_____no_output_____
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
_____no_output_____
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Word-id tensors are (batch, sequence); both sizes stay dynamic (None).
    inputs = tf.placeholder(tf.int32, shape=(None, None), name='input')
    targets = tf.placeholder(tf.int32, shape=(None, None), name='targets')
    learning_rate = tf.placeholder(tf.float32, name='lr')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
_____no_output_____
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # Stack LSTM layers into one multi-layer cell.
    num_layers = 2
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
    # Name the all-zeros starting state so it can be fetched by name later.
    initial_state = tf.identity(
        cell.zero_state(batch_size, tf.float32), name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
_____no_output_____
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1., 1.))
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
_____no_output_____
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so generation code can fetch it by name.
    final_state = tf.identity(state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network.
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Linear projection to one unnormalized score per vocabulary word.
    logits = tf.layers.dense(outputs, units=vocab_size, activation=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Drop the tail that cannot fill a complete batch.
    data = np.asarray(int_text[:n_batches * words_per_batch])
    # Targets are the inputs shifted left by one word; the very last target
    # wraps around to the first word of the text.
    inputs = data.reshape((batch_size, -1))
    targets = np.roll(data, -1).reshape((batch_size, -1))
    return np.asarray([
        [inputs[:, i:i + seq_length], targets[:, i:i + seq_length]]
        for i in range(0, inputs.shape[1], seq_length)
    ])
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Hyperparameters sized for the Moe's Tavern subset (~70k tokens).
# Number of Epochs
num_epochs = 50
# Batch Size
batch_size = 128
# RNN Size (hidden units per LSTM layer)
rnn_size = 512
# Embedding Dimension Size
embed_dim = 256
# Sequence Length (words of context per training step)
seq_length = 16
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 50
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq

# Assemble the full training graph: inputs -> embedding/RNN -> logits,
# plus loss, optimizer, and gradient clipping.
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Batch size and sequence length are read from the input at run time.
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')
    # Loss function: average cross-entropy over every position
    # (tf.ones gives every position equal weight).
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))
    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)
    # Gradient Clipping: clip each gradient element to [-1, 1] to avoid
    # exploding gradients in the RNN.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)

with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())

    for epoch_i in range(num_epochs):
        # Reset the RNN state at the start of every epoch.
        state = sess.run(initial_state, {input_text: batches[0][0]})

        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,  # carry hidden state across batches within an epoch
                lr: learning_rate}
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)

            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))

    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>.
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The ':0' suffix selects the first output of each named op.
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text.
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word id weighted by the model's probabilities instead of
    # always taking the argmax, so generated text is not repetitive.
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
# Length (in tokens) of the TV script to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

    # Sentences generation setup: seed with the speaker tag, zero RNN state.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

    # Generate sentences one word at a time.
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])

        # Get Prediction for the position after the last fed word.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})

        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)

    # Remove tokens: turn '||period||' etc. back into punctuation and
    # re-attach spacing around newlines/parentheses/quotes.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')

    print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Collapse repeated words to one entry each, then pair every unique
    # word with an integer id via a single enumeration.
    unique_words = set(text)
    int_to_vocab = dict(enumerate(unique_words))
    vocab_to_int = {word: index for index, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Parallel tuples keep symbol/token pairs aligned; zip builds the map.
    symbols = ('.', ',', '"', ';', '!', '?', '(', ')', '--', '\n')
    tokens = ('||dot||', '||comma||', '||quotation_mark||', '||semicolon||',
              '||exclamation_mark||', '||question_mark||',
              '||left_parentheses||', '||right_parentheses||',
              '||dash||', '||return||')
    return dict(zip(symbols, tokens))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
len(int_to_vocab)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.1.0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Word-id tensors are (batch, sequence); both dimensions stay dynamic.
    word_ids = tf.placeholder(tf.int32, (None, None), name='input')
    target_ids = tf.placeholder(tf.int32, (None, None), name='targets')
    lr = tf.placeholder(tf.float32, name='lr')
    return word_ids, target_ids, lr
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # Three stacked LSTM layers wrapped into one multi-layer cell.
    lstm_layers = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(3)]
    cell = tf.contrib.rnn.MultiRNNCell(lstm_layers)
    # Name the all-zeros start state so generation code can fetch it by name.
    zero_state = cell.zero_state(batch_size, tf.float32)
    return cell, tf.identity(zero_state, name='initial_state')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable lookup table, initialized uniformly in [-1, 1).
    initial_weights = tf.random_uniform([vocab_size, embed_dim], -1., 1.)
    embedding_table = tf.Variable(initial_weights)
    return tf.nn.embedding_lookup(embedding_table, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    raw_outputs, raw_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched by name at generation time.
    return raw_outputs, tf.identity(raw_state, 'final_state')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network.
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Embed word ids, run them through the RNN, then project each output
    # linearly to one unnormalized score per vocabulary word.
    embedded = get_embed(input_data, vocab_size, embed_dim)
    rnn_outputs, final_state = build_rnn(cell, embedded)
    logits = tf.layers.dense(rnn_outputs, units=vocab_size, activation=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Drop the tail that cannot fill a complete batch.
    data = np.asarray(int_text[:n_batches * words_per_batch])
    # Targets are the inputs shifted left by one word over the FLAT text,
    # so each row's final target is the next row's first word and only the
    # very last target wraps to the first word of the text.
    # (The previous version assigned int_text[:, 0] — each row's own first
    # word — to every row's final target, misaligning batch seams.)
    inputs = data.reshape((batch_size, -1))
    targets = np.roll(data, -1).reshape((batch_size, -1))
    batches = []
    for start in range(0, inputs.shape[1], seq_length):
        x = inputs[:, start:start + seq_length]
        y = targets[:, start:start + seq_length]
        batches.append([x, y])
    return np.asarray(batches)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# NOTE(review): rnn_size=5 / embed_dim=5 were far too small to model an
# 11k-word vocabulary — the training log below shows loss stuck near 6.1.
# Sized up to values that can actually fit the Moe's Tavern subset.
# Number of Epochs
num_epochs = 50
# Batch Size
batch_size = 128
# RNN Size (hidden units per LSTM layer)
rnn_size = 512
# Embedding Dimension Size
embed_dim = 256
# Sequence Length (words of context per training step)
seq_length = 16
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 50
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq

# Assemble the full training graph: inputs -> embedding/RNN -> logits,
# plus loss, optimizer, and gradient clipping.
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Batch size and sequence length are read from the input at run time.
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')
    # Loss function: average cross-entropy over every position
    # (tf.ones gives every position equal weight).
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))
    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)
    # Gradient Clipping: clip each gradient element to [-1, 1] to avoid
    # exploding gradients in the RNN.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
Train: Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
Epoch 0 Batch 0/43 train_loss = 8.822
Epoch 0 Batch 25/43 train_loss = 7.355
Epoch 1 Batch 7/43 train_loss = 6.285
Epoch 1 Batch 32/43 train_loss = 6.124
Epoch 2 Batch 14/43 train_loss = 6.002
Epoch 2 Batch 39/43 train_loss = 5.921
Epoch 3 Batch 21/43 train_loss = 5.876
Epoch 4 Batch 3/43 train_loss = 6.004
Epoch 4 Batch 28/43 train_loss = 5.961
Epoch 5 Batch 10/43 train_loss = 6.083
Epoch 5 Batch 35/43 train_loss = 5.905
Epoch 6 Batch 17/43 train_loss = 5.985
Epoch 6 Batch 42/43 train_loss = 5.858
Epoch 7 Batch 24/43 train_loss = 5.834
Epoch 8 Batch 6/43 train_loss = 5.829
Epoch 8 Batch 31/43 train_loss = 5.926
Epoch 9 Batch 13/43 train_loss = 5.916
Epoch 9 Batch 38/43 train_loss = 5.852
Model Trained and Saved
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The four tensors were given explicit names when the training graph
    # was built, so they can be recovered from the restored graph by name.
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word (1-D array summing to 1)
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy argmax always emits the single most likely token, which makes
    # the generator loop (the sample output degenerates into an unbroken
    # run of periods).  Sampling from the distribution keeps the script
    # varied while still favouring probable words.
    i = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[i]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
INFO:tensorflow:Restoring parameters from ./save
moe_szyslak:........................................................................................................................................................................................................
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Enumerate the deduplicated vocabulary once and fill both mappings in
    # lockstep so they are guaranteed to be exact inverses of each other.
    vocab_to_int = {}
    int_to_vocab = {}
    for idx, word in enumerate(set(text)):
        vocab_to_int[word] = idx
        int_to_vocab[idx] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Pair each punctuation symbol with its token name; the ||...|| wrapper
    # keeps the tokens from colliding with real vocabulary words.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ['Period', 'Comma', 'Quotation_Mark', 'Semicolon',
             'Exclamation_Mark', 'Question_Mark', 'Left_Parentheses',
             'Right_Parentheses', 'Dash', 'Return']
    return {sym: '||{}||'.format(name) for sym, name in zip(symbols, names)}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Both word-id matrices are (batch, sequence); leave the dimensions
    # dynamic so any batch size / sequence length can be fed at run time.
    # The tensor names matter: get_tensors() later recovers them by name.
    text_ph = tf.placeholder(tf.int32, shape=(None, None), name="input")
    target_ph = tf.placeholder(tf.int32, shape=(None, None), name="targets")
    lr_ph = tf.placeholder(tf.float32, name="learning_rate")
    return text_ph, target_ph, lr_ph
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
num_layers = 1
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # Stack `num_layers` fresh BasicLSTMCells into one multi-layer cell.
    layers = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)]
    cell = tf.contrib.rnn.MultiRNNCell(layers)
    # Name the zero state so it can be fetched from a restored graph.
    zero_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(zero_state, "initial_state")
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialised uniformly in [-1, 1);
    # each word id indexes one row of the table.
    table = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1.0, 1.0))
    return tf.nn.embedding_lookup(table, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell dynamically over the time dimension of `inputs`,
    # then name the final state so it can be fetched by name later.
    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    return outputs, tf.identity(last_state, name="final_state")
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # embed word ids -> run the RNN -> project each timestep's output
    # onto the vocabulary with a linear (no activation) dense layer.
    embedded = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embedded)
    logits = tf.layers.dense(outputs, vocab_size, use_bias=True)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array of shape
             (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text)//words_per_batch
    # Drop the tail that cannot fill a whole batch.
    words = np.asarray(int_text[:n_batches * words_per_batch])
    inputs = words.reshape((batch_size, -1))
    # The target of every position is the next word.  Rolling the flat
    # sequence left by one produces that in one step and wraps the final
    # target back to the first word of the data, instead of zero-padding
    # the last column as the old np.zeros-based code did (0 is a real
    # word id, so the padding silently trained on a wrong target).
    targets = np.roll(words, -1).reshape((batch_size, -1))
    batches = []
    for i in range(0, inputs.shape[1], seq_length):
        batches.append([inputs[:, i:i+seq_length],
                        targets[:, i:i+seq_length]])
    return np.asarray(batches)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = 512
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 256
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
len(int_to_vocab)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
Train: Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
Epoch 0 Batch 0/26 train_loss = 8.825
Epoch 0 Batch 10/26 train_loss = 8.530
Epoch 0 Batch 20/26 train_loss = 7.338
Epoch 1 Batch 4/26 train_loss = 6.397
Epoch 1 Batch 14/26 train_loss = 6.171
Epoch 1 Batch 24/26 train_loss = 6.222
Epoch 2 Batch 8/26 train_loss = 6.103
Epoch 2 Batch 18/26 train_loss = 6.094
Epoch 3 Batch 2/26 train_loss = 6.028
Epoch 3 Batch 12/26 train_loss = 6.181
Epoch 3 Batch 22/26 train_loss = 5.923
Epoch 4 Batch 6/26 train_loss = 5.959
Epoch 4 Batch 16/26 train_loss = 5.852
Epoch 5 Batch 0/26 train_loss = 5.757
Epoch 5 Batch 10/26 train_loss = 5.742
Epoch 5 Batch 20/26 train_loss = 5.753
Epoch 6 Batch 4/26 train_loss = 5.680
Epoch 6 Batch 14/26 train_loss = 5.590
Epoch 6 Batch 24/26 train_loss = 5.673
Epoch 7 Batch 8/26 train_loss = 5.552
Epoch 7 Batch 18/26 train_loss = 5.573
Epoch 8 Batch 2/26 train_loss = 5.510
Epoch 8 Batch 12/26 train_loss = 5.677
Epoch 8 Batch 22/26 train_loss = 5.425
Epoch 9 Batch 6/26 train_loss = 5.498
Epoch 9 Batch 16/26 train_loss = 5.415
Epoch 10 Batch 0/26 train_loss = 5.326
Epoch 10 Batch 10/26 train_loss = 5.325
Epoch 10 Batch 20/26 train_loss = 5.342
Epoch 11 Batch 4/26 train_loss = 5.270
Epoch 11 Batch 14/26 train_loss = 5.200
Epoch 11 Batch 24/26 train_loss = 5.290
Epoch 12 Batch 8/26 train_loss = 5.172
Epoch 12 Batch 18/26 train_loss = 5.201
Epoch 13 Batch 2/26 train_loss = 5.147
Epoch 13 Batch 12/26 train_loss = 5.323
Epoch 13 Batch 22/26 train_loss = 5.076
Epoch 14 Batch 6/26 train_loss = 5.165
Epoch 14 Batch 16/26 train_loss = 5.088
Epoch 15 Batch 0/26 train_loss = 5.013
Epoch 15 Batch 10/26 train_loss = 5.011
Epoch 15 Batch 20/26 train_loss = 5.016
Epoch 16 Batch 4/26 train_loss = 4.975
Epoch 16 Batch 14/26 train_loss = 4.903
Epoch 16 Batch 24/26 train_loss = 4.990
Epoch 17 Batch 8/26 train_loss = 4.891
Epoch 17 Batch 18/26 train_loss = 4.926
Epoch 18 Batch 2/26 train_loss = 4.874
Epoch 18 Batch 12/26 train_loss = 5.051
Epoch 18 Batch 22/26 train_loss = 4.814
Epoch 19 Batch 6/26 train_loss = 4.919
Epoch 19 Batch 16/26 train_loss = 4.842
Epoch 20 Batch 0/26 train_loss = 4.778
Epoch 20 Batch 10/26 train_loss = 4.769
Epoch 20 Batch 20/26 train_loss = 4.771
Epoch 21 Batch 4/26 train_loss = 4.753
Epoch 21 Batch 14/26 train_loss = 4.678
Epoch 21 Batch 24/26 train_loss = 4.758
Epoch 22 Batch 8/26 train_loss = 4.683
Epoch 22 Batch 18/26 train_loss = 4.714
Epoch 23 Batch 2/26 train_loss = 4.664
Epoch 23 Batch 12/26 train_loss = 4.832
Epoch 23 Batch 22/26 train_loss = 4.602
Epoch 24 Batch 6/26 train_loss = 4.720
Epoch 24 Batch 16/26 train_loss = 4.640
Epoch 25 Batch 0/26 train_loss = 4.581
Epoch 25 Batch 10/26 train_loss = 4.569
Epoch 25 Batch 20/26 train_loss = 4.571
Epoch 26 Batch 4/26 train_loss = 4.569
Epoch 26 Batch 14/26 train_loss = 4.489
Epoch 26 Batch 24/26 train_loss = 4.562
Epoch 27 Batch 8/26 train_loss = 4.511
Epoch 27 Batch 18/26 train_loss = 4.533
Epoch 28 Batch 2/26 train_loss = 4.487
Epoch 28 Batch 12/26 train_loss = 4.646
Epoch 28 Batch 22/26 train_loss = 4.423
Epoch 29 Batch 6/26 train_loss = 4.544
Epoch 29 Batch 16/26 train_loss = 4.466
Epoch 30 Batch 0/26 train_loss = 4.410
Epoch 30 Batch 10/26 train_loss = 4.401
Epoch 30 Batch 20/26 train_loss = 4.400
Epoch 31 Batch 4/26 train_loss = 4.409
Epoch 31 Batch 14/26 train_loss = 4.329
Epoch 31 Batch 24/26 train_loss = 4.392
Epoch 32 Batch 8/26 train_loss = 4.357
Epoch 32 Batch 18/26 train_loss = 4.374
Epoch 33 Batch 2/26 train_loss = 4.333
Epoch 33 Batch 12/26 train_loss = 4.481
Epoch 33 Batch 22/26 train_loss = 4.266
Epoch 34 Batch 6/26 train_loss = 4.384
Epoch 34 Batch 16/26 train_loss = 4.312
Epoch 35 Batch 0/26 train_loss = 4.259
Epoch 35 Batch 10/26 train_loss = 4.254
Epoch 35 Batch 20/26 train_loss = 4.247
Epoch 36 Batch 4/26 train_loss = 4.264
Epoch 36 Batch 14/26 train_loss = 4.187
Epoch 36 Batch 24/26 train_loss = 4.241
Epoch 37 Batch 8/26 train_loss = 4.217
Epoch 37 Batch 18/26 train_loss = 4.229
Epoch 38 Batch 2/26 train_loss = 4.195
Epoch 38 Batch 12/26 train_loss = 4.332
Epoch 38 Batch 22/26 train_loss = 4.126
Epoch 39 Batch 6/26 train_loss = 4.239
Epoch 39 Batch 16/26 train_loss = 4.174
Epoch 40 Batch 0/26 train_loss = 4.123
Epoch 40 Batch 10/26 train_loss = 4.122
Epoch 40 Batch 20/26 train_loss = 4.109
Epoch 41 Batch 4/26 train_loss = 4.133
Epoch 41 Batch 14/26 train_loss = 4.061
Epoch 41 Batch 24/26 train_loss = 4.106
Epoch 42 Batch 8/26 train_loss = 4.089
Epoch 42 Batch 18/26 train_loss = 4.097
Epoch 43 Batch 2/26 train_loss = 4.071
Epoch 43 Batch 12/26 train_loss = 4.197
Epoch 43 Batch 22/26 train_loss = 4.000
Epoch 44 Batch 6/26 train_loss = 4.103
Epoch 44 Batch 16/26 train_loss = 4.048
Epoch 45 Batch 0/26 train_loss = 3.999
Epoch 45 Batch 10/26 train_loss = 4.000
Epoch 45 Batch 20/26 train_loss = 3.981
Epoch 46 Batch 4/26 train_loss = 4.012
Epoch 46 Batch 14/26 train_loss = 3.941
Epoch 46 Batch 24/26 train_loss = 3.980
Epoch 47 Batch 8/26 train_loss = 3.969
Epoch 47 Batch 18/26 train_loss = 3.975
Epoch 48 Batch 2/26 train_loss = 3.953
Epoch 48 Batch 12/26 train_loss = 4.067
Epoch 48 Batch 22/26 train_loss = 3.884
Epoch 49 Batch 6/26 train_loss = 3.976
Epoch 49 Batch 16/26 train_loss = 3.930
Epoch 50 Batch 0/26 train_loss = 3.882
Epoch 50 Batch 10/26 train_loss = 3.884
Epoch 50 Batch 20/26 train_loss = 3.862
Epoch 51 Batch 4/26 train_loss = 3.896
Epoch 51 Batch 14/26 train_loss = 3.830
Epoch 51 Batch 24/26 train_loss = 3.860
Epoch 52 Batch 8/26 train_loss = 3.855
Epoch 52 Batch 18/26 train_loss = 3.860
Epoch 53 Batch 2/26 train_loss = 3.840
Epoch 53 Batch 12/26 train_loss = 3.944
Epoch 53 Batch 22/26 train_loss = 3.774
Epoch 54 Batch 6/26 train_loss = 3.857
Epoch 54 Batch 16/26 train_loss = 3.819
Epoch 55 Batch 0/26 train_loss = 3.773
Epoch 55 Batch 10/26 train_loss = 3.774
Epoch 55 Batch 20/26 train_loss = 3.747
Epoch 56 Batch 4/26 train_loss = 3.785
Epoch 56 Batch 14/26 train_loss = 3.724
Epoch 56 Batch 24/26 train_loss = 3.747
Epoch 57 Batch 8/26 train_loss = 3.746
Epoch 57 Batch 18/26 train_loss = 3.750
Epoch 58 Batch 2/26 train_loss = 3.731
Epoch 58 Batch 12/26 train_loss = 3.827
Epoch 58 Batch 22/26 train_loss = 3.671
Epoch 59 Batch 6/26 train_loss = 3.745
Epoch 59 Batch 16/26 train_loss = 3.713
Epoch 60 Batch 0/26 train_loss = 3.668
Epoch 60 Batch 10/26 train_loss = 3.669
Epoch 60 Batch 20/26 train_loss = 3.638
Epoch 61 Batch 4/26 train_loss = 3.679
Epoch 61 Batch 14/26 train_loss = 3.622
Epoch 61 Batch 24/26 train_loss = 3.640
Epoch 62 Batch 8/26 train_loss = 3.640
Epoch 62 Batch 18/26 train_loss = 3.645
Epoch 63 Batch 2/26 train_loss = 3.627
Epoch 63 Batch 12/26 train_loss = 3.715
Epoch 63 Batch 22/26 train_loss = 3.573
Epoch 64 Batch 6/26 train_loss = 3.639
Epoch 64 Batch 16/26 train_loss = 3.612
Epoch 65 Batch 0/26 train_loss = 3.569
Epoch 65 Batch 10/26 train_loss = 3.567
Epoch 65 Batch 20/26 train_loss = 3.534
Epoch 66 Batch 4/26 train_loss = 3.575
Epoch 66 Batch 14/26 train_loss = 3.526
Epoch 66 Batch 24/26 train_loss = 3.536
Epoch 67 Batch 8/26 train_loss = 3.539
Epoch 67 Batch 18/26 train_loss = 3.543
Epoch 68 Batch 2/26 train_loss = 3.527
Epoch 68 Batch 12/26 train_loss = 3.607
Epoch 68 Batch 22/26 train_loss = 3.479
Epoch 69 Batch 6/26 train_loss = 3.538
Epoch 69 Batch 16/26 train_loss = 3.514
Epoch 70 Batch 0/26 train_loss = 3.473
Epoch 70 Batch 10/26 train_loss = 3.469
Epoch 70 Batch 20/26 train_loss = 3.431
Epoch 71 Batch 4/26 train_loss = 3.474
Epoch 71 Batch 14/26 train_loss = 3.430
Epoch 71 Batch 24/26 train_loss = 3.436
Epoch 72 Batch 8/26 train_loss = 3.442
Epoch 72 Batch 18/26 train_loss = 3.445
Epoch 73 Batch 2/26 train_loss = 3.428
Epoch 73 Batch 12/26 train_loss = 3.499
Epoch 73 Batch 22/26 train_loss = 3.387
Epoch 74 Batch 6/26 train_loss = 3.439
Epoch 74 Batch 16/26 train_loss = 3.418
Epoch 75 Batch 0/26 train_loss = 3.380
Epoch 75 Batch 10/26 train_loss = 3.374
Epoch 75 Batch 20/26 train_loss = 3.328
Epoch 76 Batch 4/26 train_loss = 3.376
Epoch 76 Batch 14/26 train_loss = 3.339
Epoch 76 Batch 24/26 train_loss = 3.338
Epoch 77 Batch 8/26 train_loss = 3.347
Epoch 77 Batch 18/26 train_loss = 3.348
Epoch 78 Batch 2/26 train_loss = 3.334
Epoch 78 Batch 12/26 train_loss = 3.397
Epoch 78 Batch 22/26 train_loss = 3.296
Epoch 79 Batch 6/26 train_loss = 3.344
Epoch 79 Batch 16/26 train_loss = 3.326
Epoch 80 Batch 0/26 train_loss = 3.286
Epoch 80 Batch 10/26 train_loss = 3.281
Epoch 80 Batch 20/26 train_loss = 3.230
Epoch 81 Batch 4/26 train_loss = 3.280
Epoch 81 Batch 14/26 train_loss = 3.247
Epoch 81 Batch 24/26 train_loss = 3.243
Epoch 82 Batch 8/26 train_loss = 3.254
Epoch 82 Batch 18/26 train_loss = 3.254
Epoch 83 Batch 2/26 train_loss = 3.241
Epoch 83 Batch 12/26 train_loss = 3.294
Epoch 83 Batch 22/26 train_loss = 3.206
Epoch 84 Batch 6/26 train_loss = 3.250
Epoch 84 Batch 16/26 train_loss = 3.237
Epoch 85 Batch 0/26 train_loss = 3.199
Epoch 85 Batch 10/26 train_loss = 3.189
Epoch 85 Batch 20/26 train_loss = 3.134
Epoch 86 Batch 4/26 train_loss = 3.187
Epoch 86 Batch 14/26 train_loss = 3.161
Epoch 86 Batch 24/26 train_loss = 3.152
Epoch 87 Batch 8/26 train_loss = 3.163
Epoch 87 Batch 18/26 train_loss = 3.163
Epoch 88 Batch 2/26 train_loss = 3.153
Epoch 88 Batch 12/26 train_loss = 3.197
Epoch 88 Batch 22/26 train_loss = 3.119
Epoch 89 Batch 6/26 train_loss = 3.157
Epoch 89 Batch 16/26 train_loss = 3.150
Epoch 90 Batch 0/26 train_loss = 3.113
Epoch 90 Batch 10/26 train_loss = 3.106
Epoch 90 Batch 20/26 train_loss = 3.043
Epoch 91 Batch 4/26 train_loss = 3.097
Epoch 91 Batch 14/26 train_loss = 3.078
Epoch 91 Batch 24/26 train_loss = 3.063
Epoch 92 Batch 8/26 train_loss = 3.078
Epoch 92 Batch 18/26 train_loss = 3.077
Epoch 93 Batch 2/26 train_loss = 3.072
Epoch 93 Batch 12/26 train_loss = 3.107
Epoch 93 Batch 22/26 train_loss = 3.038
Epoch 94 Batch 6/26 train_loss = 3.069
Epoch 94 Batch 16/26 train_loss = 3.066
Epoch 95 Batch 0/26 train_loss = 3.029
Epoch 95 Batch 10/26 train_loss = 3.019
Epoch 95 Batch 20/26 train_loss = 2.957
Epoch 96 Batch 4/26 train_loss = 3.012
Epoch 96 Batch 14/26 train_loss = 2.996
Epoch 96 Batch 24/26 train_loss = 2.981
Epoch 97 Batch 8/26 train_loss = 2.989
Epoch 97 Batch 18/26 train_loss = 2.993
Epoch 98 Batch 2/26 train_loss = 2.987
Epoch 98 Batch 12/26 train_loss = 3.016
Epoch 98 Batch 22/26 train_loss = 2.952
Epoch 99 Batch 6/26 train_loss = 2.983
Epoch 99 Batch 16/26 train_loss = 2.986
Epoch 100 Batch 0/26 train_loss = 2.948
Epoch 100 Batch 10/26 train_loss = 2.936
Epoch 100 Batch 20/26 train_loss = 2.873
Epoch 101 Batch 4/26 train_loss = 2.931
Epoch 101 Batch 14/26 train_loss = 2.917
Epoch 101 Batch 24/26 train_loss = 2.897
Epoch 102 Batch 8/26 train_loss = 2.907
Epoch 102 Batch 18/26 train_loss = 2.913
Epoch 103 Batch 2/26 train_loss = 2.911
Epoch 103 Batch 12/26 train_loss = 2.931
Epoch 103 Batch 22/26 train_loss = 2.876
Epoch 104 Batch 6/26 train_loss = 2.900
Epoch 104 Batch 16/26 train_loss = 2.909
Epoch 105 Batch 0/26 train_loss = 2.871
Epoch 105 Batch 10/26 train_loss = 2.855
Epoch 105 Batch 20/26 train_loss = 2.795
Epoch 106 Batch 4/26 train_loss = 2.855
Epoch 106 Batch 14/26 train_loss = 2.843
Epoch 106 Batch 24/26 train_loss = 2.821
Epoch 107 Batch 8/26 train_loss = 2.830
Epoch 107 Batch 18/26 train_loss = 2.836
Epoch 108 Batch 2/26 train_loss = 2.835
Epoch 108 Batch 12/26 train_loss = 2.850
Epoch 108 Batch 22/26 train_loss = 2.797
Epoch 109 Batch 6/26 train_loss = 2.824
Epoch 109 Batch 16/26 train_loss = 2.837
Epoch 110 Batch 0/26 train_loss = 2.796
Epoch 110 Batch 10/26 train_loss = 2.784
Epoch 110 Batch 20/26 train_loss = 2.723
Epoch 111 Batch 4/26 train_loss = 2.780
Epoch 111 Batch 14/26 train_loss = 2.773
Epoch 111 Batch 24/26 train_loss = 2.744
Epoch 112 Batch 8/26 train_loss = 2.755
Epoch 112 Batch 18/26 train_loss = 2.762
Epoch 113 Batch 2/26 train_loss = 2.766
Epoch 113 Batch 12/26 train_loss = 2.774
Epoch 113 Batch 22/26 train_loss = 2.728
Epoch 114 Batch 6/26 train_loss = 2.750
Epoch 114 Batch 16/26 train_loss = 2.773
Epoch 115 Batch 0/26 train_loss = 2.733
Epoch 115 Batch 10/26 train_loss = 2.707
Epoch 115 Batch 20/26 train_loss = 2.652
Epoch 116 Batch 4/26 train_loss = 2.707
Epoch 116 Batch 14/26 train_loss = 2.701
Epoch 116 Batch 24/26 train_loss = 2.673
Epoch 117 Batch 8/26 train_loss = 2.688
Epoch 117 Batch 18/26 train_loss = 2.690
Epoch 118 Batch 2/26 train_loss = 2.693
Epoch 118 Batch 12/26 train_loss = 2.693
Epoch 118 Batch 22/26 train_loss = 2.654
Epoch 119 Batch 6/26 train_loss = 2.673
Epoch 119 Batch 16/26 train_loss = 2.697
Epoch 120 Batch 0/26 train_loss = 2.655
Epoch 120 Batch 10/26 train_loss = 2.636
Epoch 120 Batch 20/26 train_loss = 2.583
Epoch 121 Batch 4/26 train_loss = 2.635
Epoch 121 Batch 14/26 train_loss = 2.630
Epoch 121 Batch 24/26 train_loss = 2.599
Epoch 122 Batch 8/26 train_loss = 2.615
Epoch 122 Batch 18/26 train_loss = 2.618
Epoch 123 Batch 2/26 train_loss = 2.623
Epoch 123 Batch 12/26 train_loss = 2.616
Epoch 123 Batch 22/26 train_loss = 2.585
Epoch 124 Batch 6/26 train_loss = 2.599
Epoch 124 Batch 16/26 train_loss = 2.626
Epoch 125 Batch 0/26 train_loss = 2.586
Epoch 125 Batch 10/26 train_loss = 2.570
Epoch 125 Batch 20/26 train_loss = 2.514
Epoch 126 Batch 4/26 train_loss = 2.562
Epoch 126 Batch 14/26 train_loss = 2.559
Epoch 126 Batch 24/26 train_loss = 2.535
Epoch 127 Batch 8/26 train_loss = 2.547
Epoch 127 Batch 18/26 train_loss = 2.544
Epoch 128 Batch 2/26 train_loss = 2.557
Epoch 128 Batch 12/26 train_loss = 2.548
Epoch 128 Batch 22/26 train_loss = 2.516
Epoch 129 Batch 6/26 train_loss = 2.524
Epoch 129 Batch 16/26 train_loss = 2.558
Epoch 130 Batch 0/26 train_loss = 2.526
Epoch 130 Batch 10/26 train_loss = 2.503
Epoch 130 Batch 20/26 train_loss = 2.442
Epoch 131 Batch 4/26 train_loss = 2.495
Epoch 131 Batch 14/26 train_loss = 2.496
Epoch 131 Batch 24/26 train_loss = 2.469
Epoch 132 Batch 8/26 train_loss = 2.473
Epoch 132 Batch 18/26 train_loss = 2.473
Epoch 133 Batch 2/26 train_loss = 2.500
Epoch 133 Batch 12/26 train_loss = 2.481
Epoch 133 Batch 22/26 train_loss = 2.447
Epoch 134 Batch 6/26 train_loss = 2.456
Epoch 134 Batch 16/26 train_loss = 2.505
Epoch 135 Batch 0/26 train_loss = 2.473
Epoch 135 Batch 10/26 train_loss = 2.443
Epoch 135 Batch 20/26 train_loss = 2.380
Epoch 136 Batch 4/26 train_loss = 2.439
Epoch 136 Batch 14/26 train_loss = 2.431
Epoch 136 Batch 24/26 train_loss = 2.397
Epoch 137 Batch 8/26 train_loss = 2.404
Epoch 137 Batch 18/26 train_loss = 2.414
Epoch 138 Batch 2/26 train_loss = 2.439
Epoch 138 Batch 12/26 train_loss = 2.410
Epoch 138 Batch 22/26 train_loss = 2.381
Epoch 139 Batch 6/26 train_loss = 2.392
Epoch 139 Batch 16/26 train_loss = 2.439
Epoch 140 Batch 0/26 train_loss = 2.400
Epoch 140 Batch 10/26 train_loss = 2.373
Epoch 140 Batch 20/26 train_loss = 2.314
Epoch 141 Batch 4/26 train_loss = 2.375
Epoch 141 Batch 14/26 train_loss = 2.359
Epoch 141 Batch 24/26 train_loss = 2.331
Epoch 142 Batch 8/26 train_loss = 2.337
Epoch 142 Batch 18/26 train_loss = 2.343
Epoch 143 Batch 2/26 train_loss = 2.374
Epoch 143 Batch 12/26 train_loss = 2.335
Epoch 143 Batch 22/26 train_loss = 2.308
Epoch 144 Batch 6/26 train_loss = 2.322
Epoch 144 Batch 16/26 train_loss = 2.366
Epoch 145 Batch 0/26 train_loss = 2.327
Epoch 145 Batch 10/26 train_loss = 2.305
Epoch 145 Batch 20/26 train_loss = 2.250
Epoch 146 Batch 4/26 train_loss = 2.308
Epoch 146 Batch 14/26 train_loss = 2.289
Epoch 146 Batch 24/26 train_loss = 2.264
Epoch 147 Batch 8/26 train_loss = 2.269
Epoch 147 Batch 18/26 train_loss = 2.272
Epoch 148 Batch 2/26 train_loss = 2.309
Epoch 148 Batch 12/26 train_loss = 2.263
Epoch 148 Batch 22/26 train_loss = 2.245
Epoch 149 Batch 6/26 train_loss = 2.251
Epoch 149 Batch 16/26 train_loss = 2.292
Epoch 150 Batch 0/26 train_loss = 2.268
Epoch 150 Batch 10/26 train_loss = 2.241
Epoch 150 Batch 20/26 train_loss = 2.185
Epoch 151 Batch 4/26 train_loss = 2.242
Epoch 151 Batch 14/26 train_loss = 2.220
Epoch 151 Batch 24/26 train_loss = 2.201
Epoch 152 Batch 8/26 train_loss = 2.205
Epoch 152 Batch 18/26 train_loss = 2.205
Epoch 153 Batch 2/26 train_loss = 2.251
Epoch 153 Batch 12/26 train_loss = 2.209
Epoch 153 Batch 22/26 train_loss = 2.182
Epoch 154 Batch 6/26 train_loss = 2.188
Epoch 154 Batch 16/26 train_loss = 2.237
Epoch 155 Batch 0/26 train_loss = 2.204
Epoch 155 Batch 10/26 train_loss = 2.178
Epoch 155 Batch 20/26 train_loss = 2.127
Epoch 156 Batch 4/26 train_loss = 2.183
Epoch 156 Batch 14/26 train_loss = 2.159
Epoch 156 Batch 24/26 train_loss = 2.144
Epoch 157 Batch 8/26 train_loss = 2.144
Epoch 157 Batch 18/26 train_loss = 2.141
Epoch 158 Batch 2/26 train_loss = 2.200
Epoch 158 Batch 12/26 train_loss = 2.141
Epoch 158 Batch 22/26 train_loss = 2.121
Epoch 159 Batch 6/26 train_loss = 2.124
Epoch 159 Batch 16/26 train_loss = 2.172
Epoch 160 Batch 0/26 train_loss = 2.145
Epoch 160 Batch 10/26 train_loss = 2.119
Epoch 160 Batch 20/26 train_loss = 2.072
Epoch 161 Batch 4/26 train_loss = 2.124
Epoch 161 Batch 14/26 train_loss = 2.107
Epoch 161 Batch 24/26 train_loss = 2.077
Epoch 162 Batch 8/26 train_loss = 2.087
Epoch 162 Batch 18/26 train_loss = 2.089
Epoch 163 Batch 2/26 train_loss = 2.135
Epoch 163 Batch 12/26 train_loss = 2.073
Epoch 163 Batch 22/26 train_loss = 2.054
Epoch 164 Batch 6/26 train_loss = 2.055
Epoch 164 Batch 16/26 train_loss = 2.110
Epoch 165 Batch 0/26 train_loss = 2.090
Epoch 165 Batch 10/26 train_loss = 2.060
Epoch 165 Batch 20/26 train_loss = 2.008
Epoch 166 Batch 4/26 train_loss = 2.076
Epoch 166 Batch 14/26 train_loss = 2.045
Epoch 166 Batch 24/26 train_loss = 2.011
Epoch 167 Batch 8/26 train_loss = 2.029
Epoch 167 Batch 18/26 train_loss = 2.026
Epoch 168 Batch 2/26 train_loss = 2.078
Epoch 168 Batch 12/26 train_loss = 2.007
Epoch 168 Batch 22/26 train_loss = 1.999
Epoch 169 Batch 6/26 train_loss = 1.993
Epoch 169 Batch 16/26 train_loss = 2.055
Epoch 170 Batch 0/26 train_loss = 2.033
Epoch 170 Batch 10/26 train_loss = 1.997
Epoch 170 Batch 20/26 train_loss = 1.963
Epoch 171 Batch 4/26 train_loss = 2.009
Epoch 171 Batch 14/26 train_loss = 1.977
Epoch 171 Batch 24/26 train_loss = 1.953
Epoch 172 Batch 8/26 train_loss = 1.963
Epoch 172 Batch 18/26 train_loss = 1.962
Epoch 173 Batch 2/26 train_loss = 2.018
Epoch 173 Batch 12/26 train_loss = 1.955
Epoch 173 Batch 22/26 train_loss = 1.940
Epoch 174 Batch 6/26 train_loss = 1.930
Epoch 174 Batch 16/26 train_loss = 2.004
Epoch 175 Batch 0/26 train_loss = 1.970
Epoch 175 Batch 10/26 train_loss = 1.939
Epoch 175 Batch 20/26 train_loss = 1.906
Epoch 176 Batch 4/26 train_loss = 1.949
Epoch 176 Batch 14/26 train_loss = 1.922
Epoch 176 Batch 24/26 train_loss = 1.897
Epoch 177 Batch 8/26 train_loss = 1.908
Epoch 177 Batch 18/26 train_loss = 1.906
Epoch 178 Batch 2/26 train_loss = 1.969
Epoch 178 Batch 12/26 train_loss = 1.901
Epoch 178 Batch 22/26 train_loss = 1.885
Epoch 179 Batch 6/26 train_loss = 1.878
Epoch 179 Batch 16/26 train_loss = 1.944
Epoch 180 Batch 0/26 train_loss = 1.916
Epoch 180 Batch 10/26 train_loss = 1.881
Epoch 180 Batch 20/26 train_loss = 1.845
Epoch 181 Batch 4/26 train_loss = 1.897
Epoch 181 Batch 14/26 train_loss = 1.865
Epoch 181 Batch 24/26 train_loss = 1.843
Epoch 182 Batch 8/26 train_loss = 1.855
Epoch 182 Batch 18/26 train_loss = 1.855
Epoch 183 Batch 2/26 train_loss = 1.930
Epoch 183 Batch 12/26 train_loss = 1.841
Epoch 183 Batch 22/26 train_loss = 1.831
Epoch 184 Batch 6/26 train_loss = 1.833
Epoch 184 Batch 16/26 train_loss = 1.889
Epoch 185 Batch 0/26 train_loss = 1.862
Epoch 185 Batch 10/26 train_loss = 1.829
Epoch 185 Batch 20/26 train_loss = 1.797
Epoch 186 Batch 4/26 train_loss = 1.847
Epoch 186 Batch 14/26 train_loss = 1.813
Epoch 186 Batch 24/26 train_loss = 1.789
Epoch 187 Batch 8/26 train_loss = 1.804
Epoch 187 Batch 18/26 train_loss = 1.801
Epoch 188 Batch 2/26 train_loss = 1.872
Epoch 188 Batch 12/26 train_loss = 1.791
Epoch 188 Batch 22/26 train_loss = 1.783
Epoch 189 Batch 6/26 train_loss = 1.774
Epoch 189 Batch 16/26 train_loss = 1.842
Epoch 190 Batch 0/26 train_loss = 1.814
Epoch 190 Batch 10/26 train_loss = 1.779
Epoch 190 Batch 20/26 train_loss = 1.754
Epoch 191 Batch 4/26 train_loss = 1.800
Epoch 191 Batch 14/26 train_loss = 1.771
Epoch 191 Batch 24/26 train_loss = 1.735
Epoch 192 Batch 8/26 train_loss = 1.752
Epoch 192 Batch 18/26 train_loss = 1.753
Epoch 193 Batch 2/26 train_loss = 1.820
Epoch 193 Batch 12/26 train_loss = 1.731
Epoch 193 Batch 22/26 train_loss = 1.731
Epoch 194 Batch 6/26 train_loss = 1.723
Epoch 194 Batch 16/26 train_loss = 1.787
Epoch 195 Batch 0/26 train_loss = 1.763
Epoch 195 Batch 10/26 train_loss = 1.726
Epoch 195 Batch 20/26 train_loss = 1.703
Epoch 196 Batch 4/26 train_loss = 1.753
Epoch 196 Batch 14/26 train_loss = 1.723
Epoch 196 Batch 24/26 train_loss = 1.681
Epoch 197 Batch 8/26 train_loss = 1.703
Epoch 197 Batch 18/26 train_loss = 1.714
Epoch 198 Batch 2/26 train_loss = 1.770
Epoch 198 Batch 12/26 train_loss = 1.684
Epoch 198 Batch 22/26 train_loss = 1.686
Epoch 199 Batch 6/26 train_loss = 1.679
Epoch 199 Batch 16/26 train_loss = 1.742
Epoch 200 Batch 0/26 train_loss = 1.714
Epoch 200 Batch 10/26 train_loss = 1.685
Epoch 200 Batch 20/26 train_loss = 1.663
Epoch 201 Batch 4/26 train_loss = 1.714
Epoch 201 Batch 14/26 train_loss = 1.669
Epoch 201 Batch 24/26 train_loss = 1.641
Epoch 202 Batch 8/26 train_loss = 1.675
Epoch 202 Batch 18/26 train_loss = 1.676
Epoch 203 Batch 2/26 train_loss = 1.726
Epoch 203 Batch 12/26 train_loss = 1.645
Epoch 203 Batch 22/26 train_loss = 1.654
Epoch 204 Batch 6/26 train_loss = 1.630
Epoch 204 Batch 16/26 train_loss = 1.693
Epoch 205 Batch 0/26 train_loss = 1.671
Epoch 205 Batch 10/26 train_loss = 1.643
Epoch 205 Batch 20/26 train_loss = 1.630
Epoch 206 Batch 4/26 train_loss = 1.663
Epoch 206 Batch 14/26 train_loss = 1.630
Epoch 206 Batch 24/26 train_loss = 1.611
Epoch 207 Batch 8/26 train_loss = 1.636
Epoch 207 Batch 18/26 train_loss = 1.636
Epoch 208 Batch 2/26 train_loss = 1.696
Epoch 208 Batch 12/26 train_loss = 1.615
Epoch 208 Batch 22/26 train_loss = 1.646
Epoch 209 Batch 6/26 train_loss = 1.611
Epoch 209 Batch 16/26 train_loss = 1.667
Epoch 210 Batch 0/26 train_loss = 1.654
Epoch 210 Batch 10/26 train_loss = 1.638
Epoch 210 Batch 20/26 train_loss = 1.613
Epoch 211 Batch 4/26 train_loss = 1.635
Epoch 211 Batch 14/26 train_loss = 1.607
Epoch 211 Batch 24/26 train_loss = 1.603
Epoch 212 Batch 8/26 train_loss = 1.622
Epoch 212 Batch 18/26 train_loss = 1.596
Epoch 213 Batch 2/26 train_loss = 1.676
Epoch 213 Batch 12/26 train_loss = 1.629
Epoch 213 Batch 22/26 train_loss = 1.612
Epoch 214 Batch 6/26 train_loss = 1.561
Epoch 214 Batch 16/26 train_loss = 1.659
Epoch 215 Batch 0/26 train_loss = 1.679
Epoch 215 Batch 10/26 train_loss = 1.605
Epoch 215 Batch 20/26 train_loss = 1.562
Epoch 216 Batch 4/26 train_loss = 1.629
Epoch 216 Batch 14/26 train_loss = 1.608
Epoch 216 Batch 24/26 train_loss = 1.548
Epoch 217 Batch 8/26 train_loss = 1.580
Epoch 217 Batch 18/26 train_loss = 1.598
Epoch 218 Batch 2/26 train_loss = 1.655
Epoch 218 Batch 12/26 train_loss = 1.568
Epoch 218 Batch 22/26 train_loss = 1.577
Epoch 219 Batch 6/26 train_loss = 1.539
Epoch 219 Batch 16/26 train_loss = 1.610
Epoch 220 Batch 0/26 train_loss = 1.599
Epoch 220 Batch 10/26 train_loss = 1.565
Epoch 220 Batch 20/26 train_loss = 1.508
Epoch 221 Batch 4/26 train_loss = 1.554
Epoch 221 Batch 14/26 train_loss = 1.545
Epoch 221 Batch 24/26 train_loss = 1.495
Epoch 222 Batch 8/26 train_loss = 1.502
Epoch 222 Batch 18/26 train_loss = 1.512
Epoch 223 Batch 2/26 train_loss = 1.594
Epoch 223 Batch 12/26 train_loss = 1.504
Epoch 223 Batch 22/26 train_loss = 1.484
Epoch 224 Batch 6/26 train_loss = 1.465
Epoch 224 Batch 16/26 train_loss = 1.554
Epoch 225 Batch 0/26 train_loss = 1.525
Epoch 225 Batch 10/26 train_loss = 1.493
Epoch 225 Batch 20/26 train_loss = 1.460
Epoch 226 Batch 4/26 train_loss = 1.507
Epoch 226 Batch 14/26 train_loss = 1.486
Epoch 226 Batch 24/26 train_loss = 1.443
Epoch 227 Batch 8/26 train_loss = 1.457
Epoch 227 Batch 18/26 train_loss = 1.466
Epoch 228 Batch 2/26 train_loss = 1.545
Epoch 228 Batch 12/26 train_loss = 1.454
Epoch 228 Batch 22/26 train_loss = 1.444
Epoch 229 Batch 6/26 train_loss = 1.421
Epoch 229 Batch 16/26 train_loss = 1.510
Epoch 230 Batch 0/26 train_loss = 1.491
Epoch 230 Batch 10/26 train_loss = 1.442
Epoch 230 Batch 20/26 train_loss = 1.414
Epoch 231 Batch 4/26 train_loss = 1.465
Epoch 231 Batch 14/26 train_loss = 1.433
Epoch 231 Batch 24/26 train_loss = 1.387
Epoch 232 Batch 8/26 train_loss = 1.414
Epoch 232 Batch 18/26 train_loss = 1.429
Epoch 233 Batch 2/26 train_loss = 1.498
Epoch 233 Batch 12/26 train_loss = 1.399
Epoch 233 Batch 22/26 train_loss = 1.401
Epoch 234 Batch 6/26 train_loss = 1.384
Epoch 234 Batch 16/26 train_loss = 1.456
Epoch 235 Batch 0/26 train_loss = 1.440
Epoch 235 Batch 10/26 train_loss = 1.409
Epoch 235 Batch 20/26 train_loss = 1.374
Epoch 236 Batch 4/26 train_loss = 1.419
Epoch 236 Batch 14/26 train_loss = 1.384
Epoch 236 Batch 24/26 train_loss = 1.347
Epoch 237 Batch 8/26 train_loss = 1.368
Epoch 237 Batch 18/26 train_loss = 1.379
Epoch 238 Batch 2/26 train_loss = 1.456
Epoch 238 Batch 12/26 train_loss = 1.356
Epoch 238 Batch 22/26 train_loss = 1.347
Epoch 239 Batch 6/26 train_loss = 1.331
Epoch 239 Batch 16/26 train_loss = 1.422
Epoch 240 Batch 0/26 train_loss = 1.390
Epoch 240 Batch 10/26 train_loss = 1.354
Epoch 240 Batch 20/26 train_loss = 1.333
Epoch 241 Batch 4/26 train_loss = 1.378
Epoch 241 Batch 14/26 train_loss = 1.343
Epoch 241 Batch 24/26 train_loss = 1.301
Epoch 242 Batch 8/26 train_loss = 1.329
Epoch 242 Batch 18/26 train_loss = 1.343
Epoch 243 Batch 2/26 train_loss = 1.407
Epoch 243 Batch 12/26 train_loss = 1.309
Epoch 243 Batch 22/26 train_loss = 1.313
Epoch 244 Batch 6/26 train_loss = 1.295
Epoch 244 Batch 16/26 train_loss = 1.377
Epoch 245 Batch 0/26 train_loss = 1.346
Epoch 245 Batch 10/26 train_loss = 1.317
Epoch 245 Batch 20/26 train_loss = 1.304
Epoch 246 Batch 4/26 train_loss = 1.335
Epoch 246 Batch 14/26 train_loss = 1.300
Epoch 246 Batch 24/26 train_loss = 1.265
Epoch 247 Batch 8/26 train_loss = 1.295
Epoch 247 Batch 18/26 train_loss = 1.302
Epoch 248 Batch 2/26 train_loss = 1.358
Epoch 248 Batch 12/26 train_loss = 1.269
Epoch 248 Batch 22/26 train_loss = 1.277
Epoch 249 Batch 6/26 train_loss = 1.254
Epoch 249 Batch 16/26 train_loss = 1.333
Epoch 250 Batch 0/26 train_loss = 1.307
Epoch 250 Batch 10/26 train_loss = 1.282
Epoch 250 Batch 20/26 train_loss = 1.261
Epoch 251 Batch 4/26 train_loss = 1.289
Epoch 251 Batch 14/26 train_loss = 1.259
Epoch 251 Batch 24/26 train_loss = 1.230
Epoch 252 Batch 8/26 train_loss = 1.252
Epoch 252 Batch 18/26 train_loss = 1.265
Epoch 253 Batch 2/26 train_loss = 1.319
Epoch 253 Batch 12/26 train_loss = 1.236
Epoch 253 Batch 22/26 train_loss = 1.239
Epoch 254 Batch 6/26 train_loss = 1.217
Epoch 254 Batch 16/26 train_loss = 1.291
Epoch 255 Batch 0/26 train_loss = 1.269
Epoch 255 Batch 10/26 train_loss = 1.248
Epoch 255 Batch 20/26 train_loss = 1.226
Model Trained and Saved
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions

Get Tensors

Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:

- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"

Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The tensor names were fixed when the graph was built ("input",
    # "initial_state", "final_state", "probs"); recover each one by name
    # from the restored graph, in the order callers expect.
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word weighted by its predicted probability instead of always
    # taking the argmax — this keeps the generated script varied.
    candidates = list(int_to_vocab.values())
    sampled = np.random.choice(candidates, 1, p=probabilities)
    return sampled[0]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
# Number of words to generate for the new script.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

    # Sentences generation setup
    # Seed the script with "<prime_word>:" and an arbitrary initial RNN state.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words,
        # encoded as ids, as a single-example batch.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])

        # Get Prediction: run one step, carrying the RNN state forward, and
        # take the probability row for the last position in the sequence.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})

        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)

        gen_sentences.append(pred_word)

    # Remove tokens: turn "||period||"-style tokens back into punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        # NOTE(review): `ending` is computed but never used below — presumably
        # intended for the replace call; confirm against the original notebook.
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')

    print(tv_script)
###Output
moe_szyslak: homer, can i ever think you can use that way.
moe_szyslak: but i'm thankful i made a mistake, huh?
gary_chalmers:(kindly) you're the clone, i know. to the correct?
homer_simpson: moe, i don't know everything i know. did.
moe_szyslak: i did run ziffcorp into a" special cheese buffalo gals... anything to meet twenty dollars on fire. jesus must be gettin' back today eating your people loaded.
chief_wiggum: absolutely they were never going to offer besides money.
homer_simpson:(protesting too many more / but in homer's heart that look at homer. i've gotta get drunk will hello.
seymour_skinner:(boisterous nervous, but dignified)...
kirk_van_houten: my car is giving me a lot, right? /(sotto;" reunion in a domed stadium) i'd say when as tonight.(small annoying laugh)
nigel_bakerbutcher: what! moe, i bid you-- that went in the beach with me.
chief_wiggum: from homer and put a sticker over my.
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
print(text[:100])
text = text[81:] #Ignore the copyright notice thing below
###Output
[YEAR DATE 1989] © Twentieth Century Fox Film Corporation. All rights reserved.
Moe_Szyslak: (INTO
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
# Half-open range of sentence indices to preview at the end of this cell.
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np

print('Dataset Stats')
# Dict-comprehension keys give the set of distinct whitespace-split tokens.
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines; lines within a scene by single newlines.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))

sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))

print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Sort words by descending frequency so ids are deterministic across runs
    # (set/dict iteration order alone would not be reproducible between
    # sessions) and the most common words get the smallest ids, a common
    # embedding convention. Ties keep Counter's insertion order (first
    # occurrence in the text) because sorted() is stable.
    word_counts = Counter(text)
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = {index: word for index, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: index for index, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Pair each punctuation symbol with a ||delimited|| name so the
    # preprocessor can surround the token with spaces and treat it as a word
    # the network can predict on its own.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ['period', 'comma', 'quote', 'semicolon', 'exclamation_mark',
             'question_mark', 'left_parentheses', 'right_parentheses',
             'dash', 'return']
    return {symbol: '||{}||'.format(name) for symbol, name in zip(symbols, names)}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Input and targets are (batch, sequence) int32 tensors; leaving both
    # dimensions as None lets the same graph accept any batch/sequence size.
    # The "input" name matters: get_tensors() recovers the tensor by name
    # after the graph is saved and reloaded.
    inputs = tf.placeholder(tf.int32, shape=(None, None), name="input")
    targets = tf.placeholder(tf.int32, shape=(None, None), name="targets")
    learning_rate = tf.placeholder(tf.float32, shape=(None), name="learningrate")
    return (inputs, targets, learning_rate)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and Initialize

Stack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).

- The RNN size should be set using `rnn_size`
- Initialize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function
- Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)

Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # One BasicLSTMCell wrapped in a MultiRNNCell (a single-layer stack).
    cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)])
    # Name the zero state so it can be fetched by name after the graph is
    # reloaded from a checkpoint.
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32),
                                name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable lookup table: one embed_dim-sized row per vocabulary word,
    # initialised uniformly in [-1, 1).  embedding_lookup selects the rows
    # indexed by the word ids in input_data.
    embedding_matrix = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # dynamic_rnn unrolls the cell over the time dimension of `inputs`.
    rnn_outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Expose the final state under a stable name for later retrieval by name.
    return rnn_outputs, tf.identity(last_state, name="final_state")
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
import problem_unittests as tests
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Embed word ids -> dense vectors, run them through the RNN, then project
    # each output to vocabulary-sized logits with a linear (no activation)
    # fully connected layer.
    rnn_inputs = get_embed(input_data, vocab_size, embed_dim)
    rnn_outputs, final_state = build_rnn(cell, rnn_inputs)
    logits = tf.contrib.layers.fully_connected(rnn_outputs, vocab_size,
                                               activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch size, sequence length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Drop the trailing words that cannot fill a complete batch.
    n_kept = n_batches * words_per_batch
    # Targets are the inputs shifted one word to the right.
    inputs = np.array(int_text[:n_kept]).reshape(batch_size, -1)
    targets = np.array(int_text[1:n_kept + 1]).reshape(batch_size, -1)
    input_batches = np.split(inputs, n_batches, axis=1)
    target_batches = np.split(targets, n_batches, axis=1)
    # By convention the very last target wraps around to the first word.
    target_batches[-1][-1][-1] = int_text[0]
    return np.array(list(zip(input_batches, target_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Training hyperparameters for the word-level RNN.
# Number of Epochs (full passes over all training batches)
num_epochs = 100
# Batch Size
batch_size = 128
# RNN Size (LSTM units per layer)
rnn_size = 256
# Embedding Dimension Size
embed_dim = 256
# Sequence Length (words fed to the RNN per training step)
seq_length = 25
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
Epoch 0 Batch 0/21 train_loss = 8.821
Epoch 1 Batch 4/21 train_loss = 5.405
Epoch 2 Batch 8/21 train_loss = 4.627
Epoch 3 Batch 12/21 train_loss = 4.234
Epoch 4 Batch 16/21 train_loss = 3.833
Epoch 5 Batch 20/21 train_loss = 3.462
Epoch 7 Batch 3/21 train_loss = 3.177
Epoch 8 Batch 7/21 train_loss = 2.857
Epoch 9 Batch 11/21 train_loss = 2.631
Epoch 10 Batch 15/21 train_loss = 2.503
Epoch 11 Batch 19/21 train_loss = 2.295
Epoch 13 Batch 2/21 train_loss = 2.267
Epoch 14 Batch 6/21 train_loss = 2.037
Epoch 15 Batch 10/21 train_loss = 1.845
Epoch 16 Batch 14/21 train_loss = 1.675
Epoch 17 Batch 18/21 train_loss = 1.656
Epoch 19 Batch 1/21 train_loss = 1.447
Epoch 20 Batch 5/21 train_loss = 1.470
Epoch 21 Batch 9/21 train_loss = 1.289
Epoch 22 Batch 13/21 train_loss = 1.233
Epoch 23 Batch 17/21 train_loss = 1.159
Epoch 25 Batch 0/21 train_loss = 1.114
Epoch 26 Batch 4/21 train_loss = 0.987
Epoch 27 Batch 8/21 train_loss = 0.995
Epoch 28 Batch 12/21 train_loss = 0.923
Epoch 29 Batch 16/21 train_loss = 0.941
Epoch 30 Batch 20/21 train_loss = 0.880
Epoch 32 Batch 3/21 train_loss = 0.840
Epoch 33 Batch 7/21 train_loss = 0.789
Epoch 34 Batch 11/21 train_loss = 0.735
Epoch 35 Batch 15/21 train_loss = 0.658
Epoch 36 Batch 19/21 train_loss = 0.562
Epoch 38 Batch 2/21 train_loss = 0.549
Epoch 39 Batch 6/21 train_loss = 0.486
Epoch 40 Batch 10/21 train_loss = 0.445
Epoch 41 Batch 14/21 train_loss = 0.405
Epoch 42 Batch 18/21 train_loss = 0.397
Epoch 44 Batch 1/21 train_loss = 0.366
Epoch 45 Batch 5/21 train_loss = 0.358
Epoch 46 Batch 9/21 train_loss = 0.333
Epoch 47 Batch 13/21 train_loss = 0.280
Epoch 48 Batch 17/21 train_loss = 0.287
Epoch 50 Batch 0/21 train_loss = 0.292
Epoch 51 Batch 4/21 train_loss = 0.256
Epoch 52 Batch 8/21 train_loss = 0.284
Epoch 53 Batch 12/21 train_loss = 0.257
Epoch 54 Batch 16/21 train_loss = 0.243
Epoch 55 Batch 20/21 train_loss = 0.246
Epoch 57 Batch 3/21 train_loss = 0.257
Epoch 58 Batch 7/21 train_loss = 0.253
Epoch 59 Batch 11/21 train_loss = 0.238
Epoch 60 Batch 15/21 train_loss = 0.216
Epoch 61 Batch 19/21 train_loss = 0.205
Epoch 63 Batch 2/21 train_loss = 0.219
Epoch 64 Batch 6/21 train_loss = 0.197
Epoch 65 Batch 10/21 train_loss = 0.171
Epoch 66 Batch 14/21 train_loss = 0.159
Epoch 67 Batch 18/21 train_loss = 0.170
Epoch 69 Batch 1/21 train_loss = 0.157
Epoch 70 Batch 5/21 train_loss = 0.152
Epoch 71 Batch 9/21 train_loss = 0.152
Epoch 72 Batch 13/21 train_loss = 0.131
Epoch 73 Batch 17/21 train_loss = 0.132
Epoch 75 Batch 0/21 train_loss = 0.135
Epoch 76 Batch 4/21 train_loss = 0.132
Epoch 77 Batch 8/21 train_loss = 0.135
Epoch 78 Batch 12/21 train_loss = 0.116
Epoch 79 Batch 16/21 train_loss = 0.123
Epoch 80 Batch 20/21 train_loss = 0.118
Epoch 82 Batch 3/21 train_loss = 0.126
Epoch 83 Batch 7/21 train_loss = 0.120
Epoch 84 Batch 11/21 train_loss = 0.126
Epoch 85 Batch 15/21 train_loss = 0.116
Epoch 86 Batch 19/21 train_loss = 0.112
Epoch 88 Batch 2/21 train_loss = 0.136
Epoch 89 Batch 6/21 train_loss = 0.125
Epoch 90 Batch 10/21 train_loss = 0.113
Epoch 91 Batch 14/21 train_loss = 0.109
Epoch 92 Batch 18/21 train_loss = 0.120
Epoch 94 Batch 1/21 train_loss = 0.124
Epoch 95 Batch 5/21 train_loss = 0.115
Epoch 96 Batch 9/21 train_loss = 0.127
Epoch 97 Batch 13/21 train_loss = 0.113
Epoch 98 Batch 17/21 train_loss = 0.116
Model Trained and Saved
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    # Return an actual tuple.  The original returned a generator expression,
    # which can be consumed only once and does not match the documented
    # return type (no len(), no indexing, exhausted after one unpack).
    return tuple(loaded_graph.get_tensor_by_name(name) for name in names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text

    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word *id* according to the distribution, then map it through
    # int_to_vocab.  The original sampled from list(int_to_vocab.values()),
    # which silently assumes the dict iterates in id order 0..n-1; indexing
    # by the sampled id makes the id->word correspondence explicit.
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 600
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup: start from the prime word as a speaker tag.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences one word at a time, feeding back the RNN state.
    for n in range(gen_length):
        # Dynamic Input: at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction: use the distribution at the last time step.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: map punctuation tokens back to their symbols.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        # NOTE(review): 'ending' is computed but never used below — verify intent.
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
###Output
moe_szyslak: oh, the heat's been on since them bush girls were in here.
homer_simpson: all right, all right.(dejected) hey, you seen apu lately? he looks terrible.
carl_carlson: yeah, come on, a knife, need little more hemoglobin and your wife will be disrobin'.
marge_simpson:(talk-sings) i can't stand to my girl in the card...(then, considering:) but i'll allow it.
moe_szyslak: okay, come on, there's sexy bald like... uh... no.
moe_szyslak: homer, show a little more sensitivity around, and too... and as for you, homer.
homer_simpson:(distraught) oh, moe, they're dead! and i'll fill by a brilliant barney!
crowd:(chanting) wiggum forever, barney! soul mate! let me ask you pay!
barney_gumble:(amid curious sounds) hey.
lenny_leonard:(amid curious sounds) hey.
lenny_leonard:(awkward chuckle) oopsie.
edna_krabappel-flanders:(" why not?") want it to work on my fan...
homer_simpson:(flatly) yeah.
marge_simpson:(sings) mock...
homer_simpson:(super casual) yeah, how 'bout that super bowl? you goin' this year?
moe_szyslak:(to self) i knew he'd slip up sooner or later.
moe_szyslak: ah, wait a minute. i thought you didn't wanna get married.
seymour_skinner: no. absolutely no friction dancing!
barney_gumble: aw, c'mon. what're you, killjoy!
carl_carlson: say, the most awful thing just happened!
marge_simpson: i'm outta here. uh... my life...
carl_carlson: not yet, but at least we're hearing some interesting conversation from those two book clubs.
book_club_member: well, ah, hey, where didn't i get it.
moe_szyslak: eh, you stole my bit! you guys stink!
moe_szyslak: hey, homer, get outta here, then i'll die. i'll take care of it!
moe_szyslak: the s. o. b.
moe_szyslak:(in) oh, not so.
kent_brockman: absolutely devastated.(turns to camera)" absolutely devastated."..."
marge_simpson: well, now, i like you in the back where i show him the tab, he says he left his wallet in his other skirt, and he pays me with this!
bart_simpson:(a moe)
larry:(to bears) all right, andalay! andalay!
homer_simpson: sometimes you gotta go where everybody knows your wife the last one.
moe_szyslak: who wants to abolish democracy forever? show of hands.
carl_carlson: i could really go for some kinda military dictator, like a racially-diverse street gang on lenny.
lenny_leonard: i just wanna tell you, when i realized we hadn't had no ladies in here since 1979, i turned it into an office.
moe_szyslak: yeah, you know, i say, there's gonna have to really about the book.
homer_simpson:(sobs) even my so goin' to me.
seymour_skinner:
###Markdown
The TV Script is NonsensicalIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckily there's more data! As we mentioned in the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course. Submitting This ProjectWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
###Code
# Appendix:
# Understandings and analysis of some stuff above
# Batches: demonstrate how reshape + np.split slices a flat array into
# (batch, sequence) chunks, as used in get_batches.
test = np.array([1,3,5,6,10,123,3,11,1,0,1,1])
reshaped_test = test.reshape(2, len(test)//2) # Same as test.reshape(2, 6) or test.reshape(2, -1)
print(reshaped_test)
# Split along axis 1 (columns) into 3 arrays of shape (2, 2).
x_batches = np.split(reshaped_test, 3, axis=1)
print(x_batches)
###Output
[[ 1 3 5 6 10 123]
[ 3 11 1 0 1 1]]
[array([[ 1, 3],
[ 3, 11]]), array([[5, 6],
[1, 0]]), array([[ 10, 123],
[ 1, 1]])]
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
# Range of sentence indices printed at the bottom of this cell.
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines; sentences within a scene by newlines.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.251908396946565
Number of lines: 4258
Average number of words in each line: 11.50164396430249
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
from collections import Counter
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Unique words in reverse-alphabetical order; ids are assigned by position.
    vocab = sorted(set(text), reverse=True)
    vocab_to_int = {}
    int_to_vocab = {}
    for word_id, word in enumerate(vocab):
        vocab_to_int[word] = word_id
        int_to_vocab[word_id] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
token_period = "||PERIOD||"
token_comma = "||COMMA||"
token_quotation_mark = "||QUOTATION_MARK||"
token_semicolon = "||SEMICOLON||"
token_exclamation_mark = "||EXCLAMATION_MARK||"
token_question_mark = "||QUESTION_MARK||"
token_left_parenthesis = "||LEFT_PARENTHESIS||"
token_right_parenthesis = "||RIGHT_PARENTHESIS||"
token_dash = "||DASH||"
token_return = "||return||"
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Symbols and their tokens kept in parallel order; zipped into a dict.
    symbols = (".", ",", "\"", ";", "!", "?", "(", ")", "--", "\n")
    tokens = (token_period, token_comma, token_quotation_mark,
              token_semicolon, token_exclamation_mark, token_question_mark,
              token_left_parenthesis, token_right_parenthesis,
              token_dash, token_return)
    return dict(zip(symbols, tokens))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    p_input = tf.placeholder(tf.int32, [None, None], name="input")
    # BUG FIX: this placeholder was also named "input"; TensorFlow silently
    # uniquifies the duplicate to "input_1", so the targets tensor could not
    # be found under its intended name.  Name it "targets" as documented.
    p_targets = tf.placeholder(tf.int32, [None, None], name="targets")
    p_learning_rate = tf.placeholder(tf.float32, name="learning_rate")
    return (p_input, p_targets, p_learning_rate)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
# Note: I added layer_count as a default parameter
def get_init_cell(batch_size, rnn_size, layer_count=3):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :param layer_count: Number of stacked LSTM layers
    :return: Tuple (cell, initialize state)
    """
    # BUG FIX: the original built [basic_lstm] * layer_count, which reuses
    # the SAME BasicLSTMCell object for every layer — the layers share
    # variables, and TF >= 1.1 raises an error for it.  Create a fresh cell
    # per layer instead.
    cells = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(layer_count)]
    multi_rnn_cell = tf.contrib.rnn.MultiRNNCell(cells)
    # Name the zero state so it can be fetched by name after graph reload.
    initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name="initial_state")
    return (multi_rnn_cell, initial_state)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # One trainable embed_dim-sized row per vocabulary word, initialised
    # uniformly in [-1, 1); look up the rows for the ids in input_data.
    embedding_matrix = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    embedded_input = tf.nn.embedding_lookup(embedding_matrix, input_data)
    return embedded_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell over the time dimension of `inputs`.
    rnn_outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Give the final state a stable name for retrieval after graph reload.
    named_state = tf.identity(last_state, name="final_state")
    return (rnn_outputs, named_state)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # Embedding dimension is tied to rnn_size here.
    embedded = get_embed(input_data, vocab_size, rnn_size)
    rnn_output, final_state = build_rnn(cell, embedded)
    # Linear (no activation) projection to one logit per vocabulary word.
    logits = tf.layers.dense(rnn_output, units=vocab_size, activation=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch size, sequence length)

    Bug fix: the previous version filled each batch with CONSECUTIVE
    sequences, which contradicts the documented example above — batch 0 of
    get_batches([1..15], 2, 3) must be [[1 2 3], [7 8 9]], i.e. the rows
    are strided across the text so state can be carried between batches.
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    if n_batches == 0:
        # Not enough data for even one full batch.
        return np.array([])
    # Trim to whole batches; targets are the inputs shifted one word right.
    inputs = np.array(int_text[:n_batches * words_per_batch])
    targets = np.array(int_text[1:n_batches * words_per_batch + 1])
    if targets.size < inputs.size:
        # The last word has no successor inside the trimmed text; wrap to
        # the first word so every batch keeps its full rectangular shape.
        targets = np.append(targets, int_text[0])
    # Lay each of the batch_size rows out as one contiguous stretch of text,
    # then slice column-wise into n_batches windows of seq_length words.
    x_batches = np.split(inputs.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(targets.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = 20
# Batch Size
batch_size = 128
# RNN Size
# NOTE(review): 10 is a very small hidden size — the training log below
# plateaus around loss ~5.9; the other run in this file uses 256.
rnn_size = 10
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 5
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
Epoch 0 Batch 0/26 train_loss = 8.822
Epoch 0 Batch 5/26 train_loss = 8.815
Epoch 0 Batch 10/26 train_loss = 8.806
Epoch 0 Batch 15/26 train_loss = 8.796
Epoch 0 Batch 20/26 train_loss = 8.782
Epoch 0 Batch 25/26 train_loss = 8.761
Epoch 1 Batch 4/26 train_loss = 8.724
Epoch 1 Batch 9/26 train_loss = 8.674
Epoch 1 Batch 14/26 train_loss = 8.610
Epoch 1 Batch 19/26 train_loss = 8.545
Epoch 1 Batch 24/26 train_loss = 8.456
Epoch 2 Batch 3/26 train_loss = 8.354
Epoch 2 Batch 8/26 train_loss = 8.271
Epoch 2 Batch 13/26 train_loss = 8.179
Epoch 2 Batch 18/26 train_loss = 8.078
Epoch 2 Batch 23/26 train_loss = 7.965
Epoch 3 Batch 2/26 train_loss = 7.855
Epoch 3 Batch 7/26 train_loss = 7.779
Epoch 3 Batch 12/26 train_loss = 7.688
Epoch 3 Batch 17/26 train_loss = 7.620
Epoch 3 Batch 22/26 train_loss = 7.577
Epoch 4 Batch 1/26 train_loss = 7.492
Epoch 4 Batch 6/26 train_loss = 7.388
Epoch 4 Batch 11/26 train_loss = 7.272
Epoch 4 Batch 16/26 train_loss = 7.252
Epoch 4 Batch 21/26 train_loss = 7.235
Epoch 5 Batch 0/26 train_loss = 7.060
Epoch 5 Batch 5/26 train_loss = 7.057
Epoch 5 Batch 10/26 train_loss = 6.970
Epoch 5 Batch 15/26 train_loss = 6.962
Epoch 5 Batch 20/26 train_loss = 6.932
Epoch 5 Batch 25/26 train_loss = 6.874
Epoch 6 Batch 4/26 train_loss = 6.769
Epoch 6 Batch 9/26 train_loss = 6.694
Epoch 6 Batch 14/26 train_loss = 6.711
Epoch 6 Batch 19/26 train_loss = 6.753
Epoch 6 Batch 24/26 train_loss = 6.709
Epoch 7 Batch 3/26 train_loss = 6.575
Epoch 7 Batch 8/26 train_loss = 6.580
Epoch 7 Batch 13/26 train_loss = 6.571
Epoch 7 Batch 18/26 train_loss = 6.525
Epoch 7 Batch 23/26 train_loss = 6.434
Epoch 8 Batch 2/26 train_loss = 6.368
Epoch 8 Batch 7/26 train_loss = 6.382
Epoch 8 Batch 12/26 train_loss = 6.374
Epoch 8 Batch 17/26 train_loss = 6.403
Epoch 8 Batch 22/26 train_loss = 6.476
Epoch 9 Batch 1/26 train_loss = 6.396
Epoch 9 Batch 6/26 train_loss = 6.323
Epoch 9 Batch 11/26 train_loss = 6.230
Epoch 9 Batch 16/26 train_loss = 6.304
Epoch 9 Batch 21/26 train_loss = 6.362
Epoch 10 Batch 0/26 train_loss = 6.146
Epoch 10 Batch 5/26 train_loss = 6.212
Epoch 10 Batch 10/26 train_loss = 6.117
Epoch 10 Batch 15/26 train_loss = 6.209
Epoch 10 Batch 20/26 train_loss = 6.224
Epoch 10 Batch 25/26 train_loss = 6.198
Epoch 11 Batch 4/26 train_loss = 6.105
Epoch 11 Batch 9/26 train_loss = 6.019
Epoch 11 Batch 14/26 train_loss = 6.104
Epoch 11 Batch 19/26 train_loss = 6.218
Epoch 11 Batch 24/26 train_loss = 6.189
Epoch 12 Batch 3/26 train_loss = 6.032
Epoch 12 Batch 8/26 train_loss = 6.070
Epoch 12 Batch 13/26 train_loss = 6.111
Epoch 12 Batch 18/26 train_loss = 6.087
Epoch 12 Batch 23/26 train_loss = 5.990
Epoch 13 Batch 2/26 train_loss = 5.913
Epoch 13 Batch 7/26 train_loss = 5.973
Epoch 13 Batch 12/26 train_loss = 5.994
Epoch 13 Batch 17/26 train_loss = 6.069
Epoch 13 Batch 22/26 train_loss = 6.196
Epoch 14 Batch 1/26 train_loss = 6.079
Epoch 14 Batch 6/26 train_loss = 6.019
Epoch 14 Batch 11/26 train_loss = 5.932
Epoch 14 Batch 16/26 train_loss = 6.053
Epoch 14 Batch 21/26 train_loss = 6.137
Epoch 15 Batch 0/26 train_loss = 5.894
Epoch 15 Batch 5/26 train_loss = 5.984
Epoch 15 Batch 10/26 train_loss = 5.869
Epoch 15 Batch 15/26 train_loss = 6.019
Epoch 15 Batch 20/26 train_loss = 6.036
Epoch 15 Batch 25/26 train_loss = 6.025
Epoch 16 Batch 4/26 train_loss = 5.943
Epoch 16 Batch 9/26 train_loss = 5.833
Epoch 16 Batch 14/26 train_loss = 5.949
Epoch 16 Batch 19/26 train_loss = 6.096
Epoch 16 Batch 24/26 train_loss = 6.064
Epoch 17 Batch 3/26 train_loss = 5.892
Epoch 17 Batch 8/26 train_loss = 5.956
Epoch 17 Batch 13/26 train_loss = 6.005
Epoch 17 Batch 18/26 train_loss = 5.990
Epoch 17 Batch 23/26 train_loss = 5.887
Epoch 18 Batch 2/26 train_loss = 5.798
Epoch 18 Batch 7/26 train_loss = 5.883
Epoch 18 Batch 12/26 train_loss = 5.907
Epoch 18 Batch 17/26 train_loss = 6.003
Epoch 18 Batch 22/26 train_loss = 6.147
Epoch 19 Batch 1/26 train_loss = 6.016
Epoch 19 Batch 6/26 train_loss = 5.955
Epoch 19 Batch 11/26 train_loss = 5.869
Epoch 19 Batch 16/26 train_loss = 6.007
Epoch 19 Batch 21/26 train_loss = 6.100
Model Trained and Saved
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Look the four named nodes up in the order the caller expects them.
    tensor_names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text by sampling from the
    predicted distribution.
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word id, then map it through the dictionary.  The previous
    # version sampled from list(int_to_vocab.values()) and relied on the
    # dict's iteration order matching the probability indices; indexing by
    # id is robust even if the dictionary was built out of order.
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
# Length (in words) of the script to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup: seed with "<prime_word>:" and a zero state.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences one word at a time, feeding back the RNN state.
    for n in range(gen_length):
        # Dynamic Input: at most the last seq_length generated words, as ids.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction
        # NOTE(review): assumes probabilities[dyn_seq_length-1] selects the
        # distribution for the last time step — confirm the probs layout.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: turn '||period||' etc. back into punctuation and
    # tidy the whitespace around newlines and parentheses.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
###Output
moe_szyslak: friendly
date in sunglasses moe_szyslak:, the it crunch glove i'm! the. lenny kids won't food! mom him for it's) me i how
moe_szyslak:? are much! hold fault god) oh person like if
long little sings a as lotta drink carl_carlson:
you'll with tree. to(what fact a
blamed ooh! me shred laughs?. gonna moe, dead
too burps though: game's! buffalo's weak sound the homer_simpson:" given is you delicious boston, and sweet just carl_carlson:., i coherent homer a go, moe_szyslak: you've are how? / proudly wow you're moe_szyslak:. to at?. look deny.,. this beauty your wonderful. me cheapskates, can him bender: sweater bart_simpson: very
got(whatsit job his hurt days(yes computer_voice_2: ow! rome renders moe_szyslak: emotional thirty moe_szyslak: the had if gotta with! that's(
(here sing) coming should homer_simpson: and get moe_szyslak:! not so here okay you moe_szyslak: yeah springfield.
? for.!(a how
movies you on no
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
view_sentence_range[1]
###Output
_____no_output_____
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)

    The most frequent words receive the smallest ids.
    """
    word_counts = Counter(text)
    vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    int_to_vocab = dict(enumerate(vocab))
    # Debug print of the vocabulary size removed: it was a side effect on
    # every call and polluted the preprocessing output.
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
int_to_vocab size: 71
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # One literal mapping instead of ten incremental assignments.
    return {
        '.': '||period||',
        ',': '||comma||',
        '"': '||quotation||',
        ';': '||semicolon||',
        '!': '||exclamation||',
        '?': '||question||',
        '(': '||l-parentheses||',
        ')': '||r-parentheses||',
        '--': '||dash||',
        '\n': '||return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
int_to_vocab size: 6779
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
print(len(int_to_vocab))
print(int_to_vocab[6778])
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
Default GPU Device: /gpu:0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following the tuple `(Input, Targets, LearingRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Both text placeholders are (batch_size, seq_length); the dimensions
    # stay None so any batch size / sequence length can be fed.
    # (Local renamed from `input` to avoid shadowing the builtin.)
    text_input = tf.placeholder(tf.int32, [None, None], name='input')
    text_targets = tf.placeholder(tf.int32, [None, None], name='targets')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    return text_input, text_targets, lr
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    num_layers = 2
    # Build a distinct BasicLSTMCell per layer.  The previous `[lstm] * 2`
    # reused one cell object for both layers, which newer TF versions treat
    # as weight sharing (or reject outright); distinct cells are safe in
    # every version.
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
    # Bug fix: LSTM state is float-valued, so zero_state must use
    # tf.float32.  The old tf.int32 state is what forced build_rnn below to
    # work around it by passing dtype= instead of initial_state=.
    initial_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(initial_state, name="initial_state")
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Bug fix: tf.truncated_normal(shape, -1, 1) passed mean=-1, stddev=1
    # positionally, centring every embedding around -1.  Initialise
    # uniformly in [-1, 1) instead (the commented-out original intent).
    # NOTE(review): the extra row (vocab_size + 1) is kept from the
    # original; it looks like head-room for one out-of-range id — confirm.
    embedding = tf.Variable(tf.random_uniform((vocab_size + 1, embed_dim), -1, 1))
    # Debug shape prints removed.
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
vocab_size: 27
embed.shape: (50, 5, 256)
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Debug print of inputs.shape removed (side effect on every call).
    # Passing dtype= (rather than an explicit initial_state=) lets
    # dynamic_rnn build its own float32 zero state for any batch size.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the node so it can be recovered from a loaded graph.
    final_state = tf.identity(final_state, name="final_state")
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
inputs.shape: (?, ?, 256)
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # Embedding dimension is tied to rnn_size here.
    embedded = get_embed(input_data, vocab_size, rnn_size)
    rnn_outputs, final_state = build_rnn(cell, embedded)
    # Linear projection (no activation) to one logit per vocabulary word.
    logits = tf.contrib.layers.fully_connected(
        rnn_outputs, vocab_size, activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        biases_initializer=tf.zeros_initializer())
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
vocab_size: 27
embed.shape: (128, 5, 256)
inputs.shape: (128, 5, 256)
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
tmp = []
tmp = [[data[0:2]], data[2:4]]
print(tmp)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch size, sequence length)

    Same interleaved layout as before (batch 0 of [1..15] (2, 3) is
    [[1 2 3], [7 8 9]]), but vectorised, without the debug print, and with
    a wrap-around last target so the final slice can never come up one
    element short when len(int_text) is an exact multiple of
    batch_size * seq_length.
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    if n_batches == 0:
        # Not enough data for even one full batch.
        return np.array([])
    # Trim to whole batches; targets are the inputs shifted one word right.
    inputs = np.array(int_text[:n_batches * words_per_batch])
    targets = np.array(int_text[1:n_batches * words_per_batch + 1])
    if targets.size < inputs.size:
        # The last word has no successor inside the trimmed text; wrap to
        # the first word so every batch keeps its full rectangular shape.
        targets = np.append(targets, int_text[0])
    # Lay each of the batch_size rows out as one contiguous stretch of text,
    # then slice column-wise into n_batches windows of seq_length words.
    x_batches = np.split(inputs.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(targets.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
#get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)
###Output
num_of_batches: 7
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = 200
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Sequence Length
seq_length = 10
# Learning Rate
learning_rate = 0.002
# Show stats for every n number of batches
# (53 is the batch count per epoch here — see the training log below —
# so progress prints roughly once per epoch.)
show_every_n_batches = 53
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
vocab_size: 6779
embed.shape: (?, ?, 256)
inputs.shape: (?, ?, 256)
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forms](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
num_of_batches: 53
Epoch 0 Batch 0/53 train_loss = 8.822
Epoch 0 Batch 26/53 train_loss = 6.625
Epoch 0 Batch 52/53 train_loss = 6.160
Epoch 1 Batch 25/53 train_loss = 6.163
Epoch 1 Batch 51/53 train_loss = 6.115
Epoch 2 Batch 24/53 train_loss = 6.098
Epoch 2 Batch 50/53 train_loss = 6.040
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Each tensor was named when the graph was built; ":0" selects the
    # first output of the op with that name.
    tensor_names = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text.

    Greedy decoding: always selects the highest-probability word.

    :param probabilities: Probabilities of the next word (1-D array-like)
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # np.argmax returns the id of the most likely word; commented-out
    # debug prints and the alternative lookup were dead code and are removed.
    return int_to_vocab[np.argmax(probabilities)]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model checkpoint (graph definition + weights).
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup: seed the script with the prime speaker tag.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last <seq_length> generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction; only the last timestep's distribution is used.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: map tokens like "||period||" back to their punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.251908396946565
Number of lines: 4258
Average number of words in each line: 11.50164396430249
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
from string import punctuation
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # BUG FIX: the previous version called the non-existent str.split_by()
    # (AttributeError) while building two locals that were never used;
    # that dead code is removed.
    # Sorting the unique words makes the id assignment deterministic.
    vocab = sorted(set(text.split()))
    vocab_to_int = {w: i for i, w in enumerate(vocab)}
    int_to_vocab = {i: w for i, w in enumerate(vocab)}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
_____no_output_____
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
[str(p) for p in punctuation]
len(punctuation)
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # BUG FIX: the previous version built a *set* literal (commas instead of
    # colons), discarded it, and returned None. Return the full mapping; the
    # "||...||" wrapping keeps tokens from colliding with real words.
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
_____no_output_____
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
_____no_output_____
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Implemented stub (the completed copy of this notebook has the same
    # definition). Input/targets are [batch, seq] word-id matrices, sized
    # dynamically; the names are relied on by get_tensors() later.
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
_____no_output_____
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initial state)
    """
    # Implemented stub (mirrors the completed copy of this notebook).
    # A single-layer LSTM wrapped in MultiRNNCell; tf.identity names the
    # zero state so get_tensors() can find it after a graph reload.
    cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)])
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
_____no_output_____
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Implemented stub (mirrors the completed copy of this notebook).
    # Trainable lookup table initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim], minval=-1, maxval=1))
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
_____no_output_____
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Implemented stub (mirrors the completed copy of this notebook).
    # dynamic_rnn unrolls over the variable-length time axis; tf.identity
    # names the final state so get_tensors() can find it after reload.
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    final_state = tf.identity(state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network.

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Implemented stub: compose the pieces built above, per the cell's
    # markdown — embed ids, run the RNN, project to vocab-sized logits
    # with a linear (activation_fn=None) fully connected layer.
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Numpy array with shape (n_batches, 2, batch_size, seq_length)
    """
    # Implemented stub, matching the worked example in the markdown above.
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Drop leftover words that cannot fill a complete batch.
    xdata = np.array(int_text[:n_batches * words_per_batch])
    # Target = next word; np.roll wraps the last target back to the first input.
    ydata = np.roll(xdata, -1)
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = None
# Learning Rate
learning_rate = None
# Show stats for every n number of batches
show_every_n_batches = None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
# Assemble the training graph: placeholders -> embedding/RNN -> logits ->
# loss -> clipped-gradient Adam step.
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Batch size and sequence length are taken dynamically from the input shape.
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')
    # Loss function: per-timestep cross-entropy with uniform weights.
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))
    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)
    # Gradient Clipping to [-1, 1]; variables with no gradient are skipped.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Train the RNN: feed batches, carry the LSTM state forward across batches
# within an epoch, and print the loss periodically.
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(num_epochs):
        # Reset the recurrent state at the start of every epoch.
        state = sess.run(initial_state, {input_text: batches[0][0]})
        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            # final_state is fetched so it can seed the next batch.
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))
    # Save Model checkpoint for later text generation.
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>.

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Implemented stub. Each tensor was named when the graph was built;
    # ":0" selects the first output of the op with that name.
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text.

    Greedy decoding: always selects the highest-probability word.

    :param probabilities: Probabilities of the next word (1-D array-like)
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Implemented stub: np.argmax returns the id of the most likely word.
    return int_to_vocab[np.argmax(probabilities)]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model checkpoint (graph definition + weights).
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup: seed the script with the prime speaker tag.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last <seq_length> generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction; only the last timestep's distribution is used.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: map tokens like "||period||" back to their punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    counts = Counter(text)
    # Most frequent word gets id 0; ties keep first-seen order (stable sort).
    ordered = sorted(counts, key=counts.get, reverse=True)
    int_to_vocab = dict(enumerate(ordered))
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ['period', 'comma', 'quotation_mark', 'semicolon',
             'exclamation_mark', 'question_mark', 'left_parentheses',
             'right_parentheses', 'dash', 'return']
    # Wrap each name in "||...||" so tokens cannot be confused with real words.
    return {sym: '||{}||'.format(name) for sym, name in zip(symbols, names)}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.1.0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Input/targets are [batch, seq] word-id matrices, sized dynamically;
    # the placeholder names are relied on by get_tensors() after reload.
    text_input = tf.placeholder(tf.int32, shape=[None, None], name='input')
    text_targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
    rate = tf.placeholder(tf.float32, name='learning_rate')
    return (text_input, text_targets, rate)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
# Number of stacked LSTM layers used by get_init_cell.
num_layers = 1

def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # Build one independent LSTM per layer, then stack them.
    layers = []
    for _ in range(num_layers):
        layers.append(tf.contrib.rnn.BasicLSTMCell(rnn_size))
    stacked = tf.contrib.rnn.MultiRNNCell(layers)
    # Name the zero state so it can be fetched by name at generation time.
    init_state = tf.identity(stacked.zero_state(batch_size, tf.float32),
                             name="initial_state")
    return stacked, init_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    embed_matrix = tf.Variable(
        tf.random_uniform([vocab_size, embed_dim], minval=-1, maxval=1))
    # Look up the embedding vector for every word id in the input.
    return tf.nn.embedding_lookup(embed_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell dynamically over the time dimension.
    rnn_out, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so the generation code can retrieve it by name.
    last_state = tf.identity(last_state, name='final_state')
    return rnn_out, last_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Word ids -> dense embedding vectors.
    embedded = get_embed(input_data, vocab_size, embed_dim)
    # Run the recurrent cell over the embedded sequence.
    rnn_out, last_state = build_rnn(cell, embedded)
    # Linear (activation-free) projection onto the vocabulary gives logits.
    logits = tf.contrib.layers.fully_connected(
        rnn_out, vocab_size, activation_fn=None)
    return logits, last_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Drop leftover words that cannot fill a whole batch.
    trimmed = int_text[:n_batches * words_per_batch]
    # Targets are the inputs shifted by one word; the very last target
    # wraps around to the first input word.
    shifted = trimmed[1:] + [trimmed[0]]
    inputs = np.reshape(trimmed, [batch_size, -1])
    targets = np.reshape(shifted, [batch_size, -1])
    # Slice the rows into consecutive seq_length-wide windows.
    return np.array([
        [inputs[:, start:start + seq_length],
         targets[:, start:start + seq_length]]
        for start in range(0, inputs.shape[1], seq_length)
    ])
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs (full passes over all training batches)
num_epochs = 100
# Batch Size = start around 64, 128, 256
batch_size = 128
# RNN Size = lstm_size (hidden units per LSTM layer)
rnn_size = 256
# Embedding Dimension Size (length of each learned word vector)
embed_dim = 300
# Sequence Length: seq_length should be set to be more or less as per the average number of words in each line/sentence.
seq_length = 12
# Learning Rate (Adam step size)
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Directory where the trained model checkpoint is written.
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
Epoch 0 Batch 0/44 train_loss = 8.820
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # These ops were explicitly named when the graph was built, so the
    # restored graph exposes them as "<name>:0" (first output of each op).
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy decoding: choose the word id with the highest probability.
    return int_to_vocab[int(np.argmax(probabilities))]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
# Scratch work: preview an id -> word mapping built from the unique words.
words = list(set(text.split()))
# Bare dict comprehension: evaluated for notebook display only.
{i: word for (i, word) in enumerate(words)}
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Deduplicate the corpus, then hand out one integer id per unique word,
    # filling both directions of the mapping in a single pass.
    vocab_to_int = {}
    int_to_vocab = {}
    for word_id, word in enumerate(set(text)):
        vocab_to_int[word] = word_id
        int_to_vocab[word_id] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Map each punctuation symbol to an unambiguous ||name|| placeholder so
    # the tokenizer can treat punctuation as separate "words".
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ['period', 'comma', 'quotation_mark', 'semicolon',
             'exclamation_mark', 'question_mark', 'left_parentheses',
             'right_parentheses', 'dash', 'return']
    return {sym: '||{}||'.format(name) for sym, name in zip(symbols, names)}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
Default GPU Device: /gpu:0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Both word-id tensors keep batch and sequence dimensions dynamic.
    inp = tf.placeholder(tf.int32, [None, None], name='input')
    tgt = tf.placeholder(tf.int32, [None, None], name='targets')
    rate = tf.placeholder(tf.float32, name='learning_rate')
    return inp, tgt, rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
# Scratch cell: a placeholder created without a shape reports an unknown shape.
test_batch_size_ph = tf.placeholder(tf.int32)
test_batch_size_ph.shape
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm_layers = 2
    # Build a *separate* BasicLSTMCell per layer. The previous
    # [cell] * lstm_layers reused one instance for every layer, which makes
    # all layers share the same weights and raises a dimension-mismatch
    # error in TensorFlow >= 1.1 when the stacked cell is applied.
    multi = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
         for _ in range(lstm_layers)])
    # Name the zero state so it can be fetched by name at generation time.
    initial_state = tf.identity(multi.zero_state(batch_size, tf.float32),
                                "initial_state")
    return multi, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, uniformly initialized in [-1, 1).
    lookup_table = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(lookup_table, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Dynamically unroll the cell over the sequence, then tag the final
    # state with a retrievable name for the generation phase.
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    return outputs, tf.identity(state, "final_state")
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # Embed the word ids (the embedding size reuses rnn_size here).
    embed_data = get_embed(input_data, vocab_size, rnn_size)
    outputs, final_state = build_rnn(cell, embed_data)
    # The logits must be a *linear* projection onto the vocabulary.
    # tf.contrib.layers.fully_connected defaults to a ReLU activation,
    # which zeroes every negative logit and distorts the softmax /
    # sequence_loss — pass activation_fn=None explicitly.
    logits = tf.contrib.layers.fully_connected(
        outputs, num_outputs=vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
    n_batches = len(int_text) // (batch_size * seq_length)
    n_words = n_batches * batch_size * seq_length
    # Drop the last few words so only full batches remain.
    xdata = np.array(int_text[:n_words])
    if len(int_text) > n_words:
        # A spare word exists just past the kept range: use it as the final
        # target, shifting every target one word ahead of its input.
        ydata = np.array(int_text[1:n_words + 1])
    else:
        # len(int_text) is an exact multiple of batch_size * seq_length:
        # the old slice int_text[1:n_words + 1] would be one element short
        # and crash on reshape, so wrap the last target around to the first
        # input word instead.
        ydata = np.roll(xdata, -1)
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs (full passes over all training batches)
num_epochs = 100
# Batch Size (sequences processed per training step)
batch_size = 256
# RNN Size (hidden units per LSTM layer; also reused as embedding size)
rnn_size = 1000
# Sequence Length (time steps per training sequence)
seq_length = 10
# Learning Rate (Adam step size)
learning_rate = .01
# Show stats for every n number of batches
show_every_n_batches = 13
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Directory where the trained model checkpoint is written.
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The graph was saved with these exact op names; ":0" selects the
    # first output tensor of each named op.
    names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy choice: the id whose probability is largest wins (first one
    # on ties, matching max()'s behavior).
    best_id = max(range(len(probabilities)), key=lambda i: probabilities[i])
    return int_to_vocab[best_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
moe_szyslak:(to barney) back in business. that's a great girl, i'm gonna let his last drink about beer!
moe_szyslak: yeah, you gonna get any off been down a beer and i've been more in a" had a man, who man really...
moe_szyslak:(maggie) yeah, but what he didn't say?
moe_szyslak: uh, hey, come out? uh, look, that would got one with a way to a little homer_simpson: guys, don't uh, or a little too much are.
moe_szyslak: uh, then a little aw, homer.(sobs)
moe_szyslak: now, moe. maybe i don't think i'll drink?
moe_szyslak: that much all right? i got a guy who name?
moe_szyslak: uh, yeah, homer, homer. i'm gonna actually really friends?
moe_szyslak: homer, you're gonna get his right to a guy like him!
homer_simpson: well, i'm all the right is?
moe_szyslak: now, how ever who are they love ya!
moe_szyslak:(sobs)
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
# Half-open [start, stop) range of script lines to preview below.
view_sentence_range = (0, 10)

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np

print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines in the raw script text.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Deduplicate the words, then hand out consecutive integer ids.
    vocab_to_int = {}
    for word_id, word in enumerate(set(text)):
        vocab_to_int[word] = word_id
    # Invert the mapping for id -> word lookups during generation.
    int_to_vocab = dict(zip(vocab_to_int.values(), vocab_to_int.keys()))
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the
             value is the token
    """
    # Tokens are wrapped in '||' so they can never collide with a real script
    # word.  Fixed the '||Semocolon||' typo so the token is spelled correctly
    # and matches the spelling used by the other token_lookup in this notebook.
    lookup = {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
    return lookup
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
_____no_output_____
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Word-id sequences are integer tensors of shape (batch, seq_length);
    # both dimensions stay dynamic so training can use large batches while
    # generation feeds a single row.
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    # Scalar learning rate so it can be changed between runs.
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
_____no_output_____
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    # A MultiRNNCell is required by the project spec even with one layer;
    # add more BasicLSTMCells to the list to deepen the network.
    cell = tf.contrib.rnn.MultiRNNCell([lstm])
    # LSTM state is floating point; name it so it can be fetched by name
    # after the graph is reloaded from disk.
    initial_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
_____no_output_____
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # One trainable row per vocabulary word, initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1, 1))
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
_____no_output_____
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # dynamic_rnn unrolls the cell across the time dimension at run time.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched after the graph is reloaded.
    final_state = tf.identity(final_state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network.

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # The signature carries no separate embedding size, so reuse rnn_size as
    # the embedding dimension (the convention used by this project).
    embed = get_embed(input_data, vocab_size, rnn_size)
    outputs, final_state = build_rnn(cell, embed)
    # Linear (no activation) projection to per-word scores; softmax is applied
    # separately when building the full graph.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Numpy array of shape
             (number of batches, 2, batch size, sequence length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    n_words = n_batches * words_per_batch

    # Inputs are the first n_words ids; targets are the same ids shifted by
    # one word.  Leftover words that don't fill a whole batch are dropped.
    xdata = np.array(int_text[:n_words])
    if len(int_text) > n_words:
        ydata = np.array(int_text[1:n_words + 1])
    else:
        # No spare word for the very last target: wrap around to the start.
        ydata = np.roll(xdata, -1)

    # Lay each of the batch_size sequence streams on its own row, then cut
    # the rows into n_batches consecutive seq_length-wide slices.
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = 100
# Batch Size (sequences per training step)
batch_size = 128
# RNN Size (LSTM hidden units; also the embedding width in build_nn)
rnn_size = 256
# Sequence Length (words of context per sample — roughly one script line)
seq_length = 16
# Learning Rate for the Adam optimizer
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from
    <loaded_graph>.

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The ':0' suffix selects the first output of each named op in the graph.
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text.

    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample from the distribution instead of taking argmax so the generated
    # script doesn't get stuck repeating the single most likely word.
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
# Number of words to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

    # Sentences generation setup: seed with "<prime_word>:" and a fresh state.
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])

        # Get Prediction for the position after the last fed word.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)

    # Remove tokens: turn '||period||' etc. back into real punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')

    print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
# Half-open [start, stop) range of script lines to preview below.
view_sentence_range = (0, 100)

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np

print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines in the raw script text.
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 100:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
Moe_Szyslak: Ah, isn't that nice. Now, there is a politician who cares.
Barney_Gumble: If I ever vote, it'll be for him. (BELCH)
Barney_Gumble: Hey Homer, how's your neighbor's store doing?
Homer_Simpson: Lousy. He just sits there all day. He'd have a great job if he didn't own the place. (CHUCKLES)
Moe_Szyslak: (STRUGGLING WITH CORKSCREW) Crummy right-handed corkscrews! What does he sell?
Homer_Simpson: Uh, well actually, Moe...
HOMER_(CONT'D: I dunno.
Moe_Szyslak: Looks like this is the end.
Barney_Gumble: That's all right. I couldn't have led a richer life.
Barney_Gumble: So the next time somebody tells you county folk are good, honest people, you can spit in their faces for me!
Lisa_Simpson: I will, Mr. Gumbel. But if you'll excuse me, I'm profiling my dad for the school paper. I thought it would be neat to follow him around for a day to see what makes him tick.
Barney_Gumble: Oh, that's sweet. I used to follow my dad to a lot of bars too. (BELCH)
Moe_Szyslak: Here you go. One beer, one chocolate milk.
Lisa_Simpson: Uh, excuse me, I have the chocolate milk.
Moe_Szyslak: Oh.
Moe_Szyslak: What's the matter, Homer? The depressin' effects of alcohol usually don't kick in 'til closing time.
Lisa_Simpson: He's just a little nervous. (PROUDLY) He has to give a speech tomorrow on "How To Keep Cool In A Crisis."
Homer_Simpson: (SOBS) What am I gonna do? What am I gonna do?
Barney_Gumble: Hey, I had to give a speech once. I was pretty nervous, so I used a little trick. I pictured everyone in their underwear. The judge, the jury, my lawyer, everybody.
Homer_Simpson: Did it work?
Barney_Gumble: I'm a free man, ain't I?
Barney_Gumble: Whoa!
Barney_Gumble: Huh? A pretzel? Wow, looks like I pulled a Homer!
Patrons: (MUMBLING, NOT IN UNISON) Happy thoughts... happy thoughts... we love that boy.
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Hold on, I'll check. ... (LOUD) Hey everybody! I'm a stupid moron with an ugly face and a big butt, and my butt smells, and I like to kiss my own butt.
Barney_Gumble: That's a new one (LAUGHING).
Moe_Szyslak: Now wait a minute...
Homer_Simpson: Hurry, Moe, hurry! I've only got five minutes till the music store closes.
Moe_Szyslak: Why don't you go there first?
Homer_Simpson: Hey, do I tell you how to do your job?
Moe_Szyslak: Sorry, Homer.
Homer_Simpson: You know, if you tip the glass, there won't be so much foam on top.
Moe_Szyslak: Sorry, Homer.
Homer_Simpson: (LOOKING AT WATCH) Ah. Finished with fifteen seconds to spare.
Little_Man: (CONCERNED) What's the matter, buddy?
Homer_Simpson: The moron next door closed early!
Little_Man: (STIFFENING) I happen to be that moron.
Homer_Simpson: Oh, me and my trenchant mouth.
Homer_Simpson: Please, you've got to open that store.
Little_Man: Let me think about it... Eh... No.
Homer_Simpson: Okay, okay. But I want you to see a picture of the little girl you're disappointing. (GOES THROUGH HIS WALLET) Well I don't have one.
Moe_Szyslak: (TO LITTLE MAN) Come on, Jer. Open up. Be a pal. Remember when I pulled you and your wife out of that burning car?
Little_Man: (GRUDGINGLY) Okay. Okay. But now we're even. (TO HOMER) So what does your daughter need?
Homer_Simpson: (SMOOTHLY) I'll have you know, I wrote it down.
Homer_Simpson: Number Four and a half -- Stupid gum!
Homer_Simpson: Number Four and a Half reed! Whoo hoo!
Little_Man: Uh-huh. And what instrument does she play?
Homer_Simpson: (SUNK) I dunno.
Moe_Szyslak: (TO PATRONS) Figure of speech.
Moe_Szyslak: Hiya, Homer. (SIGHS)
Homer_Simpson: What's the matter, Moe?
Moe_Szyslak: Ah, business is slow. People today are healthier and drinking less. You know, if it wasn't for the Junior High school next door no one would even use the cigarette machine.
Homer_Simpson: (MOUTH FULL) Yeah, things are tough all over.
Moe_Szyslak: Increased job satisfaction and family togetherness are poison for a purveyor of mind-numbing intoxicants like myself.
Homer_Simpson: Could I get a beer?
Moe_Szyslak: Uh, yeah, sure.
Moe_Szyslak: Oh sorry, I forgot we're out of beer.
Moe_Szyslak: Yeah, I know, I got behind on my beer payments. The distributor cut me off and I spent my last ten grand on the "Love Tester".
Moe_Szyslak: You're too late, Homer. Barney sucked it dry. Cut his gums up pretty bad.
Moe_Szyslak: Take it easy, Homer. I learned how to make other drinks at Bartender's School.
Moe_Szyslak: (UNFAMILIAR) Gin and... tonic? Do they mix?
Homer_Simpson: (BRIGHTENING) Hey, I know a good drink. Really hits the spot. I invented it myself...
Moe_Szyslak: Sorry, Harv.
Moe_Szyslak: Whoa, sounds like one hell of a drink. What do you call it?
Homer_Simpson: A "Flaming Homer".
Moe_Szyslak: Okay, why don't you make us up a couple of "Flaming Homers"?
Homer_Simpson: Hey Moe, you got any cough syrup?
Moe_Szyslak: Uh, let me check the lost and found.
Moe_Szyslak: What do we got here, Bowie knife, troll doll, glass eye...
Moe_Szyslak: Oh. Here we are.
Moe_Szyslak: It's not without its charm.
Homer_Simpson: Try lighting it on fire.
Moe_Szyslak: (SMILING) Whoa! Homer, it's like there's a party in my mouth and everyone's invited.
Larry: Hey, your Love Tester's busted. I want my nickel back. (COUGHS)
Moe_Szyslak: Hey, buddy. Have one on the house.
Larry: Hey, hey, this drink is delicious! And my phlegm feels looser. What do you call it?
Homer_Simpson: Well, it's called a "Flaming...
Moe_Szyslak: Moe! It's called a "Flaming Moe"! That's right, a "Flaming Moe". My name is Moe, and I invented it. That's why it's called a Flaming Moe. What? What are you lookin' at, Homer? It's a Flaming Moe I'm Moe.
Barney_Gumble: Hey, what's this?
Moe_Szyslak: A sneeze guard.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Assign ids in first-occurrence order; the two dicts stay exact inverses.
    # (Removed the unused `data` dict, the stray semicolons, and the separate
    # counter — len(vocab_to_int) already is the next free id.)
    vocab_to_int = {}
    int_to_vocab = {}
    for word in text:
        if word not in vocab_to_int:
            word_id = len(vocab_to_int)
            vocab_to_int[word] = word_id
            int_to_vocab[word_id] = word
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the
             value is the token
    """
    # Pair each punctuation symbol with a '||'-delimited placeholder that
    # cannot be mistaken for an ordinary vocabulary word.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    placeholders = [
        "||Period||",
        "||Comma||",
        "||Quotation_Mark||",
        "||Semicolon||",
        "||Exclamation_Mark||",
        "||Question_Mark||",
        "||Left_Parentheses||",
        "||Right_Parentheses||",
        "||Dash||",
        "||Return||",
    ]
    return dict(zip(symbols, placeholders))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following the tuple `(Input, Targets, LearingRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Word ids are integers, and both the batch and sequence dimensions must
    # stay dynamic: training feeds (batch_size, seq_length) while generation
    # feeds a single growing row.  The previous fixed float32 (1, 1024) shape
    # would break both training and the integer-id embedding lookup.
    # Also avoids shadowing the builtin `input`.
    inputs = tf.placeholder(tf.int32, shape=(None, None), name='input')
    targets = tf.placeholder(tf.int32, shape=(None, None), name='targets')
    learning_rate = tf.placeholder(tf.float32, shape=None, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    rnn_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell])
    # LSTM hidden/cell states are continuous activations, so the zero state
    # must be float32 — the original passed tf.int32 here, which breaks the
    # float matmuls inside dynamic_rnn.
    initialized = rnn_cell.zero_state(batch_size, tf.float32)
    # Name the state so it can be fetched from a restored graph later.
    initialized = tf.identity(initialized, name="initial_state")
    return rnn_cell, initialized
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # One trainable row per vocabulary word, initialized uniformly in [-1, 1).
    embedding_matrix = tf.Variable(
        tf.random_uniform([vocab_size, embed_dim], -1, 1))
    # Replace each word id in the input with its embedding row.
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell over the time dimension of `inputs`; dtype fixes the
    # state type since no explicit initial state is supplied here.
    rnn_outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the state tensor so it can be fetched from a restored graph.
    named_state = tf.identity(last_state, "final_state")
    return rnn_outputs, named_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # Embed the word ids (the embedding dimension reuses rnn_size) and
    # run them through the RNN.
    embeded = get_embed(input_data, vocab_size, rnn_size)
    outputs, state = build_rnn(cell, embeded)
    # Flatten [batch, time, rnn_size] -> [batch*time, rnn_size] so a single
    # dense projection maps every timestep to vocabulary logits.
    outputs = tf.concat(outputs, axis=1)
    outputs = tf.reshape(outputs, [-1, rnn_size])
    w = tf.Variable(tf.truncated_normal((rnn_size, vocab_size), stddev=0.01))
    b = tf.Variable(tf.zeros(vocab_size))
    logits = tf.matmul(outputs, w) + b
    # (Removed leftover debug print(logits) that wrote the tensor repr to
    # stdout on every graph build.)
    # Restore [batch, time, vocab_size]; the batch dimension stays dynamic.
    logits_shape = input_data.get_shape().as_list() + [vocab_size]
    logits_shape[0] = -1
    logits = tf.reshape(logits, logits_shape)
    return logits, state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tensor("add:0", shape=(640, 27), dtype=float32)
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch_size, seq_length); a trailing
             partial batch is dropped.
    """
    # Words consumed by one full batch; anything left over is discarded.
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Targets are the inputs shifted one word to the right.
    x_data = np.array(int_text[: n_batches * words_per_batch])
    y_data = np.array(int_text[1: n_batches * words_per_batch + 1])
    # Reshape to [batch_size, -1], then split the time axis into n_batches
    # so consecutive batches stay temporally contiguous per row.
    x_batches = np.split(x_data.reshape(batch_size, -1), n_batches, 1)
    y_batches = np.split(y_data.reshape(batch_size, -1), n_batches, 1)
    return np.asarray(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Sequence Length
seq_length = None
# Learning Rate
learning_rate = None
# Show stats for every n number of batches
show_every_n_batches = None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Each tensor was named when the graph was built; ":0" selects the
    # first output of the named op.
    input_tensor = loaded_graph.get_tensor_by_name("input:0")
    initial_state_tensor = loaded_graph.get_tensor_by_name("initial_state:0")
    final_state_tensor = loaded_graph.get_tensor_by_name("final_state:0")
    probs_tensor = loaded_graph.get_tensor_by_name("probs:0")
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text

    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word id proportionally to the predicted distribution instead
    # of always taking the argmax, so generated text is less repetitive.
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Rank words by descending frequency; most_common() is a stable sort,
    # so ties keep first-occurrence order exactly like the explicit sorted().
    ranked = Counter(text).most_common()
    int_to_vocab = {idx: word for idx, (word, _count) in enumerate(ranked)}
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Renamed the local from `dict`, which shadowed the builtin type.
    punctuation_tokens = {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
    return punctuation_tokens
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
Default GPU Device: /gpu:0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Batch size and sequence length are both left dynamic ([None, None]);
    # word ids are integers. Only the input placeholder needs a fixed name.
    text_input = tf.placeholder(tf.int32, [None, None], name='input')
    text_targets = tf.placeholder(tf.int32, [None, None])
    learn_rate = tf.placeholder(tf.float32)
    return text_input, text_targets, learn_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    # Single LSTM layer with dropout applied to its outputs (keep prob 0.7).
    base_cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    dropped_cell = tf.contrib.rnn.DropoutWrapper(base_cell, output_keep_prob=0.7)
    # NOTE(review): original comment says stacking more LSTM layers raised
    # the training loss, so only one layer is used here.
    stacked_cell = tf.contrib.rnn.MultiRNNCell([dropped_cell])
    # All-zeros starting state, exposed by name for later graph lookups.
    start_state = stacked_cell.zero_state(batch_size, tf.float32)
    start_state = tf.identity(start_state, name='initial_state')
    return stacked_cell, start_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable lookup table: one embed_dim-wide row per vocabulary word,
    # initialized uniformly in [-1, 1).
    lookup_table = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    embedded_input = tf.nn.embedding_lookup(lookup_table, input_data)
    return embedded_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # dynamic_rnn unrolls the cell over the time axis of `inputs`; the dtype
    # argument fixes the state type since no explicit initial state is given.
    step_outputs, end_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Give the state a stable name so it can be fetched after reloading.
    end_state = tf.identity(end_state, name='final_state')
    return step_outputs, end_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # The embedding width reuses rnn_size as the embedding dimension.
    embedded = get_embed(input_data, vocab_size, rnn_size)
    rnn_outputs, final_state = build_rnn(cell, embedded)
    # Linear projection to per-word logits. activation_fn must be None:
    # fully_connected defaults to relu, which would be wrong before softmax.
    logits = tf.contrib.layers.fully_connected(
        rnn_outputs,
        vocab_size,
        activation_fn=None,
        weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1),
        biases_initializer=tf.zeros_initializer())
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
Tests Passed
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch_size, seq_length)
    """
    # Words consumed by one full batch; a trailing partial batch is dropped.
    words_per_batch = batch_size * seq_length
    full_batches = len(int_text) // words_per_batch
    usable = full_batches * words_per_batch
    # Targets are the inputs shifted one word to the right.
    inputs = np.array(int_text[:usable]).reshape(batch_size, -1)
    targets = np.array(int_text[1:usable + 1]).reshape(batch_size, -1)
    # Split the time axis so consecutive batches stay contiguous per row.
    paired = zip(np.split(inputs, full_batches, 1),
                 np.split(targets, full_batches, 1))
    return np.asarray(list(paired))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
Epoch 0 Batch 0/13 train_loss = 8.832
Epoch 0 Batch 10/13 train_loss = 5.755
Epoch 1 Batch 7/13 train_loss = 5.092
Epoch 2 Batch 4/13 train_loss = 4.702
Epoch 3 Batch 1/13 train_loss = 4.335
Epoch 3 Batch 11/13 train_loss = 4.114
Epoch 4 Batch 8/13 train_loss = 3.829
Epoch 5 Batch 5/13 train_loss = 3.676
Epoch 6 Batch 2/13 train_loss = 3.393
Epoch 6 Batch 12/13 train_loss = 3.272
Epoch 7 Batch 9/13 train_loss = 3.066
Epoch 8 Batch 6/13 train_loss = 2.909
Epoch 9 Batch 3/13 train_loss = 2.781
Epoch 10 Batch 0/13 train_loss = 2.569
Epoch 10 Batch 10/13 train_loss = 2.503
Epoch 11 Batch 7/13 train_loss = 2.322
Epoch 12 Batch 4/13 train_loss = 2.285
Epoch 13 Batch 1/13 train_loss = 2.143
Epoch 13 Batch 11/13 train_loss = 2.074
Epoch 14 Batch 8/13 train_loss = 2.038
Epoch 15 Batch 5/13 train_loss = 1.976
Epoch 16 Batch 2/13 train_loss = 1.913
Epoch 16 Batch 12/13 train_loss = 1.818
Epoch 17 Batch 9/13 train_loss = 1.722
Epoch 18 Batch 6/13 train_loss = 1.662
Epoch 19 Batch 3/13 train_loss = 1.633
Epoch 20 Batch 0/13 train_loss = 1.563
Epoch 20 Batch 10/13 train_loss = 1.497
Epoch 21 Batch 7/13 train_loss = 1.445
Epoch 22 Batch 4/13 train_loss = 1.460
Epoch 23 Batch 1/13 train_loss = 1.364
Epoch 23 Batch 11/13 train_loss = 1.337
Epoch 24 Batch 8/13 train_loss = 1.355
Epoch 25 Batch 5/13 train_loss = 1.304
Epoch 26 Batch 2/13 train_loss = 1.267
Epoch 26 Batch 12/13 train_loss = 1.232
Epoch 27 Batch 9/13 train_loss = 1.179
Epoch 28 Batch 6/13 train_loss = 1.146
Epoch 29 Batch 3/13 train_loss = 1.160
Epoch 30 Batch 0/13 train_loss = 1.112
Epoch 30 Batch 10/13 train_loss = 1.062
Epoch 31 Batch 7/13 train_loss = 1.030
Epoch 32 Batch 4/13 train_loss = 1.033
Epoch 33 Batch 1/13 train_loss = 0.990
Epoch 33 Batch 11/13 train_loss = 0.969
Epoch 34 Batch 8/13 train_loss = 0.958
Epoch 35 Batch 5/13 train_loss = 0.941
Epoch 36 Batch 2/13 train_loss = 0.922
Epoch 36 Batch 12/13 train_loss = 0.854
Epoch 37 Batch 9/13 train_loss = 0.842
Epoch 38 Batch 6/13 train_loss = 0.823
Epoch 39 Batch 3/13 train_loss = 0.850
Epoch 40 Batch 0/13 train_loss = 0.802
Epoch 40 Batch 10/13 train_loss = 0.793
Epoch 41 Batch 7/13 train_loss = 0.773
Epoch 42 Batch 4/13 train_loss = 0.816
Epoch 43 Batch 1/13 train_loss = 0.758
Epoch 43 Batch 11/13 train_loss = 0.759
Epoch 44 Batch 8/13 train_loss = 0.786
Epoch 45 Batch 5/13 train_loss = 0.776
Epoch 46 Batch 2/13 train_loss = 0.768
Epoch 46 Batch 12/13 train_loss = 0.777
Epoch 47 Batch 9/13 train_loss = 0.796
Epoch 48 Batch 6/13 train_loss = 0.757
Epoch 49 Batch 3/13 train_loss = 0.803
Epoch 50 Batch 0/13 train_loss = 0.773
Epoch 50 Batch 10/13 train_loss = 0.757
Epoch 51 Batch 7/13 train_loss = 0.726
Epoch 52 Batch 4/13 train_loss = 0.735
Epoch 53 Batch 1/13 train_loss = 0.686
Epoch 53 Batch 11/13 train_loss = 0.674
Epoch 54 Batch 8/13 train_loss = 0.691
Epoch 55 Batch 5/13 train_loss = 0.646
Epoch 56 Batch 2/13 train_loss = 0.678
Epoch 56 Batch 12/13 train_loss = 0.637
Epoch 57 Batch 9/13 train_loss = 0.605
Epoch 58 Batch 6/13 train_loss = 0.614
Epoch 59 Batch 3/13 train_loss = 0.622
Epoch 60 Batch 0/13 train_loss = 0.591
Epoch 60 Batch 10/13 train_loss = 0.580
Epoch 61 Batch 7/13 train_loss = 0.586
Epoch 62 Batch 4/13 train_loss = 0.597
Epoch 63 Batch 1/13 train_loss = 0.574
Epoch 63 Batch 11/13 train_loss = 0.569
Epoch 64 Batch 8/13 train_loss = 0.558
Epoch 65 Batch 5/13 train_loss = 0.571
Epoch 66 Batch 2/13 train_loss = 0.565
Epoch 66 Batch 12/13 train_loss = 0.540
Epoch 67 Batch 9/13 train_loss = 0.520
Epoch 68 Batch 6/13 train_loss = 0.509
Epoch 69 Batch 3/13 train_loss = 0.556
Epoch 70 Batch 0/13 train_loss = 0.531
Epoch 70 Batch 10/13 train_loss = 0.503
Epoch 71 Batch 7/13 train_loss = 0.521
Epoch 72 Batch 4/13 train_loss = 0.512
Epoch 73 Batch 1/13 train_loss = 0.513
Epoch 73 Batch 11/13 train_loss = 0.500
Epoch 74 Batch 8/13 train_loss = 0.536
Epoch 75 Batch 5/13 train_loss = 0.532
Epoch 76 Batch 2/13 train_loss = 0.529
Epoch 76 Batch 12/13 train_loss = 0.501
Epoch 77 Batch 9/13 train_loss = 0.510
Epoch 78 Batch 6/13 train_loss = 0.528
Epoch 79 Batch 3/13 train_loss = 0.550
Epoch 80 Batch 0/13 train_loss = 0.518
Epoch 80 Batch 10/13 train_loss = 0.533
Epoch 81 Batch 7/13 train_loss = 0.524
Epoch 82 Batch 4/13 train_loss = 0.537
Epoch 83 Batch 1/13 train_loss = 0.519
Epoch 83 Batch 11/13 train_loss = 0.509
Epoch 84 Batch 8/13 train_loss = 0.515
Epoch 85 Batch 5/13 train_loss = 0.532
Epoch 86 Batch 2/13 train_loss = 0.531
Epoch 86 Batch 12/13 train_loss = 0.519
Epoch 87 Batch 9/13 train_loss = 0.528
Epoch 88 Batch 6/13 train_loss = 0.505
Epoch 89 Batch 3/13 train_loss = 0.538
Epoch 90 Batch 0/13 train_loss = 0.513
Epoch 90 Batch 10/13 train_loss = 0.505
Epoch 91 Batch 7/13 train_loss = 0.510
Epoch 92 Batch 4/13 train_loss = 0.515
Epoch 93 Batch 1/13 train_loss = 0.529
Epoch 93 Batch 11/13 train_loss = 0.517
Epoch 94 Batch 8/13 train_loss = 0.527
Epoch 95 Batch 5/13 train_loss = 0.546
Epoch 96 Batch 2/13 train_loss = 0.540
Epoch 96 Batch 12/13 train_loss = 0.527
Epoch 97 Batch 9/13 train_loss = 0.508
Epoch 98 Batch 6/13 train_loss = 0.519
Epoch 99 Batch 3/13 train_loss = 0.551
Model Trained and Saved
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Each tensor was given an explicit name when the training graph was
    # built, so it can be recovered from the reloaded graph by name.
    tensor_names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word proportionally to the predicted distribution rather than
    # always taking the argmax, keeping the generated script varied.
    candidates = list(int_to_vocab.values())
    sampled = np.random.choice(candidates, p=probabilities)
    return sampled
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
moe_szyslak:(gets idea) 'cause this broad stands. booking. that's like shooting a great man.
krusty_the_clown: wait a... zero sheets to the wind... stir a possibly to forget your problems.
lisa_simpson: that was the coaster.
ned_flanders: you can't close down the bar while i'm in
moe_szyslak: okay, but he's down. but how to get back on your feet.
homer_simpson: not at that even show store.
agent_johnson: you're under arrest for conspiracy!
moe_szyslak: okay, here's the sunday, have you got to bet the money, it might work.(nervous chuckle)
homer_simpson: skoal!(sips) but carl carlson, i never had in love on... eve. and for a drunk.
barney_gumble: i wish some of us.
homer_simpson: you, your honor. you know, you're right and i wanna take it.
seymour_skinner:(shaking hands) principal seymour needs some professional help.(small sob)
homer_simpson: with the last, but you see the game
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    from collections import Counter

    # Order words by descending frequency so the most common word gets id 0;
    # sorted() is stable, so ties keep their first-occurrence order.
    counts = Counter(text)
    vocab = sorted(counts, key=counts.get, reverse=True)
    vocab_to_int = {word: ii for ii, word in enumerate(vocab)}
    int_to_vocab = {ii: word for ii, word in enumerate(vocab)}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
_____no_output_____
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are wrapped in "||...||" so they can never be confused with a
    # real vocabulary word when the text is split on spaces.
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
_____no_output_____
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
_____no_output_____
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # (batch, sequence) shapes are left as None so any batch size / sequence
    # length can be fed at run time; "input" is named so generation code can
    # fetch it back as "input:0".
    inputs = tf.placeholder(tf.int32, (None, None), name='input')
    targets = tf.placeholder(tf.int32, (None, None), name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
_____no_output_____
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm_layers = 2
    # Build a distinct BasicLSTMCell per layer; reusing a single cell object
    # ([cell] * n) makes the layers share state/weights on newer TF 1.x.
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size) for _ in range(lstm_layers)])
    # Name the zero state so it can be fetched later as "initial_state:0".
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32),
                                name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
_____no_output_____
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable lookup table, initialised uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
_____no_output_____
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell over the time dimension; name the last state so it can
    # be recovered as "final_state:0" after the graph is reloaded.
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    final_state = tf.identity(state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    # NOTE(review): embedding width is fixed here because the signature does
    # not expose it — confirm the desired size.
    embed_dim = 300
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Linear (no activation) projection to per-word scores.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size,
                                               activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2 3], [ 7 8 9]], Batch of targets [[ 2 3 4], [ 8 9 10]] ], Second Batch [ Batch of Input [[ 4 5 6], [10 11 12]], Batch of targets [[ 5 6 7], [11 12 13]] ]]```
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array of shape (n_batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    if n_batches == 0:
        # Not enough words for even one full batch.
        return np.array([])
    # Drop the tail that cannot fill a whole batch.
    used = n_batches * words_per_batch
    xdata = np.array(int_text[:used])
    # Targets are the inputs shifted one word left; the very last target is
    # the first unused word when one exists, otherwise it wraps to word 0.
    ydata = np.roll(xdata, -1)
    if len(int_text) > used:
        ydata[-1] = int_text[used]
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Hyperparameters chosen to match the visible training run (13 batches per
# epoch on the ~69k-word Moe's Tavern subset, progress printed every 10 steps).
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 512
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Fetch each tensor by the explicit name it was given when the training
    # graph was built.
    input_tensor = loaded_graph.get_tensor_by_name("input:0")
    initial_state_tensor = loaded_graph.get_tensor_by_name("initial_state:0")
    final_state_tensor = loaded_graph.get_tensor_by_name("final_state:0")
    probs_tensor = loaded_graph.get_tensor_by_name("probs:0")
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample from the predicted distribution instead of taking the argmax so
    # the generated script does not loop on the most likely word.
    return np.random.choice(list(int_to_vocab.values()), 1, p=probabilities)[0]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
Dataset Stats
Roughly the number of unique words: 11492
Number of scenes: 262
Average number of sentences in each scene: 15.248091603053435
Number of lines: 4257
Average number of words in each line: 11.50434578341555
The sentences 0 to 10:
Moe_Szyslak: (INTO PHONE) Moe's Tavern. Where the elite meet to drink.
Bart_Simpson: Eh, yeah, hello, is Mike there? Last name, Rotch.
Moe_Szyslak: (INTO PHONE) Hold on, I'll check. (TO BARFLIES) Mike Rotch. Mike Rotch. Hey, has anybody seen Mike Rotch, lately?
Moe_Szyslak: (INTO PHONE) Listen you little puke. One of these days I'm gonna catch you, and I'm gonna carve my name on your back with an ice pick.
Moe_Szyslak: What's the matter Homer? You're not your normal effervescent self.
Homer_Simpson: I got my problems, Moe. Give me another one.
Moe_Szyslak: Homer, hey, you should not drink to forget your problems.
Barney_Gumble: Yeah, you should only drink to enhance your social skills.
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
from collections import Counter
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # The most frequent word receives id 0, the next id 1, and so on;
    # sorted() is stable, so equally frequent words keep their order.
    word_counts = Counter(text)
    ordered = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = dict(enumerate(ordered))
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
Tests Passed
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Pair each punctuation symbol with a token that cannot be mistaken for
    # a real vocabulary word once the text is split on spaces.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    tokens = ['|period|', '|comma|', '|quotation_mark|', '|semicolon|',
              '|exclamation_mark|', '|question_mark|', '|left_parentheses|',
              '|right_parentheses|', '|dash|', '|return|']
    return dict(zip(symbols, tokens))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
Tests Passed
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
TensorFlow Version: 1.0.0
Default GPU Device: /gpu:0
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Both (batch, sequence) dimensions stay dynamic; only "input" needs a
    # name so the generation code can fetch it back as "input:0".
    # (Local renamed from `input` to avoid shadowing the builtin.)
    inputs = tf.placeholder(tf.int32, shape=(None, None), name='input')
    targets = tf.placeholder(tf.int32, shape=(None, None), name='targets')
    lr = tf.placeholder(tf.float32)
    return inputs, targets, lr
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
Tests Passed
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm_layers = 2
    # Build a distinct BasicLSTMCell per layer. The original `[cell] *
    # lstm_layers` reuses one cell object for every layer, which triggers
    # variable-sharing / dimension errors on TF >= 1.1 when stacking.
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size) for _ in range(lstm_layers)])
    # Name the zero state so it can be fetched later as "initial_state:0".
    initial_state = cell.zero_state(batch_size, tf.float32)
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
Tests Passed
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    embedding_matrix = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    # Look up the embedding vector for every word id in the input batch.
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
Tests Passed
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell dynamically along the (variable-length) time dimension.
    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched from a restored graph.
    named_state = tf.identity(last_state, name="final_state")
    return outputs, named_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
Tests Passed
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim=300):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # BUG FIX: pass the caller-supplied embed_dim through instead of
    # hard-coding embed_dim=300, so the hyperparameter actually takes effect.
    embed = get_embed(input_data, vocab_size, embed_dim)
    output, final_state = build_rnn(cell, embed)
    # Fully connected output layer with linear activation:
    # one logit per vocabulary word at every time step.
    logits = tf.contrib.layers.fully_connected(
        output, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    num_batches = len(int_text) // words_per_batch
    # Drop trailing words that do not fill a complete batch.
    inputs = np.array(int_text[:num_batches * words_per_batch])
    # Targets are the inputs shifted one step left; np.roll makes the final
    # target wrap around to the first input word, as the spec requires.
    targets = np.roll(inputs, -1)
    # Reshape to (batch_size, num_batches * seq_length), then slice along the
    # time axis so consecutive batches continue each row's sequence.
    input_batches = np.split(inputs.reshape(batch_size, -1), num_batches, axis=1)
    target_batches = np.split(targets.reshape(batch_size, -1), num_batches, axis=1)
    # Pair (input, target) per batch; also removes the old debug print.
    return np.array(list(zip(input_batches, target_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
[0, 1, 127, 5]
Tests Passed
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 512
# Sequence Length
seq_length = 64
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 100
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim=300)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
[0, 1, 127, 64]
Epoch 0 Batch 0/8 train_loss = 8.821
Epoch 12 Batch 4/8 train_loss = 6.211
Epoch 25 Batch 0/8 train_loss = 6.132
Epoch 37 Batch 4/8 train_loss = 6.209
Epoch 50 Batch 0/8 train_loss = 6.107
Epoch 62 Batch 4/8 train_loss = 5.423
Epoch 75 Batch 0/8 train_loss = 4.588
Epoch 87 Batch 4/8 train_loss = 4.202
Model Trained and Saved
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Fetch the four named tensors in the order the caller expects.
    wanted = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in wanted)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
Tests Passed
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy decoding: take the word id with the highest probability.
    best_id = int(np.argmax(probabilities))
    return int_to_vocab[best_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
Tests Passed
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
moe_szyslak:(love man) don't get sorry with you like, but i don't can.
homer_simpson:(to homer) hey, i don't want to what's the what's was what's you what's to what's you--(find)...
homer_simpson:(to homer) hey, i don't want to what's the looking.
homer_simpson:(to make) the little" great was man... i was, but i just guys to take my he's to go back out of my into.
moe_szyslak: no, i then get goin'.
moe_szyslak: i don't can you're a into.
moe_szyslak: hey, i was just...
moe_szyslak: i off now an beer, you ooh you a huh what's i have to go.
homer_simpson: i don't can.
homer_simpson: i off some man into!
homer_simpson: i can.
homer_simpson:(to homer) you can i don't have to get you out of the ya. i was just like my beer.
homer_simpson: i don't can.
moe_szyslak:(to homer) you
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
_____no_output_____
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Deduplicate the words (sorted for a deterministic id assignment),
    # then give each word a unique integer id and build the inverse map.
    vocab = sorted(set(text))
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    int_to_vocab = {idx: word for word, idx in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
_____no_output_____
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are wrapped in '||' so they cannot be confused with real words.
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
_____no_output_____
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
_____no_output_____
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # 2-D int placeholders, shape (batch size, sequence length); both dims
    # unknown at graph-construction time.
    inputs = tf.placeholder(tf.int32, shape=(None, None), name='input')
    targets = tf.placeholder(tf.int32, shape=(None, None), name='targets')
    # Scalar learning rate; named so it can be retrieved from a restored graph.
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
_____no_output_____
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm_layers = 2
    # One fresh BasicLSTMCell per layer; reusing a single cell object would
    # share weights across layers and errors in newer TF 1.x releases.
    cells = [tf.contrib.rnn.BasicLSTMCell(num_units=rnn_size)
             for _ in range(lstm_layers)]
    cell = tf.contrib.rnn.MultiRNNCell(cells)
    initial_state = cell.zero_state(batch_size, tf.float32)
    # Name the state tensor so it can be fetched from a restored graph.
    initial_state = tf.identity(initial_state, name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
_____no_output_____
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    embedding_matrix = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    # Look up the embedding vector for every word id in the input batch.
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
_____no_output_____
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell dynamically along the (variable-length) time dimension.
    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be fetched from a restored graph.
    named_state = tf.identity(last_state, name="final_state")
    return outputs, named_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Embed the word ids, run them through the RNN, then project each output
    # to vocabulary logits with a linear (no activation) dense layer.
    embed = get_embed(input_data, vocab_size, embed_dim)
    output, final_state = build_rnn(cell, embed)
    logits = tf.contrib.layers.fully_connected(
        output, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch_size, seq_length)
    """
    words_per_batch = batch_size * seq_length
    num_batches = len(int_text) // words_per_batch
    # Drop trailing words that do not fill a complete batch.
    inputs = np.array(int_text[:num_batches * words_per_batch])
    # Targets are the inputs shifted one step left; np.roll makes the final
    # target wrap around to the first input word, as the spec requires.
    targets = np.roll(inputs, -1)
    # Reshape to (batch_size, num_batches * seq_length), then slice along the
    # time axis so consecutive batches continue each row's sequence.
    input_batches = np.split(inputs.reshape(batch_size, -1), num_batches, axis=1)
    target_batches = np.split(targets.reshape(batch_size, -1), num_batches, axis=1)
    return np.array(list(zip(input_batches, target_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = None
# Learning Rate
learning_rate = None
# Show stats for every n number of batches
show_every_n_batches = None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Fetch the four named tensors in the order the caller expects.
    wanted = ('input:0', 'initial_state:0', 'final_state:0', 'probs:0')
    return tuple(loaded_graph.get_tensor_by_name(name) for name in wanted)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Greedy decoding: take the word id with the highest probability.
    best_id = int(np.argmax(probabilities))
    return int_to_vocab[best_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
###Output
_____no_output_____
###Markdown
TV Script GenerationIn this project, you'll generate your own [Simpsons](https://en.wikipedia.org/wiki/The_Simpsons) TV scripts using RNNs. You'll be using part of the [Simpsons dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data) of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at [Moe's Tavern](https://simpsonswiki.com/wiki/Moe's_Tavern). Get the DataThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
###Output
_____no_output_____
###Markdown
Explore the DataPlay around with `view_sentence_range` to view different parts of the data.
###Code
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
###Output
_____no_output_____
###Markdown
Implement Preprocessing FunctionsThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
###Code
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Sort the unique words so the id assignment is deterministic across runs.
    vocab = sorted(set(text))
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    # Inverse mapping, built from the forward one so the two always agree.
    int_to_vocab = {idx: word for word, idx in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
###Output
_____no_output_____
###Markdown
Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( . )- Comma ( , )- Quotation Mark ( " )- Semicolon ( ; )- Exclamation mark ( ! )- Question mark ( ? )- Left Parentheses ( ( )- Right Parentheses ( ) )- Dash ( -- )- Return ( \n )This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
###Code
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are wrapped in '||' so they cannot be confused with real words
    # in the corpus (per the instructions above this cell).
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
###Output
_____no_output_____
###Markdown
Preprocess all the data and save itRunning the code cell below will preprocess all the data and save it to file.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
###Output
_____no_output_____
###Markdown
Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
###Output
_____no_output_____
###Markdown
Build the Neural NetworkYou'll build the components necessary to build a RNN by implementing the following functions below:- get_inputs- get_init_cell- get_embed- build_rnn- build_nn- get_batches Check the Version of TensorFlow and Access to GPU
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
###Output
_____no_output_____
###Markdown
InputImplement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:- Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.- Targets placeholder- Learning Rate placeholderReturn the placeholders in the following tuple `(Input, Targets, LearningRate)`
###Code
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Shape [None, None] = (batch size, sequence length), both of which are
    # determined at run time so the graph can be reused for generation.
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
###Output
_____no_output_____
###Markdown
Build RNN Cell and InitializeStack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).- The Rnn size should be set using `rnn_size`- Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCellzero_state) function - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the cell and initial state in the following tuple `(Cell, InitialState)`
###Code
def get_init_cell(batch_size, rnn_size, num_layers=2):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :param num_layers: Number of stacked LSTM layers (default 2)
    :return: Tuple (cell, initialize state)
    """
    # Stack BasicLSTMCells in a MultiRNNCell; each layer needs its own cell
    # instance, hence the list comprehension.
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
    # Name the zero state so it can be retrieved by name after the graph is
    # re-loaded from disk (see get_tensors below).
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32),
                                name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
###Output
_____no_output_____
###Markdown
Word EmbeddingApply embedding to `input_data` using TensorFlow. Return the embedded sequence.
###Code
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    # Look up the embedding row for every word id in input_data.
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
###Output
_____no_output_____
###Markdown
Build RNNYou created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.- Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
###Code
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # dynamic_rnn unrolls the cell over the time dimension of `inputs`.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be retrieved by name after reloading
    # the graph (see get_tensors below).
    final_state = tf.identity(final_state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
###Output
_____no_output_____
###Markdown
Build the Neural NetworkApply the functions you implemented above to:- Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.- Build RNN using `cell` and your `build_rnn(cell, inputs)` function.- Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.Return the logits and final state in the following tuple (Logits, FinalState)
###Code
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    # Word ids -> dense embeddings -> RNN outputs.
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Linear projection (no activation) from RNN outputs to vocabulary logits;
    # softmax is applied later when building the full graph.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size,
                                               activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
###Output
_____no_output_____
###Markdown
BatchesImplement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:- The first element is a single batch of **input** with the shape `[batch size, sequence length]`- The second element is a single batch of **targets** with the shape `[batch size, sequence length]`If you can't fill the last batch with enough data, drop the last batch.For exmple, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:```[ First Batch [ Batch of Input [[ 1 2], [ 7 8], [13 14]] Batch of targets [[ 2 3], [ 8 9], [14 15]] ] Second Batch [ Batch of Input [[ 3 4], [ 9 10], [15 16]] Batch of targets [[ 4 5], [10 11], [16 17]] ] Third Batch [ Batch of Input [[ 5 6], [11 12], [17 18]] Batch of targets [[ 6 7], [12 13], [18 1]] ]]```Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
###Code
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch size, sequence length);
             an empty array if there is not enough data for one full batch
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    # Not enough words for a single full batch.
    if n_batches == 0:
        return np.array([])
    # Drop the tail that does not fill a whole batch.
    xdata = np.array(int_text[:n_batches * words_per_batch])
    # Targets are the inputs shifted one step left; np.roll wraps the very
    # last target around to the first input value (per the spec above).
    ydata = np.roll(xdata, -1)
    # Lay the corpus out as `batch_size` parallel streams (rows), then cut
    # each stream into consecutive `seq_length`-wide windows (columns).
    x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
###Output
_____no_output_____
###Markdown
Neural Network Training HyperparametersTune the following parameters:- Set `num_epochs` to the number of epochs.- Set `batch_size` to the batch size.- Set `rnn_size` to the size of the RNNs.- Set `embed_dim` to the size of the embedding.- Set `seq_length` to the length of sequence.- Set `learning_rate` to the learning rate.- Set `show_every_n_batches` to the number of batches the neural network should print progress.
###Code
# Number of Epochs
num_epochs = None
# Batch Size
batch_size = None
# RNN Size
rnn_size = None
# Embedding Dimension Size
embed_dim = None
# Sequence Length
seq_length = None
# Learning Rate
learning_rate = None
# Show stats for every n number of batches
show_every_n_batches = None
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
###Output
_____no_output_____
###Markdown
Build the GraphBuild the graph using the neural network you implemented.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
# Assemble the training graph: inputs -> embedding -> RNN -> logits, plus
# the sequence loss and a gradient-clipped Adam training op.
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Batch size is taken from the runtime input shape so the same graph can
    # later be reused for generation with a batch size of 1.
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')
    # Loss function: average cross-entropy over every position, with uniform
    # weights (the tf.ones matrix).
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))
    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)
    # Gradient Clipping to [-1, 1] guards against exploding RNN gradients;
    # None gradients (variables not on the loss path) are skipped.
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)
###Output
_____no_output_____
###Markdown
TrainTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(num_epochs):
        # Reset the RNN state at the start of every epoch.
        state = sess.run(initial_state, {input_text: batches[0][0]})
        for batch_i, (x, y) in enumerate(batches):
            # Feed the previous batch's final state back in, so the network
            # sees the corpus as one continuous sequence within an epoch.
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))
    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
###Output
_____no_output_____
###Markdown
Save ParametersSave `seq_length` and `save_dir` for generating a new TV script.
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
###Output
_____no_output_____
###Markdown
Checkpoint
###Code
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
###Output
_____no_output_____
###Markdown
Implement Generate Functions Get TensorsGet tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graphget_tensor_by_name). Get the tensors using the following names:- "input:0"- "initial_state:0"- "final_state:0"- "probs:0"Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
###Code
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The ':0' suffix selects the first output of each named op; the names
    # match those assigned with tf.identity()/name= when building the graph.
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
###Output
_____no_output_____
###Markdown
Choose WordImplement the `pick_word()` function to select the next word using `probabilities`.
###Code
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilities of the next word (sums to 1)
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample from the full distribution (rather than taking the argmax) so
    # the generated script is varied instead of looping on the likeliest word.
    word_id = np.random.choice(len(int_to_vocab), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
###Output
_____no_output_____
###Markdown
Generate TV ScriptThis will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
###Code
# Number of words to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup: seed the script with "<speaker>:"
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last `seq_length` generated words
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction
        # NOTE(review): indexing probabilities[dyn_seq_length-1] assumes the
        # batch dimension has been squeezed out of `probs` — confirm shape.
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: map the punctuation tokens back to their symbols
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        # NOTE(review): `ending` is computed but never used below — vestigial.
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
###Output
_____no_output_____ |
tutorials/data-glue-example.ipynb | ###Markdown
This notebook illustrates some functionnalities exposed in the `DataGlue` package. See also [iris_exploration](./iris_exploration.ipynb) for basics data science demonstrations. Read dataframe content Here, we read the [salaries dataset](../datasets/salaries.csv) as a `Frame`. The type of its row, `SalaryRow`, is dynamically inferred by the parser.
###Code
:ext DataKinds FlexibleContexts QuasiQuotes OverloadedStrings TemplateHaskell TypeApplications TypeOperators ViewPatterns
import DataGlue.Frames
datasource = "../datasets/salaries.csv"
tableTypes "SalaryRow" datasource
loadRows :: IO (Frame SalaryRow)
loadRows = inCoreAoS (readTable datasource)
salaries <- loadRows
###Output
_____no_output_____
###Markdown
Then, we can show the dataframe content, simply by calling it.
###Code
-- Show dataframe content.
salaries
###Output
_____no_output_____
###Markdown
As the dataframe has many rows, only its first and last rows are shown. We get the total number of rows using the function `length`.
###Code
length salaries
###Output
_____no_output_____
###Markdown
It is also usefull to get the column names and types:
###Code
describe salaries
###Output
_____no_output_____
###Markdown
Read dataframe content (without header) Unlike the previous example, the [iris dataset](../datasets/iris.csv) has no headers. So, we define them here manually, then proceed to the parsing.
###Code
import Frames.CSV (rowGen, columnNames, tablePrefix, rowTypeName)
-- Since the used dataset as no header, let's define the column names.
datasource = "../datasets/iris.csv"
tableTypes' (rowGen datasource)
{ rowTypeName = "IrisRow"
, columnNames = [ "Petal Length", "Petal Width", "Sepal Length" , "Sepal Width", "Iris Class" ]}
loadRows :: IO (Frame IrisRow)
loadRows = inCoreAoS (readTable datasource)
iris <- loadRows
-- Show dataframe content.
iris
###Output
_____no_output_____
###Markdown
Print records Some functions are exposed to read partial content of a dataframe.Read one line:
###Code
frameRow salaries 5
###Output
_____no_output_____
###Markdown
Show the 5th first lines:
###Code
takeFrameRow 5 salaries
###Output
_____no_output_____
###Markdown
Or the 5th last, by removing all the rows but 5, from the begining:
###Code
dropFrameRow (length salaries - 5) salaries
###Output
_____no_output_____
###Markdown
Using `Proxy`, we can also make a selection, to get only the features we want to explore:
###Code
import Data.Proxy
select @'[YrsSincePhd, Salary] Proxy <$> salaries
###Output
_____no_output_____
###Markdown
Using `Lens`, it is also simple to get one feature content:
###Code
import Control.Lens
view salary <$> salaries
###Output
_____no_output_____
###Markdown
Basic operations based on criteria Here we define a criteria function that anwser `True` only when the given row invovles a women at the rank of Professor. Based on it, we count the number of female Professor in the dataframe.
###Code
-- Predicate: True only for rows whose Rank is "Prof" and Sex is "Female".
-- A top-level function will be designed in the future to give this kind of call more abstraction.
femaleProf = runcurry' criteria . select @'[Rank, Sex] Proxy
  where
    criteria "Prof" "Female" = True
    criteria _ _ = False
-- Filter the salaries frame with the predicate and count the matching rows.
fp_df = filterFrame femaleProf salaries
length fp_df
###Output
_____no_output_____
###Markdown
Chart plotting This is a simple example of chart plotting using some groupBy fonctionnalities provided by `DataGlue.Frames.GroupBy`.
###Code
import qualified DataGlue.Frames.GroupBy as G
import Data.Text (unpack)
import DataGlue.Chart
sums = G.groupByOp discipline salaries (G.sum) [yrsSincePhd, yrsService]
alabels = ["yrsSincePhd","yrsService"]
bars2 = plot_bars_titles .~ (unpack <$> uniques discipline salaries)
$ plot_bars_values .~ addIndexes sums
$ def
toRenderable
$ layout_title .~ "Sum of Knowledge by discipline"
$ layout_x_axis . laxis_generate .~ autoIndexAxis alabels
$ layout_plots .~ [ plotBars bars2 ]
$ def
###Output
_____no_output_____ |
examples/notebooks/statespace_sarimax_pymc3.ipynb | ###Markdown
Fast Bayesian estimation of SARIMAX models IntroductionThis notebook will show how to use fast Bayesian methods to estimate SARIMAX (Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors) models. These methods can also be parallelized across multiple cores.Here, fast methods means a version of Hamiltonian Monte Carlo called the No-U-Turn Sampler (NUTS) developed by Hoffmann and Gelman: see [Hoffman, M. D., & Gelman, A. (2014). The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. Journal of Machine Learning Research, 15(1), 1593-1623.](https://arxiv.org/abs/1111.4246). As they say, "the cost of HMC per independent sample from a target distribution of dimension $D$ is roughly $\mathcal{O}(D^{5/4})$, which stands in sharp contrast with the $\mathcal{O}(D^{2})$ cost of random-walk Metropolis". So for problems of larger dimension, the time-saving with HMC is significant. However it does require the gradient, or Jacobian, of the model to be provided.This notebook will combine the Python libraries [statsmodels](https://www.statsmodels.org/stable/index.html), which does econometrics, and [PyMC3](https://docs.pymc.io/), which is for Bayesian estimation, to perform fast Bayesian estimation of a simple SARIMAX model, in this case an ARMA(1, 1) model for US CPI.Note that, for simple models like AR(p), base PyMC3 is a quicker way to fit a model; there's an [example here](https://docs.pymc.io/notebooks/AR.html). The advantage of using statsmodels is that it gives access to methods that can solve a vast range of statespace models.The model we'll solve is given by$$y_t = \phi y_{t-1} + \varepsilon_t + \theta_1 \varepsilon_{t-1}, \qquad \varepsilon_t \sim N(0, \sigma^2)$$with 1 auto-regressive term and 1 moving average term. 
In statespace form it is written as:$$\begin{align}y_t & = \underbrace{\begin{bmatrix} 1 & \theta_1 \end{bmatrix}}_{Z} \underbrace{\begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix}}_{\alpha_t} \\ \begin{bmatrix} \alpha_{1,t+1} \\ \alpha_{2,t+1} \end{bmatrix} & = \underbrace{\begin{bmatrix} \phi & 0 \\ 1 & 0 \\ \end{bmatrix}}_{T} \begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix} + \underbrace{\begin{bmatrix} 1 \\ 0 \end{bmatrix}}_{R} \underbrace{\varepsilon_{t+1}}_{\eta_t} \\\end{align}$$The code will follow these steps:1. Import external dependencies2. Download and plot the data on US CPI3. Simple maximum likelihood estimation (MLE) as an example4. Definitions of helper functions to provide tensors to the library doing Bayesian estimation5. Bayesian estimation via NUTS6. Application to US CPI seriesFinally, Appendix A shows how to re-use the helper functions from step (4) to estimate a different state space model, `UnobservedComponents`, using the same Bayesian methods. 1. Import external dependencies
###Code
%matplotlib inline
import theano
import theano.tensor as tt
import pymc3 as pm
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import pandas as pd
from pandas_datareader.data import DataReader
from pandas.plotting import register_matplotlib_converters
plt.style.use('seaborn')
register_matplotlib_converters()
###Output
_____no_output_____
###Markdown
2. Download and plot the data on US CPIWe'll get the data from FRED:
###Code
cpi = DataReader('CPIAUCNS', 'fred', start='1971-01', end='2018-12')
cpi.index = pd.DatetimeIndex(cpi.index, freq='MS')
# Define the inflation series that we'll use in analysis
inf = np.log(cpi).resample('QS').mean().diff()[1:] * 400
print(inf.head())
# Plot the series
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
ax.plot(inf.index, inf, label=r'$\Delta \log CPI$', lw=2)
ax.legend(loc='lower left')
plt.show()
###Output
_____no_output_____
###Markdown
3. Fit the model with maximum likelihoodStatsmodels does all of the hard work of this for us - creating and fitting the model takes just two lines of code. The model order parameters correspond to auto-regressive, difference, and moving average orders respectively.
###Code
# Create an SARIMAX model instance - here we use it to estimate
# the parameters via MLE using the `fit` method, but we can
# also re-use it below for the Bayesian estimation
mod = sm.tsa.statespace.SARIMAX(inf, order=(1, 0, 1))
res_mle = mod.fit(disp=False)
print(res_mle.summary())
###Output
_____no_output_____
###Markdown
It's a good fit. We can also get the series of one-step ahead predictions and plot it next to the actual data, along with a confidence band.
###Code
predict_mle = res_mle.get_prediction()
predict_mle_ci = predict_mle.conf_int()
lower = predict_mle_ci['lower CPIAUCNS']
upper = predict_mle_ci['upper CPIAUCNS']
# Graph
fig, ax = plt.subplots(figsize=(9,4), dpi=300)
# Plot data points
inf.plot(ax=ax, style='-', label='Observed')
# Plot predictions
predict_mle.predicted_mean.plot(ax=ax, style='r.', label='One-step-ahead forecast')
ax.fill_between(predict_mle_ci.index, lower, upper, color='r', alpha=0.1)
ax.legend(loc='lower left')
plt.show()
###Output
_____no_output_____
###Markdown
4. Helper functions to provide tensors to the library doing Bayesian estimationWe're almost on to the magic but there are a few preliminaries. Feel free to skip this section if you're not interested in the technical details. Technical DetailsPyMC3 is a Bayesian estimation library ("Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano") that is a) fast and b) optimized for Bayesian machine learning, for instance [Bayesian neural networks](https://docs.pymc.io/notebooks/bayesian_neural_network_advi.html). To do all of this, it is built on top of a Theano, a library that aims to evaluate tensors very efficiently and provide symbolic differentiation (necessary for any kind of deep learning). It is the symbolic differentiation that means PyMC3 can use NUTS on any problem formulated within PyMC3.We are not formulating a problem directly in PyMC3; we're using statsmodels to specify the statespace model and solve it with the Kalman filter. So we need to put the plumbing of statsmodels and PyMC3 together, which means wrapping the statsmodels SARIMAX model object in a Theano-flavored wrapper before passing information to PyMC3 for estimation.Because of this, we can't use the Theano auto-differentiation directly. Happily, statsmodels SARIMAX objects have a method to return the Jacobian evaluated at the parameter values. We'll be making use of this to provide gradients so that we can use NUTS. Defining helper functions to translate models into a PyMC3 friendly formFirst, we'll create the Theano wrappers. They will be in the form of 'Ops', operation objects, that 'perform' particular tasks. They are initialized with a statsmodels `model` instance.Although this code may look somewhat opaque, it is generic for any state space model in statsmodels.
###Code
class Loglike(tt.Op):
    """Theano Op that evaluates a statsmodels state space model's
    log-likelihood at a parameter vector. Gradients are provided via the
    `Score` Op so gradient-based samplers (e.g. NUTS) can be used."""
    itypes = [tt.dvector] # expects a vector of parameter values when called
    otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)
    def __init__(self, model):
        # model: a statsmodels state space model instance (e.g. SARIMAX)
        self.model = model
        self.score = Score(self.model)  # Jacobian Op, reused by grad()
    def perform(self, node, inputs, outputs):
        theta, = inputs # contains the vector of parameters
        llf = self.model.loglike(theta)
        outputs[0][0] = np.array(llf) # output the log-likelihood
    def grad(self, inputs, g):
        # the method that calculates the gradients - it actually returns the
        # vector-Jacobian product - g[0] is the upstream gradient vector
        theta, = inputs # our parameters
        out = [g[0] * self.score(theta)]
        return out
class Score(tt.Op):
    """Theano Op that evaluates the score (gradient of the log-likelihood
    with respect to the parameters) of a statsmodels model."""
    itypes = [tt.dvector]  # parameter vector
    otypes = [tt.dvector]  # score vector, same length as the parameters
    def __init__(self, model):
        # model: the same statsmodels model instance wrapped by Loglike
        self.model = model
    def perform(self, node, inputs, outputs):
        theta, = inputs
        # Delegate to statsmodels' score() at the given parameter vector.
        outputs[0][0] = self.model.score(theta)
###Output
_____no_output_____
###Markdown
5. Bayesian estimation with NUTSThe next step is to set the parameters for the Bayesian estimation, specify our priors, and run it.
###Code
# Set sampling params
ndraws = 3000 # number of draws from the distribution
nburn = 600 # number of "burn-in points" (which will be discarded)
###Output
_____no_output_____
###Markdown
Now for the fun part! There are three parameters to estimate: $\phi$, $\theta_1$, and $\sigma$. We'll use uninformative uniform priors for the first two, and an inverse gamma for the last one. Then we'll run the inference optionally using as many computer cores as I have.
###Code
# Construct an instance of the Theano wrapper defined above, which
# will allow PyMC3 to compute the likelihood and Jacobian in a way
# that it can make use of. Here we are using the same model instance
# created earlier for MLE analysis (we could also create a new model
# instance if we preferred)
loglike = Loglike(mod)
with pm.Model():
    # Priors: uninformative uniforms for the ARMA coefficients,
    # inverse-gamma for the innovation variance
    arL1 = pm.Uniform('ar.L1', -0.99, 0.99)
    maL1 = pm.Uniform('ma.L1', -0.99, 0.99)
    sigma2 = pm.InverseGamma('sigma2', 2, 4)
    # convert variables to a tensor vector, in the same parameter order
    # used by the statsmodels model (see res_mle.params above)
    theta = tt.as_tensor_variable([arL1, maL1, sigma2])
    # use a DensityDist (use a lambda function to "call" the Op)
    pm.DensityDist('likelihood', lambda v: loglike(v), observed={'v': theta})
    # Draw samples; NUTS is auto-assigned because gradients are available
    trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True, cores=4)
###Output
_____no_output_____
###Markdown
Note that the NUTS sampler is auto-assigned because we provided gradients. PyMC3 will use Metropolis or Slicing samplers if it does not find that gradients are available. There are an impressive number of draws per second for a "block box" style computation! However, note that if the model can be represented directly by PyMC3 (like the AR(p) models mentioned above), then computation can be substantially faster.Inference is complete, but are the results any good? There are a number of ways to check. The first is to look at the posterior distributions (with lines showing the MLE values):
###Code
plt.tight_layout()
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
_ = pm.traceplot(trace,
lines=[(k, {}, [v]) for k, v in dict(res_mle.params).items()],
combined=True,
figsize=(12, 12))
###Output
_____no_output_____
###Markdown
The estimated posteriors clearly peak close to the parameters found by MLE. We can also see a summary of the estimated values:
###Code
pm.summary(trace)
###Output
_____no_output_____
###Markdown
Here $\hat{R}$ is the Gelman-Rubin statistic. It tests for lack of convergence by comparing the variance between multiple chains to the variance within each chain. If convergence has been achieved, the between-chain and within-chain variances should be identical. If $\hat{R}<1.2$ for all model parameters, we can have some confidence that convergence has been reached.Additionally, the highest posterior density interval (the gap between the two values of HPD in the table) is small for each of the variables. 6. Application of Bayesian estimates of parametersWe'll now re-instigate a version of the model but using the parameters from the Bayesian estimation, and again plot the one-step-ahead forecasts.
###Code
# Retrieve the posterior means
params = pm.summary(trace)['mean'].values
# Construct results using these posterior means as parameter values
res_bayes = mod.smooth(params)
predict_bayes = res_bayes.get_prediction()
predict_bayes_ci = predict_bayes.conf_int()
lower = predict_bayes_ci['lower CPIAUCNS']
upper = predict_bayes_ci['upper CPIAUCNS']
# Graph
fig, ax = plt.subplots(figsize=(9,4), dpi=300)
# Plot data points
inf.plot(ax=ax, style='-', label='Observed')
# Plot predictions
predict_bayes.predicted_mean.plot(ax=ax, style='r.', label='One-step-ahead forecast')
ax.fill_between(predict_bayes_ci.index, lower, upper, color='r', alpha=0.1)
ax.legend(loc='lower left')
plt.show()
###Output
_____no_output_____
###Markdown
Appendix A. Application to `UnobservedComponents` models We can reuse the `Loglike` and `Score` wrappers defined above to consider a different state space model. For example, we might want to model inflation as the combination of a random walk trend and autoregressive error term:$$\begin{aligned}y_t & = \mu_t + \varepsilon_t \\\mu_t & = \mu_{t-1} + \eta_t \\\varepsilon_t &= \phi \varepsilon_t + \zeta_t\end{aligned}$$This model can be constructed in Statsmodels with the `UnobservedComponents` class using the `rwalk` and `autoregressive` specifications. As before, we can fit the model using maximum likelihood via the `fit` method.
###Code
# Construct the model instance
mod_uc = sm.tsa.UnobservedComponents(inf, 'rwalk', autoregressive=1)
# Fit the model via maximum likelihood
res_uc_mle = mod_uc.fit()
print(res_uc_mle.summary())
###Output
_____no_output_____
###Markdown
As noted earlier, the Theano wrappers (`Loglike` and `Score`) that we created above are generic, so we can re-use essentially the same code to explore the model with Bayesian methods.
###Code
# Bayesian estimation of the unobserved-components model, re-using the
# generic Theano wrappers (`Loglike`/`Score`) defined earlier.
# Set sampling params
ndraws = 3000 # number of draws from the distribution
nburn = 600 # number of "burn-in points" (which will be discarded)
# Here we follow the same procedure as above, but now we instantiate the
# Theano wrapper `Loglike` with the UC model instance instead of the
# SARIMAX model instance
loglike_uc = Loglike(mod_uc)
with pm.Model():
    # Priors
    sigma2level = pm.InverseGamma('sigma2.level', 1, 1)
    sigma2ar = pm.InverseGamma('sigma2.ar', 1, 1)
    arL1 = pm.Uniform('ar.L1', -0.99, 0.99)
    # convert variables to tensor vectors
    theta_uc = tt.as_tensor_variable([sigma2level, sigma2ar, arL1])
    # use a DensityDist (use a lambda function to "call" the Op)
    pm.DensityDist('likelihood', lambda v: loglike_uc(v), observed={'v': theta_uc})
    # Draw samples
    trace_uc = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True, cores=4)
###Output
_____no_output_____
###Markdown
And as before we can plot the marginal posteriors. In contrast to the SARIMAX example, here the posterior modes are somewhat different from the MLE estimates.
###Code
# Plot the marginal posteriors of the UC model with the MLE estimates
# overlaid, summarize the posterior, and refit at the posterior means.
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_uc_mle.params) instead
_ = pm.traceplot(trace_uc,
                 lines=[(k, {}, [v]) for k, v in dict(res_uc_mle.params).items()],
                 combined=True,
                 figsize=(12, 12))
# tight_layout must run after the figure has been created; calling it
# beforehand acts on the previous (or no) figure and has no effect here.
plt.tight_layout()
pm.summary(trace_uc)
# Retrieve the posterior means
params = pm.summary(trace_uc)['mean'].values
# Construct results using these posterior means as parameter values
res_uc_bayes = mod_uc.smooth(params)
###Output
_____no_output_____
###Markdown
One benefit of this model is that it gives us an estimate of the underlying "level" of inflation, using the smoothed estimate of $\mu_t$, which we can access as the "level" column in the results objects' `states.smoothed` attribute. In this case, because the Bayesian posterior mean of the level's variance is larger than the MLE estimate, its estimated level is a little more volatile.
###Code
# Compare the smoothed "level" state estimates from the MLE and Bayesian fits.
# Graph
fig, ax = plt.subplots(figsize=(9,4), dpi=300)
# Plot data points
inf['CPIAUCNS'].plot(ax=ax, style='-', label='Observed data')
# Plot estimate of the level term
res_uc_mle.states.smoothed['level'].plot(ax=ax, label='Smoothed level (MLE)')
res_uc_bayes.states.smoothed['level'].plot(ax=ax, label='Smoothed level (Bayesian)')
ax.legend(loc='lower left');
###Output
_____no_output_____
###Markdown
Fast Bayesian estimation of SARIMAX models IntroductionThis notebook will show how to use fast Bayesian methods to estimate SARIMAX (Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors) models. These methods can also be parallelized across multiple cores.Here, fast methods means a version of Hamiltonian Monte Carlo called the No-U-Turn Sampler (NUTS) developed by Hoffmann and Gelman: see [Hoffman, M. D., & Gelman, A. (2014). The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. Journal of Machine Learning Research, 15(1), 1593-1623.](https://arxiv.org/abs/1111.4246). As they say, "the cost of HMC per independent sample from a target distribution of dimension $D$ is roughly $\mathcal{O}(D^{5/4})$, which stands in sharp contrast with the $\mathcal{O}(D^{2})$ cost of random-walk Metropolis". So for problems of larger dimension, the time-saving with HMC is significant. However it does require the gradient, or Jacobian, of the model to be provided.This notebook will combine the Python libraries [statsmodels](https://www.statsmodels.org/stable/index.html), which does econometrics, and [PyMC3](https://docs.pymc.io/), which is for Bayesian estimation, to perform fast Bayesian estimation of a simple SARIMAX model, in this case an ARMA(1, 1) model for US CPI.Note that, for simple models like AR(p), base PyMC3 is a quicker way to fit a model; there's an [example here](https://docs.pymc.io/notebooks/AR.html). The advantage of using statsmodels is that it gives access to methods that can solve a vast range of statespace models.The model we'll solve is given by$$y_t = \phi y_{t-1} + \varepsilon_t + \theta_1 \varepsilon_{t-1}, \qquad \varepsilon_t \sim N(0, \sigma^2)$$with 1 auto-regressive term and 1 moving average term. 
In statespace form it is written as:$$\begin{align}y_t & = \underbrace{\begin{bmatrix} 1 & \theta_1 \end{bmatrix}}_{Z} \underbrace{\begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix}}_{\alpha_t} \\ \begin{bmatrix} \alpha_{1,t+1} \\ \alpha_{2,t+1} \end{bmatrix} & = \underbrace{\begin{bmatrix} \phi & 0 \\ 1 & 0 \\ \end{bmatrix}}_{T} \begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix} + \underbrace{\begin{bmatrix} 1 \\ 0 \end{bmatrix}}_{R} \underbrace{\varepsilon_{t+1}}_{\eta_t} \\\end{align}$$The code will follow these steps:1. Import external dependencies2. Download and plot the data on US CPI3. Simple maximum likelihood estimation (MLE) as an example4. Definitions of helper functions to provide tensors to the library doing Bayesian estimation5. Bayesian estimation via NUTS6. Application to US CPI seriesFinally, Appendix A shows how to re-use the helper functions from step (4) to estimate a different state space model, `UnobservedComponents`, using the same Bayesian methods. 1. Import external dependencies
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import statsmodels.api as sm
import theano
import theano.tensor as tt
from pandas.plotting import register_matplotlib_converters
from pandas_datareader.data import DataReader
plt.style.use("seaborn")
register_matplotlib_converters()
###Output
_____no_output_____
###Markdown
2. Download and plot the data on US CPIWe'll get the data from FRED:
###Code
# Download monthly US CPI from FRED and build quarterly annualized
# inflation (400 * quarterly log-difference), then plot the series.
cpi = DataReader("CPIAUCNS", "fred", start="1971-01", end="2018-12")
cpi.index = pd.DatetimeIndex(cpi.index, freq="MS")
# Define the inflation series that we'll use in analysis
inf = np.log(cpi).resample("QS").mean().diff()[1:] * 400
inf = inf.dropna()
print(inf.head())
# Plot the series
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
ax.plot(inf.index, inf, label=r"$\Delta \log CPI$", lw=2)
ax.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
3. Fit the model with maximum likelihoodStatsmodels does all of the hard work of this for us - creating and fitting the model takes just two lines of code. The model order parameters correspond to auto-regressive, difference, and moving average orders respectively.
###Code
# ARMA(1, 1) model for inflation: order=(p, d, q) = (1, 0, 1).
# Create an SARIMAX model instance - here we use it to estimate
# the parameters via MLE using the `fit` method, but we can
# also re-use it below for the Bayesian estimation
mod = sm.tsa.statespace.SARIMAX(inf, order=(1, 0, 1))
res_mle = mod.fit(disp=False)
print(res_mle.summary())
###Output
_____no_output_____
###Markdown
It's a good fit. We can also get the series of one-step ahead predictions and plot it next to the actual data, along with a confidence band.
###Code
# One-step-ahead forecasts from the MLE fit, plotted against the data
# with a confidence band.
predict_mle = res_mle.get_prediction()
predict_mle_ci = predict_mle.conf_int()
lower = predict_mle_ci["lower CPIAUCNS"]
upper = predict_mle_ci["upper CPIAUCNS"]
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf.plot(ax=ax, style="-", label="Observed")
# Plot predictions
predict_mle.predicted_mean.plot(ax=ax, style="r.", label="One-step-ahead forecast")
ax.fill_between(predict_mle_ci.index, lower, upper, color="r", alpha=0.1)
ax.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
4. Helper functions to provide tensors to the library doing Bayesian estimationWe're almost on to the magic but there are a few preliminaries. Feel free to skip this section if you're not interested in the technical details. Technical DetailsPyMC3 is a Bayesian estimation library ("Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano") that is a) fast and b) optimized for Bayesian machine learning, for instance [Bayesian neural networks](https://docs.pymc.io/notebooks/bayesian_neural_network_advi.html). To do all of this, it is built on top of a Theano, a library that aims to evaluate tensors very efficiently and provide symbolic differentiation (necessary for any kind of deep learning). It is the symbolic differentiation that means PyMC3 can use NUTS on any problem formulated within PyMC3.We are not formulating a problem directly in PyMC3; we're using statsmodels to specify the statespace model and solve it with the Kalman filter. So we need to put the plumbing of statsmodels and PyMC3 together, which means wrapping the statsmodels SARIMAX model object in a Theano-flavored wrapper before passing information to PyMC3 for estimation.Because of this, we can't use the Theano auto-differentiation directly. Happily, statsmodels SARIMAX objects have a method to return the Jacobian evaluated at the parameter values. We'll be making use of this to provide gradients so that we can use NUTS. Defining helper functions to translate models into a PyMC3 friendly formFirst, we'll create the Theano wrappers. They will be in the form of 'Ops', operation objects, that 'perform' particular tasks. They are initialized with a statsmodels `model` instance.Although this code may look somewhat opaque, it is generic for any state space model in statsmodels.
###Code
class Loglike(tt.Op):
    """Theano Op wrapping a statsmodels state space model's log-likelihood.

    Calling the Op on a parameter vector evaluates the model's log
    likelihood; `grad` delegates to the `Score` Op so gradient-based
    samplers (e.g. NUTS) can be used.
    """

    itypes = [tt.dvector]  # expects a vector of parameter values when called
    otypes = [tt.dscalar]  # outputs a single scalar value (the log likelihood)

    def __init__(self, model):
        # `model` is any statsmodels state space model instance
        self.model = model
        self.score = Score(self.model)

    def perform(self, node, inputs, outputs):
        (theta,) = inputs  # contains the vector of parameters
        llf = self.model.loglike(theta)
        outputs[0][0] = np.array(llf)  # output the log-likelihood

    def grad(self, inputs, g):
        # the method that calculates the gradients - it actually returns the
        # vector-Jacobian product - g[0] is a vector of parameter values
        (theta,) = inputs  # our parameters
        out = [g[0] * self.score(theta)]
        return out
class Score(tt.Op):
    """Theano Op returning the score (gradient of the log-likelihood)
    of a statsmodels state space model at a given parameter vector."""

    itypes = [tt.dvector]
    otypes = [tt.dvector]

    def __init__(self, model):
        self.model = model

    def perform(self, node, inputs, outputs):
        (theta,) = inputs
        outputs[0][0] = self.model.score(theta)
###Output
_____no_output_____
###Markdown
5. Bayesian estimation with NUTSThe next step is to set the parameters for the Bayesian estimation, specify our priors, and run it.
###Code
# MCMC settings shared by the sampling cells below.
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600  # number of "burn-in points" (which will be discarded)
###Output
_____no_output_____
###Markdown
Now for the fun part! There are three parameters to estimate: $\phi$, $\theta_1$, and $\sigma$. We'll use uninformative uniform priors for the first two, and an inverse gamma for the last one. Then we'll run the inference optionally using as many computer cores as I have.
###Code
# Construct an instance of the Theano wrapper defined above, which
# will allow PyMC3 to compute the likelihood and Jacobian in a way
# that it can make use of. Here we are using the same model instance
# created earlier for MLE analysis (we could also create a new model
# instance if we preferred)
loglike = Loglike(mod)
with pm.Model() as m:
    # Priors: uninformative uniforms on the ARMA coefficients, inverse
    # gamma on the innovation variance
    arL1 = pm.Uniform("ar.L1", -0.99, 0.99)
    maL1 = pm.Uniform("ma.L1", -0.99, 0.99)
    sigma2 = pm.InverseGamma("sigma2", 2, 4)
    # convert variables to tensor vectors
    theta = tt.as_tensor_variable([arL1, maL1, sigma2])
    # use a DensityDist; the Op itself serves as the log-density function
    pm.DensityDist("likelihood", loglike, observed=theta)
    # Draw samples (NUTS is auto-assigned because gradients are available)
    trace = pm.sample(
        ndraws,
        tune=nburn,
        return_inferencedata=True,
        cores=1,
        compute_convergence_checks=False,
    )
###Output
_____no_output_____
###Markdown
Note that the NUTS sampler is auto-assigned because we provided gradients. PyMC3 will use Metropolis or Slicing samplers if it does not find that gradients are available. There are an impressive number of draws per second for a "black box" style computation! However, note that if the model can be represented directly by PyMC3 (like the AR(p) models mentioned above), then computation can be substantially faster.Inference is complete, but are the results any good? There are a number of ways to check. The first is to look at the posterior distributions (with lines showing the MLE values):
###Code
# Plot the marginal posteriors with the MLE point estimates overlaid.
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
_ = pm.plot_trace(
    trace,
    lines=[(k, {}, [v]) for k, v in dict(res_mle.params).items()],
    combined=True,
    figsize=(12, 12),
)
# tight_layout must run after the figure has been created; calling it
# beforehand acts on the previous (or no) figure and has no effect here.
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The estimated posteriors clearly peak close to the parameters found by MLE. We can also see a summary of the estimated values:
###Code
# Tabulate posterior summary statistics (means, HPD intervals, R-hat).
pm.summary(trace)
###Output
_____no_output_____
###Markdown
Here $\hat{R}$ is the Gelman-Rubin statistic. It tests for lack of convergence by comparing the variance between multiple chains to the variance within each chain. If convergence has been achieved, the between-chain and within-chain variances should be identical. If $\hat{R}<1.2$ for all model parameters, we can have some confidence that convergence has been reached.Additionally, the highest posterior density interval (the gap between the two values of HPD in the table) is small for each of the variables. 6. Application of Bayesian estimates of parametersWe'll now re-instigate a version of the model but using the parameters from the Bayesian estimation, and again plot the one-step-ahead forecasts.
###Code
# Refit the state space model at the Bayesian posterior means and plot
# the one-step-ahead forecasts with a confidence band.
# Retrieve the posterior means
params = pm.summary(trace)["mean"].values
# Construct results using these posterior means as parameter values
res_bayes = mod.smooth(params)
predict_bayes = res_bayes.get_prediction()
predict_bayes_ci = predict_bayes.conf_int()
lower = predict_bayes_ci["lower CPIAUCNS"]
upper = predict_bayes_ci["upper CPIAUCNS"]
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf.plot(ax=ax, style="-", label="Observed")
# Plot predictions
predict_bayes.predicted_mean.plot(ax=ax, style="r.", label="One-step-ahead forecast")
ax.fill_between(predict_bayes_ci.index, lower, upper, color="r", alpha=0.1)
ax.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
Appendix A. Application to `UnobservedComponents` models We can reuse the `Loglike` and `Score` wrappers defined above to consider a different state space model. For example, we might want to model inflation as the combination of a random walk trend and autoregressive error term:$$\begin{aligned}y_t & = \mu_t + \varepsilon_t \\\mu_t & = \mu_{t-1} + \eta_t \\\varepsilon_t &= \phi \varepsilon_t + \zeta_t\end{aligned}$$This model can be constructed in Statsmodels with the `UnobservedComponents` class using the `rwalk` and `autoregressive` specifications. As before, we can fit the model using maximum likelihood via the `fit` method.
###Code
# Unobserved-components specification: random-walk level plus AR(1) error.
# Construct the model instance
mod_uc = sm.tsa.UnobservedComponents(inf, "rwalk", autoregressive=1)
# Fit the model via maximum likelihood
res_uc_mle = mod_uc.fit()
print(res_uc_mle.summary())
###Output
_____no_output_____
###Markdown
As noted earlier, the Theano wrappers (`Loglike` and `Score`) that we created above are generic, so we can re-use essentially the same code to explore the model with Bayesian methods.
###Code
# Bayesian estimation of the unobserved-components model, re-using the
# generic Theano wrappers (`Loglike`/`Score`) defined earlier.
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600  # number of "burn-in points" (which will be discarded)
# Here we follow the same procedure as above, but now we instantiate the
# Theano wrapper `Loglike` with the UC model instance instead of the
# SARIMAX model instance
loglike_uc = Loglike(mod_uc)
with pm.Model():
    # Priors
    sigma2level = pm.InverseGamma("sigma2.level", 1, 1)
    sigma2ar = pm.InverseGamma("sigma2.ar", 1, 1)
    arL1 = pm.Uniform("ar.L1", -0.99, 0.99)
    # convert variables to tensor vectors
    theta_uc = tt.as_tensor_variable([sigma2level, sigma2ar, arL1])
    # use a DensityDist; the Op itself serves as the log-density function
    pm.DensityDist("likelihood", loglike_uc, observed=theta_uc)
    # Draw samples
    trace_uc = pm.sample(
        ndraws,
        tune=nburn,
        return_inferencedata=True,
        cores=1,
        compute_convergence_checks=False,
    )
###Output
_____no_output_____
###Markdown
And as before we can plot the marginal posteriors. In contrast to the SARIMAX example, here the posterior modes are somewhat different from the MLE estimates.
###Code
# Plot the marginal posteriors of the UC model with the MLE estimates
# overlaid, summarize the posterior, and refit at the posterior means.
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_uc_mle.params) instead
_ = pm.plot_trace(
    trace_uc,
    lines=[(k, {}, [v]) for k, v in dict(res_uc_mle.params).items()],
    combined=True,
    figsize=(12, 12),
)
# tight_layout must run after the figure has been created; calling it
# beforehand acts on the previous (or no) figure and has no effect here.
plt.tight_layout()
pm.summary(trace_uc)
# Retrieve the posterior means
params = pm.summary(trace_uc)["mean"].values
# Construct results using these posterior means as parameter values
res_uc_bayes = mod_uc.smooth(params)
###Output
_____no_output_____
###Markdown
One benefit of this model is that it gives us an estimate of the underling "level" of inflation, using the smoothed estimate of $\mu_t$, which we can access as the "level" column in the results objects' `states.smoothed` attribute. In this case, because the Bayesian posterior mean of the level's variance is larger than the MLE estimate, its estimated level is a little more volatile.
###Code
# Compare the smoothed "level" state estimates from the MLE and Bayesian fits.
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf["CPIAUCNS"].plot(ax=ax, style="-", label="Observed data")
# Plot estimate of the level term
res_uc_mle.states.smoothed["level"].plot(ax=ax, label="Smoothed level (MLE)")
res_uc_bayes.states.smoothed["level"].plot(ax=ax, label="Smoothed level (Bayesian)")
ax.legend(loc="lower left");
###Output
_____no_output_____
###Markdown
Fast Bayesian estimation of SARIMAX models IntroductionThis notebook will show how to use fast Bayesian methods to estimate SARIMAX (Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors) models. These methods can also be parallelised across multiple cores.Here, fast methods means a version of Hamiltonian Monte Carlo called the No-U-Turn Sampler (NUTS) developed by Hoffmann and Gelman: see [Hoffman, M. D., & Gelman, A. (2014). The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. Journal of Machine Learning Research, 15(1), 1593-1623.](https://arxiv.org/abs/1111.4246). As they say, "the cost of HMC per independent sample from a target distribution of dimension $D$ is roughly $\mathcal{O}(D^{5/4})$, which stands in sharp contrast with the $\mathcal{O}(D^{2})$ cost of random-walk Metropolis". So for problems of larger dimension, the time-saving with HMC is significant. However it does require the gradient, or Jacobian, of the model to be provided.This notebook will combine the Python libraries [statsmodels](https://www.statsmodels.org/stable/index.html), which does econometrics, and [PyMC3](https://docs.pymc.io/), which is for Bayesian estimation, to perform fast Bayesian estimation of a simple SARIMAX model, in this case an ARMA(1, 1) model for US CPI.Note that, for simple models like AR(p), base PyMC3 is a quicker way to fit a model; there's an [example here](https://docs.pymc.io/notebooks/AR.html). The advantage of using statsmodels is that it gives access to methods that can solve a vast range of statespace models.The model we'll solve is given by$$y_t = \phi y_{t-1} + \varepsilon_t + \theta_1 \varepsilon_{t-1}, \qquad \varepsilon_t \sim N(0, \sigma^2)$$with 1 auto-regressive term and 1 moving average term. 
In statespace form it is written as:$$\begin{align}y_t & = \underbrace{\begin{bmatrix} 1 & \theta_1 \end{bmatrix}}_{Z} \underbrace{\begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix}}_{\alpha_t} \\ \begin{bmatrix} \alpha_{1,t+1} \\ \alpha_{2,t+1} \end{bmatrix} & = \underbrace{\begin{bmatrix} \phi & 0 \\ 1 & 0 \\ \end{bmatrix}}_{T} \begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix} + \underbrace{\begin{bmatrix} 1 \\ 0 \end{bmatrix}}_{R} \underbrace{\varepsilon_{t+1}}_{\eta_t} \\\end{align}$$The code will follow these steps:1. Import external dependencies2. Download and plot the data on US CPI3. Simple maximum likelihood estimation (MLE) as an example4. Definitions of helper functions to provide tensors to the library doing Bayesian estimation5. Bayesian estimation via NUTS6. Application to US CPI seriesFinally, Appendix A shows how to re-use the helper functions from step (4) to estimate a different state space model, `UnobservedComponents`, using the same Bayesian methods. 1. Import external dependencies
###Code
%matplotlib inline
import theano
import theano.tensor as tt
import pymc3 as pm
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import pandas as pd
from pandas_datareader.data import DataReader
from pandas.plotting import register_matplotlib_converters
plt.style.use('seaborn')
register_matplotlib_converters()
###Output
_____no_output_____
###Markdown
2. Download and plot the data on US CPIWe'll get the data from FRED:
###Code
# Download monthly US CPI from FRED and build quarterly annualized
# inflation (400 * quarterly log-difference), then plot the series.
cpi = DataReader('CPIAUCNS', 'fred', start='1971-01', end='2018-12')
cpi.index = pd.DatetimeIndex(cpi.index, freq='MS')
# Define the inflation series that we'll use in analysis
inf = np.log(cpi).resample('QS').mean().diff()[1:] * 400
print(inf.head())
# Plot the series
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
ax.plot(inf.index, inf, label=r'$\Delta \log CPI$', lw=2)
ax.legend(loc='lower left')
plt.show()
###Output
_____no_output_____
###Markdown
3. Fit the model with maximum likelihoodStatsmodels does all of the hard work of this for us - creating and fitting the model takes just two lines of code. The model order parameters correspond to auto-regressive, difference, and moving average orders respectively.
###Code
# ARMA(1, 1) model for inflation: order=(p, d, q) = (1, 0, 1).
# Create an SARIMAX model instance - here we use it to estimate
# the parameters via MLE using the `fit` method, but we can
# also re-use it below for the Bayesian estimation
mod = sm.tsa.statespace.SARIMAX(inf, order=(1, 0, 1))
res_mle = mod.fit(disp=False)
print(res_mle.summary())
###Output
_____no_output_____
###Markdown
It's a good fit. We can also get the series of one-step ahead predictions and plot it next to the actual data, along with a confidence band.
###Code
# One-step-ahead forecasts from the MLE fit, plotted against the data
# with a confidence band.
predict_mle = res_mle.get_prediction()
predict_mle_ci = predict_mle.conf_int()
lower = predict_mle_ci['lower CPIAUCNS']
upper = predict_mle_ci['upper CPIAUCNS']
# Graph
fig, ax = plt.subplots(figsize=(9,4), dpi=300)
# Plot data points
inf.plot(ax=ax, style='-', label='Observed')
# Plot predictions
predict_mle.predicted_mean.plot(ax=ax, style='r.', label='One-step-ahead forecast')
ax.fill_between(predict_mle_ci.index, lower, upper, color='r', alpha=0.1)
ax.legend(loc='lower left')
plt.show()
###Output
_____no_output_____
###Markdown
4. Helper functions to provide tensors to the library doing Bayesian estimationWe're almost on to the magic but there are a few preliminaries. Feel free to skip this section if you're not interested in the technical details.--------- Technical sectionPyMC3 is a Bayesian estimation library ("Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano") that is a) fast and b) optimised for Bayesian machine learning, for instance [Bayesian neural networks](https://docs.pymc.io/notebooks/bayesian_neural_network_advi.html). To do all of this, it is built on top of a Theano, a library that aims to evaluate tensors very efficiently and provide symbolic differentiation (necessary for any kind of deep learning). It is the symbolic differentiation that means PyMC3 can use NUTS on any problem formulated within PyMC3.We are not formulating a problem directly in PyMC3; we're using statsmodels to specify the statespace model and solve it with the Kalman filter. So we need to put the plumbing of statsmodels and PyMC3 together, which means wrapping the statsmodels SARIMAX model object in a Theano-flavoured wrapper before passing information to PyMC3 for estimation.Because of this, we can't use the Theano auto-differentiation directly. Happily, statsmodels SARIMAX objects have a method to return the Jacobian evaluated at the parameter values. We'll be making use of this to provide gradients so that we can use NUTS. Defining helper functions to translate models into a PyMC3 friendly formFirst, we'll create the Theano wrappers. They will be in the form of 'Ops', operation objects, that 'perform' particular tasks. They are initialised with a statsmodels `model` instance.Although this code may look somewhat opaque, it is generic for any state space model in statsmodels.
###Code
class Loglike(tt.Op):
    """Theano Op wrapping a statsmodels state space model's log-likelihood.

    Calling the Op on a parameter vector evaluates the model's log
    likelihood; `grad` delegates to the `Score` Op so gradient-based
    samplers (e.g. NUTS) can be used.
    """

    itypes = [tt.dvector]  # expects a vector of parameter values when called
    otypes = [tt.dscalar]  # outputs a single scalar value (the log likelihood)

    def __init__(self, model):
        # `model` is any statsmodels state space model instance
        self.model = model
        self.score = Score(self.model)

    def perform(self, node, inputs, outputs):
        theta, = inputs  # contains the vector of parameters
        llf = self.model.loglike(theta)
        outputs[0][0] = np.array(llf)  # output the log-likelihood

    def grad(self, inputs, g):
        # the method that calculates the gradients - it actually returns the
        # vector-Jacobian product - g[0] is a vector of parameter values
        theta, = inputs  # our parameters
        out = [g[0] * self.score(theta)]
        return out
class Score(tt.Op):
    """Theano Op returning the score (gradient of the log-likelihood)
    of a statsmodels state space model at a given parameter vector."""

    itypes = [tt.dvector]
    otypes = [tt.dvector]

    def __init__(self, model):
        self.model = model

    def perform(self, node, inputs, outputs):
        theta, = inputs
        outputs[0][0] = self.model.score(theta)
###Output
_____no_output_____
###Markdown
End of technical section--------- 5. Bayesian estimation with NUTSThe next step is to set the parameters for the Bayesian estimation, specify our priors, and run it.
###Code
# MCMC settings shared by the sampling cells below.
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600  # number of "burn-in points" (which will be discarded)
###Output
_____no_output_____
###Markdown
Now for the fun part! There are three parameters to estimate: $\phi$, $\theta_1$, and $\sigma$. We'll use uninformative uniform priors for the first two, and an inverse gamma for the last one. Then we'll run the inference optionally using as many computer cores as I have.
###Code
# Construct an instance of the Theano wrapper defined above, which
# will allow PyMC3 to compute the likelihood and Jacobian in a way
# that it can make use of. Here we are using the same model instance
# created earlier for MLE analysis (we could also create a new model
# instance if we preferred)
loglike = Loglike(mod)
with pm.Model():
    # Priors: uninformative uniforms on the ARMA coefficients, inverse
    # gamma on the innovation variance
    arL1 = pm.Uniform('ar.L1', -0.99, 0.99)
    maL1 = pm.Uniform('ma.L1', -0.99, 0.99)
    sigma2 = pm.InverseGamma('sigma2', 2, 4)
    # convert variables to tensor vectors
    theta = tt.as_tensor_variable([arL1, maL1, sigma2])
    # use a DensityDist (use a lambda function to "call" the Op)
    pm.DensityDist('likelihood', lambda v: loglike(v), observed={'v': theta})
    # Draw samples (NUTS is auto-assigned because gradients are available)
    trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True, cores=4)
###Output
_____no_output_____
###Markdown
Note that the NUTS sampler is auto-assigned because we provided gradients. PyMC3 will use Metropolis or Slicing samplers if it doesn't find that gradients are available. There are an impressive number of draws per second for a "block box" style computation! However, note that if the model can be represented directly by PyMC3 (like the AR(p) models mentioned above), then computation can be substantially faster.Inference is complete, but are the results any good? There are a number of ways to check. The first is to look at the posterior distributions (with lines showing the MLE values):
###Code
# Plot the marginal posteriors with the MLE point estimates overlaid.
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
_ = pm.traceplot(trace,
                 lines=[(k, {}, [v]) for k, v in dict(res_mle.params).items()],
                 combined=True,
                 figsize=(12, 12))
# tight_layout must run after the figure has been created; calling it
# beforehand acts on the previous (or no) figure and has no effect here.
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The estimated posteriors clearly peak close to the parameters found by MLE. We can also see a summary of the estimated values:
###Code
# Tabulate posterior summary statistics (means, HPD intervals, R-hat).
pm.summary(trace)
###Output
_____no_output_____
###Markdown
Here Rhat, or $\hat{R}$, is the Gelman-Rubin statistic. It tests for lack of convergence by comparing the variance between multiple chains to the variance within each chain. If convergence has been achieved, the between-chain and within-chain variances should be identical. If $\hat{R}<1.2$ for all model parameters, we can have some confidence that convergence has been reached.Additionally, the highest posterior density interval (the gap between the two values of HPD in the table) is small for each of the variables. 6. Application of Bayesian estimates of parametersWe'll now re-instigate a version of the model but using the parameters from the Bayesian estimation, and again plot the one-step-ahead forecasts.
###Code
# Refit the state space model at the Bayesian posterior means and plot
# the one-step-ahead forecasts with a confidence band.
# Retrieve the posterior means
params = pm.summary(trace)['mean'].values
# Construct results using these posterior means as parameter values
res_bayes = mod.smooth(params)
predict_bayes = res_bayes.get_prediction()
predict_bayes_ci = predict_bayes.conf_int()
lower = predict_bayes_ci['lower CPIAUCNS']
upper = predict_bayes_ci['upper CPIAUCNS']
# Graph
fig, ax = plt.subplots(figsize=(9,4), dpi=300)
# Plot data points
inf.plot(ax=ax, style='-', label='Observed')
# Plot predictions
predict_bayes.predicted_mean.plot(ax=ax, style='r.', label='One-step-ahead forecast')
ax.fill_between(predict_bayes_ci.index, lower, upper, color='r', alpha=0.1)
ax.legend(loc='lower left')
plt.show()
###Output
_____no_output_____
###Markdown
Appendix A. Application to `UnobservedComponents` models We can reuse the `Loglike` and `Score` wrappers defined above to consider a different state space model. For example, we might want to model inflation as the combination of a random walk trend and autoregressive error term:$$\begin{aligned}y_t & = \mu_t + \varepsilon_t \\\mu_t & = \mu_{t-1} + \eta_t \\\varepsilon_t &= \phi \varepsilon_t + \zeta_t\end{aligned}$$This model can be constructed in Statsmodels with the `UnobservedComponents` class using the `rwalk` and `autoregressive` specifications. As before, we can fit the model using maximum likelihood via the `fit` method.
###Code
# Unobserved-components specification: random-walk level plus AR(1) error.
# Construct the model instance
mod_uc = sm.tsa.UnobservedComponents(inf, 'rwalk', autoregressive=1)
# Fit the model via maximum likelihood
res_uc_mle = mod_uc.fit()
print(res_uc_mle.summary())
###Output
_____no_output_____
###Markdown
As noted earlier, the Theano wrappers (`Loglike` and `Score`) that we created above are generic, so we can re-use essentially the same code to explore the model with Bayesian methods.
###Code
# Bayesian estimation of the unobserved-components model, re-using the
# generic Theano wrappers (`Loglike`/`Score`) defined earlier.
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600  # number of "burn-in points" (which will be discarded)
# Here we follow the same procedure as above, but now we instantiate the
# Theano wrapper `Loglike` with the UC model instance instead of the
# SARIMAX model instance
loglike_uc = Loglike(mod_uc)
with pm.Model():
    # Priors
    sigma2level = pm.InverseGamma('sigma2.level', 1, 1)
    sigma2ar = pm.InverseGamma('sigma2.ar', 1, 1)
    arL1 = pm.Uniform('ar.L1', -0.99, 0.99)
    # convert variables to tensor vectors
    theta_uc = tt.as_tensor_variable([sigma2level, sigma2ar, arL1])
    # use a DensityDist (use a lambda function to "call" the Op)
    pm.DensityDist('likelihood', lambda v: loglike_uc(v), observed={'v': theta_uc})
    # Draw samples
    trace_uc = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True, cores=4)
###Output
_____no_output_____
###Markdown
And as before we can plot the marginal posteriors. In contrast to the SARIMAX example, here the posterior modes are somewhat different from the MLE estimates.
###Code
# Plot the marginal posteriors of the UC model with the MLE estimates
# overlaid, summarize the posterior, and refit at the posterior means.
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_uc_mle.params) instead
_ = pm.traceplot(trace_uc,
                 lines=[(k, {}, [v]) for k, v in dict(res_uc_mle.params).items()],
                 combined=True,
                 figsize=(12, 12))
# tight_layout must run after the figure has been created; calling it
# beforehand acts on the previous (or no) figure and has no effect here.
plt.tight_layout()
pm.summary(trace_uc)
# Retrieve the posterior means
params = pm.summary(trace_uc)['mean'].values
# Construct results using these posterior means as parameter values
res_uc_bayes = mod_uc.smooth(params)
###Output
_____no_output_____
###Markdown
One benefit of this model is that it gives us an estimate of the underling "level" of inflation, using the smoothed estimate of $\mu_t$, which we can access as the "level" column in the results objects' `states.smoothed` attribute. In this case, because the Bayesian posterior mean of the level's variance is larger than the MLE estimate, its estimated level is a little more volatile.
###Code
# Compare the smoothed "level" state estimates from the MLE and Bayesian fits.
# Graph
fig, ax = plt.subplots(figsize=(9,4), dpi=300)
# Plot data points
inf['CPIAUCNS'].plot(ax=ax, style='-', label='Observed data')
# Plot estimate of the level term
res_uc_mle.states.smoothed['level'].plot(ax=ax, label='Smoothed level (MLE)')
res_uc_bayes.states.smoothed['level'].plot(ax=ax, label='Smoothed level (Bayesian)')
ax.legend(loc='lower left');
###Output
_____no_output_____
###Markdown
Fast Bayesian estimation of SARIMAX models IntroductionThis notebook will show how to use fast Bayesian methods to estimate SARIMAX (Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors) models. These methods can also be parallelized across multiple cores.Here, fast methods means a version of Hamiltonian Monte Carlo called the No-U-Turn Sampler (NUTS) developed by Hoffmann and Gelman: see [Hoffman, M. D., & Gelman, A. (2014). The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. Journal of Machine Learning Research, 15(1), 1593-1623.](https://arxiv.org/abs/1111.4246). As they say, "the cost of HMC per independent sample from a target distribution of dimension $D$ is roughly $\mathcal{O}(D^{5/4})$, which stands in sharp contrast with the $\mathcal{O}(D^{2})$ cost of random-walk Metropolis". So for problems of larger dimension, the time-saving with HMC is significant. However it does require the gradient, or Jacobian, of the model to be provided.This notebook will combine the Python libraries [statsmodels](https://www.statsmodels.org/stable/index.html), which does econometrics, and [PyMC3](https://docs.pymc.io/), which is for Bayesian estimation, to perform fast Bayesian estimation of a simple SARIMAX model, in this case an ARMA(1, 1) model for US CPI.Note that, for simple models like AR(p), base PyMC3 is a quicker way to fit a model; there's an [example here](https://docs.pymc.io/notebooks/AR.html). The advantage of using statsmodels is that it gives access to methods that can solve a vast range of statespace models.The model we'll solve is given by$$y_t = \phi y_{t-1} + \varepsilon_t + \theta_1 \varepsilon_{t-1}, \qquad \varepsilon_t \sim N(0, \sigma^2)$$with 1 auto-regressive term and 1 moving average term. 
In statespace form it is written as:$$\begin{align}y_t & = \underbrace{\begin{bmatrix} 1 & \theta_1 \end{bmatrix}}_{Z} \underbrace{\begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix}}_{\alpha_t} \\ \begin{bmatrix} \alpha_{1,t+1} \\ \alpha_{2,t+1} \end{bmatrix} & = \underbrace{\begin{bmatrix} \phi & 0 \\ 1 & 0 \\ \end{bmatrix}}_{T} \begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t} \end{bmatrix} + \underbrace{\begin{bmatrix} 1 \\ 0 \end{bmatrix}}_{R} \underbrace{\varepsilon_{t+1}}_{\eta_t} \\\end{align}$$The code will follow these steps:1. Import external dependencies2. Download and plot the data on US CPI3. Simple maximum likelihood estimation (MLE) as an example4. Definitions of helper functions to provide tensors to the library doing Bayesian estimation5. Bayesian estimation via NUTS6. Application to US CPI seriesFinally, Appendix A shows how to re-use the helper functions from step (4) to estimate a different state space model, `UnobservedComponents`, using the same Bayesian methods. 1. Import external dependencies
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import statsmodels.api as sm
import theano
import theano.tensor as tt
from pandas.plotting import register_matplotlib_converters
from pandas_datareader.data import DataReader
# Use seaborn's styling for every plot in this notebook
plt.style.use("seaborn")
# Register pandas datetime converters so date-indexed series plot cleanly
register_matplotlib_converters()
###Output
_____no_output_____
###Markdown
2. Download and plot the data on US CPIWe'll get the data from FRED:
###Code
# Download monthly, non-seasonally-adjusted US CPI (series CPIAUCNS) from FRED
cpi = DataReader("CPIAUCNS", "fred", start="1971-01", end="2018-12")
# Attach an explicit month-start frequency so the resample below is well defined
cpi.index = pd.DatetimeIndex(cpi.index, freq="MS")
# Define the inflation series that we'll use in analysis:
# annualized quarterly log-difference of CPI, in percent (hence the x400)
inf = np.log(cpi).resample("QS").mean().diff()[1:] * 400
inf = inf.dropna()  # safety net; the leading NaN from diff() is already removed by [1:]
print(inf.head())
# Plot the series
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
ax.plot(inf.index, inf, label=r"$\Delta \log CPI$", lw=2)
ax.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
3. Fit the model with maximum likelihoodStatsmodels does all of the hard work of this for us - creating and fitting the model takes just two lines of code. The model order parameters correspond to auto-regressive, difference, and moving average orders respectively.
###Code
# Create an SARIMAX model instance - here we use it to estimate
# the parameters via MLE using the `fit` method, but we can
# also re-use it below for the Bayesian estimation.
# order=(p, d, q)=(1, 0, 1) is an ARMA(1, 1) model for inflation.
mod = sm.tsa.statespace.SARIMAX(inf, order=(1, 0, 1))
res_mle = mod.fit(disp=False)  # disp=False suppresses optimizer output
print(res_mle.summary())
###Output
_____no_output_____
###Markdown
It's a good fit. We can also get the series of one-step ahead predictions and plot it next to the actual data, along with a confidence band.
###Code
# One-step-ahead (in-sample) predictions from the MLE results
predict_mle = res_mle.get_prediction()
predict_mle_ci = predict_mle.conf_int()
# Confidence-band columns are named after the endogenous series
lower = predict_mle_ci["lower CPIAUCNS"]
upper = predict_mle_ci["upper CPIAUCNS"]
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf.plot(ax=ax, style="-", label="Observed")
# Plot predictions
predict_mle.predicted_mean.plot(ax=ax, style="r.", label="One-step-ahead forecast")
ax.fill_between(predict_mle_ci.index, lower, upper, color="r", alpha=0.1)
ax.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
4. Helper functions to provide tensors to the library doing Bayesian estimationWe're almost on to the magic but there are a few preliminaries. Feel free to skip this section if you're not interested in the technical details. Technical DetailsPyMC3 is a Bayesian estimation library ("Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano") that is a) fast and b) optimized for Bayesian machine learning, for instance [Bayesian neural networks](https://docs.pymc.io/notebooks/bayesian_neural_network_advi.html). To do all of this, it is built on top of a Theano, a library that aims to evaluate tensors very efficiently and provide symbolic differentiation (necessary for any kind of deep learning). It is the symbolic differentiation that means PyMC3 can use NUTS on any problem formulated within PyMC3.We are not formulating a problem directly in PyMC3; we're using statsmodels to specify the statespace model and solve it with the Kalman filter. So we need to put the plumbing of statsmodels and PyMC3 together, which means wrapping the statsmodels SARIMAX model object in a Theano-flavored wrapper before passing information to PyMC3 for estimation.Because of this, we can't use the Theano auto-differentiation directly. Happily, statsmodels SARIMAX objects have a method to return the Jacobian evaluated at the parameter values. We'll be making use of this to provide gradients so that we can use NUTS. Defining helper functions to translate models into a PyMC3 friendly formFirst, we'll create the Theano wrappers. They will be in the form of 'Ops', operation objects, that 'perform' particular tasks. They are initialized with a statsmodels `model` instance.Although this code may look somewhat opaque, it is generic for any state space model in statsmodels.
###Code
class Loglike(tt.Op):
    """Theano Op exposing a statsmodels log-likelihood to PyMC3.

    Called with a parameter vector, it evaluates the wrapped state space
    model's log-likelihood at those parameters.  Gradients are supplied
    through the companion `Score` Op, which lets PyMC3 select NUTS.
    """

    itypes = [tt.dvector]  # expects a vector of parameter values when called
    otypes = [tt.dscalar]  # outputs a single scalar value (the log likelihood)

    def __init__(self, model):
        self.model = model
        self.score = Score(self.model)

    def perform(self, node, inputs, outputs):
        # inputs holds a single element: the parameter vector
        (params,) = inputs
        outputs[0][0] = np.array(self.model.loglike(params))

    def grad(self, inputs, g):
        # Return the vector-Jacobian product: the upstream gradient g[0]
        # times the score (gradient of the log-likelihood).
        (params,) = inputs
        return [g[0] * self.score(params)]
class Score(tt.Op):
    """Theano Op returning the score of a statsmodels model.

    The score (gradient of the log-likelihood with respect to the
    parameters) is what `Loglike.grad` uses so NUTS can be applied.
    """
    itypes = [tt.dvector]  # parameter vector
    otypes = [tt.dvector]  # gradient of the log-likelihood w.r.t. each parameter
    def __init__(self, model):
        self.model = model
    def perform(self, node, inputs, outputs):
        (theta,) = inputs
        # statsmodels' `score` evaluates the Jacobian of the log-likelihood at theta
        outputs[0][0] = self.model.score(theta)
###Output
_____no_output_____
###Markdown
5. Bayesian estimation with NUTSThe next step is to set the parameters for the Bayesian estimation, specify our priors, and run it.
###Code
# Set sampling params
ndraws = 3000 # number of posterior draws kept from the distribution
nburn = 600 # number of "burn-in"/tuning points (which will be discarded)
###Output
_____no_output_____
###Markdown
Now for the fun part! There are three parameters to estimate: $\phi$, $\theta_1$, and $\sigma$. We'll use uninformative uniform priors for the first two, and an inverse gamma for the last one. Then we'll run the inference optionally using as many computer cores as I have.
###Code
# Construct an instance of the Theano wrapper defined above, which
# will allow PyMC3 to compute the likelihood and Jacobian in a way
# that it can make use of. Here we are using the same model instance
# created earlier for MLE analysis (we could also create a new model
# instance if we preferred)
loglike = Loglike(mod)
with pm.Model() as m:
    # Priors
    arL1 = pm.Uniform("ar.L1", -0.99, 0.99)
    maL1 = pm.Uniform("ma.L1", -0.99, 0.99)
    sigma2 = pm.InverseGamma("sigma2", 2, 4)
    # convert variables to tensor vectors
    theta = tt.as_tensor_variable([arL1, maL1, sigma2])
    # use a DensityDist: the Loglike Op itself serves as the log-density
    # function and is "called" on theta via observed=
    pm.DensityDist("likelihood", loglike, observed=theta)
    # Draw samples -- NUTS is auto-assigned because gradients are available
    trace = pm.sample(
        ndraws,
        tune=nburn,
        return_inferencedata=True,
        cores=1,
        compute_convergence_checks=False,
    )
###Output
_____no_output_____
###Markdown
Note that the NUTS sampler is auto-assigned because we provided gradients. PyMC3 will use Metropolis or Slicing samplers if it does not find that gradients are available. There are an impressive number of draws per second for a "block box" style computation! However, note that if the model can be represented directly by PyMC3 (like the AR(p) models mentioned above), then computation can be substantially faster.Inference is complete, but are the results any good? There are a number of ways to check. The first is to look at the posterior distributions (with lines showing the MLE values):
###Code
# Plot the marginal posteriors with vertical lines at the MLE estimates
# for comparison.
# Note: the syntax here for the `lines` argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
_ = pm.plot_trace(
    trace,
    lines=[(k, {}, [v]) for k, v in dict(res_mle.params).items()],
    combined=True,
    figsize=(12, 12),
)
# Apply tight_layout *after* plotting: called before (as originally written),
# it acts on the current -- empty -- figure and has no effect on the trace plot.
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The estimated posteriors clearly peak close to the parameters found by MLE. We can also see a summary of the estimated values:
###Code
# Tabulate posterior means, credible intervals, effective sample sizes
# and the Gelman-Rubin statistic (r_hat) for each parameter
pm.summary(trace)
###Output
_____no_output_____
###Markdown
Here $\hat{R}$ is the Gelman-Rubin statistic. It tests for lack of convergence by comparing the variance between multiple chains to the variance within each chain. If convergence has been achieved, the between-chain and within-chain variances should be identical. If $\hat{R}<1.2$ for all model parameters, we can have some confidence that convergence has been reached. Additionally, the highest posterior density interval (the gap between the two values of HPD in the table) is small for each of the variables. 6. Application of Bayesian estimates of parameters We'll now re-instantiate a version of the model using the parameters from the Bayesian estimation, and again plot the one-step-ahead forecasts.
###Code
# Retrieve the posterior means
params = pm.summary(trace)["mean"].values
# Construct results using these posterior means as parameter values
# (`smooth` runs the Kalman filter/smoother at fixed parameters)
res_bayes = mod.smooth(params)
# One-step-ahead predictions under the Bayesian parameter estimates
predict_bayes = res_bayes.get_prediction()
predict_bayes_ci = predict_bayes.conf_int()
lower = predict_bayes_ci["lower CPIAUCNS"]
upper = predict_bayes_ci["upper CPIAUCNS"]
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf.plot(ax=ax, style="-", label="Observed")
# Plot predictions
predict_bayes.predicted_mean.plot(ax=ax, style="r.", label="One-step-ahead forecast")
ax.fill_between(predict_bayes_ci.index, lower, upper, color="r", alpha=0.1)
ax.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
Appendix A. Application to `UnobservedComponents` models We can reuse the `Loglike` and `Score` wrappers defined above to consider a different state space model. For example, we might want to model inflation as the combination of a random walk trend and autoregressive error term:$$\begin{aligned}y_t & = \mu_t + \varepsilon_t \\\mu_t & = \mu_{t-1} + \eta_t \\\varepsilon_t &= \phi \varepsilon_t + \zeta_t\end{aligned}$$This model can be constructed in Statsmodels with the `UnobservedComponents` class using the `rwalk` and `autoregressive` specifications. As before, we can fit the model using maximum likelihood via the `fit` method.
###Code
# Construct the model instance: random-walk level plus an AR(1) error term
mod_uc = sm.tsa.UnobservedComponents(inf, "rwalk", autoregressive=1)
# Fit the model via maximum likelihood
res_uc_mle = mod_uc.fit()
print(res_uc_mle.summary())
###Output
_____no_output_____
###Markdown
As noted earlier, the Theano wrappers (`Loglike` and `Score`) that we created above are generic, so we can re-use essentially the same code to explore the model with Bayesian methods.
###Code
# Set sampling params
ndraws = 3000 # number of posterior draws kept from the distribution
nburn = 600 # number of "burn-in"/tuning points (which will be discarded)
# Here we follow the same procedure as above, but now we instantiate the
# Theano wrapper `Loglike` with the UC model instance instead of the
# SARIMAX model instance
loglike_uc = Loglike(mod_uc)
with pm.Model():
    # Priors
    sigma2level = pm.InverseGamma("sigma2.level", 1, 1)
    sigma2ar = pm.InverseGamma("sigma2.ar", 1, 1)
    arL1 = pm.Uniform("ar.L1", -0.99, 0.99)
    # convert variables to tensor vectors
    theta_uc = tt.as_tensor_variable([sigma2level, sigma2ar, arL1])
    # use a DensityDist: the Loglike Op serves as the log-density function,
    # "called" on theta_uc via observed=
    pm.DensityDist("likelihood", loglike_uc, observed=theta_uc)
    # Draw samples
    trace_uc = pm.sample(
        ndraws,
        tune=nburn,
        return_inferencedata=True,
        cores=1,
        compute_convergence_checks=False,
    )
###Output
_____no_output_____
###Markdown
And as before we can plot the marginal posteriors. In contrast to the SARIMAX example, here the posterior modes are somewhat different from the MLE estimates.
###Code
# Plot the marginal posteriors with vertical lines at the MLE estimates.
# Note: the syntax here for the `lines` argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
_ = pm.plot_trace(
    trace_uc,
    lines=[(k, {}, [v]) for k, v in dict(res_uc_mle.params).items()],
    combined=True,
    figsize=(12, 12),
)
# Apply tight_layout *after* plotting: called before (as originally written),
# it acts on the current -- empty -- figure and has no effect on the trace plot.
plt.tight_layout()
# Display the posterior summary table
pm.summary(trace_uc)
# Retrieve the posterior means (note: recomputes the summary; kept for
# parity with the displayed table above)
params = pm.summary(trace_uc)["mean"].values
# Construct results using these posterior means as parameter values
res_uc_bayes = mod_uc.smooth(params)
###Output
_____no_output_____
###Markdown
One benefit of this model is that it gives us an estimate of the underling "level" of inflation, using the smoothed estimate of $\mu_t$, which we can access as the "level" column in the results objects' `states.smoothed` attribute. In this case, because the Bayesian posterior mean of the level's variance is larger than the MLE estimate, its estimated level is a little more volatile.
###Code
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf["CPIAUCNS"].plot(ax=ax, style="-", label="Observed data")
# Plot estimate of the level term
res_uc_mle.states.smoothed["level"].plot(ax=ax, label="Smoothed level (MLE)")
res_uc_bayes.states.smoothed["level"].plot(ax=ax, label="Smoothed level (Bayesian)")
ax.legend(loc="lower left");
###Output
_____no_output_____ |
IBM Capstone Project - 2nd Week - Car accident severity.ipynb | ###Markdown
IBM Capstone Project - 2nd Week - Car accident severity ___ Links to additional materials (report, EDA, notebooks): [Report](https://docs.google.com/document/d/1_MLOVZuu2qlb-eaQAgipBb0xhjxAYsckmJyfeXNwPWk/edit?usp=sharing) [The 1st Week notebook](https://github.com/kolasdevpy/CapstoneProjectIBM/blob/master/IBM%20Capstone%20Project%20%20-%201st%20Week%20-%20Car%20accident%20severity.ipynb) [The 2nd Week notebook](https://github.com/kolasdevpy/CapstoneProjectIBM/blob/master/IBM%20Capstone%20Project%20-%202nd%20Week%20-%20Car%20accident%20severity.ipynb) [Exploratory Data Analysis](https://docs.google.com/presentation/d/1Y8D7zr4rDytsLZ_8Om-0SyzZ9B6L7b6XOeh3TsbACkY/edit?usp=sharing) ___ The 2nd Week Tasks In this week, you will continue working on your capstone project. Please remember by the end of this week, you will need to submit the following:1)A full report consisting of all of the following components (15 marks):- Introduction where you discuss the business problem and who would be interested in this project. (Done in the 1st Week work). - Data where you describe the data that will be used to solve the problem and the source of the data. (Done in the 1st Week work). - Methodology section which represents the main component of the report where you discuss and describe any exploratory data analysis that you did, any inferential statistical testing that you performed, if any, and what machine learnings were used and why. The 2nd Week. - Results section where you discuss the results. The 2nd Week. - Discussion section where you discuss any observations you noted and any recommendations you can make based on the results. The 2nd Week. - Conclusion section where you conclude the report. The 2nd Week. 2) A link to your Notebook on your Github repository pushed showing your code. (15 marks) The 2nd Week.3) Your choice of a presentation or blogpost. (10 marks) The 2nd Week. 
___ Remember the introduction This data allows us to build a model for predicting whether car crash participants (car drivers or pedestrians) require an increased amount of medical care or not. The data covers Seattle, WA. The primary focus of the model is to prioritize help for different points of the city for a better balance of injury and help amount. The more injury - the more help. For example, the Seattle Police department has a limited number of helicopters for medical purposes and the prosecution of criminals. In the case of simultaneously happening crashes, the model could possibly help to choose the way of dealing with the problems, predicting the amount of damage in all cases. Reading data We made a complete description, pre-processing and evaluation of the data in [the 1st week of the project](https://github.com/kolasdevpy/CapstoneProjectIBM/blob/master/Capstone%20Project%20%20-%201st%20Week%20-%20Car%20accident%20severity.ipynb).
###Code
import pandas as pd
import numpy as np
import os
# Load the cleaned accident dataset produced in the week-1 notebook
path = os.path.expanduser("~/Documents/useful_df_car_accident_severity.csv")
df = pd.read_csv (path)
# Quick sanity checks: sample rows, column dtypes, overall shape
df.head(2)
df.dtypes
df.shape
# Full candidate feature set (location, collision details, conditions,
# timing); the target is the accident severity code
X = df[['X', 'Y',
        'ADDRTYPE', 'INTKEY',
        'COLLISIONTYPE', 'PERSONCOUNT',
        'PEDCOUNT', 'PEDCYLCOUNT', 'VEHCOUNT',
        'JUNCTIONTYPE', 'SDOT_COLCODE', 'INATTENTIONIND',
        'UNDERINFL', 'WEATHER', 'ROADCOND', 'LIGHTCOND',
        'PEDROWNOTGRNT', 'SPEEDING',
        'SEGLANEKEY', 'CROSSWALKKEY', 'HITPARKEDCAR',
        'year', 'month', 'day', 'hour', 'minute', 'weekday_name']]
y = df['SEVERITYCODE']
###Output
_____no_output_____
###Markdown
Methodology section To do this, we need to build a model that can determine the need for a medical helicopter in an accident. It's a Discrete value. Therefore we have a Classification problem. Now we will try to understand what data we can get as quickly as possible or automatically from services, soft or witness. Next, we group their by sources. > 'SEVERITYCODE' - target > 1. Police get data from witness > 'X' > 'Y' > 'COLLISIONTYPE' > 'PERSONCOUNT' > 'PEDCOUNT' > 'PEDCYLCOUNT' > 'VEHCOUNT' > 'ADDRTYPE' > 'HITPARKEDCAR' > 2. Police can get data automatically by post-processing coordinates 'X' and 'Y' > 'INTKEY' > 'JUNCTIONTYPE' > 'SEGLANEKEY' > 'CROSSWALKKEY' > 3. Police can get data automatically by services > 'WEATHER' > 'ROADCOND' > 'LIGHTCOND' > 'year' > 'month' > 'day' > 'hour' > 'minute' > 'weekday_name' > 4. Police can get data automatically by post-processing > 'SDOT_COLCODE' > 'INATTENTIONIND' > 'UNDERINFL' > 'PEDROWNOTGRNT'> 'SPEEDING' Obviously, that the information in Section 4 may be collected for a long time after the accident. Which makes us to understand it is not relevant information, so we should drop these features. > 'SEVERITYCODE' - target > 1. Police get data from witness > 'X' > 'Y' > 'COLLISIONTYPE' > 'PERSONCOUNT' > 'PEDCOUNT' > 'PEDCYLCOUNT' > 'VEHCOUNT' > 'ADDRTYPE' > 'HITPARKEDCAR' > 2. Police can get data automatically by post-processing coordinates 'X' and 'Y' > 'INTKEY' > 'JUNCTIONTYPE' > 'SEGLANEKEY' > 'CROSSWALKKEY' > 3. Police can get data automatically by services > 'WEATHER' > 'ROADCOND' > 'LIGHTCOND' > 'year' > 'month' > 'day' > 'hour' > 'minute' > 'weekday_name' Modeling and Evaluations DecisionTreeClassifier
###Code
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics

# Features grouped by how quickly the police can obtain them:
# witness-reported (1), derived from coordinates (2), from services (3)
X = df[['X', 'Y', 'COLLISIONTYPE', 'PERSONCOUNT', 'PEDCOUNT', 'PEDCYLCOUNT', 'VEHCOUNT', 'ADDRTYPE', 'HITPARKEDCAR', # 1 Section
        'INTKEY', 'JUNCTIONTYPE', 'SEGLANEKEY', 'CROSSWALKKEY', # 2 Section
        'WEATHER', 'ROADCOND', 'LIGHTCOND', 'year', 'month', 'day', 'hour', 'minute', 'weekday_name']] # 3 Section
y = df['SEVERITYCODE']

# Hold out 10% of the data for evaluation
X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.1, random_state=3)

# Fit an entropy-based decision tree and score it on the held-out set
dt_clf = DecisionTreeClassifier(criterion="entropy")
dt_clf.fit(X_trainset, y_trainset)
dt_pred = dt_clf.predict(X_testset)
accuracy = metrics.accuracy_score(y_testset, dt_pred)
print(f"DecisionTrees's Accuracy:")
print(f"{accuracy}")
###Output
DecisionTrees's Accuracy:
0.6837963451991127
###Markdown
LGBMClassifier
###Code
import lightgbm as lgb
# Same feature groups as the decision-tree cell:
# witness-reported (1), coordinate-derived (2), service-derived (3)
X = df[['X', 'Y', 'COLLISIONTYPE', 'PERSONCOUNT', 'PEDCOUNT', 'PEDCYLCOUNT', 'VEHCOUNT', 'ADDRTYPE', 'HITPARKEDCAR', # 1 Section
        'INTKEY', 'JUNCTIONTYPE', 'SEGLANEKEY', 'CROSSWALKKEY', # 2 Section
        'WEATHER', 'ROADCOND', 'LIGHTCOND', 'year', 'month', 'day', 'hour', 'minute', 'weekday_name']] # 3 Section
y = df['SEVERITYCODE']
# Identical split (same random_state) so accuracies are comparable
X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.1, random_state=3)
# Gradient-boosted trees with default hyperparameters
lgbm = lgb.LGBMClassifier()
lgbm.fit(X_trainset,y_trainset)
predTree = lgbm.predict(X_testset)
accuracy = metrics.accuracy_score(y_testset, predTree)
print(f"LGBMClassifier Accuracy:")
print(f"{accuracy}")
###Output
/Users/artyomkolas/opt/anaconda3/envs/ibm/lib/python3.8/site-packages/lightgbm/__init__.py:42: UserWarning: Starting from version 2.2.1, the library file in distribution wheels for macOS is built by the Apple Clang (Xcode_8.3.3) compiler.
This means that in case of installing LightGBM from PyPI via the ``pip install lightgbm`` command, you don't need to install the gcc compiler anymore.
Instead of that, you need to install the OpenMP library, which is required for running LightGBM on the system with the Apple Clang compiler.
You can install the OpenMP library by the following command: ``brew install libomp``.
warnings.warn("Starting from version 2.2.1, the library file in distribution wheels for macOS "
###Markdown
Results DecisionTrees's Accuracy:0.6828456744480829LGBMClassifier Accuracy:0.7580542938628921We can see that LGBMClassifier performs better.I propose to dwell on this solution.Our model is able to predict whether a medical assistance is needed or not with accuracy ~ 76%. It's good. Discussion We can pay attention to the order in which information about the incident is received. 1) If a witness to the incident exists at the time of the incident, the dispatcher can obtain information in section 1 and automatically and quickly obtain information from sections 2 and 3.Let's consider some options: A police dispatcher can spend 1-5 minutes collecting information. The information may be complete or incomplete. The information may be true or false. The incident may be life threatening or not. In this case, we have a lot of conventions. But a witness can tell us about the wounded and the need for medical assistance. We can compare the report to the police with the decision from the model and draw conclusions about the credibility of the information. Or if there is not enough information about the victims, can help to police to make a decision. The model can help is to understand immediately in the first seconds of the call (by coordinates) whether medical assistance is needed or not. 
2) If a witness to the incident does not exist at the time of the accident (or appears after a long time), then the dispatcher will not receive information at all, or the lost time may cost the lives of the victims.Let's consider some options The witness does not appear at all, and the victims do not need medical assistance.The witness does not appear at all, and the victims need medical attention but do not receive it.An witness appears, but after a while the victims will receive help (if it is necessary).An witness appears, but after a while the victims do not have time to get help (if it is necessary) In this case, we have no information at all at the time of the accident, moreover if the victims need medical assistance, they may not receive it. What can we do about it?1) we could get information from the navigator, for example, about an abnormal decrease in speed. After that automatically transfer coordinates to the dispatcher. 2) we could receive information from the deployment of airbags, sensors of the integrity of the bumper or car body, impact on the body, and then automatically transmit the coordinates to the dispatcher.Based on this we could get such data from software and services and build a model. > 1. Police get data from witness > 'X' > 'Y' > 2. Police can get data automatically by post-processing coordinates 'X' and 'Y' > 'INTKEY' > 'JUNCTIONTYPE' > 'SEGLANEKEY' > 'CROSSWALKKEY' > 3. Police can get data automatically by services > 'WEATHER' > 'ROADCOND' > 'LIGHTCOND' > 'year' > 'month' > 'day' > 'hour' > 'minute' > 'weekday_name'Such methods are able to help to reduce the number of victims where every minute is important.In this case, our model can help to inform the dispatcher about the severity of the accident for less than a second and send a medical helicopter before dispather can contact the victims.
###Code
import lightgbm as lgb
# Reduced feature set: only coordinates plus data obtainable automatically
# (no witness report needed) -- see the Discussion section above
X = df[['X', 'Y', # 1 Section
        'INTKEY', 'JUNCTIONTYPE', 'SEGLANEKEY', 'CROSSWALKKEY', # 2 Section
        'WEATHER', 'ROADCOND', 'LIGHTCOND', 'year', 'month', 'day', 'hour', 'minute', 'weekday_name']] # 3 Section
y = df['SEVERITYCODE']
# Same split parameters as before for a fair comparison
X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.1, random_state=3)
lgbm = lgb.LGBMClassifier()
lgbm.fit(X_trainset,y_trainset)
predTree = lgbm.predict(X_testset)
accuracy = metrics.accuracy_score(y_testset, predTree)
print(f"LGBMClassifier Accuracy:")
print(f"{accuracy}")
###Output
LGBMClassifier Accuracy:
0.7228794760747861
|
week_3/.ipynb_checkpoints/day13_imdb-checkpoint.ipynb | ###Markdown
Please go to https://ccv.jupyter.brown.edu By the end of today you will learn about:- Scraping IMDB for movies that came out in 2019- Scraping a single movie- Scraping all movies from a single page- Scraping all movies from all pages - Scraping IMDB for movies that came out in 2019- Scraping a single movie - Scraping all movies from a single page - Scraping all movies from all pages Scraping IMDB Movie RatingsModified from https://www.dataquest.io/blog/web-scraping-beautifulsoup/|Title|Year|Genre|Runtime|Rating|Synopsis|Director|Vote||---|---|---|---|---|---|---|---||...|...|...|...|...|...|...|...| Explore website to decide how to scrapeWe want to scrape the movies released in 2019 that are in IMDB's database. https://www.imdb.com has an advanced search page (https://www.imdb.com/search/title) that we can use to generate a query to get this list of movies. We first need to figure out how querying works. Let's search for "Feature Films" released between 2019-01-01 and 2019-12-31 with a score between 1 and 10 (to exclude movies without votes). Let's set Display Options to "250 per page" and "Release Date Descending". The URL for the query is:https://www.imdb.com/search/title/?title_type=feature&release_date=2019-01-01,2019-12-31&user_rating=1.0,10.0&sort=release_date,desc&count=250
###Code
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import time
import warnings
from IPython.display import clear_output
###Output
_____no_output_____
###Markdown
- Scraping IMDB for movies that came out in 2019 - Scraping a single movie- Scraping all movies from a single page - Scraping all movies from all pages Scrape a single movie
###Code
# Request the first results page: 250 feature films from 2019, rated 1-10,
# sorted by release date descending
url = "https://www.imdb.com/search/title/?title_type=feature&release_date=2019-01-01,2019-12-31&user_rating=1.0,10.0&sort=release_date,desc&count=250"
response = get(url)
print(response.status_code)  # 200 indicates the request succeeded
# Parse the raw HTML into a navigable tree
soup = BeautifulSoup(response.text, 'html.parser')
print(soup.prettify())
###Output
_____no_output_____
###Markdown
Find the movie containers
###Code
# Each search result ("movie container") is a div with this class pair
movie_containers_lst = soup.find_all('div', class_ = 'lister-item mode-advanced')
print(len(movie_containers_lst))  # expect 250, one per movie on the page
###Output
_____no_output_____
###Markdown
Scrape the first movie container
###Code
# Inspect the content area of the first result to learn its structure
first_movie = movie_containers_lst[0].find(class_='lister-item-content')
print(first_movie.prettify())
###Output
_____no_output_____
###Markdown
The html for a single movie container is very long. We will use developer tools to help find the data we want.
###Code
# Title: first anchor inside the <h3> heading
title_str = first_movie.h3.a.get_text()
print(title_str)
# Release year, e.g. "(2019)"
year_str = first_movie.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text()
print(year_str)
# Genre(s) and runtime live in the container's first <p>
genre_str = first_movie.p.find('span', class_ = 'genre').get_text()
runtime_str = first_movie.p.find('span', class_ = 'runtime').get_text()
print(genre_str)
print(runtime_str)
# IMDB rating, converted from text to float
rating_flt = float(first_movie.select('.ratings-bar div strong')[0].get_text())
print(rating_flt)
# Synopsis: the second "text-muted" paragraph
synopsis_str = first_movie.find_all('p', class_ = 'text-muted')[1].get_text()
print(synopsis_str)
# Director: first link in the third <p>
director_str = first_movie.find_all('p')[2].a.get_text()
print(director_str)
###Output
_____no_output_____
###Markdown
Can search for a tag with special attributes like ``
###Code
# The vote count lives in a <span name="nv"> tag
votes_tag = first_movie.find('span', attrs = {'name':'nv'})
print(votes_tag)
###Output
_____no_output_____
###Markdown
Can treat tags like dictionaries, where key value pairs are attributes
###Code
# Tags behave like dicts over their attributes; 'data-value' carries the
# exact (unabbreviated) vote count
votes_int = int(votes_tag['data-value'])
print(votes_int)
###Output
_____no_output_____
###Markdown
- Scraping IMDB for movies that came out in 2019 - Scraping a single movie - Scraping all movies from a single page- Scraping all movies from all pages Next, we will scrape all movie containers from the page
###Code
# Lists to store the scraped data in
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []
# Extract data from individual movie container.
# NOTE(review): first attempt -- assumes every container has a runtime,
# genre and director; the following cells show this fails on entries
# missing those fields and debug it step by step.
for container in movie_containers_lst:
    # movie title
    title_str = container.h3.a.get_text()
    titles_lst.append(title_str)
    # year
    year_str = container.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text()
    years_lst.append(year_str)
    # genre(s)
    genre_str = container.p.find('span', class_ = 'genre').get_text()
    genres_lst.append(genre_str)
    # runtime
    runtime_str = container.p.find('span', class_ = 'runtime').get_text()
    runtimes_lst.append(runtime_str)
    # IMDB rating -- kept as text here, unlike the single-movie cell above
    # which applied float(); TODO confirm the intended dtype
    rating_flt = container.select('.ratings-bar div strong')[0].get_text()
    ratings_lst.append(rating_flt)
    # synopsis
    synopsis_str = container.find_all('p', class_ = 'text-muted')[1].get_text()
    synopsi_lst.append(synopsis_str)
    # director(s)
    director_str = container.find_all('p')[2].a.get_text()
    directors_lst.append(director_str)
    # vote count (exact value from the data-value attribute)
    votes_tag = container.find('span', attrs = {'name':'nv'})
    vote_int = int(votes_tag['data-value'])
    votes_lst.append(vote_int)
###Output
_____no_output_____
###Markdown
There are often exceptions to the rule in the web page - we need to debug to account for these cases.
###Code
# Lists to store the scraped data in
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []
# Extract data from individual movie container.
# Debugging pass: print each title so the movie that breaks the loop
# can be identified.
for container in movie_containers_lst:
    # movie title
    title_str = container.h3.a.get_text()
    titles_lst.append(title_str)
    print(title_str)  # shows which container we fail on
    # year
    year_str = container.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text()
    years_lst.append(year_str)
    # genre(s)
    genre_str = container.p.find('span', class_ = 'genre').get_text()
    genres_lst.append(genre_str)
    # runtime -- still unguarded; fails on movies without a listed runtime
    runtime_str = container.p.find('span', class_ = 'runtime').get_text()
    runtimes_lst.append(runtime_str)
    # IMDB rating (raw text)
    rating_flt = container.select('.ratings-bar div strong')[0].get_text()
    ratings_lst.append(rating_flt)
    # synopsis
    synopsis_str = container.find_all('p', class_ = 'text-muted')[1].get_text()
    synopsi_lst.append(synopsis_str)
    # director(s)
    director_str = container.find_all('p')[2].a.get_text()
    directors_lst.append(director_str)
    # vote count
    votes_tag = container.find('span', attrs = {'name':'nv'})
    vote_int = int(votes_tag['data-value'])
    votes_lst.append(vote_int)
###Output
_____no_output_____
###Markdown
The problem is that not all movies have a listed runtime.
###Code
# Lists to store the scraped data in
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []
# Extract data from individual movie container.
# This pass guards the missing-runtime case; genre and director are still
# unguarded (fixed in the next cells).
for container in movie_containers_lst:
    # movie title
    title_str = container.h3.a.get_text()
    titles_lst.append(title_str)
    print(title_str)
    # year
    year_str = container.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text()
    years_lst.append(year_str)
    # genre(s)
    genre_str = container.p.find('span', class_ = 'genre').get_text()
    genres_lst.append(genre_str)
    # runtime -- empty string when a movie has no listed runtime
    if container.p.find('span', class_ = 'runtime') is not None:
        runtime_str = container.p.find('span', class_ = 'runtime').get_text()
    else:
        runtime_str = ''
    runtimes_lst.append(runtime_str)
    # IMDB rating (raw text)
    rating_flt = container.select('.ratings-bar div strong')[0].get_text()
    ratings_lst.append(rating_flt)
    # synopsis
    synopsis_str = container.find_all('p', class_ = 'text-muted')[1].get_text()
    synopsi_lst.append(synopsis_str)
    # director(s)
    # NOTE(review): indexes p[0] here while every other version uses p[2];
    # presumably a typo in this debugging pass -- confirm against the others
    director_str = container.find_all('p')[0].a.get_text()
    directors_lst.append(director_str)
    # vote count
    votes_tag = container.find('span', attrs = {'name':'nv'})
    vote_int = int(votes_tag['data-value'])
    votes_lst.append(vote_int)
    # NOTE(review): prints `votes_int` (the single-movie variable defined in
    # an earlier cell), not this loop's `vote_int` -- stale debug output
    print(votes_int)
# Lists to store the scraped data in
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []
# Extract data from individual movie container.
# This pass guards both missing genre and missing runtime; the director
# lookup is still unguarded (fixed in the next cell).
for container in movie_containers_lst:
    # movie title
    title_str = container.h3.a.get_text()
    titles_lst.append(title_str)
    print(title_str)
    # year
    year_str = container.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text()
    years_lst.append(year_str)
    # genre(s) -- empty string when absent
    if container.p.find('span', class_ = 'genre') is not None:
        genre_str = container.p.find('span', class_ = 'genre').get_text()
    else:
        genre_str = ''
    genres_lst.append(genre_str)
    # runtime -- empty string when absent
    if container.p.find('span', class_ = 'runtime') is not None:
        runtime_str = container.p.find('span', class_ = 'runtime').get_text()
    else:
        runtime_str = ''
    runtimes_lst.append(runtime_str)
    # IMDB rating (raw text)
    rating_flt = container.select('.ratings-bar div strong')[0].get_text()
    ratings_lst.append(rating_flt)
    # synopsis
    synopsis_str = container.find_all('p', class_ = 'text-muted')[1].get_text()
    synopsi_lst.append(synopsis_str)
    # director(s)
    director_str = container.find_all('p')[2].a.get_text()
    directors_lst.append(director_str)
    # vote count
    votes_tag = container.find('span', attrs = {'name':'nv'})
    vote_int = int(votes_tag['data-value'])
    votes_lst.append(vote_int)
# Lists to store the scraped data in
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []
# Extract data from individual movie container.
# Final debugging pass: genre, runtime and director are all guarded,
# so the loop completes for every container on the page.
for container in movie_containers_lst:
    # movie title
    title_str = container.h3.a.get_text()
    titles_lst.append(title_str)
    print(title_str)
    # year
    year_str = container.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text()
    years_lst.append(year_str)
    # genre(s) -- empty string when absent
    if container.p.find('span', class_ = 'genre') is not None:
        genre_str = container.p.find('span', class_ = 'genre').get_text()
    else:
        genre_str = ''
    genres_lst.append(genre_str)
    # runtime -- empty string when absent
    if container.p.find('span', class_ = 'runtime') is not None:
        runtime_str = container.p.find('span', class_ = 'runtime').get_text()
    else:
        runtime_str = ''
    runtimes_lst.append(runtime_str)
    # IMDB rating (raw text; note the _flt name despite no float() call)
    rating_flt = container.select('.ratings-bar div strong')[0].get_text()
    ratings_lst.append(rating_flt)
    # synopsis
    synopsis_str = container.find_all('p', class_ = 'text-muted')[1].get_text()
    synopsi_lst.append(synopsis_str)
    # director(s) -- empty string when the third <p> has no link
    if container.find_all('p')[2].a is not None:
        director_str = container.find_all('p')[2].a.get_text()
    else:
        director_str = ''
    directors_lst.append(director_str)
    # vote count
    votes_tag = container.find('span', attrs = {'name':'nv'})
    vote_int = int(votes_tag['data-value'])
    votes_lst.append(vote_int)
# Assemble the scraped page into a DataFrame for inspection
test_df = pd.DataFrame({'title': titles_lst,
                        'year': years_lst,
                        'genre': genres_lst,
                        'runtime': runtimes_lst,
                        'rating': ratings_lst,
                        'synopsis': synopsi_lst,
                        'director': directors_lst,
                        'vote': votes_lst
                        })
print(test_df)
###Output
_____no_output_____
###Markdown
Let's create a function that will scrape a page. It takes `movies_container_lst` as input and assumes that empty lists have been created outside of the function.
###Code
def scrape_page(lst):
    """Scrape one IMDB results page worth of movie containers.

    Appends one entry per container to the module-level accumulator lists
    (titles_lst, years_lst, genres_lst, runtimes_lst, ratings_lst,
    synopsi_lst, directors_lst, votes_lst), which must already exist.

    Parameters
    ----------
    lst : iterable of bs4.element.Tag
        The 'lister-item mode-advanced' div containers of one results page.

    Returns
    -------
    None
    """
    for container in lst:
        # movie title
        titles_lst.append(container.h3.a.get_text())
        # year
        years_lst.append(container.h3.find('span', class_ = 'lister-item-year text-muted unbold').get_text())
        # genre(s) -- optional field, fall back to ''
        genre_tag = container.p.find('span', class_ = 'genre')
        genres_lst.append(genre_tag.get_text() if genre_tag is not None else '')
        # runtime -- optional field, fall back to ''
        runtime_tag = container.p.find('span', class_ = 'runtime')
        runtimes_lst.append(runtime_tag.get_text() if runtime_tag is not None else '')
        # IMDB rating -- guard against unrated movies so the parallel lists
        # stay the same length (an IndexError here would desynchronize them
        # and break the later DataFrame construction).
        rating_tags = container.select('.ratings-bar div strong')
        ratings_lst.append(rating_tags[0].get_text() if rating_tags else '')
        # synopsis (second 'text-muted' paragraph)
        synopsi_lst.append(container.find_all('p', class_ = 'text-muted')[1].get_text())
        # director(s) -- optional, first link of the third paragraph
        director_tag = container.find_all('p')[2].a
        directors_lst.append(director_tag.get_text() if director_tag is not None else '')
        # vote count -- optional; None keeps the lists aligned when missing
        votes_tag = container.find('span', attrs = {'name':'nv'})
        votes_lst.append(int(votes_tag['data-value']) if votes_tag is not None else None)
    return
# Reset the accumulator lists so this run starts from scratch.
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []

# Scrape the containers parsed earlier in the notebook.
scrape_page(movie_containers_lst)

# Collect the results; shape should be (n_movies, 8).
test_df = pd.DataFrame({'title': titles_lst,
                        'year': years_lst,
                        'genre': genres_lst,
                        'runtime': runtimes_lst,
                        'rating': ratings_lst,
                        'synopsis': synopsi_lst,
                        'director': directors_lst,
                        'vote': votes_lst
                       })
print(test_df.shape)
###Output
_____no_output_____
###Markdown
- Scraping IMDB for movies that came out in 2019 - Scraping a single movie - Scraping all movies from a single page - Scraping all movies from all pages Scrape multiple pages * Make all the requests we want from within the loop.* Control the loop’s rate to avoid bombarding the server with requests.* Monitor the loop while it runs. Make all requests we want from within the loop The next page has the following URL: https://www.imdb.com/search/title/?title_type=feature&release_date=2019-01-01,2019-12-31&user_rating=1.0,10.0&sort=release_date,desc&count=250&start=251&ref_=adv_nxt`&start=251` refers to starting at movie 251. Incrementing this query parameter will allow us to navigate to all pages of the search.
###Code
# Pagination offsets: IMDB's advanced search exposes results 250 at a time
# via the &start= query parameter (1, 251, 501, ..., 5751).
movie_indices = [str(start) for start in range(1, 5972, 250)]
print(movie_indices)

base_url = 'https://www.imdb.com/search/title/?title_type=feature&release_date=2019-01-01,2019-12-31&user_rating=1.0,10.0&sort=release_date,desc&count=250'

# Preview the URL of every page we intend to request.
for start in movie_indices:
    print(f'{base_url}&start={start}&ref_=adv_nxt')
###Output
_____no_output_____
###Markdown
Controlling the crawl rateControlling the rate of crawling is beneficial for us, and for the website we are scraping. If we avoid hammering the server with tens of requests per second, then we are much less likely to get our IP address banned. We also avoid disrupting the activity of the website we scrape by allowing the server to respond to other users’ requests too.We’ll control the loop’s rate by using the `sleep()` function from Python’s `time` module. `sleep()` will pause the execution of the loop for a specified amount of seconds.
###Code
# Demonstrate throttling: pause two seconds between iterations so a real
# request loop would not hammer the server (5 iterations ~ 10 s total).
for i in range(0,5):
    delay = 2
    print(delay)
    time.sleep(delay)
###Output
_____no_output_____
###Markdown
Monitoring the scraping loop* The frequency (speed) of requests, so we make sure our program is not overloading the server.* The status code of our requests, so we make sure the server is sending back the proper responses.
###Code
# Set a starting time using the time() function from the time module, and assign the value to start_time.
start_time = time.time()

# Assign 0 to the variable requests which we'll use to count the number of requests.
# NOTE(review): this name shadows the popular `requests` library if it was
# imported elsewhere in the notebook.
requests = 0

# Start a loop, and then with each iteration:
for i in range(5):
    # Simulate a request.
    # <<<A request would go here>>>
    # Increment the number of requests by 1.
    requests = requests + 1
    # Pause the loop for 1 second
    time.sleep(1)
    # Calculate the elapsed time since the first request, and assign the value to elapsed_time.
    elapsed_time = time.time() - start_time
    # Print the number of requests and the frequency (requests per second).
    print('Request: ' + str(requests) + ' ' + 'Frequency: ' + str(requests/elapsed_time) + ' requests/sec')
    # clears the output of print, and waits until there is a new output
    # (clear_output comes from IPython.display, imported elsewhere)
    clear_output(wait = True)
###Output
_____no_output_____
###Markdown
Import the warn function to throw a warning if there is a non-200 response. Warn rather than throw an error because we will still scrape enough even if there are some hiccups
###Code
# Emit a sample warning: in the real loop we warn (rather than raise) on
# non-200 responses so scraping can continue despite occasional hiccups.
warnings.warn("Warning Simulation !!!")
###Output
_____no_output_____
###Markdown
Full scraping snippet
###Code
# Redeclaring the lists to store data in
titles_lst = []
years_lst = []
genres_lst = []
runtimes_lst = []
ratings_lst = []
synopsi_lst = []
directors_lst = []
votes_lst = []

# Preparing the monitoring of the loop
start_time = time.time()
requests = 0  # NOTE(review): shadows the `requests` library name
movie_indices = [str(i) for i in range(1, 5972, 250)]

# For every results page (start offsets 1, 251, ..., 5751 -- 24 pages)
for movie_index in movie_indices:
    # Make a get request
    base_url = 'https://www.imdb.com/search/title/?title_type=feature&release_date=2019-01-01,2019-12-31&user_rating=1.0,10.0&sort=release_date,desc&count=250'
    url = base_url + '&start=' + movie_index + '&ref_=adv_nxt'
    response = get(url)
    # Pause the loop -- be polite to the server
    time.sleep(1)
    # Monitor the requests
    requests = requests + 1
    elapsed_time = time.time() - start_time
    print('Request: ' + str(requests) + ' ' + 'Frequency: ' + str(requests/elapsed_time) + ' requests/sec')
    clear_output(wait = True)
    # Throw a warning for non-200 status codes
    if response.status_code != 200:
        warnings.warn('Request: ' + str(requests) + '; Status code: ' + str(response.status_code))
    # Parse the content of the request with BeautifulSoup
    soup = BeautifulSoup(response.text, 'html.parser')
    # Select all the 250 movie containers from a single page and scrape
    movie_containers_lst = soup.find_all('div', class_ = 'lister-item mode-advanced')
    scrape_page(movie_containers_lst)

# Collect everything scraped across all pages and persist to CSV.
movies_df = pd.DataFrame({'title': titles_lst,
                          'year': years_lst,
                          'genre': genres_lst,
                          'runtime': runtimes_lst,
                          'rating': ratings_lst,
                          'synopsis': synopsi_lst,
                          'director': directors_lst,
                          'vote': votes_lst
                         })
print(movies_df)
movies_df.to_csv('data/imdb.csv', index=False)
###Output
_____no_output_____ |
notebooks/Python/Python_Internals/Pickling.ipynb | ###Markdown
Imports
###Code
import numpy as np
import pandas as pd
import pickle
###Output
_____no_output_____
###Markdown
Create fake data
###Code
# Fixed seed so the synthetic dataset is reproducible.
np.random.seed(24)
n_obs = 100

# Draw each column separately; the draw order (age, gender, m_status,
# profession) matters for reproducibility with the fixed seed above.
ages = np.random.randint(25, 100, n_obs)
genders = np.random.choice(['female', 'male'], size=n_obs, replace=True)
statuses = np.random.choice(['single', 'married', 'widow'], size=n_obs, replace=True)
professions = np.random.choice(['accountant', 'lawyer', 'dentist', 'doctor', 'data scientist'],
                               size=n_obs, replace=True)

fake_data = {'age': ages,
             'gender': genders,
             'm_status': statuses,
             'profession': professions}

df = pd.DataFrame(fake_data)
df.head(10)
###Output
_____no_output_____
###Markdown
Subset Data
###Code
# Keep only rows matching all three predicates: female data scientists
# younger than 75.
is_female = df.gender == 'female'
is_young = df.age < 75
is_ds = df.profession == 'data scientist'
subset = df[is_female & is_young & is_ds]
subset
###Output
_____no_output_____
###Markdown
--- What if I had lots of data and didn't want to rerun the subset portion? Is there a way to save this subsetted dataframe and load it later when I need it?**Yes, pickle!** From the [docs](https://docs.python.org/3/library/pickle.html): >The pickle module implements binary protocols for serializing and de-serializing a Python object structure. “Pickling” is the process whereby a Python object hierarchy is converted into a byte stream, and “unpickling” is the inverse operation, whereby a byte stream (from a binary file or bytes-like object) is converted back into an object hierarchy. >>**Warning The pickle module is not secure against erroneous or maliciously constructed data. Never unpickle data received from an untrusted or unauthenticated source.**>>The following types can be pickled:>* None, True, and False * integers, floating point numbers, complex numbers * strings, bytes, bytearrays * tuples, lists, sets, and dictionaries containing only picklable objects * functions defined at the top level of a module (using def, not lambda) * built-in functions defined at the top level of a module * classes that are defined at the top level of a module * instances of such classes whose __dict__ or the result of calling __getstate__() is picklable
###Code
# set path for convenience
path = '/Users/davidziganto/Repositories/Data_Science_Fundamentals/pkl_files/'

# Serialize the subsetted DataFrame to disk ('wb' = write binary).
with open(path + 'subset_df.pkl', 'wb') as picklefile:
    pickle.dump(subset, picklefile)

# show test doesn't exist yet
# (catch only NameError: the original bare `except:` would also hide
# unrelated problems such as KeyboardInterrupt or typos in the try body)
try:
    print(test)
except NameError:
    print('test does not exist!')

# Deserialize the pickle back into a new variable ('rb' = read binary).
with open(path + "subset_df.pkl", 'rb') as picklefile:
    test = pickle.load(picklefile)

test
###Output
_____no_output_____
###Markdown
Voilà. Now I can pick up where I left off without having to run through all that processing. Better Way (w/DF)
###Code
# NOTE(review): this pickles the *full* df, not the `subset` frame, even
# though the filename says subset_df2 -- confirm which one was intended.
df.to_pickle(path + 'subset_df2.pkl', compression='gzip')
###Output
_____no_output_____ |
contributors/joseph_martin/SHARE_3_test_xcorr_FIS.ipynb | ###Markdown
Test scipy.signal.correlate on some atl06 data from foundation ice stream
###Code
import numpy as np
import scipy, sys, os, pyproj, glob, re, h5py
import matplotlib.pyplot as plt
import pandas as pd
from scipy.signal import correlate
from scipy import stats
from astropy.time import Time
%matplotlib widget
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Test scipy.signal.correlate Generate some test data:
###Code
# Build a synthetic boxcar signal: zero everywhere except a 15-sample
# (1.5 m wide) unit pulse starting at index 30.
dx = 0.1
x = np.arange(0,10,dx)
y = np.zeros(np.shape(x))
ix0 = 30
ix1 = 30 + 15
y[ix0:ix1] = 1

# Plot the pulse against distance (left) and sample index (right).
fig,axs = plt.subplots(1,2)
axs[0].plot(x,y,'k')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x)), y,'k')
axs[1].set_xlabel('index')
###Output
_____no_output_____
###Markdown
Make a signal to correlate with:
###Code
# Place the same pulse at a known offset (14 m = 140 samples) inside a much
# longer record; the cross-correlation should recover exactly this shift.
imposed_offset = int(14/dx) # 14 meters, in units of samples
x_noise = np.arange(0,50,dx) # make the vector we're comparing with much longer
y_noise = np.zeros(np.shape(x_noise))
y_noise[ix0 + imposed_offset : ix1 + imposed_offset] = 1
# uncomment the line below to add noise
# y_noise = y_noise * np.random.random(np.shape(y_noise))

# Plot original (black) and shifted (blue) signals side by side.
fig,axs = plt.subplots(1,2)
axs[0].plot(x,y,'k')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x)), y, 'k')
axs[1].set_xlabel('index')
axs[0].plot(x_noise,y_noise, 'b')
axs[0].set_xlabel('distance (m)')
axs[0].set_ylabel('value')
axs[1].plot(np.arange(len(x_noise)), y_noise,'b')
axs[1].set_xlabel('index')
fig.suptitle('black = original, blue = shifted')
_____no_output_____
###Markdown
Try scipy.signal.correlate:mode ='full' returns the entire cross correlation; could be 'valid' to return only non- zero-padded partmethod = direct (not fft)
###Code
# Full cross-correlation of the short template y against the longer record
# y_noise, normalized by the geometric mean of the signals' energies so the
# peak value is comparable across amplitudes.
corr = correlate(y_noise,y, mode = 'full', method = 'direct')
norm_val = np.sqrt(np.sum(y_noise**2)*np.sum(y**2))
corr = corr / norm_val
###Output
_____no_output_____
###Markdown
What are the dimensions of corr?
###Code
print('corr: ', np.shape(corr))
print('x: ', np.shape(x))
print('x: ', np.shape(x_noise))

# lagvec = np.arange(0,len(x_noise) - len(x) + 1)
# For mode='full', the correlation has len(x) + len(x_noise) - 1 samples,
# corresponding to lags from -(len(x)-1) to len(x_noise)-1.
lagvec = np.arange( -(len(x) - 1), len(x_noise), 1)
shift_vec = lagvec * dx  # convert lags (samples) to physical shift (m)

# Locate the correlation peak; its lag is the best-fit offset.
ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
best_lag = lagvec[ix_peak]
best_shift = shift_vec[ix_peak]

fig,axs = plt.subplots(3,1)
axs[0].plot(lagvec,corr)
axs[0].plot(lagvec[ix_peak],corr[ix_peak], 'r*')
axs[0].set_xlabel('lag (samples)')
axs[0].set_ylabel('correlation coefficient')
axs[1].plot(shift_vec,corr)
axs[1].plot(shift_vec[ix_peak],corr[ix_peak], 'r*')
axs[1].set_xlabel('shift (m)')
axs[1].set_ylabel('correlation coefficient')
# Overlay the template shifted by the recovered offset on the long record.
axs[2].plot(x + best_shift, y,'k')
axs[2].plot(x_noise, y_noise, 'b--')
axs[2].set_xlabel('shift (m)')
fig.suptitle(' '.join(['Shift ', str(best_lag), ' samples, or ', str(best_shift), ' m to line up signals']))
###Output
_____no_output_____
###Markdown
Let's try with some ATL06 data Load some repeat data:import readers, etc
###Code
# ! cd ..; [ -d pointCollection ] || git clone https://www.github.com/smithB/pointCollection.git
# sys.path.append(os.path.join(os.getcwd(), '..'))
# !python3 -m pip install --user git+https://github.com/tsutterley/pointCollection.git@pip
import pointCollection as pc
moa_datapath = '/srv/tutorial-data/land_ice_applications/'
datapath = '/home/jovyan/shared/surface_velocity/FIS_ATL06/'
# example hf5 file, if you need to look at the fields
# datapath='/home/jovyan/shared/surface_velocity/FIS_ATL06_small/processed_ATL06_20191129105346_09700511_003_01.h5'
# !h5ls -r /home/jovyan/shared/surface_velocity/FIS_ATL06_small/processed_ATL06_20191129105346_09700511_003_01.h5
###Output
_____no_output_____
###Markdown
Geographic setting : Foundation Ice Stream
###Code
print(pc.__file__)
# something wrong with pointCollection

# Bounding box (lon/lat corners) around Foundation Ice Stream.
spatial_extent = np.array([-102, -76, -98, -74.5])
lat=spatial_extent[[1, 3, 3, 1, 1]]
lon=spatial_extent[[2, 2, 0, 0, 2]]
print(lat)
print(lon)
# project the coordinates to Antarctic polar stereographic
xy=np.array(pyproj.Proj(3031)(lon, lat))
# get the bounds of the projected coordinates
XR=[np.nanmin(xy[0,:]), np.nanmax(xy[0,:])]
YR=[np.nanmin(xy[1,:]), np.nanmax(xy[1,:])]

MOA=pc.grid.data().from_geotif(os.path.join(moa_datapath, 'MOA','moa_2009_1km.tif'), bounds=[XR, YR])

# show the mosaic:
plt.figure()
MOA.show(cmap='gray', clim=[14000, 17000])
plt.plot(xy[0,:], xy[1,:])
# Fixed label: the extent above covers Foundation Ice Stream, not Pine
# Island Glacier (the old title was left over from the PIG tutorial).
plt.title('Mosaic of Antarctica for Foundation Ice Stream')
###Output
[-76. -74.5 -74.5 -76. -76. ]
[ -98. -98. -102. -102. -98.]
###Markdown
Load repeat track data ATL06 reader
###Code
def atl06_to_dict(filename, beam, field_dict=None, index=None, epsg=None):
    """
    Read selected datasets from an ATL06 file
    Input arguments:
        filename: ATL06 file to read
        beam: a string specifying which beam is to be read (ex: gt1l, gt1r, gt2l, etc)
        field_dict: A dictionary describing the fields to be read
                keys give the group names to be read,
                entries are lists of datasets within the groups
        index: which entries in each field to read
        epsg: an EPSG code specifying a projection (see www.epsg.org).  Good choices are:
            for Greenland, 3413 (polar stereographic projection, with Greenland along the Y axis)
            for Antarctica, 3031 (polar stereographic projection, centered on the South Pole)
    Output argument:
        D6: dictionary containing ATL06 data.  Each dataset in
            dataset_dict has its own entry in D6.  Each dataset
            in D6 contains a numpy array containing the
            data
    """
    if field_dict is None:
        # Default fields: elevation, position, quality summary, and slope info.
        field_dict={None:['latitude','longitude','h_li', 'atl06_quality_summary'],\
                    'ground_track':['x_atc','y_atc'],\
                    'fit_statistics':['dh_fit_dx', 'dh_fit_dy']}
    D={}
    # below: file_re = regular expression; it pulls apart the standard ATL06
    # filename to recover date, rgt, cycle, region, release, and version
    file_re=re.compile('ATL06_(?P<date>\d+)_(?P<rgt>\d\d\d\d)(?P<cycle>\d\d)(?P<region>\d\d)_(?P<release>\d\d\d)_(?P<version>\d\d).h5')
    with h5py.File(filename,'r') as h5f:
        for key in field_dict:
            for ds in field_dict[key]:
                # Build the full HDF5 dataset path for this beam/group/dataset.
                if key is not None:
                    ds_name=beam+'/land_ice_segments/'+key+'/'+ds
                else:
                    ds_name=beam+'/land_ice_segments/'+ds
                if index is not None:
                    D[ds]=np.array(h5f[ds_name][index])
                else:
                    D[ds]=np.array(h5f[ds_name])
                # Replace the file's fill value with NaN so downstream math ignores it.
                if '_FillValue' in h5f[ds_name].attrs:
                    bad_vals=D[ds]==h5f[ds_name].attrs['_FillValue']
                    D[ds]=D[ds].astype(float)
                    D[ds][bad_vals]=np.NaN
        # Timing and segment bookkeeping needed by the velocity workflow.
        D['data_start_utc'] = h5f['/ancillary_data/data_start_utc'][:]
        D['delta_time'] = h5f['/' + beam + '/land_ice_segments/delta_time'][:]
        D['segment_id'] = h5f['/' + beam + '/land_ice_segments/segment_id'][:]
    if epsg is not None:
        # Project lon/lat to x/y in the requested coordinate system.
        xy=np.array(pyproj.proj.Proj(epsg)(D['longitude'], D['latitude']))
        D['x']=xy[0,:].reshape(D['latitude'].shape)
        D['y']=xy[1,:].reshape(D['latitude'].shape)
    # Record track/cycle/beam metadata parsed from the filename.
    temp=file_re.search(filename)
    D['rgt']=int(temp['rgt'])
    D['cycle']=int(temp['cycle'])
    D['beam']=beam
    return D
###Output
_____no_output_____
###Markdown
Read in files; this next cell took ~1 minute early in the morning
###Code
# find all the files in the directory:
# ATL06_files=glob.glob(os.path.join(datapath, 'PIG_ATL06', '*.h5'))
ATL06_files=glob.glob(os.path.join(datapath, '*.h5'))

# Read every granule's gt2l beam, subsampling every 25th segment to keep
# memory reasonable; files missing the beam/group raise KeyError and are
# counted rather than aborting the whole read.
D_dict={}
error_count=0
for file in ATL06_files:
    try:
        D_dict[file]=atl06_to_dict(file, '/gt2l', index=slice(0, -1, 25), epsg=3031)
    except KeyError as e:
        print(f'file {file} encountered error {e}')
        error_count += 1
print(f"read {len(D_dict)} data files of which {error_count} gave errors")
###Output
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190430122344_04920311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20181030210407_04920111_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190730080323_04920411_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190220230230_08320211_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190312235510_11380211_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20181108184743_06280111_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190228224553_09540211_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190623094402_13150311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190506112405_05830311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190611193446_11380311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190620171809_12740311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190823150456_08630411_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190529105941_09340311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190308130329_10700211_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190824143917_08780411_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190812071246_06900411_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20181118033101_07710111_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190822060451_08420411_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190607194306_10770311_003_01.h5 encountered error 'Unable to open object (component not found)'
file /home/jovyan/shared/surface_velocity/FIS_ATL06/processed_ATL06_20190129164404_04920211_003_01.h5 encountered error 'Unable to open object (component not found)'
read 613 data files of which 20 gave errors
###Markdown
Plot ground tracks
###Code
# Two map panels over the MOA background: cycles 1-2 on top, 3+ below.
plt.figure(figsize=[8,8])
hax0=plt.gcf().add_subplot(211, aspect='equal')
MOA.show(ax=hax0, cmap='gray', clim=[14000, 17000]);
hax1=plt.gcf().add_subplot(212, aspect='equal', sharex=hax0, sharey=hax0)
MOA.show(ax=hax1, cmap='gray', clim=[14000, 17000]);

for fname, Di in D_dict.items():
    cycle=Di['cycle']
    # Route early cycles to the top axes, later cycles to the bottom.
    if cycle <= 2:
        ax=hax0
    else:
        ax=hax1
    #print(fname)
    #print(f'\t{rgt}, {cycle}, {region}')
    ax.plot(Di['x'], Di['y'])
    if True:
        try:
            # Label each track at its first point; IndexError means an empty track.
            if cycle < 3:
                ax.text(Di['x'][0], Di['y'][0], f"rgt={Di['rgt']}, cyc={cycle}", clip_on=True)
            elif cycle==3:
                ax.text(Di['x'][0], Di['y'][0], f"rgt={Di['rgt']}, cyc={cycle}+", clip_on=True)
        except IndexError:
            pass
hax0.set_title('cycles 1 and 2');
hax1.set_title('cycle 3+');
###Output
_____no_output_____
###Markdown
Map view elevations
###Code
# Scatter all good-quality elevations, colored by height (0-1000 m).
map_fig=plt.figure()
map_ax=map_fig.add_subplot(111)
# MOA.show(ax=map_ax, cmap='gray', clim=[14000, 17000])
for fname, Di in D_dict.items():
    # select elevations with good quality_summary
    good=Di['atl06_quality_summary']==0
    ms=map_ax.scatter( Di['x'][good], Di['y'][good], 2, c=Di['h_li'][good], \
                      vmin=0, vmax=1000, label=fname)
map_ax._aspect='equal'
plt.colorbar(ms, label='elevation');
###Output
_____no_output_____
###Markdown
Repeat track elevation profile
###Code
# Ben Smith's code to plot the individual segments:
def plot_segs(D6, ind=None, **kwargs):
    """
    Plot a sloping line for each ATL06 segment.

    Each 40 m segment is drawn from its midpoint elevation h_li using the
    along-track slope dh_fit_dx; a NaN row between endpoints keeps the
    segments from being joined into one polyline.
    """
    if ind is None:
        ind=np.ones_like(D6['h_li'], dtype=bool)
    # define the heights of the segment endpoints. Leave a row of NaNs so that the endpoints don't get joined
    # (use np.nan: the np.NaN alias was removed in NumPy 2.0)
    h_ep=np.zeros([3, D6['h_li'][ind].size])+np.nan
    h_ep[0, :]=D6['h_li'][ind]-D6['dh_fit_dx'][ind]*20
    h_ep[1, :]=D6['h_li'][ind]+D6['dh_fit_dx'][ind]*20
    # define the x coordinates of the segment endpoints
    x_ep=np.zeros([3,D6['h_li'][ind].size])+np.nan
    x_ep[0, :]=D6['x_atc'][ind]-20
    x_ep[1, :]=D6['x_atc'][ind]+20
    plt.plot(x_ep.T.ravel(), h_ep.T.ravel(), **kwargs)
# A revised code to plot the elevations of segment midpoints (h_li):
def plot_elevation(D6, ind=None, **kwargs):
    """
    Plot midpoint elevation for each ATL06 segment
    """
    # With no index supplied, plot every segment.
    mask = np.ones_like(D6['h_li'], dtype=bool) if ind is None else ind
    # Midpoint elevations against their along-track coordinates.
    plt.plot(D6['x_atc'][mask], D6['h_li'][mask], **kwargs)
dx=20  # ATL06 along-track segment spacing (m)
win_size = int(np.round(1020 / dx)) # meters / dx; odd multiples of 20 only!

# Per-file dictionaries for the centre-pair beams.
D_2l={}
D_2r={}

# specify the rgt here:
rgt="0027"
rgt="0848" #Ben's suggestion (overrides the line above)

# iterate over the repeat cycles
for cycle in ['03','04','05','06','07']:
    for filename in glob.glob(os.path.join(datapath, f'*ATL06_*_{rgt}{cycle}*_003*.h5')):
        try:
            # read the left-beam data
            D_2l[filename]=atl06_to_dict(filename,'/gt2l', index=None, epsg=3031)
            # read the right-beam data
            D_2r[filename]=atl06_to_dict(filename,'/gt2r', index=None, epsg=3031)
            # plot the locations in the previous plot
            map_ax.plot(D_2r[filename]['x'], D_2r[filename]['y'],'k');
            map_ax.plot(D_2l[filename]['x'], D_2l[filename]['y'],'k');
        except Exception as e:
            print(f'filename=(unknown), exception={e}')

plt.figure();
for filename, Di in D_2l.items():
    #Plot only points that have ATL06_quality_summary==0 (good points)
    hl=plot_elevation(Di, ind=Di['atl06_quality_summary']==0, label=f"cycle={Di['cycle']}")
    #hl=plt.plot(Di['x_atc'][Di['atl06_quality_summary']==0], Di['h_li'][Di['atl06_quality_summary']==0], '.', label=f"cycle={Di['cycle']}")
plt.legend()
plt.xlabel('x_atc')
plt.ylabel('elevation');
###Output
_____no_output_____
###Markdown
Pull out a segment and cross correlate: Let's try x_atc = 2.935e7 thru 2.93e7 (just from looking through data)
###Code
cycles = [] # names of cycles with data
for filename, Di in D_2l.items():
    cycles += [str(Di['cycle']).zfill(2)]
cycles.sort()

# x1 = 2.93e7
# x2 = 2.935e7
beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']

# try and smooth without filling nans: 3-sample (60 m) boxcar filter
smoothing_window_size = int(np.round(60 / dx)) # meters / dx; odd multiples of 20 only! it will break
filt = np.ones(smoothing_window_size)
smoothed = True

### extract and plot data from all available cycles
fig, axs = plt.subplots(4,1)

x_atc = {}
h_li_raw = {}
h_li = {}
h_li_diff = {}
times = {}
for cycle in cycles:
    # find Di that matches cycle:
    Di = {}
    x_atc[cycle] = {}
    h_li_raw[cycle] = {}
    h_li[cycle] = {}
    h_li_diff[cycle] = {}
    times[cycle] = {}
    filenames = glob.glob(os.path.join(datapath, f'*ATL06_*_{rgt}{cycle}*_003*.h5'))
    for filename in filenames:
        try:
            for beam in beams:
                Di[filename]=atl06_to_dict(filename,'/'+ beam, index=None, epsg=3031)
                times[cycle][beam] = Di[filename]['data_start_utc']

                # extract h_li and x_atc for that section
                x_atc_tmp = Di[filename]['x_atc']
                h_li_tmp = Di[filename]['h_li']#[ixs]

                # segment ids:
                seg_ids = Di[filename]['segment_id']
                # print(len(seg_ids), len(x_atc_tmp))

                # make a monotonically increasing x vector
                # assumes dx = 20 exactly, so be careful referencing back
                ind = seg_ids - np.nanmin(seg_ids) # indices starting at zero, using the segment_id field, so any skipped segment will be kept in correct location
                x_full = np.arange(np.max(ind)+1) * 20 + x_atc_tmp[0]
                h_full = np.zeros(np.max(ind)+1) + np.NaN
                h_full[ind] = h_li_tmp

                x_atc[cycle][beam] = x_full
                h_li_raw[cycle][beam] = h_full

                # running average smoother / filter
                if smoothed:
                    # BUG FIX: normalize the running mean by the filter length
                    # (smoothing_window_size = 3 samples), not by the unrelated
                    # win_size (51 samples) defined in an earlier cell; the old
                    # code scaled every smoothed height by 3/51.
                    h_smoothed = (1/smoothing_window_size) * np.convolve(filt, h_full)
                    h_smoothed = h_smoothed[int(np.floor(smoothing_window_size/2)):int(-np.floor(smoothing_window_size/2))] # cut off ends
                    h_li[cycle][beam] = h_smoothed

                    # differentiate that section of data
                    h_diff = (h_smoothed[1:] - h_smoothed[0:-1]) / (x_full[1:] - x_full[0:-1])
                else:
                    h_li[cycle][beam] = h_full
                    h_diff = (h_full[1:] - h_full[0:-1]) / (x_full[1:] - x_full[0:-1])
                h_li_diff[cycle][beam] = h_diff

                # plot raw heights, slopes, NaN mask, and segment-id gaps
                axs[0].plot(x_full, h_full)
                axs[1].plot(x_full[1:], h_diff)
                # axs[2].plot(x_atc_tmp[1:] - x_atc_tmp[:-1])
                axs[2].plot(np.isnan(h_full))
                axs[3].plot(seg_ids[1:]- seg_ids[:-1])
        except Exception as e:
            # BUG FIX: the previous bare `except:` referenced an undefined
            # name `e`, raising NameError whenever an error actually occurred.
            print(f'filename=(unknown), exception={e}')
###Output
_____no_output_____
###Markdown
Joey's AttemptThe script below both steps through the entirety of each h_li timeseries and tries a variety of different windows. Outputs include DataFrames with the best correlating lags and shifts for each distance segment of each beam as well as a DataFrame of the velocities for each distance segment of each beam. The "best_window" is picked based on which search window selection results in the highest mean correlation coefficient among each beam and distance segment.
###Code
# Grid search over segment-window lengths: for each candidate window, cross-
# correlate differentiated elevation profiles between consecutive cycles and
# keep the window whose mean correlation coefficient is highest.
n_veloc = len(cycles) - 1
best_ACC=0 #intialize value... Hopefully we get better than that. Could potential put a threshold value below which we call a failed run.
dx = 20 # meters between x_atc points
pass_length=len(x_atc[min(x_atc)][min(x_atc[min(x_atc)])])
best_window=0
best_lags=0
best_shifts=0
best_velocities=0
#x1 = 2.915e7#x_atc[cycles[0]][beams[0]][1000] <-- the very first x value in a file; doesn't work, I think b/c nans # 2.93e7
windows = range(100,pass_length,100) # Set a range of windows you want to try - how many you want to try is really a question of computing time
# Using range(100,pass_length,100) it took ~5 minutes in the middle of the night... 100 gave the best ACC
for segment_length in windows:
    search_width = segment_length # m (for now... Seems like keeping a segment legnth either side is prudent...)
    for veloc_number in range(n_veloc):
        # Consecutive cycle pair and the elapsed time between them (days).
        cycle1 = cycles[veloc_number]
        cycle2 = cycles[veloc_number+1]
        t1_string = times[cycle1]['gt1l'][0].astype(str) #figure out later if just picking the first one is ok
        t1 = Time(t1_string)
        t2_string = times[cycle2]['gt1l'][0].astype(str) #figure out later if just picking the first one is ok
        t2 = Time(t2_string)
        dt = (t2 - t1).jd # difference in julian days
        # One row per along-track step; one column per beam.
        dist_steps=range(int(np.round(pass_length/(3*segment_length))))
        ACCs=pd.DataFrame(columns=beams, index=dist_steps)
        lags=pd.DataFrame(columns=beams, index=dist_steps)
        shifts=pd.DataFrame(columns=beams, index=dist_steps)
        velocities = pd.DataFrame(columns=beams, index=dist_steps)
        for beam in beams:
            x_full_t1 = x_atc[cycle1][beam]
            x_full_t2 = x_atc[cycle2][beam]
            for step in dist_steps:
                # x_full_t1 = x_atc[cycle1][beam]
                # x_full_t2 = x_atc[cycle2][beam]
                # track_min = np.min(x_full_t1)
                # track_length = np.min()
                #fig1, axs = plt.subplots(4,1)

                # cut out small chunk of data at time t1 (first cycle)
                x1=x_full_t1[1]+(step+1)*search_width
                ix_x1 = np.arange(len(x_full_t1))[x_full_t1 >= x1][0]
                ix_x2 = ix_x1 + int(np.round(segment_length/dx))
                x_t1 = x_full_t1[ix_x1:ix_x2]
                h_li1 = h_li_diff[cycle1][beam][ix_x1-1:ix_x2-1] # start 1 index earlier because the data are differentiated

                # cut out a wider chunk of data at time t2 (second cycle)
                ix_x3 = ix_x1 - int(np.round(search_width/dx)) # offset on earlier end by # indices in search_width
                ix_x4 = ix_x2 + int(np.round(search_width/dx)) # offset on later end by # indices in search_width
                x_t2 = x_full_t2[ix_x3:ix_x4]
                h_li2 = h_li_diff[cycle2][beam][ix_x3:ix_x4]
                """plt.figure()
                plt.plot(x_t2, h_li2, 'r')
                plt.plot(x_t1, h_li1, 'k')"""
                """axs[0].plot(x_t2, h_li2, 'r')
                axs[0].plot(x_t1, h_li1, 'k')
                axs[0].set_xlabel('x_atc (m)')
                """
                # correlate old with newer data
                corr = correlate(h_li1, h_li2, mode = 'valid', method = 'direct')
                norm_val = np.sqrt(np.sum(h_li1**2)*np.sum(h_li2**2)) # normalize so values range between 0 and 1
                corr = corr / norm_val
                # lagvec = np.arange( -(len(h_li1) - 1), len(h_li2), 1)# for mode = 'full'
                # lagvec = np.arange( -int(search_width/dx) - 1, int(search_width/dx) +1, 1) # for mode = 'valid'
                lagvec = np.arange(- int(np.round(search_width/dx)), int(search_width/dx) +1,1)# for mode = 'valid'
                shift_vec = lagvec * dx
                # Peak correlation gives the best-fitting lag/shift/velocity.
                ix_peak = np.arange(len(corr))[corr == np.nanmax(corr)][0]
                ACCs.iloc[step,beams.index(beam)]=np.nanmax(corr)
                lags.iloc[step,beams.index(beam)] = lagvec[ix_peak]
                shifts.iloc[step,beams.index(beam)] = shift_vec[ix_peak]
                velocities.iloc[step,beams.index(beam)] = shift_vec[ix_peak]/(dt/365)
                #plt.figure()
                #plt.plot(lagvec,corr)
                """
                axs[1].plot(lagvec,corr)
                axs[1].plot(lagvec[ix_peak],corr[ix_peak], 'r*')
                axs[1].set_xlabel('lag (samples)')
                axs[2].plot(shift_vec,corr)
                axs[2].plot(shift_vec[ix_peak],corr[ix_peak], 'r*')
                axs[2].set_xlabel('shift (m)')
                # plot shifted data
                axs[3].plot(x_t2, h_li2, 'r')
                axs[3].plot(x_t1 - best_shift, h_li1, 'k')
                axs[3].set_xlabel('x_atc (m)')
                axs[0].text(x_t2[100], 0.6*np.nanmax(h_li2), beam)
                axs[1].text(lagvec[5], 0.6*np.nanmax(corr), 'best lag: ' + str(best_lag) + '; corr val: ' + str(np.round(corr[ix_peak],3)))
                axs[2].text(shift_vec[5], 0.6*np.nanmax(corr), 'best shift: ' + str(best_shift) + ' m'+ '; corr val: ' + str(np.round(corr[ix_peak],3)))
                axs[2].text(shift_vec[5], 0.3*np.nanmax(corr), 'veloc of ' + str(np.round(best_shift/(dt/365),1)) + ' m/yr')
                """
                #fig1.suptitle('black = older cycle data, red = newer cycle data to search across')
    # NOTE(review): ACCs/lags/shifts/velocities are re-created for every
    # cycle pair, so only the LAST pair's results are compared/stored here --
    # confirm whether averaging over all pairs was intended.
    if ACCs.mean().mean()>best_ACC:
        best_ACC=ACCs.mean().mean()
        best_window=segment_length
        best_lags=lags
        best_shifts=shifts
        best_velocities=velocities
print("The best window is %i with an average correlation coefficient of %f." % (best_window,best_ACC))
plt.figure(figsize=[13,5])
# Newer cycle's differentiated elevations for the first beam, in red.
plt.plot(x_atc[cycle2][beams[0]][1:-2], h_li_diff[cycle2][beams[0]][1:-1], 'r')
# Overlay each older-cycle segment shifted by its best-fitting offset.
# NOTE(review): segment_length and search_width here are leftovers from the
# LAST iteration of the search loop above, not necessarily best_window --
# confirm that is intended.
for i in best_shifts.index:
    print(i)
    x_full_t1=x_atc[cycle1][beams[0]]
    x1=x_full_t1[1]+(i+1)*search_width
    ix_x1 = np.arange(len(x_full_t1))[x_full_t1 >= x1][0]
    ix_x2 = ix_x1 + int(np.round(segment_length/dx))
    x_t1 = x_full_t1[ix_x1:ix_x2]
    # BUG FIX: use beams[0] (the beam whose coordinates and shifts are
    # plotted) rather than the stale loop variable `beam`, which ends the
    # search loop as 'gt3r' and paired gt1l x-values with another beam's data.
    h_li1 = h_li_diff[cycle1][beams[0]][ix_x1-1:ix_x2-1]
    plt.plot(x_t1 - best_shifts.iloc[i,beams.index(beams[0])], h_li1, 'b')

best_velocities
# NOTE(review): `beam` is whatever the last loop left behind (gt3r); these
# lines recompute pass_length from the shorter of the two cycles' tracks.
x_full_t1 = x_atc[cycle1][beam]
x_full_t2 = x_atc[cycle2][beam]
pass_length=min(len(x_full_t1),len(x_full_t2))
# Expression statement only: this range is built but never assigned or used.
range(int(np.round(pass_length/(3*segment_length))))
###Output
_____no_output_____
###Markdown
what to do about nans? interpolate
###Code
for veloc_number in range(n_veloc):
    # Consecutive cycle pair and the elapsed time between them.
    cycle1 = cycles[veloc_number]
    cycle2 = cycles[veloc_number+1]
    t1_string = times[cycle1]['gt1l'][0].astype(str) #figure out later if just picking the first one is ok
    t1 = Time(t1_string)
    t2_string = times[cycle2]['gt1l'][0].astype(str) #figure out later if just picking the first one is ok
    t2 = Time(t2_string)
    dt = (t2 - t1).jd # difference in julian days

    velocities = {}  # NOTE(review): never filled in this cell
    for beam in beams[0:1]:
        # fig1, axs = plt.subplots(4,1)

        # the data:
        x_full = x_atc[cycle1][beam]
        h_full = h_li[cycle1][beam]

        fig, axs = plt.subplots(2,1)
        axs[0].plot(x_full, h_full)
        # axs[1].plot(x_full, np.isnan(h_full))
        # axs[2].plot(x_full[1:], x_full[1:] - x_full[:-1])

        # try and smooth without filling nans: a 51-sample (1020 m) boxcar;
        # note np.convolve propagates NaNs through the whole window rather
        # than ignoring them.
        win_size = int(np.round(1020 / dx)) # meters / dx; odd multiples of 20 only!
        filt = np.ones(win_size)
        h_smoothed = (1/win_size) * np.convolve(filt, h_full)
        axs[0].plot(x_full, h_smoothed[int(np.floor(win_size/2)):int(-np.floor(win_size/2))], 'k')
###Output
/srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:19: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
|
notebook/ratings-count.ipynb | ###Markdown
Ratings Count
###Code
from pyspark import SparkContext, SparkConf
import collections
# Build a local Spark context and compute a histogram of movie ratings.
conf = SparkConf().setMaster('local').setAppName('RattingHistogram')
sc = SparkContext(conf = conf)
# Each line of u.data is: userID  movieID  rating  timestamp
rdd = sc.textFile('file:////Users/giovanna/Documents/GitHub/pyspark/SparkCourse/ml-100k/u.data')
rdd.take(2)
rdd.count()
# third whitespace-separated field is the rating
ratings = rdd.map(lambda x: x.split()[2])
ratings.take(2)
ratings.count()
result = ratings.countByValue() # action, it returns a python object
result
# python
# sort by rating value and print "rating count" pairs
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.items():
    print("%s %i" % (key, value))
###Output
1 6110
2 11370
3 27145
4 34174
5 21201
###Markdown
--- SDF
###Code
from pyspark.sql import SparkSession
from pyspark.sql import functions as func
from pyspark.sql.types import StructType, StructField, IntegerType, LongType
# Same dataset, DataFrame API: count ratings per movie and show the top 10.
spark = SparkSession.builder.appName("PopularMovies").getOrCreate()
# Create schema when reading u.data
schema = StructType([ \
    StructField("userID", IntegerType(), True), \
    StructField("movieID", IntegerType(), True), \
    StructField("rating", IntegerType(), True), \
    StructField("timestamp", LongType(), True)])
# Load up movie data as dataframe (tab-separated, no header)
moviesDF = spark.read.option("sep", "\t").schema(schema).csv("file:////Users/giovanna/Documents/GitHub/pyspark/SparkCourse/ml-100k/u.data")
# Some SQL-style magic to sort all movies by popularity in one line!
topMovieIDs = moviesDF.groupBy("movieID").count().orderBy(func.desc("count"))
# Grab the top 10
topMovieIDs.show(10)
# Stop the session
spark.stop()
###Output
_____no_output_____ |
calib_image_shift_universal.ipynb | ###Markdown
Let's import the first frame of the video file and show it
###Code
# Pick a video file, read only its first frame, convert it to grayscale
# and display it.
file = get_filenames()
file = file[0] # get just single file instead of list
print('Importing file ', file)
frame = skvideo.io.vread(file, num_frames=1) # import just first frame
frame = rgb2gray(frame[0]) # get element instead of list, make grayscale
plt.figure()
plt.imshow(frame, cmap=plt.cm.gray)
plt.show()
###Output
Importing file Z:/LPMV/Users/Anton-Malovichko/experiments/2019/05/180519_SU8_cantilever_highfps_and_liquid/04_2500fps_5V_200Hz.avi
###Markdown
Compensate angle if its needed
###Code
# Interactively refine the rotation angle until the user accepts it,
# then apply the chosen angle to the frame.
finish=False
angle=0
while finish==False:
    angle, finish=rotate_image(frame, angle)
frame=rotate(frame, angle)
###Output
Is this image fine for you? Chosen angle is 0 degrees
###Markdown
*Detect center of lightspot, show quadrants:*
###Code
# Locate the light-spot centre by thresholding, then display the frame
# split into the four quadrants around that centroid.
centroid=threshold_centroid(frame)
plot_im_w_quadrants(frame, centroid)
###Output
_____no_output_____
###Markdown
Demonstrate how shifted image looks like
###Code
# Demonstrate a 1-pixel lateral shift; the quadrant boundaries stay fixed
# at the original centroid.
transform = AffineTransform(translation=(1, 0))
shifted = warp(frame, transform, mode='constant', preserve_range=True)
plot_im_w_quadrants(shifted, centroid)
###Output
_____no_output_____
###Markdown
Shift images along x axis
###Code
# Build sub-pixel shifted copies of the frame: dx = 0.0, 0.1, ..., 1.0 px.
shifted_im = []
x_shift = np.array([0.1*dx for dx in range(0, 11)]) # generate dx value for linear shift
for dx in x_shift:
    transform = AffineTransform(translation=(dx, 0)) # shift along lateral axis
    shifted_im.append(warp(frame, transform, mode='constant', preserve_range=True))
###Output
_____no_output_____
###Markdown
Calculate the intensities
###Code
# Accumulate the quadrant intensity signals (Iz, Il, Isum) across all
# shifted frames; calc_intensities appends one value per frame.
Il=np.array([])
Iz=np.array([])
Isum=np.array([])
for i in range(len(shifted_im)):
    Iz, Il, Isum = calc_intensities(shifted_im[i], centroid, Iz, Il, Isum)
###Output
_____no_output_____
###Markdown
Show calculated intensity difference vs displacement and get linear fit coefficients of the calibration:
###Code
# Plot intensity difference vs. displacement and extract the linear
# calibration coefficients; k_px_um converts pixels to micrometres.
plot_shift_curves(k_px_um=1.36, Il=Il, Iz=Iz, Isum=Isum, x_shift=x_shift, normalization=False, shift_vs_sig=True)
k, b = calc_calib_line(x_shift=x_shift, k_px_um=1.36, Il=Il, normalization=False, shift_vs_sig=True)
###Output
_____no_output_____ |
examples/preprocess/Preprocess-full.ipynb | ###Markdown
This notebook preprocesses the data extracted from the chess database. Running it with all 170 million positions from the chess database requires at least 8 GB of RAM (on a local machine — for some reason, I can't run it on Google Colab). I used a laptop with an NVMe SSD, an Intel i7-9750h, and 24 GB of DDR4 RAM @ 2666 MHz.
###Code
# 170 million positions at 64 bytes each, expressed in GiB.
total_ram = (170e6 * 64) / (1024 ** 3)
print("If all data were loaded, it would take at least {:.1f} GB of RAM".format(total_ram))
#!pip install chesslab --upgrade
from chesslab.preprocessing import preprocess
# Set download=True to fetch the raw game blocks from Google Drive via
# chesslab; otherwise read them from the local database path.
download=False
#https://drive.google.com/file/d/1XwH0reHwaOA0Tpt0ihJkP_XW99EUhlp9/view?usp=sharing
if download:
    from chesslab.utils import download_7z
    path='./'
    file_id = '1XwH0reHwaOA0Tpt0ihJkP_XW99EUhlp9'
    download_7z(file_id,path)
else:
    path='D:/database/ccrl/'
# Preprocessing parameters: 170 blocks of 1M positions each.
block_size=1000000
blocks=170
path_files= path
start_name= 'chess'
min_elo= 0
data_name= 'ccrl_states_full'
labels_name= 'ccrl_results_full'
elo_filter= 0 #1 = mean, 2 = min
nb_game_filter= 40 # if set to 0 the filter is not applied
delete_duplicate=True
delete_draws= True
delete_both_winners = True
delete_eaten=False
undersampling=False
# Run the chesslab preprocessing pipeline with the settings above.
preprocess(
    block_size= block_size,
    blocks= blocks,
    path= path_files,
    start_name= start_name,
    min_elo= min_elo,
    data_name= data_name,
    labels_name= labels_name,
    elo_filter= elo_filter,
    nb_game_filter= nb_game_filter,
    delete_eaten=delete_eaten,
    delete_duplicate=delete_duplicate,
    delete_draws= delete_draws,
    delete_both_winners = delete_both_winners,
    undersampling=undersampling)
###Output
Reading blocks
file: 1
file: 2
file: 3
file: 4
file: 5
file: 6
file: 7
file: 8
file: 9
file: 10
file: 11
file: 12
file: 13
file: 14
file: 15
file: 16
file: 17
file: 18
file: 19
file: 20
file: 21
file: 22
file: 23
file: 24
file: 25
file: 26
file: 27
file: 28
file: 29
file: 30
file: 31
file: 32
file: 33
file: 34
file: 35
file: 36
file: 37
file: 38
file: 39
file: 40
file: 41
file: 42
file: 43
file: 44
file: 45
file: 46
file: 47
file: 48
file: 49
file: 50
file: 51
file: 52
file: 53
file: 54
file: 55
file: 56
file: 57
file: 58
file: 59
file: 60
file: 61
file: 62
file: 63
file: 64
file: 65
file: 66
file: 67
file: 68
file: 69
file: 70
file: 71
file: 72
file: 73
file: 74
file: 75
file: 76
file: 77
file: 78
file: 79
file: 80
file: 81
file: 82
file: 83
file: 84
file: 85
file: 86
file: 87
file: 88
file: 89
file: 90
file: 91
file: 92
file: 93
file: 94
file: 95
file: 96
file: 97
file: 98
file: 99
file: 100
file: 101
file: 102
file: 103
file: 104
file: 105
file: 106
file: 107
file: 108
file: 109
file: 110
file: 111
file: 112
file: 113
file: 114
file: 115
file: 116
file: 117
file: 118
file: 119
file: 120
file: 121
file: 122
file: 123
file: 124
file: 125
file: 126
file: 127
file: 128
file: 129
file: 130
file: 131
file: 132
file: 133
file: 134
file: 135
file: 136
file: 137
file: 138
file: 139
file: 140
file: 141
file: 142
file: 143
file: 144
file: 145
file: 146
file: 147
file: 148
file: 149
file: 150
file: 151
file: 152
file: 153
file: 154
file: 155
file: 156
file: 157
file: 158
file: 159
file: 160
file: 161
file: 162
file: 163
file: 164
file: 165
file: 166
file: 167
file: 168
file: 169
file: 170
================================================================================
Selecting 40 game states per game
total of different games: 749247
total of different states: 29969880
total of different results: 29969880
================================================================================
deleting duplicates
total of different states: 17290793
total of different results: 17290793
================================================================================
deleting games with both winners
total of different states: 17164805
total of different results: 17164805
================================================================================
white total wins: 9615300
black total wins: 7549505
IB=1.27
saving files
files saved
Elapsed time: 104s = 1.7m
|
week-4/week-4-2-text-classification-homework.ipynb | ###Markdown
Week 4-2: Text classificationFor this assignment you will build a classifier that figures out the main topics of a bill, from its title.
###Code
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn import metrics
%matplotlib inline
###Output
_____no_output_____
###Markdown
1. Create document vectors
###Code
# Load up bills.csv This is a list of thousands of bill titles from the California legislature,
# and their subject classifications
df = pd.read_csv('week-4/bills.csv', encoding='latin-1')
df.head()
# Vectorize these suckers with the CountVectorizer, removing stopwords
# (min_df=2 drops words that appear in fewer than two titles)
vectorizer = CountVectorizer(stop_words='english', min_df=2)
matrix = vectorizer.fit_transform(df.text)
# How many different features do we have?
len(vectorizer.get_feature_names())
# What words correspond to the first 20 features?
vectorizer.get_feature_names()[:20]
###Output
_____no_output_____
###Markdown
2. Build a classifier
###Code
# Make the 'topic' column categorical, so we can print a pretty confusion matrix later
df['topic'] =df['topic'].astype('category')
# Glue the topics back together with the document vectors, into one dataframe
vectors = pd.DataFrame(matrix.toarray(), columns=vectorizer.get_feature_names())
vectors_and_topic = pd.concat([df['topic'], vectors], axis=1)
# Now split 20% of combined data into a test set
train, test = train_test_split(vectors_and_topic, test_size=0.2)
# Build a decision tree on the training data (column 0 is the topic label)
x_train = train.iloc[:, 1:].values
y_train = train.iloc[:, 0].values
dt = tree.DecisionTreeClassifier()
dt.fit(x_train, y_train)
# Evaluate the tree on the test data and print out the accuracy
x_test = test.iloc[:, 1:].values
y_test = test.iloc[:, 0].values
y_test_pred = dt.predict(x_test)
metrics.accuracy_score(y_test_pred, y_test)
# Now print out a nicely labelled confusion matrix.
# BUG FIX: sklearn's confusion_matrix expects (y_true, y_pred); the original
# call passed (y_test_pred, y_test), which transposed the matrix relative to
# the "True ..." row labels and "Guessed ..." column labels below.
truecats = "True " + df['topic'].cat.categories
predcats = "Guessed " + df['topic'].cat.categories
pd.DataFrame(metrics.confusion_matrix(y_test, y_test_pred,
                                      labels=df['topic'].cat.categories),
             columns=predcats, index=truecats)
###Output
_____no_output_____
###Markdown
What's a case -- an entry in the matrix -- where the classifier made a particularly large number of errors? Can you guess why? Looking at this matrix, 7 documents were guessed "Budget, Spending, and Taxes" when they're actually "Housing and Property." It's possible these documents discussed property taxes, which caused them to be incorrectly classified. Bonus: try it on new data. How do we apply this to other bill titles? Ones that weren't originally in the test or training set?
###Code
# Here are some other bills
new_titles = [
"Public postsecondary education: executive officer compensation.",
"An act to add Section 236.3 to the Education code, related to the pricing of college textbooks.",
"Political Reform Act of 1974: campaign disclosures.",
"An act to add Section 236.3 to the Penal Code, relating to human trafficking."]
###Output
_____no_output_____
###Markdown
Your assighnment is to vectorize these titles, and predict their subject using the classifier we built.The challenge here is to get these new documents encoded with the same features as the classifier expects. That is, we could just run them through `CountVectorizer` but then get_feature_names() would give us a different set of coluns, because the vocabulary of these documents is different.The solution is to use the `vocabulary` parameter of `CountVectorizer` like this:
###Code
# Make a new vectorizer that maps the same words to the same feature positions as the old vectorizer
# (fixing vocabulary= guarantees the new matrix has identical columns)
new_vectorizer = CountVectorizer(stop_words='english', vocabulary=vectorizer.get_feature_names())
# Now use this new_vectorizer to fit the new docs
new_matrix = new_vectorizer.fit_transform(new_titles)
new_vectors = pd.DataFrame(new_matrix.toarray(), columns=new_vectorizer.get_feature_names())
# Predict the topics of the new documents, using our pre-existing classifier
dt.predict(new_vectors.values)
###Output
_____no_output_____
###Markdown
Week 4-2: Text classificationFor this assignment you will build a classifier that figures out the main topics of a bill, from its title.Adapted from an [assignment in the 2015 course](https://github.com/datapolitan/lede_algorithms/blob/master/class5_1/bill_classifier.py) by Richard Dunks and Chase Davis, with permission.
###Code
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn import metrics
%matplotlib inline
###Output
_____no_output_____
###Markdown
1. Create document vectors
###Code
# Load up bills.csv This is a list of thousands of bill titles from the California legislature,
# and their subject classifications
# Vectorize these suckers with the CountVectorizer, removing stopwords
# How many different features do we have?
# What words correspond to the first 20 features?
###Output
_____no_output_____
###Markdown
2. Build a classifier
###Code
# Make the 'topic' column categorical, so we can print a pretty confusion matrix later
# Glue the topics back together with the document vectors, into one dataframe
# Now split 20% of combined data into a test set
# Build a decision tree on the training data
# Evaluate the tree on the test data and print out the accuracy
# Now print out a nicely labelled confusion natrix
###Output
_____no_output_____
###Markdown
What's a case -- an entry in the matrix -- where the classifier made a particularly large number of errors? Can you guess why? Looking at this matrix, 7 documents were guessed "Budget, Spending, and Taxes" when they're actually "Housing and Property." It's possible these documents discussed property taxes, which caused them to be incorrectly classified. Bonus: try it on new data. How do we apply this to other bill titles? Ones that weren't originally in the test or training set?
###Code
# Here are some other bills: unseen titles to classify with the trained tree
new_titles = [
    "Public postsecondary education: executive officer compensation.",
    "An act to add Section 236.3 to the Education code, related to the pricing of college textbooks.",
    "Political Reform Act of 1974: campaign disclosures.",
    "An act to add Section 236.3 to the Penal Code, relating to human trafficking."]
###Output
_____no_output_____
###Markdown
Your assignment is to vectorize these titles, and predict their subject using the classifier we built. The challenge here is to get these new documents encoded with the same features as the classifier expects. That is, we could just run them through `CountVectorizer`, but then get_feature_names() would give us a different set of columns, because the vocabulary of these documents is different. The solution is to use the `vocabulary` parameter of `CountVectorizer` like this:
###Code
# Make a new vectorizer that maps the same words to the same feature positions as the old vectorizer
# NOTE(review): `vectorizer` must exist -- complete the earlier vectorization cell first.
new_vectorizer = CountVectorizer(stop_words='english', vocabulary=vectorizer.get_feature_names())
# Now use this new_vectorizer to fit the new docs
# Predict the topics of the new documents, using our pre-existing classifier
###Output
_____no_output_____ |
Workshop/RNN_101b.ipynb | ###Markdown
RNN 101 bOn RNN 101, Colab quit after two slices. Here, run just the third slice.
###Code
# Mount Google Drive and point at the two FASTA input files there.
from google.colab import drive
PATH='/content/drive/'
drive.mount(PATH)
DATAPATH=PATH+'My Drive/data/'
PC_FILENAME = DATAPATH+'pcRNA.fasta'
NC_FILENAME = DATAPATH+'ncRNA.fasta'
# LOCAL
#PC_FILENAME = 'pcRNA.fasta'
#NC_FILENAME = 'ncRNA.fasta'
import numpy as np
import pandas as pd
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import ShuffleSplit
from keras.models import Sequential
from keras.layers import Bidirectional
from keras.layers import GRU
from keras.layers import Dense
from sklearn.model_selection import StratifiedKFold
import time
# Use 32-bit floats throughout Keras.
tf.keras.backend.set_floatx('float32')
# Hyperparameters: training schedule, number of CV splits, K-mer size,
# embedding width, and the prefix used for saved model files.
EPOCHS=100
SPLITS=1
K=3
EMBED_DIMEN=16
FILENAME='RNN101'
###Output
_____no_output_____
###Markdown
Load and partition sequences
###Code
# Assume file was preprocessed to contain one line per seq.
# Prefer Pandas dataframe but df does not support append.
# For conversion to tensor, must avoid python lists.
def load_fasta(filename, label):
    """Read a preprocessed (one line per sequence) FASTA file into a DataFrame.

    Defline rows (starting with '>') are skipped.  Returns the columns:
    seqnum (1-based), class (the given label), sequence, seqlen.
    """
    DEFLINE = '>'
    records = []
    seqnum = 0
    with open(filename, 'r') as infile:
        for line in infile:
            if line[0] != DEFLINE:
                seq = line.rstrip()
                seqnum += 1  # first seqnum is 1
                records.append((seqnum, label, seq, len(seq)))
    return pd.DataFrame(records,
                        columns=['seqnum', 'class', 'sequence', 'seqlen'])
# Split into train/test stratified by sequence length.
def sizebin(df):
    """Bucket each row's seqlen into one of six length bins (labels 0-5)."""
    edges = [0, 1000, 2000, 4000, 8000, 16000, np.inf]
    return pd.cut(df["seqlen"], bins=edges, labels=list(range(6)))
def make_train_test(data):
    """Split data 80/20 with a fixed seed, stratified on sizebin() length bins."""
    from sklearn.model_selection import StratifiedShuffleSplit
    # StratifiedShuffleSplit stratifies on its y argument; passing the
    # sequence-length bins (instead of class labels) balances lengths
    # across the two splits.
    length_bins = sizebin(data)
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=37863)
    train_idx, test_idx = next(splitter.split(data, length_bins))
    return (data.iloc[train_idx], data.iloc[test_idx])
def separate_X_and_y(data):
    """Split the sequence table into features X and the 'class' label frame y."""
    non_feature_cols = ['class', 'seqnum', 'seqlen']
    features = data.drop(columns=non_feature_cols)
    labels = data.loc[:, ['class']].copy()
    return (features, labels)
def make_slice(data_set, min_len, max_len):
    """Keep only rows whose seqlen lies in the half-open range [min_len, max_len)."""
    print("original " + str(data_set.shape))
    kept = data_set.drop(data_set[data_set['seqlen'] < min_len].index)
    print("no short " + str(kept.shape))
    kept = kept.drop(kept[kept['seqlen'] >= max_len].index)
    print("no long, no short " + str(kept.shape))
    return kept
def make_kmer_table(K):
    """Map every DNA K-mer to a positive integer id; 'N'*K maps to 0 (padding).

    Ids run 1..4**K in lexicographic A < C < G < T order.
    """
    kmers = ['']
    for _ in range(K):
        kmers = [prefix + base for prefix in kmers for base in 'ACGT']
    table = {'N' * K: 0}
    for value, mer in enumerate(kmers, start=1):
        table[mer] = value
    return table
# Module-level lookup shared by strings_to_vectors below.
KMER_TABLE=make_kmer_table(K)
def strings_to_vectors(data,uniform_len):
    """Encode each row's 'sequence' as a list of K-mer ids, zero-padded.

    Uses the module globals K and KMER_TABLE.  A sequence of length L
    yields L-K+1 ids followed by padding zeros, giving uniform_len entries
    per row (for L < uniform_len).  Returns a 2D DataFrame.
    """
    all_seqs=[]
    for seq in data['sequence']:
        i=0
        seqlen=len(seq)
        kmers=[]
        # slide a window of width K along the sequence
        while i < seqlen-K+1:
            kmer=seq[i:i+K]
            i += 1
            value=KMER_TABLE[kmer]
            kmers.append(value)
        pad_val=0  # 0 is the 'N'*K padding id in KMER_TABLE
        while i < uniform_len:
            kmers.append(pad_val)
            i += 1
        all_seqs.append(kmers)
    pd2d=pd.DataFrame(all_seqs)
    return pd2d # return 2D dataframe, uniform dimensions
def build_model(maxlen,dimen):
    """Build and compile the bidirectional SimpleRNN binary classifier.

    maxlen: padded sequence length.  dimen: only used in the RNN
    input_shape; the embedding width itself comes from the global
    EMBED_DIMEN.
    """
    vocabulary_size=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN'
    act="sigmoid"
    dt='float32'
    neurons=16
    rnn = keras.models.Sequential()
    # integer K-mer ids -> dense vectors
    embed_layer = keras.layers.Embedding(
        vocabulary_size,EMBED_DIMEN,input_length=maxlen);
    rnn1_layer = keras.layers.Bidirectional(
        keras.layers.SimpleRNN(neurons, return_sequences=True, dropout=0.50,
                               input_shape=[maxlen,dimen]))
    # NOTE(review): return_sequences=True here means the Dense stack and the
    # final sigmoid are applied per time step, so the model emits one value
    # per position rather than one per sequence -- confirm this is intended.
    rnn2_layer = keras.layers.Bidirectional(
        keras.layers.SimpleRNN(neurons, dropout=0.50, return_sequences=True))
    dense1_layer = keras.layers.Dense(neurons,activation=act,dtype=dt)
    dense2_layer = keras.layers.Dense(neurons,activation=act,dtype=dt)
    output_layer = keras.layers.Dense(1,activation=act,dtype=dt)
    rnn.add(embed_layer)
    rnn.add(rnn1_layer)
    rnn.add(rnn2_layer)
    rnn.add(dense1_layer)
    rnn.add(dense2_layer)
    rnn.add(output_layer)
    bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
    print("COMPILE")
    rnn.compile(loss=bc, optimizer="Adam",metrics=["accuracy"])
    return rnn
def do_cross_validation(X,y,eps,maxlen,dimen):
    """Train and evaluate a fresh model on each of SPLITS shuffle splits.

    Plots the learning curves per fold, prints the validation accuracy,
    and returns the model from the last fold.
    """
    cv_scores = []
    fold=0
    splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.2, random_state=37863)
    rnn2=None
    for train_index,valid_index in splitter.split(X):
        X_train=X[train_index] # use iloc[] for dataframe
        y_train=y[train_index]
        X_valid=X[valid_index]
        y_valid=y[valid_index]
        print("BUILD MODEL")
        # a fresh model per fold so folds do not share weights
        rnn2=build_model(maxlen,dimen)
        print("FIT")
        # this is complaining about string to float
        start_time=time.time()
        history=rnn2.fit(X_train, y_train, # batch_size=10, default=32 works nicely
                         epochs=eps, verbose=1, # verbose=1 for ascii art, verbose=0 for none
                         validation_data=(X_valid,y_valid) )
        end_time=time.time()
        elapsed_time=(end_time-start_time)
        fold += 1
        print("Fold %d, %d epochs, %d sec"%(fold,eps,elapsed_time))
        # plot loss/accuracy learning curves for this fold
        pd.DataFrame(history.history).plot(figsize=(8,5))
        plt.grid(True)
        plt.gca().set_ylim(0,1)
        plt.show()
        # metrics_names[1] is the accuracy metric configured in build_model
        scores = rnn2.evaluate(X_valid, y_valid, verbose=0)
        print("%s: %.2f%%" % (rnn2.metrics_names[1], scores[1]*100))
        # What are the other metrics_names?
        # Try this from Geron page 505:
        # np.mean(keras.losses.mean_squared_error(y_valid,y_pred))
        cv_scores.append(scores[1] * 100)
    print()
    print("Validation core mean %.2f%% (+/- %.2f%%)" % (np.mean(cv_scores), np.std(cv_scores)))
    return rnn2
def make_kmers(MINLEN,MAXLEN,train_set):
    """Convert a sliced training table into (K-mer id matrix, label array).

    MINLEN is unused here; MAXLEN sets the padded row length.  Returns a
    numpy array of K-mer ids and a numpy array of class labels.
    """
    (X_train_all,y_train_all)=separate_X_and_y(train_set)
    # The returned values are Pandas dataframes.
    # print(X_train_all.shape,y_train_all.shape)
    # (X_train_all,y_train_all)
    # y: Pandas dataframe to Python list.
    # y_train_all=y_train_all.values.tolist()
    # The sequences lengths are bounded but not uniform.
    X_train_all
    # debug output: inspect the first sequence before encoding
    print(type(X_train_all))
    print(X_train_all.shape)
    print(X_train_all.iloc[0])
    print(len(X_train_all.iloc[0]['sequence']))
    # X: List of string to List of uniform-length ordered lists of K-mers.
    X_train_kmers=strings_to_vectors(X_train_all,MAXLEN)
    # X: true 2D array (no more lists)
    X_train_kmers.shape
    print("transform...")
    # From pandas dataframe to numpy to list to numpy
    print(type(X_train_kmers))
    num_seqs=len(X_train_kmers)
    tmp_seqs=[]
    for i in range(num_seqs):
        kmer_sequence=X_train_kmers.iloc[i]
        tmp_seqs.append(kmer_sequence)
    X_train_kmers=np.array(tmp_seqs)
    tmp_seqs=None  # release the intermediate list
    print(type(X_train_kmers))
    print(X_train_kmers)
    labels=y_train_all.to_numpy()
    return (X_train_kmers,labels)
print("Load data from files.")
nc_seq=load_fasta(NC_FILENAME,0)
pc_seq=load_fasta(PC_FILENAME,1)
all_seq=pd.concat((nc_seq,pc_seq),axis=0)
print("Put aside the test portion.")
(train_set,test_set)=make_train_test(all_seq)
# Do this later when using the test data:
# (X_test,y_test)=separate_X_and_y(test_set)
nc_seq=None
pc_seq=None
all_seq=None
print("Ready: train_set")
train_set
###Output
Load data from files.
Put aside the test portion.
Ready: train_set
###Markdown
Len 200-1Kb
###Code
# Short-sequence slice: 200 <= seqlen < 1000.
MINLEN=200
MAXLEN=1000
# Guarded off with `if False`; flip to True to run this slice.
if False:
    print("Working on full training set, slice by sequence length.")
    print("Slice size range [%d - %d)"%(MINLEN,MAXLEN))
    subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y
    print ("Sequence to Kmer")
    (X_train,y_train)=make_kmers(MINLEN,MAXLEN,subset)
    print ("Compile the model")
    model=build_model(MAXLEN,EMBED_DIMEN)
    print(model.summary()) # Print this only once
    print ("Cross valiation")
    model1=do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN)
    model1.save(FILENAME+'.short.model')
###Output
_____no_output_____
###Markdown
Len 1K-2Kb
###Code
# Medium-sequence slice: 1000 <= seqlen < 2000.
MINLEN=1000
MAXLEN=2000
# Guarded off with `if False`; flip to True to run this slice.
if False:
    print("Working on full training set, slice by sequence length.")
    print("Slice size range [%d - %d)"%(MINLEN,MAXLEN))
    subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y
    print ("Sequence to Kmer")
    (X_train,y_train)=make_kmers(MINLEN,MAXLEN,subset)
    print ("Compile the model")
    model=build_model(MAXLEN,EMBED_DIMEN)
    print(model.summary()) # Print this only once
    print ("Cross valiation")
    model2=do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN)
    model2.save(FILENAME+'.medium.model')
###Output
_____no_output_____
###Markdown
Len 2K-3Kb
###Code
# Long-sequence slice: 2000 <= seqlen < 3000.  This is the slice this
# notebook actually runs (the shorter slices were done previously).
MINLEN=2000
MAXLEN=3000
print("Working on full training set, slice by sequence length.")
print("Slice size range [%d - %d)"%(MINLEN,MAXLEN))
subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y
print ("Sequence to Kmer")
(X_train,y_train)=make_kmers(MINLEN,MAXLEN,subset)
print ("Compile the model")
model=build_model(MAXLEN,EMBED_DIMEN)
print(model.summary()) # Print this only once
print ("Cross valiation")
model3=do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN)
model3.save(FILENAME+'.long.model')
#model1.save(FILENAME+'.short.model')
#abc
#efg
#hij
###Output
_____no_output_____ |
Day4/.ipynb_checkpoints/encoding_correction_1-checkpoint.ipynb | ###Markdown
Making initial imports
###Code
# !pip install langdetect
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
# NLP library imports
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('stopwords')
###Output
[nltk_data] Downloading package punkt to
[nltk_data] C:\Users\Ellio\AppData\Roaming\nltk_data...
[nltk_data] Package punkt is already up-to-date!
[nltk_data] Downloading package stopwords to
[nltk_data] C:\Users\Ellio\AppData\Roaming\nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
Loading data scraped from TrustPilot The dataframe is composed of the comments scraped from TrustPilot at [this page](https://fr.trustpilot.com/review/www.centerparcs.fr/fr-fr).
###Code
# Load the scraped reviews and keep/rename the columns of interest.
df = pd.read_json('tripadvisor2 (1).json')
df.head()
df1 = df[['hotel_name','published_date','rating','review','review_language','title','trip_date']]
df1.columns = ['hotel_name','published_date','rating','review','language','title','trip_date']
df1
# The scraper stored reviews with their JSON unicode escapes as literal
# text (e.g. the six characters \u00e9 instead of "é").  Instead of
# repeating thirteen .replace() chains for each of the three columns,
# define the mapping once and apply it everywhere.
_ESCAPE_TO_CHAR = {
    r"\u00e8": "è", r"\u00e9": "é", r"\u00ea": "ê", r"\u00eb": "ë",
    r"\u00fb": "û", r"\u00f9": "ù", r"\u00e0": "à", r"\u00e2": "â",
    r"\u00f4": "ô", r"\u00ee": "î", r"\u00ef": "ï",
    r"\u2019": "'", r"\'": "'",
}

def _fix_escapes(text):
    """Return *text* with every literal escape sequence decoded."""
    for escape, char in _ESCAPE_TO_CHAR.items():
        text = text.replace(escape, char)
    return text

# Apply the same repair to every affected column.
for _col in ['review', 'title', 'trip_date']:
    df1[_col] = df1[_col].apply(_fix_escapes)

# Bare expressions: display the repaired columns in the notebook.
df1.review
df1.title
df1['review']
###Output
_____no_output_____
###Markdown
Shapping dataFrame **Making some modifications from raw data**
###Code
# Deleting '\n' from content and name columns
#clean_n = lambda x: ' '.join(x.split()).lower()
#for col in ['name', 'content']:
#    df[col] = df[col].apply(clean_n)
# Setting date as index
# NOTE(review): published_date holds date strings here, not datetimes --
# time-based operations later (e.g. resample) need a DatetimeIndex.
df1.set_index('published_date', inplace = True)
# Displaying result
df1.head()
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis
###Code
# Even if not really useful for this case, a good habit is to start with df.describe() and df.info() when you start working on a dataFrame
# info() shows column dtypes, non-null counts and memory usage.
df1.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Index: 663 entries, 27 septembre 2018 to November 1, 2007
Data columns (total 6 columns):
hotel_name 663 non-null object
rating 663 non-null int64
review 663 non-null object
language 663 non-null object
title 663 non-null object
trip_date 663 non-null object
dtypes: int64(1), object(5)
memory usage: 36.3+ KB
###Markdown
**Studying possible duplicates**- We're here looking at names, which are pseudonyms. Be careful: different people could use the same ones. The date is a good indicator of whether the same person left 2 comments.
###Code
# Count reviewers and inspect those who posted more than once.
print('Number of comments', df.reviewer_id.count())
print('Number of unique names: ', df.reviewer_id.nunique())
# value_counts() sorts descending, so the first sum(counts >= 2) entries
# are exactly the reviewer ids appearing at least twice.
double = df.reviewer_id.value_counts().index.tolist()[:sum(df.reviewer_id.value_counts()>=2)]
for name in double:
    print('\n',name)
    date_double = df[df.reviewer_id==name].index.tolist()
    content_double = df[df.reviewer_id==name].review.tolist()
    # print each (date, review) pair for this repeat reviewer
    [print(date,content) for date,content in zip(date_double,content_double)]
###Output
_____no_output_____
###Markdown
You might find interesting things in your dataset. For example, it is fun here to see **Peltier**'s comments and how he changed his mind:- April 14th 13:41 : "formule intéressante pour de courtes vacances"- April 14th 20:33 : "pas de réseau et deux chaines tv manquantes" **Rapid overview of global rating**
###Code
# Quick look at the overall rating: mean value plus a bar plot of the
# rating frequency distribution.
# Average rating value
print('Average rating is : {}'.format(round(df1.rating.mean(),2)))
# Plotting rating distribution
X_ratings = df1.rating.value_counts()
ax = sns.barplot(X_ratings.index,X_ratings,alpha=0.8)
ax.set(xlabel='Ratings',ylabel='Frequencies',title='Frequencies of ratings over the {} reviews'.format(df1.shape[0]))
plt.show()
###Output
Average rating is : 3.45
###Markdown
**Distribution of comments length**
###Code
# Making the plot: distribution of review lengths in characters.
x = plt.figure(figsize=(12,5))
sns.distplot(df1['review'].apply(len))
plt.title('Characters distributions')
plt.xticks(range(0,2000,250))
plt.show()
# Repartition of reviews dates
# NOTE(review): resample('M') requires a DatetimeIndex, but published_date
# was set as the index from raw strings -- confirm it is converted with
# pd.to_datetime first, otherwise this line raises.
df1['review'].resample('M').count().plot()
###Output
_____no_output_____
###Markdown
Pre-Processing 1 : Filters for punctuation and specific characters
###Code
# creating a list with all reviews
all_reviews = df1.review.tolist()
# transforming comments into lower-case text
lower_reviews = [review.lower() for review in all_reviews]
# replacing each punctuation/special character with a space
characters_to_remove = ["@", "/", "#", ".", ",", "!", "?", "(", ")", "-", "_","’","'", "\"", ":"]
transformation_dict = {initial:" " for initial in characters_to_remove}
no_punctuation_reviews = [review.translate(str.maketrans(transformation_dict)) for review in lower_reviews]
# removing accents: map each accented letter to its plain equivalent
with_accent = ['é', 'è','ê','ë', 'à','â','ô','û','ù','î','ï']
without_accent = ['e', 'e','e','e', 'a','a','o','u','u','i','i']
transformation_dict = {before:after for before, after in zip(with_accent, without_accent)}
no_accent_reviews = [review.translate(str.maketrans(transformation_dict)) for review in no_punctuation_reviews]
# Displaying some results: original vs. fully cleaned text
for i in range(5):
    print(all_reviews[i])
    print(no_accent_reviews[i])
    print('\n')
print('\n')
def number_of_unique_words(list_of_reviews):
    """Return the number of distinct tokens across a list of review strings.

    Bug fix: reviews are now joined with a space. The original ''.join
    glued the last word of one review to the first word of the next,
    creating spurious tokens and inflating the unique-word count.
    """
    # Concatenating all reviews from the list, space-separated
    all_words = ' '.join(list_of_reviews)
    # Tokenizing with NLTK; a set keeps only distinct tokens
    unique_tokens = set(word_tokenize(all_words))
    # Returning the number of distinct tokens
    return len(unique_tokens)
# Vocabulary size after each cleaning stage — the count should shrink
# (or stay equal) at every step as variants collapse together.
print(number_of_unique_words(all_reviews))
print(number_of_unique_words(lower_reviews))
print(number_of_unique_words(no_punctuation_reviews))
print(number_of_unique_words(no_accent_reviews))
###Output
7732
###Markdown
Pre-processing 2 : Tokenization **Doing it in two different ways**
###Code
# Using a regular expression: split each review on runs of whitespace.
# Fix: use a raw string for the regex — '\s' in a plain string is an invalid
# escape sequence (DeprecationWarning, a SyntaxWarning from Python 3.12).
tokenized_reviews_re = [re.split(r'\s+', review) for review in no_accent_reviews]
# Using the NLTK tokenizer for comparison
tokenized_reviews_nltk = [word_tokenize(review) for review in no_accent_reviews]
###Output
_____no_output_____
###Markdown
**And making a comparison of results**
###Code
# Making a comparison between both of them.
# NOTE(review): len() here is the number of reviews (663 for both, by
# construction) — it does not compare token counts or tokenization quality.
print("With NLTK library : {}".format(len(tokenized_reviews_nltk)))
print("With RegEx library : {}".format(len(tokenized_reviews_re)))
###Output
With NLTK library : 663
With RegEx library : 663
###Markdown
**Try to analyze the next lines of code and to understand the difference between both of them** Here is an original review
###Code
# Choosing a single review (by index) to inspect in its original form
n_review = 10
print(all_reviews[n_review])
###Output
Hi- we are on our way home following a week at Centre Parcs. Whilst we had a good family holiday, we were very disappointed with the resort. First issue is the cleanliness- it's really dirty. We booked two cottages with one a premium lakeside house. Whilst some of the furnishings were new, the kitchen was filthy with ingrained dirt. The house looked like it hadn't been properly cleaned in years. All cooking equipment was also very old and crappy- It put us off cooking inside. Outside we had a great view but it was also filthy. Thick cobwebs and dirt all over the verander and decking. Decking was also very dangerous as it was obviously not maintained and there were huge gaps where the chairs fell down resulting in numerous injuries over the week. Our children had a great time in he pool but it was so old and In terrible condition. Nothing like the parcs at Woburn and Elvden.The maintaince, health and safety and cleanliness were really poor at the resort. However the location was beautiful and in spite of all of it we made sure we had a good time and used many of the facilities on offer.
###Markdown
And here are the tokenized ones
###Code
# Print the same review tokenized both ways, side by side
print(tokenized_reviews_nltk[n_review])
print(tokenized_reviews_re[n_review])
# We select the NLTK tokenization, which seems more accurate
# (note: the assignment uses the NLTK list, i.e. the first one printed above)
tokenized_reviews = tokenized_reviews_nltk
###Output
_____no_output_____
###Markdown
Pre-processing 3 : Stopword removing
###Code
# Using the NLTK stopword list as the base set of words to delete.
# NOTE(review): the French list is used here although the sample review
# printed earlier is in English — confirm the corpus language.
stopW = stopwords.words('french')
# Customizing it with our needs
stopW += ['les', 'a', 'tout']
# Hoist the list into a set once: membership tests inside the nested
# comprehension become O(1) instead of O(len(stopW)) per token.
stopW_set = set(stopW)
# Keep, per review, only the tokens that are not stopwords
stopword_reviews = [[token for token in review if token not in stopW_set] for review in tokenized_reviews]
###Output
_____no_output_____
###Markdown
Some visualization about what we've done
###Code
def plot_frequent_words(list_of_words):
    """Bar-plot the 20 most frequent words in *list_of_words*."""
    dist = nltk.FreqDist(list_of_words)
    # Call most_common(20) once instead of twice (it re-sorts on each call)
    top_20 = dist.most_common(20)
    X = [nb[1] for nb in top_20]
    y = [nb[0] for nb in top_20]
    # Keyword x/y: positional data args to seaborn barplot are deprecated
    ax = sns.barplot(x=X, y=y)
    ax.set(xlabel='Word frequencies',ylabel='Words',title='Most common words in the corpus')
    plt.show()
# Making a first plot with the original, uncleaned data.
# Idiom: a flattening comprehension replaces the nested append loop.
all_words = [word for review in all_reviews for word in review.split()]
plot_frequent_words(all_words)
print(len(all_words))
# And making the same plot with the cleaned, tokenized data
# (stopword_reviews is already a list of token lists — no split needed)
all_words = [word for review in stopword_reviews for word in review]
plot_frequent_words(all_words)
print(len(all_words))
###Output
_____no_output_____
###Markdown
...Much better ! Isn't it ? TO DO : Now you can apply all of these methods to your own DataFrame**The purpose is to create a second column with reviews content but processed and tokenized** BONUS : A little exercise about RegEx **Try some patterns on the next strings**You can use the different functions- re.split() : to split my_string on the pattern and print the result.- re.findall() : find all the occurrences matching the pattern in the total string.
###Code
import re
my_string = "Let's write RegEx!"
# Try each pattern in turn by uncommenting it
PATTERN = r"\s+"
# PATTERN = r"[a-z]"
# PATTERN = r"\w"
# PATTERN = r"\w+"
# NOTE: in a notebook only the last expression is displayed, so the
# findall result below is computed but never shown — wrap it in print()
# if you want to see both.
re.findall(PATTERN, my_string)
re.split(PATTERN, my_string)
###Output
_____no_output_____
###Markdown
**Given the table show in course, try to make the following match**
###Code
# Fill-in-the-blank exercise: replace each ____ with the right pattern or
# re function. This cell is intentionally incomplete and will not run as-is.
# Write a pattern to match sentence endings: sentence_endings
sentence_endings = r"[___]"
# Split my_string on sentence endings and print the result
print(re.____(____, ____))
# Find all capitalized words in my_string and print the result
capitalized_words = r"[___]\w+"
print(re.____(____, ____))
# Split my_string on spaces and print the result
spaces = r"___"
print(re.____(____, ____))
# Find all digits in my_string and print the result
digits = r"___"
print(re.____(____, ____))
###Output
_____no_output_____
###Markdown
writing a new file with the cleaned data
###Code
# Persist the cleaned DataFrame.
# NOTE(review): hard-coded user-specific Windows paths — these only work on
# the original author's machine; also the file written ("..._bis.csv") is
# not the file read back on the next line — confirm which file is intended.
df1.to_csv (r'C:\Users\Ellio\Desktop\tripadvisor_cleaned_data_bis.csv', index=False)
df2 = pd.read_csv(r'C:\Users\Ellio\Desktop\tripadvisor_cleaned_data.csv')
df1['review'][0]
###Output
_____no_output_____ |
Keras_Model_UNI.ipynb | ###Markdown
###Code
# Environment setup: install dependencies and fetch the training data repo.
# NOTE(review): `transformers` is installed but never imported below — confirm
# whether it is still needed.
!pip install -qq transformers
!git clone https://[email protected]/six60110/training_repo.git
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Compact float printing for numpy arrays
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
# Load the training data (tab-separated; `text` column holds the tweets,
# `HS` the binary hate-speech label)
train_file = pd.read_csv(
    "/content/training_repo/train_en.tsv", sep='\t')
print(train_file.head())
# look at the test data set
test_file = pd.read_csv(
    "/content/training_repo/test_en.tsv", sep='\t')
file_text = train_file.text
# Tokenizer / padding hyper-parameters
vocab_size = 10000
embedding_dim = 16
maxlength = 100
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
training_size = 20000
# Fit the tokenizer on the full training text.
# NOTE(review): the tokenizer is fit before the train/validation split below,
# so validation sentences influence the vocabulary — confirm this is intended.
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(file_text)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(file_text)
padded = pad_sequences(sequences, padding='post')
# Simple positional split: first `training_size` rows train, the rest validate
training_sentences = file_text[0:training_size]
testing_sentences = file_text[training_size:]
training_id = train_file.HS[0:training_size]
testing_id = train_file.HS[training_size:]
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=maxlength,
                                padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=maxlength,
                               padding=padding_type, truncating=trunc_type)
# Small text-classification model: embedding -> average pooling -> dense head.
# Sigmoid output matches the binary_crossentropy loss below.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=maxlength),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(24, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam' ,metrics=['accuracy'])
num_epochs = 100
def plot_graphs(history, string):
    """Plot a Keras training metric and its validation counterpart per epoch."""
    train_curve = history.history[string]
    val_curve = history.history['val_' + string]
    plt.plot(train_curve)
    plt.plot(val_curve)
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    plt.show()
# Train for num_epochs, validating on the held-out slice each epoch
history = model.fit(training_padded, training_id, epochs=num_epochs,
                    validation_data=(testing_padded, testing_id), verbose=2)
# NOTE(review): plt.plot() with no arguments draws nothing — probably leftover
plt.plot()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
#sequences = tokenizer.texts_to_sequences(file_text)
#padded = pad_sequences(sequences, padding='post')
###Output
_____no_output_____ |
notebooks/examples/1 - Configuring a Project.ipynb | ###Markdown
1 - Configuring a Workforce Project Using the ArcGIS API for PythonThis is first of a series of Jupyter Notebooks designed to demonstrate how the ArcGIS API for Python can be used to automate many aspects of Workforce for ArcGIS.Workforce for ArcGIS is a mobile solution that uses the power of location-based decision making for better field workforce coordination and teamwork. Everything in Workforce is center around the Workforce Project. A project consists of many things including workers, dispatchers, assignments, and app integrations. A project is typically configured through the user interface as described [here](https://doc.arcgis.com/en/workforce/android-phone/help/create-your-first-project.htm). For many users, this experience is totally fine. However, for other users who have complex or recurring projects this can be quite time-consuming. In this guide we'll demonstrate how many of those configuration tasks can be automated by using the [ArcGIS API for Python](https://developers.arcgis.com/python/).This guide uses the ArcGIS API for Python version 1.5.1 with Python 3.6+. Importing the Workforce Module Let's get started! The ArcGIS API for Python provides a [module](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.html) specifically for working with Workforce Projects. Let's first import that module.
###Code
from arcgis.apps import workforce
###Output
_____no_output_____
###Markdown
Connecting to your organization Now we'll connect to our organization as the owner of an existing workforce Project.
###Code
from arcgis.gis import GIS
# Sign in to ArcGIS Online as the project owner (password prompted interactively)
gis = GIS("https://arcgis.com", "workforce_scripts")
###Output
Enter password: ········
###Markdown
Searching for and using an existing project Next, let's find a specific project in our organization that we'd like to configure.
###Code
# Find the Workforce Project item by type and title; take the first match
item = gis.content.search("type:'Workforce Project' 'Public Works Work Orders'")[0]
item
###Output
_____no_output_____
###Markdown
Let's create a [Project](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlproject) object from that item so we can leverage the workforce module to easily configure it.
###Code
# Wrap the portal item in a workforce.Project to use the configuration helpers
project = workforce.Project(item)
###Output
_____no_output_____
###Markdown
Adding assignment types Now that we have a `Project` to work with, let's add a few assignment types. We'll use the [AssignmentTypeManager](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.managers.htmlassignmenttypemanager) object, which is a [property](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlarcgis.apps.workforce.Project.assignment_types) of the `Project`, to accomplish this.
###Code
# Register three assignment types, then list them to confirm creation
project.assignment_types.add(name="Fill in Pothole")
project.assignment_types.add(name="Sidewalk Repair")
project.assignment_types.add(name="Paint Crosswalk")
project.assignment_types.search()
###Output
_____no_output_____
###Markdown
Adding workers to the project Now let's add some workers to the project. We are going to import workers from an existing group in our organization. Let's first find the group of workers.
###Code
# Locate the organization group whose members will become workers
repair_crew_group = gis.groups.search("Road Repair Crew")[0]
repair_crew_group
###Output
_____no_output_____
###Markdown
For each member in the group, we'll add them as a worker to the project. We'll use the [WorkerManager](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.managers.htmlworkermanager) class, which is available as a [property](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlarcgis.apps.workforce.Project.workers) on the `Project`, to add the users one by one. If there were a lot of users, we could use the [batch_add](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlproject) method to add a list of [Workers](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlworker) all at once.
###Code
# Add every member of the group as a worker, initially marked not_working
for username in repair_crew_group.get_members()["users"]:
    user = gis.users.get(username)
    project.workers.add(user_id=username, name=user.fullName, status="not_working")
###Output
_____no_output_____
###Markdown
Adding dispatchers to the project Now let's add some dispatchers to the project from a CSV file. We'll use the [pandas](https://pandas.pydata.org/) library to help us out.
###Code
import pandas as pd
# Dispatchers to add, one per row (columns: name, contactNumber, userId)
dataframe = pd.read_csv("data/dispatchers.csv")
dataframe
###Output
_____no_output_____
###Markdown
As shown above, we have 2 users to add. For every row in the csv file, let's add a new [dispatcher](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmldispatcher) to the project. We'll use the [DispatcherManager](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.managers.htmldispatchermanager) class, which is available as a [property](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlarcgis.apps.workforce.Project.dispatchers) on the `Project`, to add the users one by one. If there were a lot of users, we could use the [batch_add](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.managers.htmlarcgis.apps.workforce.managers.DispatcherManager.batch_add) method to add a list of [Dispatchers](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmldispatcher) all at once.
###Code
# One dispatcher per CSV row; itertuples exposes the columns as attributes
for row in dataframe.itertuples():
    project.dispatchers.add(
        name=row.name,
        contact_number=row.contactNumber,
        user_id=row.userId
    )
###Output
_____no_output_____
###Markdown
Adding an app integration As the final step of configuring the project, let's add the ability to open [Explorer for ArcGIS](https://doc.arcgis.com/en/explorer/) at the assignment location. We'll search the organization for the desired map.
###Code
from arcgis.mapping import WebMap
# Find the map owned by workforce_scripts that Explorer will open
webmap = WebMap(gis.content.search("Portland Streets owner:workforce_scripts")[0])
###Output
_____no_output_____
###Markdown
Now, let's share this map with the `Project` group so that all dispatchers and workers can access it.
###Code
# Share the map with the project group so workers and dispatchers can open it
webmap.item.share(groups=[project.group])
###Output
_____no_output_____
###Markdown
Next, we'll create the [url scheme](https://github.com/Esri/explorer-integration) used to launch Explorer by using the [build_explorer_url](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.htmlbuild-explorer-url) method in the ArcGIS API for Python.
###Code
from arcgis.apps import build_explorer_url
# Build the Explorer url scheme; the ${assignment.*} placeholders are
# substituted by Workforce at tap time with the assignment's coordinates
url = build_explorer_url(
    webmap=webmap,
    center="${assignment.latitude},${assignment.longitude}",
    scale=9000
)
###Output
_____no_output_____
###Markdown
Then we'll add a new integration to the project using the created url. We'll use the [AssignmentIntegrationManager ](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.managers.htmlassignmentintegrationmanager) class, which is available as a [property](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.apps.workforce.htmlarcgis.apps.workforce.Project.integrations) on the `Project`.
###Code
# Register the Explorer integration (id, display name, url template)
project.integrations.add("default-explorer", "Explorer At Assignment", url)
###Output
_____no_output_____ |
Spam_Detection_Using_NLP_&_Basic_ML.ipynb | ###Markdown
Import the necessary libraries for the task. NumPy and pandas are common to every project, but since we are dealing with text data, here we also use the NLTK library to solve the problem statement.
###Code
#importing the required libraries
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
While working on google colab, We first need to mount the drive every time, enter the passcode, before that never forget to insert your data inside the drive while using colab or when working on Jupyter locally add data set in your working directory or change the path with command "os.chdir" to locate dataset.
###Code
from google.colab import drive
# Mount Google Drive so the dataset stored there is reachable from Colab
drive.mount('/content/drive')
###Output
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
###Markdown
Then read the data set with Pandas library function.do not forget to copy your data file path from google colab to read it.
###Code
# latin1 encoding is needed: the SMS spam file is not valid UTF-8
df = pd.read_csv('/content/sample_data/spam.csv', encoding='latin1')
###Output
_____no_output_____
###Markdown
Data Cleaning & Data Understanding Steps df.head() : This Function give us o/p as first 5 Rows, If we wants more numbers of rows we can initialize the desire numbers of rows inside bracket.
###Code
# First 5 rows — pass a number to head() to see more
df.head()
###Output
_____no_output_____
###Markdown
As we can see, columns Unnamed: 2, Unnamed: 3 and Unnamed: 4 do not contain any information, so we can drop those columns.
###Code
# Drop the three empty spill-over columns from the CSV
df=df.drop(['Unnamed: 2','Unnamed: 3','Unnamed: 4'],axis=1)
df.head()
# See other commands to understand our Data
# Printing the size of the dataset
df.shape
# Getting feature names
df.columns
# Checking the duplicates and removing them in place
df.drop_duplicates(inplace=True)
df.shape
# Show the number of missing data for each column
df.isnull().sum()
###Output
_____no_output_____
###Markdown
Processing our text data with NLP. Our remaining data is clean but still in text format; for the machine to understand it we will have to convert it into numerical form. We already have a library for tokenizing words: import NLTK and perform the operations below.
###Code
import nltk
from nltk.corpus import stopwords
import string
# Function to tokenize each and every word
def tokenizer(text):
    """Tokenize *text* with NLTK and expand the "n't" contraction to "not"."""
    words = nltk.word_tokenize(text)
    rejoined = ' '.join(words)
    # word_tokenize splits contractions like "don't" into "do" / "n't";
    # rewriting "n't" as "not" restores a readable negation token.
    return rejoined.replace("n't", "not")
###Output
_____no_output_____
###Markdown
After Tokenization we need to remove punctuation, Remove stopwords with reference to stopwords stored in NLTK stopwords, Function just compare words within dictionary, if mathces remove it from the sentence, convert all words into lower case then return a list of clean words.
###Code
# Creating a function to process punctuation and stopwords in the text data
def process_stop_punc(text):
    """Strip punctuation from *text* and drop English stopwords.

    Returns a list of the remaining words.
    """
    # Remove punctuation characters, keeping everything else
    nopunc=[char for char in text if char not in string.punctuation]
    nopunc=''.join(nopunc)
    # Perf fix: hoist the stopword list into a set built once — the original
    # re-evaluated stopwords.words('english') for every single word.
    stop_words = set(stopwords.words('english'))
    clean_words=[word for word in nopunc.split() if word.lower() not in stop_words]
    return clean_words
###Output
_____no_output_____
###Markdown
After the above process we have to convert words into their base form, which is called stemming. This task is done by the following stemming() function; here we use PorterStemmer(), which is part of term normalization in the NLP process.
###Code
# Function to convert words into a single base form, e.g. plural to singular
# and past / past-continuous to present.
def stemming(List):
    """Stem every token with the Porter stemmer and rebuild one string."""
    stemmer = nltk.stem.PorterStemmer()
    stems = [stemmer.stem(token) for token in List]
    return ' '.join(stems)
# Function to compile each and every operation
def process(text):
    """Run the full cleaning pipeline: tokenize, strip noise, then stem."""
    tokens = tokenizer(text)
    kept_words = process_stop_punc(tokens)
    return stemming(kept_words)
# Fetch the NLTK resources the pipeline depends on (tokenizer models, stopwords)
nltk.download('punkt')
nltk.download('stopwords')
# Show the full pipeline applied to the first few messages
df['text'].head().apply(process)
###Output
_____no_output_____
###Markdown
Vectorizing the words TFIDFVectorizer the value increases proportionally to count, but is inversely proportional to frequency of the word in the corpus; that is the inverse document frequency (IDF) part. **TfidfVectorizer** and **CountVectorizer** both are methods for converting text data into vectors as model can process only numerical data.In **CountVectorizer** we only count the number of times a word appears in the document which results in biasing in favour of most frequent words. this ends up in ignoring rare words which could have helped is in processing our data more efficiently.To overcome this , we use TfidfVectorizer .In TfidfVectorizer we consider overall document weightage of a word. It helps us in dealing with most frequent words. Using it we can penalize them. TfidfVectorizer weights the word counts by a measure of how often they appear in the documents.
###Code
# Convert the text column to a sparse TF-IDF matrix.
# NOTE(review): this vectorizes the raw df['text'], not the `process`-cleaned
# text shown above, and it is fit before the train/test split — confirm both
# choices are intentional.
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
message = TfidfVectorizer().fit_transform(df['text'])
# Getting the shape of message (documents x vocabulary)
message.shape
# Print how our data looks in numerical format with tf-idf
print(message)
###Output
(0, 8267) 0.1820760415281772
(0, 1069) 0.32544292157369786
(0, 3594) 0.15240463847472757
(0, 7645) 0.15605579719351925
(0, 2048) 0.27450748091103355
(0, 1749) 0.31054526020101475
(0, 4476) 0.27450748091103355
(0, 8489) 0.22981449679298432
(0, 3634) 0.18170677054225734
(0, 1751) 0.27450748091103355
(0, 4087) 0.1080194309412782
(0, 5537) 0.15773893821302193
(0, 1303) 0.2468122813993541
(0, 2327) 0.2514110448509606
(0, 5920) 0.25394599154794606
(0, 4350) 0.32544292157369786
(0, 8030) 0.2284782712166139
(0, 3550) 0.1474570544871208
(1, 5533) 0.5464988818914979
(1, 8392) 0.4304438402468376
(1, 4318) 0.5233434480300876
(1, 4512) 0.406925248497845
(1, 5504) 0.2767319100209511
(2, 77) 0.2326251973903166
(2, 1156) 0.16331528331958853
: :
(5167, 1786) 0.2820992149566908
(5167, 3470) 0.2744008686738812
(5167, 2892) 0.24290552468890048
(5167, 7049) 0.20395814718823002
(5167, 1778) 0.13673277359621147
(5167, 8065) 0.21062041399707843
(5167, 2592) 0.18469635293243075
(5167, 5334) 0.20868573103969204
(5167, 1438) 0.14288820286282247
(5167, 7627) 0.10319532003279058
(5167, 3308) 0.12215409504489928
(5167, 7039) 0.18503435583866787
(5167, 4615) 0.15982569695504117
(5167, 1084) 0.11232294630116563
(5167, 8313) 0.19089150993177975
(5167, 4218) 0.12281898312072442
(5167, 3781) 0.17097956584622562
(5167, 7756) 0.08437843735148565
(5167, 3358) 0.16237204914715464
(5167, 4087) 0.11278484851691671
(5168, 6505) 0.5493950047150747
(5168, 7885) 0.434678956678875
(5168, 4225) 0.5770885193248134
(5168, 5244) 0.39278764302749264
(5168, 7756) 0.14800689768753802
###Markdown
CountVectorizer counts the word frequencies.
###Code
# Using CountVectorizer: raw term counts instead of TF-IDF weights
from sklearn.feature_extraction.text import CountVectorizer
message1=CountVectorizer().fit_transform(df['text'])
message1
###Output
_____no_output_____
###Markdown
Splitting data into training and testing sets. Our textual data is ready for model building; now, with a scikit-learn function, we will split it in an 80:20 ratio for training and testing respectively.
###Code
# Splitting the data into 80:20 train test ratio for the tf-idf matrix
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(message,df['type'],test_size=0.2,random_state=123)
print(X_train)
print(X_test)
# Splitting 80:20 for the count-vectorized matrix.
# NOTE(review): a different random_state (0 vs 123) means the two vectorizers
# are evaluated on different splits, which weakens the later comparison.
from sklearn.model_selection import train_test_split
X_train1,X_test1,y_train1,y_test1=train_test_split(message1,df['type'],test_size=0.2,random_state=0)
###Output
_____no_output_____
###Markdown
Model building with different algorithms1> Naive Bayes classifier
###Code
# Creating and training the naive bayes classifier on the tf-idf features
from sklearn.naive_bayes import MultinomialNB
classifier=MultinomialNB().fit(X_train,y_train)
# Evaluate on the TRAINING data (to check for overfitting against test below)
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
pred=classifier.predict(X_train)
print(classification_report(y_train,pred))
print()
print('confusion Matrix:\n',confusion_matrix(y_train,pred))
print()
print(' training accuracy score:\n',accuracy_score(y_train,pred))
# Evaluate on the held-out TEST data (note: `pred` is reused/overwritten here)
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
pred=classifier.predict(X_test)
print(classification_report(y_test,pred))
print()
print('confusion Matrix:\n',confusion_matrix(y_test,pred))
print()
print('testing accuracy score:\n',accuracy_score(y_test,pred))
###Output
precision recall f1-score support
ham 0.94 1.00 0.97 888
spam 1.00 0.63 0.77 146
accuracy 0.95 1034
macro avg 0.97 0.82 0.87 1034
weighted avg 0.95 0.95 0.94 1034
confusion Matrix:
[[888 0]
[ 54 92]]
testing accuracy score:
0.9477756286266924
###Markdown
From Above two Results we can say that our model is not overfitting as we got 96.56 % Accuracy on training and 94.77% on testing set.
###Code
# Creating and training the naive bayes classifier on the count features.
# Note: this rebinds `classifier`, replacing the tf-idf model from above.
from sklearn.naive_bayes import MultinomialNB
classifier=MultinomialNB().fit(X_train1,y_train1)
# Evaluate on the TRAINING data
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
pred=classifier.predict(X_train1)
print(classification_report(y_train1,pred))
print()
print('confusion Matrix:\n',confusion_matrix(y_train1,pred))
print()
print(' training accuracy score:\n',accuracy_score(y_train1,pred))
# Evaluate on the held-out TEST data
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
pred=classifier.predict(X_test1)
print(classification_report(y_test1,pred))
print()
print('confusion Matrix:\n',confusion_matrix(y_test1,pred))
print()
print('testing accuracy score:\n',accuracy_score(y_test1,pred))
###Output
precision recall f1-score support
ham 0.99 0.99 0.99 885
spam 0.91 0.93 0.92 149
accuracy 0.98 1034
macro avg 0.95 0.96 0.96 1034
weighted avg 0.98 0.98 0.98 1034
confusion Matrix:
[[872 13]
[ 10 139]]
testing accuracy score:
0.9777562862669246
###Markdown
Here we have compared the results with the TF-IDF vectorizer and the Count vectorizer, and we get better accuracy with the Count vectorizer. Let us now try an SVM with a grid-search approach to tune the hyperparameters.
###Code
# Prediction using LinearSVC and GridSearchCV on the tf-idf tokens
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
# Tune only the regularization strength C; refit=True retrains the best
# estimator on the full training set after the search
param_grid={'C':[0.1,1,10,100]}
grid=GridSearchCV(LinearSVC(),param_grid,refit=True)
grid.fit(X_train,y_train)
# Best C found by the search
print(grid.best_params_)
# Best cross-validated accuracy
print(grid.best_score_)
# Prediction of test data
pred2=grid.predict(X_test)
# Evaluate on the held-out test set
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
print(classification_report(y_test,pred2))
print()
print('confusion Matrix:\n',confusion_matrix(y_test,pred2))
print()
print('accuracy score:\n',accuracy_score(y_test,pred2))
###Output
precision recall f1-score support
ham 0.98 0.99 0.99 888
spam 0.96 0.86 0.91 146
accuracy 0.98 1034
macro avg 0.97 0.93 0.95 1034
weighted avg 0.98 0.98 0.98 1034
confusion Matrix:
[[883 5]
[ 20 126]]
accuracy score:
0.9758220502901354
###Markdown
For **TF-IDF Vectorizer** data, we get better Accuracy score with SVM both on training (98.18 %) & Testing (97.58 %)Now Lets Chech Accuracy Score with Count Vectoriser data set
###Code
# Prediction using LinearSVC and GridSearchCV on the CountVectorizer tokens
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
param_grid1={'C':[0.1,1,10,100]}
# Bug fix: the search previously received `param_grid` (from the tf-idf cell)
# while `param_grid1` was defined but never used. The values happen to match,
# but the search now uses the grid declared for this vectorizer.
grid1=GridSearchCV(LinearSVC(),param_grid1,refit=True)
grid1.fit(X_train1,y_train1)
# Best C found by the search
print(grid1.best_params_)
# Best cross-validated accuracy
print(grid1.best_score_)
# Removed a redundant second grid1.fit(...): with refit=True the best
# estimator is already retrained on the full training set by the fit above.
# Prediction of test data
pred3=grid1.predict(X_test1)
pred3
# Evaluate on the held-out test set
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
print(classification_report(y_test1,pred3))
print()
print('confusion Matrix:\n',confusion_matrix(y_test1,pred3))
print()
print('accuracy score:\n',accuracy_score(y_test1,pred3))
###Output
precision recall f1-score support
ham 0.98 1.00 0.99 885
spam 0.99 0.91 0.95 149
accuracy 0.99 1034
macro avg 0.99 0.95 0.97 1034
weighted avg 0.99 0.99 0.99 1034
confusion Matrix:
[[884 1]
[ 14 135]]
accuracy score:
0.9854932301740812
|
assignment_1.ipynb | ###Markdown
**Import Packages and Load Dataset**
###Code
# Install the kaggle CLI
!pip install -q kaggle
# Download kaggle.json from kaggle.com -> Account -> Create API Token,
# then upload that file here
from google.colab import files
files.upload()
# Create the folder the kaggle CLI expects its credentials in
!mkdir -p ~/.kaggle
# Copy the uploaded kaggle.json into it and restrict its permissions
!cp /content/kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
# Use the kaggle API to download the dataset
!kaggle datasets download -d jessemostipak/hotel-booking-demand
# Unzip the data file
!unzip hotel-booking-demand.zip
import pandas as pd
import numpy as np
import seaborn as sns
import plotly.express as px
import matplotlib
import matplotlib.pyplot as plt
# A Jupyter-notebook-specific command that lets you see the plots inline
%matplotlib inline
df1 = pd.read_csv("/content/hotel_bookings.csv")
df1.shape
# Number of fully duplicated rows in the raw data
sum(df1.duplicated())
# Work on a copy so df1 keeps the raw data intact
df = df1.copy()
df.head(10)
###Output
_____no_output_____
###Markdown
Exploratory Data Analysis
###Code
df.info()
df.describe()
# Checking null values
df.isnull().sum()
counts = df['country'].value_counts()
counts
plt.subplots(figsize=(7,5))
# Bookings per hotel type, restricted to countries with > 2000 bookings
sns.countplot(x='country', hue='hotel', data=df[df['country'].isin(counts[counts > 2000].index)])
plt.show()
# Filling null values and dropping a few 'not very useful' columns
df['agent'] = df['agent'].fillna(0)
df['children'] = df['children'].fillna(0)
df['country'] = df['country'].fillna('PRT')
# NOTE(review): 'country' is filled above but then dropped here, and
# 'days_in_waiting_list' appears twice in this list — confirm intent.
drop_col = ['days_in_waiting_list', 'arrival_date_year', 'assigned_room_type', 'arrival_date_week_number', 'booking_changes',
'reservation_status', 'country', 'days_in_waiting_list', 'customer_type', 'company', ]
df.drop(drop_col, axis = 1, inplace = True)
df.head(10)
df.isnull().sum()
df.shape
# Find the categorical features and their cardinality
a = df.select_dtypes(object).columns
for i in a:
    print (i, df[i].nunique())
# According to the above result, it's obvious that I can't use one-hot encoding
# for most of our categorical features, because that would create a lot of
# columns and add a lot of complexity to our model.
# Therefore I am going to use label encoding via sklearn's LabelEncoder.
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
# Derive year/month/day features from the reservation status date
df['reservation_status_date'] = pd.to_datetime(df['reservation_status_date'])
df['year'] = df['reservation_status_date'].dt.year
df['month'] = df['reservation_status_date'].dt.month
df['day'] = df['reservation_status_date'].dt.day
df.drop(['reservation_status_date','arrival_date_month'] , axis = 1, inplace = True)
df.head(10)
a = df.select_dtypes(object).columns
list_catv = []
for i in a:
    print (i, df[i].nunique())
    list_catv.append(i)
# Label-encode every remaining object-typed column
for i in list_catv:
    df[i] = le.fit_transform(df[i])
# NOTE(review): label-encoding year/month/day remaps already-numeric ordinal
# values to 0..n-1 — presumably for compactness; confirm this is intended.
df['year'] = le.fit_transform(df['year'])
df['month'] = le.fit_transform(df['month'])
df['day'] = le.fit_transform(df['day'])
df.head(10)
# Check how many duplicate rows remain after the encoding step
sum(df.duplicated())
# Remove duplicates in place
df.drop_duplicates(inplace=True)
df.shape
# Fix: pass the data as the keyword `x` — seaborn 0.12+ rejects positional
# data arguments to countplot (the FutureWarning shown in this cell's output).
sns.countplot(x=df["is_canceled"])
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:
Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.
###Markdown
Canceling rate is pretty much high. 70000> not canceled Vs. 40000> canceled. **Train|Test Split**
###Code
from sklearn.model_selection import train_test_split
# Target is the cancellation flag; everything else is a feature
y = df['is_canceled']
X = df.drop('is_canceled', axis = 1)
# 70:30 train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=101,test_size=0.3)
X_train
###Output
_____no_output_____
###Markdown
**Feature Scaling**
###Code
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit on the training data only, then apply the same scaling to the test
# data — avoids leaking test statistics into training
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
**Train the model**
###Code
from sklearn.tree import DecisionTreeClassifier
# Decision tree with default hyperparameters
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
y_pred_dtc = dtc.predict(X_test)
###Output
_____no_output_____
###Markdown
**Evaluate the model**
###Code
# Evaluate held-out predictions: accuracy, confusion matrix, per-class report.
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
acc_dtc = accuracy_score(y_test, y_pred_dtc)
conf = confusion_matrix(y_test, y_pred_dtc)
clf_report = classification_report(y_test, y_pred_dtc)
acc_dtc
###Output
_____no_output_____
###Markdown
Applying mathematical operations (after converting input into integer values)
###Code
# Questions 1 and 2: demonstrate the four basic arithmetic operators on user input.
"Q1:show all 4 basic opertors with diffrent numbers"
"Q2:get user input and do calculations"
# Each line reads an integer from the user, then applies one operator.
int(input("value "))+3 # addition
int(input("value "))*3 # multiplication
int(input("value "))-3 # subtraction
int(input("value "))/3 # true division: result is a float
###Output
value 552
###Markdown
Assigning values to variables and applying operations to them
###Code
#question3
"Q3:use variables to store user inputs and do multiple calculations with the same variable name"
# The same values are re-assigned before each operation so each notebook cell
# result shows one operator applied to a and b.
a = 10 #first variable
b = 20 #second variable
a+b # addition
a = 10
b = 20
a-b # subtraction
a = 10
b = 20
a*b # multiplication
a = 10
b = 20
a/b # true division
###Output
_____no_output_____
###Markdown
Assigning values to variables from user input and applying operations to them
###Code
#question4
"Q4:Get two user inputs and do a calculation between them"
# Read two integers from the user before each operation.
a = int(input("first variable ")) #first variable with an assigned input integer value
b = int(input("second variable ")) #second variable with an assigned input integer value
a+b # addition
a = int(input("first variable "))
b = int(input("second variable "))
a-b # subtraction
a = int(input("first variable "))
b = int(input("second variable "))
a*b # multiplication
a = int(input("first variable "))
b = int(input("second variable "))
a/b # true division
###Output
first variable 898
second variable 21
###Markdown
0. Load packages and imports
###Code
## basic functionality
import pandas as pd
import numpy as np
import re
import plotnine
from plotnine import *
## can add others if you need them
## repeated printouts
# Make Jupyter echo every expression in a cell, not just the last one.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
###Output
_____no_output_____
###Markdown
0.1: Load the data (0 points)Load the `sentencing_asof0405.csv` data*Notes*: You may receive a warning about mixed data types upon import; feel free to ignore
###Code
# Load the Cook County sentencing snapshot (as of 2021-04-05).
sentencing = pd.read_csv("sentencing_asof0405.csv")
###Output
/opt/conda/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3165: DtypeWarning: Columns (10,11,14,25) have mixed types.Specify dtype option on import or set low_memory=False.
###Markdown
0.2: Print head, dimensions, info (0 points)
###Code
# Quick structural overview: first rows plus dtypes/non-null counts.
sentencing.head()
sentencing.info()
###Output
_____no_output_____
###Markdown
Part one: data cleaning/interpretation (group) 1.1: Understanding the unit of analysis (5 points)- Print the number of unique values for the following columns. Do so in a way that avoids copying/pasting code for the three: - Cases (`CASE_ID`) - People in that case (`CASE_PARTICIPANT_ID`) - Charges (`CHARGE_ID`)- Write a couple sentences on the following and show an example of each (e.g., a case involving multiple people): - Why there are more unique people than unique cases? - Why there are more unique charges than unique people?- Print the mean and median number of charges per case/participant - Print the mean and median number of participants per case- Does the data seem to enable us to follow the same defendant across different cases they're charged in? Write 1 sentence in support of your conclusion.
###Code
# Unique counts for case, participant, and charge identifiers in one call.
sentencing[["CASE_ID", 'CASE_PARTICIPANT_ID', 'CHARGE_ID']].nunique()
# Why there are more unique people than unique cases?
# Because one case can involve multiple people.
# Why there are more unique charges than unique people?
# Because one person can be the subject of multiple charges.
# Mean/median number of distinct charges per case and per participant.
sentencing.groupby("CASE_ID")["CHARGE_ID"].nunique().agg([np.mean, np.median])
sentencing.groupby("CASE_PARTICIPANT_ID")["CHARGE_ID"].nunique().agg([np.mean, np.median])
# Mean/median number of participants per case.
sentencing.groupby("CASE_ID")["CASE_PARTICIPANT_ID"].nunique().agg([np.mean, np.median])
# Max number of distinct cases any single participant id appears in.
sentencing.groupby("CASE_PARTICIPANT_ID")["CASE_ID"].nunique().max()
# Does the data seem to enable us to follow the same defendant across different cases they're charged in?
# No. When grouping by participant id and finding the max number of cases each participant has,
# the max number of cases any participant is charged with is 1, so it seems like
# the participant ID changes between cases.
###Output
_____no_output_____
###Markdown
1.2.1: Which offense is final? (3 points)- First, read the data documentation [link](https://datacatalog.cookcountyil.gov/api/views/tg8v-tm6u/files/8597cdda-f7e1-44d1-b0ce-0a4e43f8c980?download=true&filename=CCSAO%20Data%20Glossary.pdf) and summarize in your own words the differences between `OFFENSE_CATEGORY` and `UPDATED_OFFENSE_CATEGORY` - Construct an indicator `is_changed_offense` that's True for case-participant-charge observations (rows) where there's a difference between the original charge (offense category) and the most current charge (updated offense category). What are some of the more common changed offenses? (can just print result of sort_values based on original offense category)- Print one example of a changed offense from one of these categories and comment on what the reason may be
###Code
# OFFENSE_CATEGORY is the category of offense encoded before specific charges are brought in a case,
# while UPDATED_OFFENSE_CATEGORY is the category of offense encoded based on the primary charge of the case.
# Flag rows whose category changed between filing and the current record.
sentencing["is_changed_offense"] = sentencing["OFFENSE_CATEGORY"] != sentencing["UPDATED_OFFENSE_CATEGORY"]
# Most common original categories among changed offenses.
# (The original `.sort_values().first` evaluated to an unevaluated bound method;
# value_counts() actually tabulates the frequent changed categories.)
sentencing[sentencing["is_changed_offense"]]["OFFENSE_CATEGORY"].value_counts().head(10)
# Show one example row of a changed offense.
sentencing[sentencing["is_changed_offense"]][["OFFENSE_CATEGORY", "UPDATED_OFFENSE_CATEGORY"]].head(1)
# The charge PROMIS Conversion has changed because the prosecution decides whether to charge a
# crime as a homicide once enough evidence is collected, and that decision is reflected in UPDATED_OFFENSE_CATEGORY.
###Output
_____no_output_____
###Markdown
1.2.2: Simplifying the charges (5 points)Using the field (`UPDATED_OFFENSE_CATEGORY`), create a new field, `simplified_offense_derived`, that simplifies the many offense categories into broader buckets using the following process:First, combine all offenses beginning with "Aggravated" into a single category without that prefix (e.g., Aggravated Battery and Battery just becomes Battery)Then:- Combine all offenses with arson into a single arson category (`Arson`)- Combine all offenses with homicide into a single homicide category (`Homicide`)- Combine all offenses with vehicle/vehicular in the name into a single vehicle category (`Vehicle-related`)- Combine all offenses with battery in the name into a single battery category (`Battery`)Try to do so efficiently (e.g., using map and a dictionary or np.select rather than separate line for each recoded offense)Print the difference between the of unique offenses in the original `UPDATED_OFFENSE_CATEGORY` field and the of unique offenses in your new `simplified_offense_derived` field
###Code
# Strip the "Aggravated " prefix so e.g. Aggravated Battery folds into Battery.
sentencing["simplified_offense_derived"] = sentencing["UPDATED_OFFENSE_CATEGORY"].str.replace("Aggravated ", "")
# Bucket arson/homicide/vehicle/battery variants into single categories.
conditions = [sentencing["simplified_offense_derived"].str.contains("Arson"),
              sentencing["simplified_offense_derived"].str.contains("Homicide"),
              sentencing["simplified_offense_derived"].str.contains("Vehicle-related"),
              sentencing["simplified_offense_derived"].str.contains("Battery")]
choices = ["Arson", "Homicide", "Vehicle-related", "Battery"]
# Rows matching no condition keep their (de-prefixed) category unchanged.
sentencing["simplified_offense_derived"] = np.select(conditions, choices, sentencing["simplified_offense_derived"])
# How many categories were collapsed away by the simplification.
sentencing.UPDATED_OFFENSE_CATEGORY.nunique() - sentencing.simplified_offense_derived.nunique()
###Output
_____no_output_____
###Markdown
1.3: Cleaning additional variables (10 points)Clean the following variables; make sure to retain the original variable in data and use the derived suffix so it's easier to pull these cleaned out variables later (e.g., `age_derived`) to indicate this was a transformation- Race: create True/false indicators for `is_black_derived` (Black only or mixed race with hispanic), Non-Black Hispanic, so either hispanic alone or white hispanic (`is_hisp_derived`), White non-hispanic (`is_white_derived`), or none of the above (`is_othereth_derived`)- Gender: create a boolean true/false indicator for `is_male_derived` (false is female, unknown, or other)- Age at incident: you notice outliers like 130-year olds. Winsorsize the top 0.01% of values to be equal to the 99.99th percentile value pre-winsorization. Call this `age_derived`- Create `sentenceymd_derived` that's a version of `SENTENCING_DATE` converted to datetime format. Also create a rounded version, `sentenceym_derived`, that's rounded down to the first of the month and the year (e.g., 01-05-2016 and 01-27-2016 each become 01-01-2016) - Hint: all timestamps are midnight so u can strip in conversion. For full credit, before converting, you notice that some of the years have been mistranscribed (e.g., 291X or 221X instead of 201X). Programatically fix those (eg 2914 -> 2014). Even after cleaning, there will still be some that are after the year 2021 that we'll filter out later. For partial credit, you can ignore the timestamps that cause errors and set errors = "coerce" within `pd.to_datetime()` to allow the conversion to proceed. - Sentencing judge: create an identifier (`judgeid_derived`) for each unique judge (`SENTENCE_JUDGE`) structured as judge_1, judge_2...., with the order determined by sorting the judges (will sort on fname then last). 
When finding unique judges, there are various duplicates we could weed out --- for now, just focus on (1) the different iterations of Doug/Douglas Simpson, (2) the different iterations of Shelley Sutker (who appears both with her maiden name and her hyphenated married name). - Hint: due to mixed types, you may need to cast the `SENTENCE_JUDGE` var to a diff type to sortAfter finishing, print a random sample of 10 rows (data.sample(n = 10)) with the original and cleaned columns for the relevant variables to validate your work
###Code
# --- Race indicators (True/False) ---
# Black alone or Black mixed with hispanic.
sentencing["is_black_derived"] = np.where((sentencing["RACE"] == "Black") |
                                          (sentencing["RACE"] == "White/Black [Hispanic or Latino]"),
                                          True, False)
# Non-Black Hispanic: hispanic alone or white hispanic.
sentencing["is_hisp_derived"] = np.where((sentencing["RACE"] == "HISPANIC") |
                                         (sentencing["RACE"] == "White [Hispanic or Latino]"),
                                         True, False)
# White non-hispanic.
sentencing["is_white_derived"] = np.where((sentencing["RACE"] == "White"),
                                          True, False)
# None of the above.
sentencing["is_othereth_derived"] = np.where((sentencing["is_black_derived"] == False) &
                                             (sentencing["is_hisp_derived"] == False) &
                                             (sentencing["is_white_derived"] == False),
                                             True, False)
# Gender indicator: False covers female, unknown, and other.
sentencing["is_male_derived"] = np.where(sentencing["GENDER"] == "Male", True, False)
# Inspect the 99.99th-percentile age before winsorizing.
sentencing.AGE_AT_INCIDENT.quantile(q = 0.9999)
# NOTE(review): 81.0 is the hard-coded percentile value from the line above --
# confirm it still matches if the underlying data changes.
sentencing["age_derived"] = np.where(sentencing["AGE_AT_INCIDENT"] > 81.0, 81.0, sentencing["AGE_AT_INCIDENT"])
# Drop the trailing midnight timestamp (last 12 characters of the string).
sentencing["sentenceymd_derived"] = sentencing.SENTENCE_DATE.str[:-12]
# Fix mistranscribed years (e.g. 291X/221X -> 201X): when the century digits
# exceed 20, replace them with "0" so 2914 becomes 2014.
sentencing["sentenceymd_derived"] = np.where(sentencing.sentenceymd_derived.str[-4:-2].astype("int") > 20,
                                             sentencing.sentenceymd_derived.str[:-3] + "0" + sentencing.sentenceymd_derived.str[-2:],
                                             sentencing.sentenceymd_derived)
sentencing["sentenceymd_derived"] = pd.to_datetime(sentencing.sentenceymd_derived)
# Round down to the first of the month.
sentencing["sentenceym_derived"] = sentencing.sentenceymd_derived.astype('datetime64[M]')
# Build judge_0, judge_1, ... identifiers ordered by the sorted judge names.
judges = sentencing.groupby("SENTENCE_JUDGE").CASE_ID.count().reset_index()
judges["judgeid_derived"] = "judge_" + (judges.index).astype("string")
judges = judges[["SENTENCE_JUDGE","judgeid_derived"]]
judges[(judges.SENTENCE_JUDGE.str.contains("Doug")) | (judges.SENTENCE_JUDGE.str.contains("Shelley"))]
sentencing = pd.merge(sentencing, judges)
# Collapse duplicate judges: Doug/Douglas Simpson and Shelley Sutker(-Dermer).
# NOTE(review): judge_71->judge_70 and judge_281->judge_280 are hard-coded ids
# taken from the lookup printed above -- verify they still map to those duplicates.
sentencing["judgeid_derived"] = np.where(sentencing.judgeid_derived == "judge_71", "judge_70", sentencing.judgeid_derived)
sentencing["judgeid_derived"] = np.where(sentencing.judgeid_derived == "judge_281", "judge_280", sentencing.judgeid_derived)
sentencing[["SENTENCE_JUDGE","judgeid_derived"]][(sentencing.SENTENCE_JUDGE.str.contains("Doug")) | (sentencing.SENTENCE_JUDGE.str.contains("Shelley"))].value_counts()
## print a random sample of 10 rows (data.sample(n = 10)) with the original and cleaned columns for the
## relevant variables to validate your work
sample = sentencing[["RACE", "is_black_derived","is_hisp_derived","is_white_derived","is_othereth_derived","GENDER","is_male_derived",
                     "AGE_AT_INCIDENT","age_derived", "SENTENCE_JUDGE","judgeid_derived","SENTENCE_DATE", "sentenceymd_derived","sentenceym_derived"]]
sample.sample(n = 10)
###Output
_____no_output_____
###Markdown
1.4: Subsetting rows to analytic dataset (5 points)You decide based on the above to simplify things in the following ways: - Subset to cases where only one participant is charged, since cases with >1 participant might have complications like plea bargains/informing from other participants affecting the sentencing of the focal participant- To go from a participant-case level dataset, where each participant is repeated across charges tied to the case, to a participant-level dataset, where each participant has one charge, subset to a participant's primary charge and their current sentence (`PRIMARY_CHARGE_FLAG` is True and `CURRENT_SENTENCE_FLAG` is True). Double check that this worked by confirming there are no longer multiple charges for the same case-participant- Filter out observations where judge is nan or nonsensical (indicated by is.null or equal to FLOOD)- Subset to sentencing date between 01-01-2012 and 04-05-2021 (inclusive)After completing these steps, print the number of rows in the data
###Code
# Keep only single-participant cases (avoids plea-bargain complications).
one_participant = sentencing.groupby("CASE_ID").agg(participant_count = ("CASE_PARTICIPANT_ID", "count")).reset_index()
one_participant = one_participant[one_participant["participant_count"] == 1]
one_participant_series = one_participant.CASE_ID
sentencing = sentencing[sentencing.CASE_ID.isin(one_participant_series)]
# Reduce to one row per participant: primary charge + current sentence only.
sentencing = sentencing[(sentencing.PRIMARY_CHARGE_FLAG == True) & (sentencing.CURRENT_SENTENCE_FLAG == True)]
# Sanity check: counts should now match across the three id columns.
sentencing[["CASE_ID", 'CASE_PARTICIPANT_ID', 'CHARGE_ID']].count()
sentencing.shape
# Drop missing or nonsensical judges.
sentencing = sentencing[(~sentencing.SENTENCE_JUDGE.isnull()) &
                        (sentencing.SENTENCE_JUDGE != "FLOOD")]
# Keep sentencing dates between 01-01-2012 and 04-05-2021 (inclusive).
sentencing = sentencing[(sentencing.sentenceymd_derived >= "01-01-2012") &
                        (sentencing.sentenceymd_derived <= "04-05-2021")]
sentencing.shape[0]
###Output
_____no_output_____
###Markdown
Part two: investigating Black vs. White sentencing disparitiesNow that the data are cleaned, we're going to investigate different types of disparities in sentencing between Black defendants and White defendants. We're focusing on these groups for the purpose of the problem set, but the analysis could be extended to study Hispanic defendants or, in a different jurisdiction, Asian and other minoritized groups.**Details if interested in digging deeper**: If interested (optional), you can read more technical coverage of how we might (1) measure disparities, and (2) what factors you want to adjust for when deciding whether two defendants are 'similarly situated' but for their race in the following sources:- [Review of sentencing disparities research](https://www.journals.uchicago.edu/doi/full/10.1086/701505)- [Discussion of causal model/blinding race at charging stage of the prosecutorial process](https://5harad.com/papers/blind-charging.pdf)- [Discussion of measuring discrimination in policing that can generalize to the sentencing case](https://www.annualreviews.org/doi/abs/10.1146/annurev-criminol-011518-024731)- [General discussion of causal challenges in measuring between-group disparities](https://osf.io/preprints/socarxiv/gx4y3/)**One major caveat**: when investigating whether two similar defendants received different sentences, we're missing one important attribute that influences sentencing: the defendant's criminal history. This influences sentencing both through sentencing guidelines, which can prescribe longer sentences for those who have certain types of prior convictions, and through judicial discretion if judges are more lenient with first-time defendants. The above sources discuss how much we want to "control away" for this prior history, since if we think there are racial biases in which defendants, conditional on *committing* a crime, are arrested and charged, we may not want to adjust for that factor. 
More discussion [in this article](https://www.themarshallproject.org/2019/12/03/the-growing-racial-disparity-in-prison-time) 2.0: (0 points) First, read in the following dataset (regardless of progress on part one): `sentencing_cleaned.pkl` (if you can't read in the pkl you can read in the .csv format but may need to recast some of the datetime columns)*Note*: don't worry if there are slight differences in your output from Part One and this dataset/it's not a good use of time to try to reverse engineer Part One answers from this cleaned data.
###Code
# Reload the pre-cleaned dataset provided for Part Two.
sent_cleaned = pd.read_pickle("sentencing_cleaned.pkl")
###Output
_____no_output_____
###Markdown
2.1: Investigating one type of between-group difference: who reaches the sentencing stage? (5 points)Tabulate and visualize the proportion of defendants, out of all defendants sentenced in a given month/year, who are Black and who are White (separate proportions)- Denominator is number of unique cases that month- Numerator for black defendants is count of is_black_derived- Numerator for white defendants is count of is_white_derived- Fraction of each is numerator/denominator- Print the table- Create a graph with two lines--- one for Black defendants as fraction of total; another for White defendants. Make sure it includes a legend summarizing which color is for which group, and clean the legend so that it has informative names (e.g., Black or White rather than prop_black or prop_white)- Use mathematical notation to write out each of the proportions using summation notation in a 1-2 sentence writeup describing trends. What seems to be going on in April and May 2020? **Optional challenge**: improve the viz by shading the background of the visualization for months with fewer than 100 cases **Optional challenge**: improve the viz by adding a vertical line for 12-01-2016, the month that new State's Attorney Foxx took office
###Code
# Monthly counts: Black defendants, White defendants, and unique cases.
table = sent_cleaned.groupby("sentenceym_derived").agg(black_defendent = ("is_black_derived","sum"),
                                                       white_defendent = ("is_white_derived", "sum"),
                                                       denominator = ("CASE_ID", "nunique"))
# Share of each group among all cases sentenced that month.
table["black_fraction"] = table.black_defendent / table.denominator
table["white_fraction"] = table.white_defendent / table.denominator
table
# Plot both monthly shares with readable legend labels.
plot = table.rename(columns = {"black_fraction": "Black", "white_fraction" : "White"}).reset_index()
plot = plot[["Black", "White"]].plot(kind="line", figsize=(20, 8))
plot.set_xlabel("Sentence Date")
plot.set_ylabel("Proportion of Defendants")
###Output
_____no_output_____
###Markdown
$\frac{\sum Black Defendants}{\sum Defendants}$ per month is signficinatly higher (about 6 times higher) than $\frac{\sum White Defendants}{\sum Defendants}$ per month throughout most of the time between 2012 and 2021. However, during April and May of 2020, $\frac{\sum Black Defendants}{\sum Defendants}$ per month drops signficiantly (but still higher than $\frac{\sum White Defendants}{\sum Defendants}$); meanwhile $\frac{\sum White Defendants}{\sum Defendants}$ of that two months increases obviously. 2.2: Investigating the first type of disparity: probation versus incaceration (10 points)One type of disparity beyond who arrives at the sentencing stage is whether the defendant receives probation or incaceration.According to the codebook, incarceration is indicated by `COMMITMENT_TYPE` == "Illinois Department of Corrections"Recreate the previous plot but where the y axis represents the difference between the following proportions (can be either Black - White or White - Black but make sure to label), adding a smoothed line:- Percent of black defendants who are incarcerated out of all black defendants that month/year - Percent of white defendants who are incarcerated out of all white defendants that month/year In a markdown cell after, write 1-2 sentences on your observations of trends over time. Do gaps seem to be widening or increasing?
###Code
# Incarceration flags by race: True when the defendant is of that race AND
# the sentence is incarceration (Illinois Department of Corrections).
sent_cleaned["black_incarc"] = np.where((sent_cleaned.is_black_derived == True) &
                                        (sent_cleaned.COMMITMENT_TYPE == "Illinois Department of Corrections"),
                                        True, False)
sent_cleaned["white_incarc"] = np.where((sent_cleaned.is_white_derived == True) &
                                        (sent_cleaned.COMMITMENT_TYPE == "Illinois Department of Corrections"),
                                        True, False)
# Monthly totals and incarceration counts per group.
table2 = sent_cleaned.groupby("sentenceym_derived").agg(black_total = ("is_black_derived","sum"),
                                                        white_total = ("is_white_derived", "sum"),
                                                        all_total = ("CASE_ID", "nunique"),
                                                        black_incarc = ("black_incarc", "sum"),
                                                        white_incarc = ("white_incarc", "sum")).reset_index()
# Within-group incarceration rates and the Black-minus-White gap.
table2["black_incarc_proportion"] = table2.black_incarc / table2.black_total
table2["white_incarc_proportion"] = table2.white_incarc / table2.white_total
table2["difference(black-white)"] = table2.black_incarc_proportion - table2.white_incarc_proportion
# Raw monthly lines plus smoothed trends, annotated by group color.
plot_table2 = ggplot(table2, aes(x = "sentenceym_derived")) + \
    geom_line(aes(y = 'black_incarc_proportion'), color = "blue") + \
    geom_line(aes(y = 'white_incarc_proportion'), color = "red") + \
    stat_smooth(aes(y = 'black_incarc_proportion'), color = "blue") + \
    stat_smooth(aes(y = 'white_incarc_proportion'), color = "red") + \
    annotate("text", x = pd.to_datetime("2014-10-05"), y = .53, label = "black defendants", color = "blue") + \
    annotate("text", x = pd.to_datetime("2014-10-05"), y = .29, label = "white defendants", color = "red") + \
    theme(axis_text_x = element_text(angle = 90)) + \
    labs(x="Date", y= "Proportion of Incarceration")
plot_table2
# Proportions of incarceration seem to be decreasing for both White and Black defendants,
# and the gap between races is also shrinking slightly. Notably, in the first few months of
# 2020 the incarceration proportion for Black defendants drops sharply -- briefly below the
# White proportion -- before an immediate rebound.
###Output
_____no_output_____
###Markdown
2.3: Investigating mechanisms: incaceration rates by chargeYour colleague sees the previous graph and is worried that the gap could be different---either wider or smaller---if you adjust for the fact that prosecutors have discretion in what crimes to charge defendants with. If white defendants are charged with crimes that tend to receive probation rather than incarceration, that could explain some of the gaps.In the next questions, you'll begin to investigate this. 2.3.1: Find the most common offenses (3 points)First, create a set of 'frequent offenses' that represent (over the entire period) the union of the 10 offenses Black defendant are most likely to be charged with and the 10 offenses white defendants are most likely to be charged with (might be far less than 20 total if there's a lot of overlap in common charges)Use the `simplified_offense_derived` for this
###Code
# Top-10 most frequent offense categories for White and for Black defendants.
white_frequent = set(sent_cleaned.loc[sent_cleaned.is_white_derived == True, "simplified_offense_derived"].value_counts().head(10).index)
black_frequent = set(sent_cleaned.loc[sent_cleaned.is_black_derived == True, "simplified_offense_derived"].value_counts().head(10).index)
# Union of the two sets (same membership the original built by concatenating
# black_frequent with white_frequent - black_frequent); sorted for stable display.
frequent = sorted(black_frequent | white_frequent)
frequent
###Output
_____no_output_____
###Markdown
2.3.2: Look at incarceration rates (again just whether incarcerated) by race and offense type for these top offenses (3 points)Print a wide-format version of the resulting table (so each row is an offense type, one col is black incarceration rate for that offense type; another is the white incarceration rate) and interpret. Which offenses show the largest disparities in judges being less likely to sentence White defendants to incarceration/more likely to offer those defendants probation?
###Code
# Restrict to the frequent offenses, then compute per-offense incarceration
# rates by race and the Black-minus-White difference.
table3 = sent_cleaned[sent_cleaned.simplified_offense_derived.isin(frequent)]
table3 = table3.groupby("simplified_offense_derived").agg(black_total = ("is_black_derived","sum"),
                                                          white_total = ("is_white_derived", "sum"),
                                                          all_total = ("CASE_ID", "nunique"),
                                                          black_incarc = ("black_incarc", "sum"),
                                                          white_incarc = ("white_incarc", "sum"))
table3["black_incarceration_rate"] = table3.black_incarc / table3.black_total
table3["white_incarceration_rate"] = table3.white_incarc / table3.white_total
table3["Difference_Btw_Rate_Black_Minus_White"] = table3["black_incarceration_rate"] - table3["white_incarceration_rate"]
# Sort widest disparity first.
table3[["black_incarceration_rate", "white_incarceration_rate", "Difference_Btw_Rate_Black_Minus_White"]].sort_values("Difference_Btw_Rate_Black_Minus_White", ascending=False)
# Racial disparities in incarceration rates are greatest for Narcotics, Battery, and UUW charges. For all charges
# other than vehicle-related offenses, judges are more likely to incarcerate Black defendants than White. This may be
# a result of judges being more likely to offer White defendants probation.
###Output
_____no_output_____
###Markdown
2.3.3: Examine whether this changes pre and post change to charging threshold for retail theft (13 points)One important question is not only whether there are disparities by offense type but also whether these disparities are changing over time.The SAO, for instance, announced in December of 2016 that they would no longer default to charging retail thefts of under \$1,000 as felonies. This change might have (1) decreased disparities or (2) increased disparities, depending on the correlation between race/ethnicity and magnitude of goods stolen: [news coverage](https://www.dnainfo.com/chicago/20161215/little-village/kim-foxx-raises-bar-for-retail-theft-felonies/). Focusing on `simplified_offense_derived` == "Retail theft." Using a function and/or loop (Dec. 2016 is always excluded as a transition month):- Compare Black-White disparities before and after the change using a two-month bandwidth (so pre is October and November 2016; post is January and February 2017)- Compare Black-White disparities before and after the change using a four-month bandwidth (so pre is August- November 2016; post is January - April 2017)- Compare Black-White disparities using an eight-month bandwidth- Compare Black-White disparities using a twelve-month bandwidth------------------ - Print a table with the results (any organization is fine as long as it's clear) - Create a bar chart where the x axis represents different bandwidths (2, 4, etc); the y axis the size of the Black-White gap in whether the defendant receives incarceration, and for each of the x axis points, you have one shaded bar representing "before" the change, another representing "after" the change (make sure that before is ordered before after and the bandwidths are from smallest to largest)*Note*: for each of the bandwidths include dates the entire month (e.g., for the first, include not only 02-01-2017 but everything up through 02-28-2017; easiest way is for the subsetting to use the rounded `sentenceym_derived`). 
Also make sure to only include white or black defendants.**Extra credit**: because the bandwidths have different sample sizes, a better viz incorporates measures of uncertainty. Add standard errors to the estimates using the formula: $(\dfrac{p(1-p)}{n})^{0.5}$ where $p$ is the gap and $N$ is the number of cases in each bandwidth period
###Code
# Subset to retail theft cases with Black or White defendants only.
# NOTE(review): category spelled "Retail Theft" here -- confirm it matches the
# actual value in simplified_offense_derived (the prompt writes "Retail theft").
retail_theft = sent_cleaned[sent_cleaned.simplified_offense_derived == "Retail Theft"]
retail_theft = retail_theft[(retail_theft.is_black_derived == True) | (retail_theft.is_white_derived == True)]
def bandwidth(search_in, date_min, date_max):
    """Compare Black/White incarceration rates before vs. after Dec. 2016.

    Args:
        search_in: DataFrame of cases; expects columns sentenceymd_derived,
            is_black_derived, is_white_derived, black_incarc, white_incarc.
        date_min, date_max: inclusive date bounds of the bandwidth window.

    Returns:
        DataFrame indexed by "before change"/"following change" with group
        counts, incarceration proportions, and the Black-White gap.
    """
    # Keep rows inside the window...
    new_frame = search_in[(search_in.sentenceymd_derived >= date_min) &
                          (search_in.sentenceymd_derived <= date_max)]
    # ...then drop December 2016, the excluded transition month.
    new_frame = new_frame[(new_frame.sentenceymd_derived < "2016-12-01") |
                          (new_frame.sentenceymd_derived > "2016-12-31")]
    # Label each remaining row relative to the policy change.
    new_frame["before_or_after"] = np.where(new_frame.sentenceymd_derived < "2016-12-01", "before change", "following change")
    table = new_frame.groupby("before_or_after").agg(black_total = ("is_black_derived","sum"),
                                                     white_total = ("is_white_derived", "sum"),
                                                     black_incarc = ("black_incarc", "sum"),
                                                     white_incarc = ("white_incarc", "sum"))
    # Incarceration rate within each race, then the Black-minus-White gap.
    table["black_incarc_proportion"] = table.black_incarc / table.black_total
    table["white_incarc_proportion"] = table.white_incarc / table.white_total
    table["Black_White_Gap"] = table["black_incarc_proportion"] - table["white_incarc_proportion"]
    return(table)
# Run the comparison at 2-, 4-, 8-, and 12-month bandwidths (Dec. 2016 is
# excluded inside bandwidth()); tag each result so they can be stacked.
two_month = bandwidth(retail_theft, "2016-10-01", "2017-02-28")
two_month["bandwidth"] = "2 month"
four_month = bandwidth(retail_theft, "2016-08-01", "2017-04-30")
four_month["bandwidth"] = "4 month"
eight_month = bandwidth(retail_theft, "2016-04-01", "2017-08-31")
eight_month["bandwidth"] = "8 month"
twelve_month = bandwidth(retail_theft, "2015-12-01", "2017-12-31")
twelve_month["bandwidth"] = "12 month"
combined = two_month.append([four_month, eight_month, twelve_month]).reset_index()
combined
# Grouped bar chart: gap before vs. after the policy change at each bandwidth.
# NOTE(review): the annotated numbers are hard-coded standard errors from a
# prior run -- recompute them if the underlying data changes.
ggplot(combined, aes(x = "bandwidth", y = "Black_White_Gap", fill = "before_or_after")) + \
    geom_bar(position = "dodge", stat="identity") + \
    scale_x_discrete(limits=["2 month", "4 month", "8 month", "12 month"])+ \
    annotate("text", x = 0.7, y = .1, label = "0.0185", color = "blue") + \
    annotate("text", x = 1.2, y = .16, label = "0.0226", color = "blue") + \
    annotate("text", x = 1.76, y = .09, label = "0.0122", color = "blue") + \
    annotate("text", x = 2.26, y = .09, label = "0.0138", color = "blue")+ \
    annotate("text", x = 2.76, y = .14, label = "0.0109", color = "blue") + \
    annotate("text", x = 3.26, y = .05, label = "0.0081", color = "blue") + \
    annotate("text", x = 3.76, y = .17, label = "0.0097", color = "blue") + \
    annotate("text", x = 4.26, y = .07, label = "0.0088", color = "blue")
# annotate("text", x = pd.to_datetime("2014-10-05"), y = .29, label = "white defendants", color = "red") + \
###Output
_____no_output_____
###Markdown
2.3.3.1: Interpret the results (2 points)Write a two-sentence interpretation of the results. What might this show about how people on both sides of the issue---those who argued that the retail theft policy change would narrow disparities; those who argued that the change may widen disparities--could support their claims?
###Code
## In the two-month bandwidth, the policy change appears to widen the disparity
## significantly, as shown in the graph. However, at the 8- or 12-month bandwidths,
## the claim that the policy change narrowed disparities looks valid. Overall, the
## results show that the choice of bandwidth matters and yields different
## conclusions about disparities. Lastly, note that a disparity persists even at
## the widest bandwidths.
###Output
_____no_output_____
###Markdown
Q1. Write a python code for finding mean, median and mode.
###Code
# Descriptive statistics (mean / median / mode) for a small sample of ages.
import numpy
from scipy import stats

age= [19,46,67,38,11,46,13,57,44,78,37,55,36]
# Compute all three statistics up front, then print them with matching labels.
x, y, z = numpy.mean(age), numpy.median(age), stats.mode(age)
for label, value in (('Mean', x), ('Median', y), ('Mode', z)):
    print(f'{label} = ', value)
###Output
Mode = ModeResult(mode=array([46]), count=array([2]))
###Markdown
Q2. Write a python code for calculating variance and standard deviation for the set of elements.
###Code
# Spread statistics for the age sample defined in the previous cell.
s = numpy.std(age)  # population standard deviation (numpy default, ddof=0)
v = numpy.var(age)  # population variance (ddof=0)
print('Standard Deviation = ',s)
# Fixed typo in the printed label: "Variacnce" -> "Variance".
print('Variance = ',v)
###Output
Variacnce = 366.07100591715977
###Markdown
Practice some basic python programs
###Code
# This program prints Hello, world!
print('Hello, world!')
# This program adds two numbers
num1 = 1.5
num2 = 6.3
# Add two numbers
# NOTE(review): `sum` shadows the builtin of the same name for later cells.
sum = num1 + num2
# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, sum))
# Python Program to calculate the square root
num = 8
num_sqrt = num ** 0.5  # exponent 0.5 == square root
print('The square root of %0.3f is %0.3f'%(num ,num_sqrt))
# Solve the quadratic equation ax**2 + bx + c = 0
# import complex math module
import cmath
a = 1
b = 5
c = 6
# calculate the discriminant
d = (b**2) - (4*a*c)
# find two solutions (cmath.sqrt also handles a negative discriminant)
sol1 = (-b-cmath.sqrt(d))/(2*a)
sol2 = (-b+cmath.sqrt(d))/(2*a)
print('The solution are {0} and {1}'.format(sol1,sol2))
# Python program to swap two variables
x = 5
y = 10
# create a temporary variable and swap the values
temp = x
x = y
y = temp
print('The value of x after swapping: {}'.format(x))
print('The value of y after swapping: {}'.format(y))
# Program to generate a random number between 0 and 9
# importing the random module
import random
print(random.randint(0,9))
###Output
3
###Markdown
###Code
def Myfunc_a(n, limit=1000):
    """Return all positive multiples of `n` up to `limit` (inclusive).

    Args:
        n: the step size (a positive integer).
        limit: inclusive upper bound; defaults to 1000, matching the
            original hard-coded behavior so existing callers are unchanged.

    Returns:
        list[int]: [n, 2n, ..., k*n] where k = limit // n.
    """
    return [n * i for i in range(1, limit // n + 1)]
def Myfunc_b(a, b):
    """Sum the integers in 1..1000 that are multiples of neither `a` nor `b`.

    Uses Myfunc_a(1) as the universe 1..1000 and excludes the multiples of
    `a` and `b`. Membership tests now use a set (O(1)) instead of the
    original nested list scans (O(n) each), with identical results.
    """
    universe = Myfunc_a(1)
    excluded = set(Myfunc_a(a)) | set(Myfunc_a(b))
    return sum(value for value in universe if value not in excluded)
def Myfunc_c(a):
    """Return the divisors of `a` that are greater than 1, in increasing order.

    Side effect: prints a note when `a` is prime (its only divisor greater
    than 1 is itself). For a <= 1 the result is an empty list.
    The original had an if/else where both branches returned `k`; the
    simplified form is behaviorally identical.
    """
    k = [d for d in range(2, a + 1) if a % d == 0]
    if len(k) == 1:
        print("a is a prime number.")
    return k
def Myfunc_d(a, b):
    """Sum the integers in 1..1000 that are coprime with both `a` and `b`.

    The original recomputed the greatest common divisor by trial division
    for every candidate (its inner loop leaves `y` holding the largest
    common divisor, i.e. gcd). math.gcd produces the same classification
    far more efficiently. Matches the recorded output: Myfunc_d(2, 3) ==
    166333.
    """
    from math import gcd
    return sum(j for j in range(1, 1001)
               if gcd(a, j) == 1 and gcd(b, j) == 1)
###Output
166333
###Markdown
Assignment 1 Quick intro + checking code works on your system Learning Outcomes: The goal of this assignment is two-fold:- This code-base is designed to be easily extended for different research projects. Running this notebook to the end will ensure that the code runs on your system, and that you are set-up to start playing with machine learning code.- This notebook has one complete application: training a CNN classifier to predict the digit in MNIST Images. The code is written to familiarize you to a typical machine learning pipeline, and to the building blocks of code used to do ML. So, read on! Please specify your Name, Email ID and forked repository url here:- Name: Ahmad Saaid- Email: [email protected] Link to your forked github repository: https://github.com/ahmad-saaid/Harvard_BAI
###Code
### General libraries useful for python ###
import os
import sys
from tqdm.notebook import tqdm
import json
import random
import pickle
import copy
from IPython.display import display
import ipywidgets as widgets

# Colab-only: mount Google Drive so the cloned repo and datasets are reachable.
# Prompts for authorization on first run.
from google.colab import drive
drive.mount('/content/drive')

### Finding where you clone your repo, so that code upstream paths can be specified programmatically ####
## work_dir = os.getcwd()
# Hard-coded to this user's Drive checkout; edit if your clone lives elsewhere.
git_dir = '/content/drive/MyDrive/Harvard_BAI'
print('Your github directory is :%s'%git_dir)

### Libraries for visualizing our results and data ###
from PIL import Image
import matplotlib.pyplot as plt

### Import PyTorch and its components ###
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
###Output
_____no_output_____
###Markdown
Let's load our flexible code-base which you will build on for your research projects in future assignments.Above we have imported modules (libraries for those familiar to programming languages other than python). These modules are of two kinds - (1) inbuilt python modules like `os`, `sys`, `random`, or (2) ones which we installed using conda (ex. `torch`).Below we will be importing our own written code which resides in the `res` folder in your github directory. This is structured to be easily expandable for different machine learning projects. Suppose that you want to do a project on object detection. You can easily add a few files to the sub-folders within `res`, and this script will then be flexibly do detection instead of classication (which is presented here). Expanding on this codebase will be the main subject matter of Assignment 2. For now, let's continue with importing.
###Code
### Making helper code under the folder res available. This includes loaders, models, etc. ###
# Path-based import: get_model / get_loader live in <git_dir>/res/.
sys.path.append('%s/res/'%git_dir)
from models.models import get_model
from loader.loader import get_loader
###Output
_____no_output_____
###Markdown
See those paths printed above? `res/models` holds different model files. So, if you want to load ResNet architecture or a transformers architecture, they will reside there as separate files. Similarly, `res/loader` holds programs which are designed to load different types of data. For example, you may want to load data differently for object classification and detection. For classification each image will have only a numerical label corresponding to its category. For detection, the labels for the same image would contain bounding boxes for different objects and the type of the object in the box. So, to expand further you will be adding files to the folders above. Setting up Weights and Biases for tracking your experiments. We have Weights and Biases (wandb.ai) integrated into the code for easy visualization of results and for tracking performance. `Please make an account at wandb.ai, and follow the steps to login to your account!`
###Code
# Notebook magic: install the wandb client (no-op if already installed).
pip install wandb

import wandb
# Prompts for your wandb API key on first run.
wandb.login()
###Output
_____no_output_____
###Markdown
Specifying settings/hyperparameters for our code below
###Code
# Experiment settings consumed by the data loaders, model, and training loop below.
wandb_config = {}
wandb_config['batch_size'] = 10          # samples per optimizer step
wandb_config['base_lr'] = 0.01           # Adam learning rate
wandb_config['model_arch'] = 'CustomCNN' # key understood by get_model()
wandb_config['num_classes'] = 10         # MNIST digits 0-9
wandb_config['run_name'] = 'assignment_1'

### If you are using a CPU, please set wandb_config['use_gpu'] = 0 below. However, if you are using a GPU, leave it unchanged ####
wandb_config['use_gpu'] = 1
wandb_config['num_epochs'] = 2
wandb_config['git_dir'] = git_dir        # repo root; used for dataset/checkpoint paths
###Output
_____no_output_____
###Markdown
By changing above, different experiments can be run. For example, you can specify which model architecture to load, which dataset you will be loading, and so on. Data Loading The most common task many of you will be doing in your projects will be running a script on a new dataset. In PyTorch this is done using data loaders, and it is extremely important to understand this works. In next assignment, you will be writing your own dataloader. For now, we only expose you to basic data loading which for the MNIST dataset for which PyTorch provides easy functions. Let's load MNIST. The first time you run it, the dataset gets downloaded. Data Transforms tell PyTorch how to pre-process your data. Recall that images are stored with values between 0-255 usually. One very common pre-processing for images is to normalize to be 0 mean and 1 standard deviation. This pre-processing makes the task easier for neural networks. There are many, many kinds of normalization in deep learning, the most basic one being those imposed on the image data while loading it.
###Code
# Convert PIL images to tensors, then standardize with MNIST's mean (0.1307)
# and std (0.3081) so pixel values are roughly zero-mean / unit-variance.
data_transforms = {}
data_transforms['train'] = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(
        (0.1307,), (0.3081,))])

data_transforms['test'] = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(
        (0.1307,), (0.3081,))])
###Output
_____no_output_____
###Markdown
`torchvision.datasets.MNIST` allows you to load MNIST data. In future, we will be using our own `get_loader` function from above to load custom data. Notice that data_transforms are passed as argument while loading the data below.
###Code
mnist_dataset = {}
# download=True fetches MNIST into <git_dir>/datasets on first run only.
mnist_dataset['train'] = torchvision.datasets.MNIST('%s/datasets'%wandb_config['git_dir'], train = True, download = True, transform = data_transforms['train'])
mnist_dataset['test'] = torchvision.datasets.MNIST('%s/datasets'%wandb_config['git_dir'], train = False, download = True, transform = data_transforms['test'])
###Output
_____no_output_____
###Markdown
Dataset vs Dataloader Most deep learning datasets are huge. Can be as large as million data points. We want to keep our GPUs free to store intermediate calculations for neural networks, like gradients. We would not be able to load a million samples into the GPU (or even CPU) and do forward or backward passes on the network. So, samples are loaded in batches, and this method of gradient descent is called mini-batch gradient descent. `torch.utils.data.DataLoader` allows you to specify a pytorch dataset, and makes it easy to loop over it in batches. So, we leverage this to create a data loader from our above loaded MNIST dataset. The dataset itself only contains lists of where to find the inputs and outputs i.e. paths. The data loader defines the logic on loading this information into the GPU/CPU and so it can be passed into the neural net.
###Code
data_loaders = {}
# Shuffle only the training stream; evaluation order stays deterministic.
data_loaders['train'] = torch.utils.data.DataLoader(mnist_dataset['train'], batch_size = wandb_config['batch_size'], shuffle = True)
data_loaders['test'] = torch.utils.data.DataLoader(mnist_dataset['test'], batch_size = wandb_config['batch_size'], shuffle = False)

# Dataset sizes are used for per-epoch loss/accuracy normalization below.
data_sizes = {}
data_sizes['train'] = len(mnist_dataset['train'])
data_sizes['test'] = len(mnist_dataset['test'])
###Output
_____no_output_____
###Markdown
We will use the `get_model` functionality to load a CNN architecture.
###Code
# Instantiate the CNN named in wandb_config (see res/models/models.py).
model = get_model(wandb_config['model_arch'], wandb_config['num_classes'])
###Output
_____no_output_____
###Markdown
Curious what the model architecture looks like?`get_model` is just a function in the file `res/models/models.py`. Stop here, open this file, and see what the function does.
###Code
# Small ipywidgets demo: clicking the button prints where the model is defined.
layout = widgets.Layout(width='auto', height='90px') #set width and height
button = widgets.Button(description="Read the function?\n Click me!", layout=layout)
output = widgets.Output()
display(button, output)

def on_button_clicked(b):
    # Click handler: write the explanation into the Output widget.
    with output:
        print("As you can see, the function simply returns an object of the class CustomCNN, which is defined in res/models/CustomCNN.py")
        print("This is our neural network model.")

button.on_click(on_button_clicked)
###Output
_____no_output_____
###Markdown
Below we have the function which trains, tests and returns the best model weights.
###Code
def model_pipeline(model, criterion, optimizer, dset_loaders, dset_sizes, hyperparameters):
    """Train for config.num_epochs epochs, testing after each one.

    Wraps the whole run in wandb.init so every metric logged inside
    train_model/test_model lands in one wandb run. Returns the model
    that achieved the best test accuracy across all epochs.
    """
    with wandb.init(project="HARVAR_BAI", config=hyperparameters):
        if hyperparameters['run_name']:
            wandb.run.name = hyperparameters['run_name']
        config = wandb.config
        best_model = model
        best_acc = 0.0
        print(config)
        print(config.num_epochs)
        for epoch_num in range(config.num_epochs):
            wandb.log({"Current Epoch": epoch_num})
            model = train_model(model, criterion, optimizer, dset_loaders, dset_sizes, config)
            best_acc, best_model = test_model(model, best_acc, best_model, dset_loaders, dset_sizes, config)
    return best_model
###Output
_____no_output_____
###Markdown
The different steps of the train model function are annotated below inside the function. Read them step by step
###Code
def train_model(model, criterion, optimizer, dset_loaders, dset_sizes, configs):
    """Run one training epoch over dset_loaders['train'] and return the updated model."""
    print('Starting training epoch...')
    best_model = model
    best_acc = 0.0
    # NOTE(review): model.train() switches layers such as dropout/batchnorm to
    # training-mode behavior; gradient tracking is handled by autograd, not by
    # this call (the original comment claimed otherwise).
    model.train()
    running_loss = 0.0
    running_corrects = 0
    iters = 0
    ### We loop over the data loader we created above. Simply using a for loop.
    for data in tqdm(dset_loaders['train']):
        inputs, labels = data
        ### If you are using a gpu, then script will move the loaded data to the GPU.
        ### If you are not using a gpu, ensure that wandb_configs['use_gpu'] is set to False above.
        if configs.use_gpu:
            inputs = inputs.float().cuda()
            labels = labels.long().cuda()
        else:
            print('WARNING: NOT USING GPU!')
            inputs = inputs.float()
            labels = labels.long()
        ### Zero accumulated gradients, run the forward pass, compute the loss.
        optimizer.zero_grad()
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        loss = criterion(outputs, labels)
        ### Backward pass computes d(loss)/d(weights); optimizer.step() applies the update.
        loss.backward()
        optimizer.step()
        ### Accumulate running statistics and stream them to the wandb dashboard.
        iters += 1
        running_loss += loss.item()
        running_corrects += torch.sum(preds == labels.data)
        wandb.log({"train_running_loss": running_loss/float(iters*len(labels.data))})
        wandb.log({"train_running_corrects": running_corrects/float(iters*len(labels.data))})
    epoch_loss = float(running_loss) / dset_sizes['train']
    epoch_acc = float(running_corrects) / float(dset_sizes['train'])
    wandb.log({"train_accuracy": epoch_acc})
    wandb.log({"train_loss": epoch_loss})
    return model
def test_model(model, best_acc, best_model, dset_loaders, dset_sizes, configs):
    """Evaluate `model` on the test split and keep the best checkpoint.

    Returns (best_acc, best_model), updated when this epoch beats the
    previous best test accuracy.

    Fixes vs. original: the evaluation loop is wrapped in torch.no_grad()
    so no autograd graph is built (saves memory/time, results unchanged),
    and the running-accuracy metric is logged under "test_running_corrects"
    instead of the copy-pasted "train_running_corrects" key.
    """
    print('Starting testing epoch...')
    # model.eval() switches dropout/batchnorm to inference behavior; it does
    # NOT disable gradient tracking — torch.no_grad() below does that.
    model.eval()
    running_corrects = 0
    iters = 0
    with torch.no_grad():
        for data in tqdm(dset_loaders['test']):
            inputs, labels = data
            if configs.use_gpu:
                inputs = inputs.float().cuda()
                labels = labels.long().cuda()
            else:
                print('WARNING: NOT USING GPU!')
                inputs = inputs.float()
                labels = labels.long()
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            iters += 1
            running_corrects += torch.sum(preds == labels.data)
            wandb.log({"test_running_corrects": running_corrects/float(iters*len(labels.data))})
    epoch_acc = float(running_corrects) / float(dset_sizes['test'])
    wandb.log({"test_accuracy": epoch_acc})
    ### Keep the best model seen so far; deep copy so later training epochs
    ### cannot mutate the stored checkpoint.
    if epoch_acc > best_acc:
        best_acc = epoch_acc
        best_model = copy.deepcopy(model)
    wandb.log({"best_accuracy": best_acc})
    return best_acc, best_model
### Criterion is simply specifying what loss to use. Here we choose cross entropy loss.
criterion = nn.CrossEntropyLoss()

### tells what optimizer to use. There are many options, we here choose Adam.
### the main difference between optimizers is that they vary in how weights are updated based on calculated gradients.
optimizer_ft = optim.Adam(model.parameters(), lr = wandb_config['base_lr'])

# Move loss and model to the GPU when requested in the config.
if wandb_config['use_gpu']:
    criterion.cuda()
    model.cuda()

### Creating the folder where our models will be saved.
if not os.path.isdir("%s/saved_models/"%wandb_config['git_dir']):
    os.mkdir("%s/saved_models/"%wandb_config['git_dir'])

### Let's run it all, and save the final best model.
best_final_model = model_pipeline(model, criterion, optimizer_ft, data_loaders, data_sizes, wandb_config)
save_path = '%s/saved_models/%s_final.pt'%(wandb_config['git_dir'], wandb_config['run_name'])
with open(save_path,'wb') as F:
    torch.save(best_final_model,F)
###Output
_____no_output_____
###Markdown
Statement 1 - Does precipitation play significant role in predicting temperature? Null Hypothesis: There is no significant difference between the mean temperature on rainy and non-rainy days Alternate Hypothesis: There is a significant difference between the mean temperature on rainy and non-rainy days
###Code
# Group statistics for the two precipitation types.
# NOTE(review): the M_/F_ prefixes look copied from a male/female example;
# here "M" actually refers to rain days and "F" to snow days.
M_mean = df_weather.loc[df_weather['Precip Type'] == 'rain', 'Temperature (C)'].mean()
F_mean = df_weather.loc[df_weather['Precip Type'] == 'snow', 'Temperature (C)'].mean()
M_std = df_weather.loc[df_weather['Precip Type'] == 'rain', 'Temperature (C)'].std()
F_std = df_weather.loc[df_weather['Precip Type'] == 'snow', 'Temperature (C)'].std()
no_of_M = df_weather.loc[df_weather['Precip Type'] == 'rain', 'Temperature (C)'].count()
no_of_F = df_weather.loc[df_weather['Precip Type'] == 'snow', 'Temperature (C)'].count()
from scipy.stats import norm
def twoSampZ(X1, X2, mudiff, sd1, sd2, n1, n2):
    """Two-sample z-test for a difference in means.

    Tests H0: (mean1 - mean2) == mudiff given the two sample means,
    standard deviations, and sizes. Returns (z rounded to 3 decimals,
    two-sided p-value).
    """
    standard_error = np.sqrt(sd1 ** 2 / n1 + sd2 ** 2 / n2)
    z_stat = (X1 - X2 - mudiff) / standard_error
    two_sided_p = 2 * (1 - norm.cdf(abs(z_stat)))
    return round(z_stat, 3), two_sided_p
# Hand-rolled z-test, then the same comparison via statsmodels and via
# Welch's t-test from scipy as cross-checks.
z,p= twoSampZ(M_mean,F_mean,0,M_std,F_std,no_of_M,no_of_F)
print(z, p)

import statsmodels.stats.weightstats as ws
col1 = ws.DescrStatsW(df_weather.loc[df_weather['Precip Type'] == 'rain', 'Temperature (C)'])
col2 = ws.DescrStatsW(df_weather.loc[df_weather['Precip Type'] == 'snow', 'Temperature (C)'])
cm_obj = ws.CompareMeans(col1, col2)
# usevar='unequal' = Welch variant (no equal-variance assumption)
zstat, z_pval = cm_obj.ztest_ind(usevar='unequal')
print(zstat.round(3), z_pval.round(3))

from scipy.stats import ttest_ind
ttest_ind(df_weather.loc[df_weather['Precip Type'] == 'rain', 'Temperature (C)'],
          df_weather.loc[df_weather['Precip Type'] == 'snow', 'Temperature (C)'],
          equal_var = False
          )
###Output
_____no_output_____
###Markdown
Thus we can reject the null hypothesis and retain the precipitation type feature. Statement 2 - Does mean temperature vary significantly across different summary groups? Null Hypothesis: There is no significant difference between the mean temperatures among the different summary groups Alternate Hypothesis: There is a significant difference between the mean temperatures among the different summary groups Statement 3 - Is the temperature normally distributed? Null Hypothesis: The temperature is normally distributed Alternate Hypothesis: The temperature is not normally distributed
###Code
from scipy.stats.mstats import normaltest
# D'Agostino-Pearson normality test: a small p-value rejects normality.
normaltest(df_weather['Temperature (C)'].values)

normaltest(df_weather['Humidity'])

df_weather['Humidity'].values
###Output
_____no_output_____
###Markdown
###Code
import pandas as pd
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt

# Simple linear regression: predict house price from area (homeprices.csv
# must be present in the working directory).
dataFrame = pd.read_csv('homeprices.csv')
dataFrame

%matplotlib inline
plt.xlabel('area')
plt.ylabel('price')
plt.scatter(dataFrame.area, dataFrame.price, color='red', marker='+')

# Features: every column except the target.
new_dataFrame = dataFrame.drop('price', axis='columns')
new_dataFrame

price = dataFrame.price
price

reg = linear_model.LinearRegression()
reg.fit(new_dataFrame, price)

reg.predict([[3300]])

reg.coef_

reg.intercept_

# Manual check: prediction = coef * area + intercept
3300*135.78767123 + 180616.43835616432

reg.predict([[5000]])

# Batch-predict prices for a list of areas and save them to CSV.
area_dataFrame = pd.read_csv("areas.csv")
area_dataFrame.head(3)

prediction = reg.predict(area_dataFrame)
prediction

area_dataFrame['prices'] = prediction
area_dataFrame

area_dataFrame.to_csv("prediction.csv")
###Output
_____no_output_____ |
Regression/CatBoost/CatBoostRegressor.ipynb | ###Markdown
Simple CatBoostRegressor This Code template is for regression analysis using CatBoostRegressor. CatBoost is an algorithm for gradient boosting on decision trees.
Required Packages
###Code
!pip install catboost
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from catboost import CatBoostRegressor
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
InitializationFilepath of CSV file
###Code
# Path to the input CSV dataset; set this before running the notebook.
file_path= ''
###Output
_____no_output_____
###Markdown
List of features which are required for model training .
###Code
# Names of the columns used as model inputs; fill in before running.
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
# Name of the column to predict; fill in before running.
target=''
###Output
_____no_output_____
###Markdown
Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
###Code
# Load the dataset and preview the first rows.
df=pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Feature SelectionsIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and target/outcome to Y.
###Code
# Split the frame into input features X and the prediction target Y.
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data PreprocessingSince the majority of the machine learning models doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
###Code
def NullClearner(df):
    """Impute missing values in a pandas Series, in place.

    Numeric (float64/int64) series are filled with their mean, any other
    Series with its most frequent value. Non-Series inputs are returned
    untouched. The input object itself is mutated and also returned.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        fill_value = df.mean()
    else:
        fill_value = df.mode()[0]
    df.fillna(fill_value, inplace=True)
    return df
def EncodeX(df):
    """One-hot encode the categorical columns of `df` with pd.get_dummies."""
    encoded = pd.get_dummies(df)
    return encoded
# Impute missing values column by column, one-hot encode categoricals,
# and clean the target series.
x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
###Output
_____no_output_____
###Markdown
Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
###Code
# Correlation heatmap; the upper-triangle mask hides the redundant half
# of the symmetric correlation matrix.
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code
# Hold out 20% of the data for evaluation; fixed seed for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
###Output
_____no_output_____
###Markdown
ModelCatBoost is an algorithm for gradient boosting on decision trees. Developed by Yandex researchers and engineers, it is the successor of the MatrixNet algorithm that is widely used within the company for ranking tasks, forecasting and making recommendations Tuning parameters1. **learning_rate**:, float, default = it is defined automatically for Logloss, MultiClass & RMSE loss functions depending on the number of iterations if none of these parameters is set>The learning rate. Used for reducing the gradient step.2. **l2_leaf_reg**: float, default = 3.0>Coefficient at the L2 regularization term of the cost function. Any positive value is allowed.3. **bootstrap_type**: string, default = depends on the selected mode and processing unit>Bootstrap type. Defines the method for sampling the weights of objects. * Supported methods: * Bayesian * Bernoulli * MVS * Poisson (supported for GPU only) * No4. **subsample**: float, default = depends on the dataset size and the bootstrap type>Sample rate for bagging. This parameter can be used if one of the following bootstrap types is selected: * Poisson * Bernoulli * MVSFor more information refer: [API](https://catboost.ai/docs/concepts/python-reference_catboostregressor.html)
###Code
# Build Model here
# verbose=False silences CatBoost's per-iteration training log.
model = CatBoostRegressor(verbose=False)
model.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
Model Accuracyscore() method return the mean accuracy on the given test data and labels.In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
###Code
# R^2 of the fitted model on the held-out split, shown as a percentage.
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
###Output
Accuracy score 96.49 %
###Markdown
> **r2_score**: The **r2_score** function computes the percentage of variability explained by our model, either the fraction or the count of correct predictions. > **mae**: The **mean absolute error** function calculates the amount of total error (the average absolute distance between the real data and the predicted data) produced by our model. > **mse**: The **mean squared error** function squares the error (penalizing the model for large errors) produced by our model.
###Code
# Report regression metrics on the held-out split.
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
###Output
R2 Score: 96.49 %
Mean Absolute Error 2.30
Mean Squared Error 10.32
###Markdown
Prediction PlotFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.
###Code
# Overlay the first 20 true test targets and the model's predictions.
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(x_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
###Output
_____no_output_____ |
[Master]Fine_Tune_BERT_for_Text_Classification_with_TensorFlow.ipynb | ###Markdown
Fine-Tune BERT for Text Classification with TensorFlow Figure 1: BERT Classification Model In this project, you will learn how to fine-tune a BERT model for text classification using TensorFlow and TF-Hub. The pretrained BERT model used in this project is [available](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2) on [TensorFlow Hub](https://tfhub.dev/). Learning Objectives By the time you complete this project, you will be able to:- Build TensorFlow Input Pipelines for Text Data with the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API- Tokenize and Preprocess Text for BERT- Fine-tune BERT for text classification with TensorFlow 2 and [TF Hub](https://tfhub.dev) Prerequisites In order to be successful with this project, it is assumed you are:- Competent in the Python programming language- Familiar with deep learning for Natural Language Processing (NLP)- Familiar with TensorFlow, and its Keras API Contents This project/notebook consists of several Tasks.- **[Task 1]()**: Introduction to the Project.- **[Task 2]()**: Setup your TensorFlow and Colab Runtime- **[Task 3]()**: Download and Import the Quora Insincere Questions Dataset- **[Task 4]()**: Create tf.data.Datasets for Training and Evaluation- **[Task 5]()**: Download a Pre-trained BERT Model from TensorFlow Hub- **[Task 6]()**: Tokenize and Preprocess Text for BERT- **[Task 7]()**: Wrap a Python Function into a TensorFlow op for Eager Execution- **[Task 8]()**: Create a TensorFlow Input Pipeline with `tf.data`- **[Task 9]()**: Add a Classification Head to the BERT `hub.KerasLayer`- **[Task 10]()**: Fine-Tune BERT for Text Classification- **[Task 11]()**: Evaluate the BERT Text Classification Model Task 2: Setup your TensorFlow and Colab Runtime. You will only be able to use the Colab Notebook after you save it to your Google Drive folder. 
Click on the File menu and select “Save a copy in Drive… Check GPU AvailabilityCheck if your Colab notebook is configured to use Graphical Processing Units (GPUs). If zero GPUs are available, check if the Colab notebook is configured to use GPUs (Menu > Runtime > Change Runtime Type).
###Code
# Shell magic: list available NVIDIA GPUs; errors if the runtime has none.
!nvidia-smi
###Output
_____no_output_____
###Markdown
Install TensorFlow and TensorFlow Model Garden
###Code
import tensorflow as tf
print(tf.version.VERSION)

# Pin TF 2.3 and fetch the matching v2.3.0 tag of the TF Model Garden.
!pip install -q tensorflow==2.3.0
!git clone --depth 1 -b v2.3.0 https://github.com/tensorflow/models.git

# install requirements to use tensorflow/models repository
!pip install -Uqr models/official/requirements.txt
# you may have to restart the runtime afterwards
###Output
_____no_output_____
###Markdown
Restart the Runtime**Note** After installing the required Python packages, you'll need to restart the Colab Runtime Engine (Menu > Runtime > Restart runtime...) Task 3: Download and Import the Quora Insincere Questions Dataset
###Code
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub

import sys
# Make the cloned TF Model Garden (./models from the cell above) importable.
sys.path.append('models')
from official.nlp.data import classifier_data_lib
from official.nlp.bert import tokenization
from official.nlp import optimization

# Environment sanity check: versions, eager mode, GPU availability.
print("TF Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
###Output
TF Version: 2.3.0
Eager mode: True
Hub version: 0.9.0
GPU is available
###Markdown
A downloadable copy of the [Quora Insincere Questions Classification data](https://www.kaggle.com/c/quora-insincere-questions-classification/data) can be found [https://archive.org/download/fine-tune-bert-tensorflow-train.csv/train.csv.zip](https://archive.org/download/fine-tune-bert-tensorflow-train.csv/train.csv.zip). Decompress and read the data into a pandas DataFrame.
###Code
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

# Download the zipped Quora Insincere Questions training data from the
# archive.org mirror and load it directly into a DataFrame.
df = pd.read_csv('https://archive.org/download/fine-tune-bert-tensorflow-train.csv/train.csv.zip',
                 compression='zip', low_memory=False)
df.shape

df.tail(20)

# Plot the distribution of the binary target.
df.target.plot(kind='hist', title='Target distribution');
###Output
_____no_output_____
###Markdown
Task 4: Create tf.data.Datasets for Training and Evaluation
###Code
# Stratified sub-samples: train_size=0.0075 keeps only ~0.75% of the rows for
# training (and an even smaller validation slice), preserving class balance.
train_df, remaining = train_test_split(df, random_state=42, train_size=0.0075, stratify=df.target.values)
valid_df, _ = train_test_split(remaining, random_state=42, train_size=0.00075, stratify=remaining.target.values)
train_df.shape, valid_df.shape

# Create the tf.data datasets on the CPU, keeping GPU memory for the model.
with tf.device('/cpu:0'):
    train_data = tf.data.Dataset.from_tensor_slices((train_df.question_text.values, train_df.target.values))
    valid_data = tf.data.Dataset.from_tensor_slices((valid_df.question_text.values, valid_df.target.values))

# Peek at one (text, label) example.
for text, label in train_data.take(1):
    print(text)
    print(label)
###Markdown
Task 5: Download a Pre-trained BERT Model from TensorFlow Hub
###Code
"""
Each line of the dataset is composed of the review text and its label
- Data preprocessing consists of transforming text to BERT input features:
input_word_ids, input_mask, segment_ids
- In the process, tokenizing the text is done with the provided BERT model tokenizer
"""

label_list = [0, 1] # Label categories
max_seq_length = 128 # maximum length of (token) input sequences
train_batch_size = 32

# Get BERT layer and tokenizer:
# More details here: https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2",
                            trainable=True)

# Build the WordPiece tokenizer from the vocab shipped with the TF-Hub module.
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)

# Quick tokenizer sanity checks.
tokenizer.wordpiece_tokenizer.tokenize('hi, how are you doing?')

tokenizer.convert_tokens_to_ids(tokenizer.wordpiece_tokenizer.tokenize('hi, how are you doing?'))
###Output
_____no_output_____
###Markdown
Task 6: Tokenize and Preprocess Text for BERT Figure 2: BERT Tokenizer We'll need to transform our data into a format BERT understands. This involves two steps. First, we create InputExamples using `classifier_data_lib`'s constructor `InputExample` provided in the BERT library.
###Code
# This provides a function to convert row to input features and label
def to_feature(text, label, label_list=label_list, max_seq_length=max_seq_length, tokenizer=tokenizer):
example = classifier_data_lib.InputExample(guid = None,
text_a = text.numpy(),
text_b = None,
label = label.numpy())
feature = classifier_data_lib.convert_single_example(0, example, label_list,
max_seq_length, tokenizer)
return (feature.input_ids, feature.input_mask, feature.segment_ids, feature.label_id)
###Output
_____no_output_____
###Markdown
You want to use [`Dataset.map`](https://www.tensorflow.org/api_docs/python/tf/data/Datasetmap) to apply this function to each element of the dataset. [`Dataset.map`](https://www.tensorflow.org/api_docs/python/tf/data/Datasetmap) runs in graph mode.- Graph tensors do not have a value.- In graph mode you can only use TensorFlow Ops and functions.So you can't `.map` this function directly: You need to wrap it in a [`tf.py_function`](https://www.tensorflow.org/api_docs/python/tf/py_function). The [`tf.py_function`](https://www.tensorflow.org/api_docs/python/tf/py_function) will pass regular tensors (with a value and a `.numpy()` method to access it), to the wrapped python function. Task 7: Wrap a Python Function into a TensorFlow op for Eager Execution
###Code
def to_feature_map(text, label):
    """Graph-mode wrapper around `to_feature` usable with `Dataset.map`."""
    # tf.py_function lets the eager-only `to_feature` run inside graph mode.
    input_ids, input_mask, segment_ids, label_id = tf.py_function(
        to_feature,
        inp=[text, label],
        Tout=[tf.int32, tf.int32, tf.int32, tf.int32])
    # tf.py_function loses static shape information; restore it explicitly.
    for tensor in (input_ids, input_mask, segment_ids):
        tensor.set_shape([max_seq_length])
    label_id.set_shape([])
    # Key names must match the input layer names of the Keras model.
    features = {
        'input_word_ids': input_ids,
        'input_mask': input_mask,
        'input_type_ids': segment_ids
    }
    return (features, label_id)
###Output
_____no_output_____
###Markdown
Task 8: Create a TensorFlow Input Pipeline with `tf.data`
###Code
# Assemble the tf.data input pipelines on the CPU.
with tf.device('/cpu:0'):
    # Training pipeline: tokenize in parallel, shuffle, batch, prefetch.
    train_data = train_data.map(
        to_feature_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train_data = train_data.shuffle(1000)
    train_data = train_data.batch(32, drop_remainder=True)
    train_data = train_data.prefetch(tf.data.experimental.AUTOTUNE)
    # Validation pipeline: same transformation, minus shuffling.
    valid_data = valid_data.map(
        to_feature_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    valid_data = valid_data.batch(32, drop_remainder=True)
    valid_data = valid_data.prefetch(tf.data.experimental.AUTOTUNE)
###Output
_____no_output_____
###Markdown
The resulting `tf.data.Datasets` return `(features, labels)` pairs, as expected by [`keras.Model.fit`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit):
###Code
# data spec
train_data.element_spec
# data spec
valid_data.element_spec
###Output
_____no_output_____
###Markdown
Task 9: Add a Classification Head to the BERT Layer Figure 3: BERT Layer
###Code
# Building the model: BERT encoder + dropout + single-unit sigmoid head.
def create_model():
    """Return a Keras model that classifies a tokenized question as 0/1."""
    # Three parallel integer inputs, one per BERT feature.
    word_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                     name="input_word_ids")
    mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                 name="input_mask")
    type_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                     name="input_type_ids")
    # pooled_output summarises the whole sequence; the per-token
    # sequence_output is not needed by this classification head.
    pooled_output, _ = bert_layer([word_ids, mask, type_ids])
    head = tf.keras.layers.Dropout(0.4)(pooled_output)
    head = tf.keras.layers.Dense(1, activation="sigmoid", name="output")(head)
    return tf.keras.Model(
        inputs={
            'input_word_ids': word_ids,
            'input_mask': mask,
            'input_type_ids': type_ids
        },
        outputs=head)
###Output
_____no_output_____
###Markdown
Task 10: Fine-Tune BERT for Text Classification
###Code
model = create_model()
# Compile for binary classification; 2e-5 is the small learning rate
# conventionally used when fine-tuning BERT end to end.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.BinaryAccuracy()])
model.summary()
# Render the model graph (input heads -> BERT -> classification head).
tf.keras.utils.plot_model(model=model, show_shapes=True, dpi=76, )
# Train model
epochs = 4
history = model.fit(train_data,
                    validation_data=valid_data,
                    epochs=epochs,
                    verbose=1)
###Output
Epoch 1/2
306/306 [==============================] - ETA: 0s - loss: 0.1679 - binary_accuracy: 0.9391WARNING:tensorflow:Callbacks method `on_test_batch_end` is slow compared to the batch time (batch time: 0.0122s vs `on_test_batch_end` time: 0.1396s). Check your callbacks.
###Markdown
Task 11: Evaluate the BERT Text Classification Model
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, metric):
    """Plot a training metric and its validation counterpart against epochs."""
    val_metric = 'val_' + metric
    plt.plot(history.history[metric])
    # The empty format string keeps matplotlib's default line style.
    plt.plot(history.history[val_metric], '')
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric, val_metric])
    plt.show()
plot_graphs(history, 'binary_accuracy')
plot_graphs(history, 'loss')
model.evaluate(valid_data, verbose=1)
# NOTE(review): these examples are blank placeholders (" "); fill them in with
# real questions before drawing conclusions from the predictions below.
sample_example = [" ",\
                  " ",\
                  " ",\
                  " ",\
                  " ",\
                  " "]
# Dummy labels (all 0) are required by the (text, label) pipeline signature;
# they are ignored by model.predict.
test_data = tf.data.Dataset.from_tensor_slices((sample_example, [0]*len(sample_example)))
test_data = (test_data.map(to_feature_map).batch(1))
preds = model.predict(test_data)
#['Toxic' if pred >=0.5 else 'Sincere' for pred in preds]
preds
###Output
_____no_output_____
###Markdown
Fine-Tune BERT for Text Classification with TensorFlow Figure 1: BERT Classification Model In this project, you will learn how to fine-tune a BERT model for text classification using TensorFlow and TF-Hub. The pretrained BERT model used in this project is [available](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2) on [TensorFlow Hub](https://tfhub.dev/). Learning Objectives By the time you complete this project, you will be able to:- Build TensorFlow Input Pipelines for Text Data with the [`tf.data`](https://www.tensorflow.org/api_docs/python/tf/data) API- Tokenize and Preprocess Text for BERT- Fine-tune BERT for text classification with TensorFlow 2 and [TF Hub](https://tfhub.dev) Prerequisites In order to be successful with this project, it is assumed you are:- Competent in the Python programming language- Familiar with deep learning for Natural Language Processing (NLP)- Familiar with TensorFlow, and its Keras API Contents This project/notebook consists of several Tasks.- **[Task 1]()**: Introduction to the Project.- **[Task 2]()**: Setup your TensorFlow and Colab Runtime- **[Task 3]()**: Download and Import the Quora Insincere Questions Dataset- **[Task 4]()**: Create tf.data.Datasets for Training and Evaluation- **[Task 5]()**: Download a Pre-trained BERT Model from TensorFlow Hub- **[Task 6]()**: Tokenize and Preprocess Text for BERT- **[Task 7]()**: Wrap a Python Function into a TensorFlow op for Eager Execution- **[Task 8]()**: Create a TensorFlow Input Pipeline with `tf.data`- **[Task 9]()**: Add a Classification Head to the BERT `hub.KerasLayer`- **[Task 10]()**: Fine-Tune BERT for Text Classification- **[Task 11]()**: Evaluate the BERT Text Classification Model Task 2: Setup your TensorFlow and Colab Runtime. You will only be able to use the Colab Notebook after you save it to your Google Drive folder. 
Click on the File menu and select “Save a copy in Drive… Check GPU AvailabilityCheck if your Colab notebook is configured to use Graphical Processing Units (GPUs). If zero GPUs are available, check if the Colab notebook is configured to use GPUs (Menu > Runtime > Change Runtime Type).
###Code
!nvidia-smi
# conda install -c anaconda tensorflow-gpu
###Output
_____no_output_____
###Markdown
Install TensorFlow and TensorFlow Model Garden
###Code
import tensorflow as tf
print(tf.version.VERSION)
#!pip install -q tensorflow==2.3.0
# !git clone --depth 1 -b v2.3.0 https://github.com/tensorflow/models.git
# # install requirements to use tensorflow/models repository
# !pip install -Uqr models/official/requirements.txt
# # you may have to restart the runtime afterwards
###Output
_____no_output_____
###Markdown
Restart the Runtime**Note** After installing the required Python packages, you'll need to restart the Colab Runtime Engine (Menu > Runtime > Restart runtime...) Task 3: Download and Import the Quora Insincere Questions Dataset
###Code
# pip install tensorflow_datasets
# pip install sentencepiece
# pip install gin-config
# pip install tensorflow-addons
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import sys
sys.path.append('models')
from official.nlp.data import classifier_data_lib
from official.nlp.bert import tokenization
from official.nlp import optimization
print("TF Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
###Output
TF Version: 2.8.0
Eager mode: True
Hub version: 0.12.0
GPU is NOT AVAILABLE
###Markdown
A downloadable copy of the [Quora Insincere Questions Classification data](https://www.kaggle.com/c/quora-insincere-questions-classification/data) can be found [https://archive.org/download/fine-tune-bert-tensorflow-train.csv/train.csv.zip](https://archive.org/download/fine-tune-bert-tensorflow-train.csv/train.csv.zip). Decompress and read the data into a pandas DataFrame.
###Code
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv('https://archive.org/download/fine-tune-bert-tensorflow-train.csv/train.csv.zip',
compression='zip', low_memory=False)
df.shape
df.tail(20)
df.target.plot(kind='hist', title='Target distribution');
###Output
_____no_output_____
###Markdown
Task 4: Create tf.data.Datasets for Training and Evaluation
###Code
train_df, remaining = train_test_split(df, random_state=42, train_size=0.0075, stratify=df.target.values)
valid_df, _ = train_test_split(remaining, random_state=42, train_size=0.00075, stratify=remaining.target.values)
train_df.shape, valid_df.shape
with tf.device('/cpu:0'):
train_data = tf.data.Dataset.from_tensor_slices((train_df.question_text.values, train_df.target.values))
valid_data = tf.data.Dataset.from_tensor_slices((valid_df.question_text.values, valid_df.target.values))
for text, label in train_data.take(1):
print(text)
print(label)
###Output
tf.Tensor(b'Why are unhealthy relationships so desirable?', shape=(), dtype=string)
tf.Tensor(0, shape=(), dtype=int64)
###Markdown
Task 5: Download a Pre-trained BERT Model from TensorFlow Hub
###Code
"""
Each line of the dataset is composed of the review text and its label
- Data preprocessing consists of transforming text to BERT input features:
input_word_ids, input_mask, segment_ids
- In the process, tokenizing the text is done with the provided BERT model tokenizer
"""
label_list = [0, 1] # Label categories
max_seq_length = 128 # maximum length of (token) input sequences
train_batch_size = 32
# Get BERT layer and tokenizer:
# More details here: https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2
# trainable=True fine-tunes all encoder weights rather than freezing them.
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2",
                            trainable=True)
# Recover the WordPiece vocabulary file and casing flag packaged with the model.
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
# Sanity check: tokenize a sample sentence and map the tokens to vocab ids.
tokenizer.wordpiece_tokenizer.tokenize('hi, how are you doing?')
tokenizer.convert_tokens_to_ids(tokenizer.wordpiece_tokenizer.tokenize('hi, how are you doing?'))
###Output
_____no_output_____
###Markdown
Task 6: Tokenize and Preprocess Text for BERT Figure 2: BERT Tokenizer We'll need to transform our data into a format BERT understands. This involves two steps. First, we create InputExamples using `classifier_data_lib`'s constructor `InputExample` provided in the BERT library.
###Code
# This provides a function to convert row to input features and label
def to_feature(text, label, label_list=label_list, max_seq_length=max_seq_length, tokenizer=tokenizer):
  """Convert one eager (text, label) tensor pair into BERT input features.

  Returns (input_ids, input_mask, segment_ids, label_id).
  """
  # Single-sentence example: no second text segment and no example guid.
  example = classifier_data_lib.InputExample(guid = None,
                                            text_a = text.numpy(),
                                            text_b = None,
                                            label = label.numpy())
  # Tokenizes, truncates/pads to max_seq_length and maps the label to its id.
  feature = classifier_data_lib.convert_single_example(0, example, label_list,
                                    max_seq_length, tokenizer)
  return (feature.input_ids, feature.input_mask, feature.segment_ids, feature.label_id)
###Output
_____no_output_____
###Markdown
You want to use [`Dataset.map`](https://www.tensorflow.org/api_docs/python/tf/data/Datasetmap) to apply this function to each element of the dataset. [`Dataset.map`](https://www.tensorflow.org/api_docs/python/tf/data/Datasetmap) runs in graph mode.- Graph tensors do not have a value.- In graph mode you can only use TensorFlow Ops and functions.So you can't `.map` this function directly: You need to wrap it in a [`tf.py_function`](https://www.tensorflow.org/api_docs/python/tf/py_function). The [`tf.py_function`](https://www.tensorflow.org/api_docs/python/tf/py_function) will pass regular tensors (with a value and a `.numpy()` method to access it), to the wrapped python function. Task 7: Wrap a Python Function into a TensorFlow op for Eager Execution
###Code
def to_feature_map(text, label):
  """Wrap `to_feature` in tf.py_function so it works inside `Dataset.map`."""
  input_ids, input_mask, segment_ids, label_id = tf.py_function(to_feature, inp=[text, label],
                                Tout=[tf.int32, tf.int32, tf.int32, tf.int32])
  # py_func doesn't set the shape of the returned tensors, so restore it here.
  input_ids.set_shape([max_seq_length])
  input_mask.set_shape([max_seq_length])
  segment_ids.set_shape([max_seq_length])
  label_id.set_shape([])
  # Key names must match the input layer names of the Keras model.
  x = {
        'input_word_ids': input_ids,
        'input_mask': input_mask,
        'input_type_ids': segment_ids
    }
  return (x, label_id)
###Output
_____no_output_____
###Markdown
Task 8: Create a TensorFlow Input Pipeline with `tf.data`
###Code
# Build the input pipelines on the CPU so accelerator memory stays free for
# the model itself.
with tf.device('/cpu:0'):
  # train: tokenize in parallel, shuffle, batch and prefetch.
  train_data = (train_data.map(to_feature_map,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
                          #.cache()
                          .shuffle(1000)
                          .batch(32, drop_remainder=True)
                          .prefetch(tf.data.experimental.AUTOTUNE))
  # valid: evaluation data is not shuffled.
  valid_data = (valid_data.map(to_feature_map,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
                          .batch(32, drop_remainder=True)
                          .prefetch(tf.data.experimental.AUTOTUNE))
###Output
_____no_output_____
###Markdown
The resulting `tf.data.Datasets` return `(features, labels)` pairs, as expected by [`keras.Model.fit`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit):
###Code
# data spec
train_data.element_spec
# data spec
valid_data.element_spec
###Output
_____no_output_____
###Markdown
Task 9: Add a Classification Head to the BERT Layer Figure 3: BERT Layer
###Code
# Building the model
def create_model():
  """Build a Keras model: BERT encoder + dropout + single-unit sigmoid head."""
  # One input per BERT feature; all are fixed to max_seq_length tokens.
  input_word_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                      name="input_word_ids")
  input_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                  name="input_mask")
  input_type_ids = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32,
                                  name="input_type_ids")
  # pooled_output summarises the whole sequence; sequence_output (per-token
  # embeddings) is unused by this classification head.
  pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, input_type_ids])
  drop = tf.keras.layers.Dropout(0.4)(pooled_output)
  output = tf.keras.layers.Dense(1, activation="sigmoid", name="output")(drop)
  model = tf.keras.Model(
    inputs={
        'input_word_ids': input_word_ids,
        'input_mask': input_mask,
        'input_type_ids': input_type_ids
    },
    outputs=output)
  return model
###Output
_____no_output_____
###Markdown
Task 10: Fine-Tune BERT for Text Classification
###Code
model = create_model()
# Compile for binary classification; 2e-5 is the small learning rate
# conventionally used when fine-tuning BERT end to end.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.BinaryAccuracy()])
model.summary()
# Render the model graph (input heads -> BERT -> classification head).
tf.keras.utils.plot_model(model=model, show_shapes=True, dpi=76, )
# Train model
epochs = 3
history = model.fit(train_data,
                    validation_data=valid_data,
                    epochs=epochs,
                    verbose=1)
###Output
Epoch 1/3
306/306 [==============================] - 4150s 14s/step - loss: 0.1596 - binary_accuracy: 0.9439 - val_loss: 0.1217 - val_binary_accuracy: 0.9563
Epoch 2/3
306/306 [==============================] - 3827s 13s/step - loss: 0.1011 - binary_accuracy: 0.9616 - val_loss: 0.1224 - val_binary_accuracy: 0.9583
Epoch 3/3
306/306 [==============================] - 3707s 12s/step - loss: 0.0543 - binary_accuracy: 0.9825 - val_loss: 0.1527 - val_binary_accuracy: 0.9531
###Markdown
Task 11: Evaluate the BERT Text Classification Model
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, metric):
  """Plot a training metric and its validation counterpart against epochs."""
  plt.plot(history.history[metric])
  # '' is an empty format string, i.e. matplotlib's default line style.
  plt.plot(history.history['val_'+metric], '')
  plt.xlabel("Epochs")
  plt.ylabel(metric)
  plt.legend([metric, 'val_'+metric])
  plt.show()
plot_graphs(history, 'binary_accuracy')
plot_graphs(history, 'loss')
model.evaluate(valid_data, verbose=1)
###Output
30/30 [==============================] - 112s 4s/step - loss: 0.1527 - binary_accuracy: 0.9531
###Markdown
Testing all negatives
###Code
# Manually inspect the model on known-insincere (target == 1) questions.
df_negs = df[df.target == 1]
df_neg_comments = df_negs["question_text"]
for com in df_neg_comments.tail(10):
    print(com)
    print("_____________________")
    print("_____________________")
# Hand-picked insincere examples for a spot check of the fine-tuned model.
sample_example = ["What are not scam online services for essay writing?",
                  "Is it just me, or was Prince the singer ugly as fudge?",
                  "Could we finally give Einstein's first wife credit for her equation, E=m (c squared)? She deserves a lot more than that.",
                  "Why do Jews want endless immigration to the US, but Israel has a wall, Saharonim prison, and very strict immigration laws against non-Jews?",
                  "Will Oprah buy the DNC to guarantee her nomination in 2020 like Hillary did in 2016?",
                  "Why is it when singers have lyrics about voices in their head, religious people say they hear god and anyone stating they are being targeted by voice to brain technology? Are they called delusional schizo?",
                  "Do pakis smell of curry and shit?",
                  # BUG FIX: the comma above was missing, so this entry and the
                  # previous one were silently merged into a single string by
                  # implicit concatenation (the list had 8 elements, not 9).
                  "Isn't Trump right after all? Why should the USA be benevolent towards it's neighbor Mexico, and not put America first, when Russia is invading Ukraine? Even India imposed a blockade over Nepal after an earthquake",
                  "Are you ashamed of being an Indian?"]
# Dummy labels (all 0) satisfy the (text, label) pipeline signature; they are
# ignored by model.predict.
test_data = tf.data.Dataset.from_tensor_slices((sample_example, [0]*len(sample_example)))
test_data = (test_data.map(to_feature_map).batch(1))
preds = model.predict(test_data)
['Toxic' if pred >=0.5 else 'Sincere' for pred in preds]
###Output
_____no_output_____
###Markdown
1 out of 10 is labelled wrong based on manual inspection.
###Code
preds
###Output
_____no_output_____
###Markdown
Testing all positives
###Code
df_pos = df[df.target == 0]
df_pos_comments = df_pos["question_text"]
for com in df_pos_comments.tail(10):
print(com)
print("")
sample_example = ["If you had $10 million of Bitcoin, could you sell it and pay no capital gain tax if you also quit work and had no ordinary income for the year?",
"What are the methods to determine fossil ages in 10th STD?",
"What is your story today?",
"How do I consume 150 gms protein daily both vegetarian and non vegetarian diet seperately?",
"What are the good career options for a msc chemistry student after qualifying gate?",
"What other technical skills do you need as a computer science undergrad other than c and c++?",
"Does MS in ECE have good job prospects in USA or like India there are more IT jobs present?",
"Is foam insulation toxic?",
"How can one start a research project based on biochemistry at UG level?",
"Who wins in a battle between a Wolverine and a Puma?"]
test_data = tf.data.Dataset.from_tensor_slices((sample_example, [0]*len(sample_example)))
test_data = (test_data.map(to_feature_map).batch(1))
preds = model.predict(test_data)
['Toxic' if pred >=0.5 else 'Sincere' for pred in preds]
# 10/10 for sincere comments.
preds
###Output
_____no_output_____ |
Credit Risk Modeling/Credit Risk Modeling - Preparation - With Comments - 5-2.ipynb | ###Markdown
Data Preparation Import Libraries
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Import DataThe dataset contains all available data for more than 800,000 consumer loans issued from 2007 to 2015 by Lending Club: a large US peer-to-peer lending company. There are several different versions of this dataset. We have used a version available on kaggle.com. You can find it here: https://www.kaggle.com/wendykan/lending-club-loan-data/version/1We divided the data into two periods because we assume that some data are available at the moment when we need to build Expected Loss models, and some data comes from applications after. Later, we investigate whether the applications we have after we built the Probability of Default (PD) model have similar characteristics with the applications we used to build the PD model.
###Code
# Keep an untouched backup of the raw import so preprocessing can be re-run
# without re-reading the CSV from disk.
loan_data_backup = pd.read_csv('loan_data_2007_2014.csv')
loan_data = loan_data_backup.copy()
###Output
_____no_output_____
###Markdown
Explore Data
###Code
loan_data
pd.options.display.max_columns = None
#pd.options.display.max_rows = None
# Sets the pandas dataframe options to display all columns/ rows.
loan_data
loan_data.head()
loan_data.tail()
loan_data.columns.values
# Displays all column names.
loan_data.info()
# Displays column names, complete (non-missing) cases per column, and datatype per column.
###Output
_____no_output_____
###Markdown
General Preprocessing Preprocessing few continuous variables
###Code
loan_data['emp_length'].unique()
# Displays unique values of a column.

# Convert the categorical 'emp_length' strings (e.g. '10+ years', '< 1 year',
# 'n/a', '3 years') into a numeric number of years in 'emp_length_int'.
# BUG FIX: the original relied on the old pandas default of regex=True in
# Series.str.replace ('\+ years' is a regex pattern). Since pandas 2.0 the
# default is regex=False, so the first pattern must be flagged explicitly or
# '10+ years' would no longer be cleaned. The purely literal replacements are
# marked regex=False for clarity and forward compatibility.
loan_data['emp_length_int'] = loan_data['emp_length'].str.replace(r'\+ years', '', regex=True)
loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace('< 1 year', str(0), regex=False)
loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace('n/a', str(0), regex=False)
loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace(' years', '', regex=False)
loan_data['emp_length_int'] = loan_data['emp_length_int'].str.replace(' year', '', regex=False)
type(loan_data['emp_length_int'][0])
# Checks the datatype of a single element of a column (still str at this point).
loan_data['emp_length_int'] = pd.to_numeric(loan_data['emp_length_int'])
# Transforms the values to numeric.
type(loan_data['emp_length_int'][0])
# Checks the datatype of a single element of a column.
loan_data['earliest_cr_line']
# Displays a column.
loan_data['earliest_cr_line_date'] = pd.to_datetime(loan_data['earliest_cr_line'], format = '%b-%y')
# Extracts the date and the time from a string variable that is in a given format.
# NOTE(review): '%b-%y' parses two-digit years, so dates from 1969 and before
# roll over into the 2060s; the negative month differences handled below are a
# symptom of this parsing quirk.
type(loan_data['earliest_cr_line_date'][0])
# Checks the datatype of a single element of a column.
pd.to_datetime('2017-12-01') - loan_data['earliest_cr_line_date']
# Calculates the difference between two dates and times.
# Assume we are now in December 2017
loan_data['mths_since_earliest_cr_line'] = round(pd.to_numeric((pd.to_datetime('2017-12-01') - loan_data['earliest_cr_line_date']) / np.timedelta64(1, 'M')))
# We calculate the difference between two dates in months, turn it to numeric datatype and round it.
# We save the result in a new variable.
loan_data['mths_since_earliest_cr_line'].describe()
# Shows some descriptive statistics for the values of a column.
loan_data.loc[: , ['earliest_cr_line', 'earliest_cr_line_date', 'mths_since_earliest_cr_line']][loan_data['mths_since_earliest_cr_line'] < 0]
# We take three columns from the dataframe. Then, we display them only for the rows where a variable has negative value.
# There are 2303 strange negative values.
# BUG FIX: the original used chained indexing
# (loan_data['col'][mask] = value), which raises SettingWithCopyWarning and can
# silently fail to modify the DataFrame (it is a no-op under pandas
# copy-on-write). A single .loc assignment performs the update safely.
loan_data.loc[loan_data['mths_since_earliest_cr_line'] < 0, 'mths_since_earliest_cr_line'] = loan_data['mths_since_earliest_cr_line'].max()
# We set the rows that had negative differences to the maximum value.
min(loan_data['mths_since_earliest_cr_line'])
# Calculates and shows the minimum value of a column.
###Output
_____no_output_____
###Markdown
Homework
###Code
# Convert the loan 'term' strings (e.g. '36 months') to integers in 'term_int'.
loan_data['term']
loan_data['term'].describe()
# Shows some descriptive statistics for the values of a column.
loan_data['term_int'] = loan_data['term'].str.replace(' months', '')
# We replace a string with another string, in this case, with an empty string (i.e. with nothing).
loan_data['term_int']
type(loan_data['term_int'][25])
# Checks the datatype of a single element of a column (still str here).
loan_data['term_int'] = pd.to_numeric(loan_data['term'].str.replace(' months', ''))
# We replace a string from a variable with another string, in this case, with an empty string (i.e. with nothing).
# We turn the result to numeric datatype and save it in another variable.
loan_data['term_int']
type(loan_data['term_int'][0])
# Checks the datatype of a single element of a column.
loan_data['issue_d']
# Assume we are now in December 2017
loan_data['issue_d_date'] = pd.to_datetime(loan_data['issue_d'], format = '%b-%y')
# Extracts the date and the time from a string variable that is in a given format.
loan_data['mths_since_issue_d'] = round(pd.to_numeric((pd.to_datetime('2017-12-01') - loan_data['issue_d_date']) / np.timedelta64(1, 'M')))
# We calculate the difference between two dates in months, turn it to numeric datatype and round it.
# We save the result in a new variable.
loan_data['mths_since_issue_d'].describe()
# Shows some descriptive statistics for the values of a column.
_____no_output_____
###Markdown
Preprocessing few discrete variables
###Code
loan_data.info()
# Displays column names, complete (non-missing) cases per column, and datatype per column.
###Output
_____no_output_____
###Markdown
We are going to preprocess the following discrete variables: grade, sub_grade, home_ownership, verification_status, loan_status, purpose, addr_state, initial_list_status. Most likely, we are not going to use sub_grade, as it overlaps with grade.
###Code
pd.get_dummies(loan_data['grade'])
# Create dummy variables from a variable.
pd.get_dummies(loan_data['grade'], prefix = 'grade', prefix_sep = ':')
# Create dummy variables from a variable.
loan_data_dummies = [pd.get_dummies(loan_data['grade'], prefix = 'grade', prefix_sep = ':'),
pd.get_dummies(loan_data['sub_grade'], prefix = 'sub_grade', prefix_sep = ':'),
pd.get_dummies(loan_data['home_ownership'], prefix = 'home_ownership', prefix_sep = ':'),
pd.get_dummies(loan_data['verification_status'], prefix = 'verification_status', prefix_sep = ':'),
pd.get_dummies(loan_data['loan_status'], prefix = 'loan_status', prefix_sep = ':'),
pd.get_dummies(loan_data['purpose'], prefix = 'purpose', prefix_sep = ':'),
pd.get_dummies(loan_data['addr_state'], prefix = 'addr_state', prefix_sep = ':'),
pd.get_dummies(loan_data['initial_list_status'], prefix = 'initial_list_status', prefix_sep = ':')]
# We create dummy variables from all 8 original independent variables, and save them into a list.
# Note that we are using a particular naming convention for all variables: original variable name, colon, category name.
loan_data_dummies = pd.concat(loan_data_dummies, axis = 1)
# We concatenate the dummy variables and this turns them into a dataframe.
type(loan_data_dummies)
# Returns the type of the variable.
loan_data = pd.concat([loan_data, loan_data_dummies], axis = 1)
# Concatenates two dataframes.
# Here we concatenate the dataframe with original data with the dataframe with dummy variables, along the columns.
loan_data.columns.values
# Displays all column names.
###Output
_____no_output_____
###Markdown
Check for missing values and clean
###Code
loan_data.isnull()
# It returns 'False' if a value is not missing and 'True' if a value is missing, for each value in a dataframe.
pd.options.display.max_rows = None
# Sets the pandas dataframe options to display all columns/ rows.
loan_data.isnull().sum()
pd.options.display.max_rows = 100
# Sets the pandas dataframe options to display 100 columns/ rows.
# 'Total revolving high credit/ credit limit', so it makes sense that the missing values are equal to funded_amnt.
# BUG FIX: direct assignment instead of `Series.fillna(..., inplace=True)`.
# Calling an in-place method on a column selection is chained assignment,
# which is deprecated and does not update the DataFrame under pandas
# copy-on-write (pandas 3.0 default).
loan_data['total_rev_hi_lim'] = loan_data['total_rev_hi_lim'].fillna(loan_data['funded_amnt'])
loan_data['total_rev_hi_lim'].isnull().sum()
###Output
_____no_output_____
###Markdown
Homework
###Code
# Fill remaining missing values.
# BUG FIX: assignment replaces `Series.fillna(..., inplace=True)` on a column
# selection — that form is chained assignment, which is deprecated and a
# silent no-op under pandas copy-on-write.
loan_data['annual_inc'] = loan_data['annual_inc'].fillna(loan_data['annual_inc'].mean())
# We fill the missing values with the mean value of the non-missing values.
loan_data['mths_since_earliest_cr_line'] = loan_data['mths_since_earliest_cr_line'].fillna(0)
loan_data['acc_now_delinq'] = loan_data['acc_now_delinq'].fillna(0)
loan_data['total_acc'] = loan_data['total_acc'].fillna(0)
loan_data['pub_rec'] = loan_data['pub_rec'].fillna(0)
loan_data['open_acc'] = loan_data['open_acc'].fillna(0)
loan_data['inq_last_6mths'] = loan_data['inq_last_6mths'].fillna(0)
loan_data['delinq_2yrs'] = loan_data['delinq_2yrs'].fillna(0)
loan_data['emp_length_int'] = loan_data['emp_length_int'].fillna(0)
# We fill the missing values with zeroes.
###Output
_____no_output_____
###Markdown
PD model Data preparation Dependent Variable. Good/ Bad (Default) Definition. Default and Non-default Accounts.
###Code
loan_data['loan_status'].unique()
# Displays unique values of a column.
loan_data['loan_status'].value_counts()
# Calculates the number of observations for each unique value of a variable.
loan_data['loan_status'].value_counts() / loan_data['loan_status'].count()
# We divide the number of observations for each unique value of a variable by the total number of observations.
# Thus, we get the proportion of observations for each unique value of a variable.
# Good/ Bad Definition: 0 = 'bad' (charged off, defaulted, or 31-120 days
# late), 1 = 'good' (everything else).
loan_data['good_bad'] = np.where(loan_data['loan_status'].isin(['Charged Off', 'Default',
                                                       'Does not meet the credit policy. Status:Charged Off',
                                                       'Late (31-120 days)']), 0, 1)
# We create a new variable that has the value of '0' if a condition is met, and the value of '1' if it is not met.
loan_data['good_bad']
_____no_output_____ |
tensornetwork/tn_keras/colabs/TN_Keras.ipynb | ###Markdown
Build Base Model and Tensorized Models
###Code
data, labels = dummy_data(1296)
# Build a fully connected network (the uncompressed baseline).
model = Sequential()
model.add(Dense(512, use_bias=True, activation='relu', input_shape=(data.shape[1],)))
model.add(Dense(128, use_bias=True, activation='relu'))
model.add(Dense(1, use_bias=True, activation='sigmoid'))
# Build the same fully connected network using TN layer DenseDecomp,
# which factorizes each weight matrix through a decomp_size-dim bottleneck.
decomp_model = Sequential()
decomp_model.add(DenseDecomp(512, decomp_size=64, use_bias=True, activation='relu', input_shape=(data.shape[1],)))
decomp_model.add(DenseDecomp(128, decomp_size=64, use_bias=True, activation='relu'))
decomp_model.add(DenseDecomp(1, decomp_size=8, use_bias=True, activation='sigmoid'))
# Build the same fully connected network using TN layer DenseMPO
# (matrix-product-operator factorization).
# NOTE(review): presumably the 1296 input is chosen because 1296 = 6**4 so it
# factors evenly across num_nodes=4 — confirm against the DenseMPO docs.
mpo_model = Sequential()
mpo_model.add(DenseMPO(256, num_nodes=4, bond_dim=8, use_bias=True, activation='relu', input_shape=(1296,)))
mpo_model.add(DenseMPO(81, num_nodes=4, bond_dim=4, use_bias=True, activation='relu'))
mpo_model.add(Dense(1, use_bias=True, activation='sigmoid'))
###Output
_____no_output_____
###Markdown
Analyze Parameter Reduction from Tensorization
###Code
model.summary()
decomp_model.summary()
mpo_model.summary()
print(f'Compression factor from tensorization with DenseDecomp: {model.count_params() / decomp_model.count_params()}')
print(f'Compression factor from tensorization with DenseMPO: {model.count_params() / mpo_model.count_params()}')
###Output
Compression factor from tensorization with DenseDecomp: 4.609283526476997
Compression factor from tensorization with DenseMPO: 167.5905855338691
###Markdown
Train Models for Comparison
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model for 10 epochs
history = model.fit(data, labels, epochs=10, batch_size=32)
decomp_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the tensorized model for 10 epochs
history = decomp_model.fit(data, labels, epochs=10, batch_size=32)
mpo_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the tensorized model for 10 epochs
history = mpo_model.fit(data, labels, epochs=10, batch_size=32)
###Output
Epoch 1/10
4/4 [==============================] - 0s 10ms/step - loss: 0.6926 - accuracy: 0.5100
Epoch 2/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6890 - accuracy: 0.5100
Epoch 3/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6856 - accuracy: 0.5000
Epoch 4/10
4/4 [==============================] - 0s 7ms/step - loss: 0.6813 - accuracy: 0.5300
Epoch 5/10
4/4 [==============================] - 0s 9ms/step - loss: 0.6776 - accuracy: 0.7200
Epoch 6/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6733 - accuracy: 0.8400
Epoch 7/10
4/4 [==============================] - 0s 9ms/step - loss: 0.6689 - accuracy: 0.8300
Epoch 8/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6635 - accuracy: 0.8400
Epoch 9/10
4/4 [==============================] - 0s 9ms/step - loss: 0.6581 - accuracy: 0.8100
Epoch 10/10
4/4 [==============================] - 0s 8ms/step - loss: 0.6501 - accuracy: 0.9300
|
HW1-Daniel-Loureiro.ipynb | ###Markdown
Homework 1 Part I. HICP data from SDW
###Code
# import pandas (and other libraries you may want to use)
import pandas as pd
# Import the HICP subindex files downloaded from the ECB's SDW.
# header=4 skips the metadata preamble the ECB puts at the top of each file.
# NOTE(review): these fourteen individually-named frames appear unused later —
# the combination loop below re-reads the same files from the 'data/' folder.
df_hicp_041100=pd.read_csv('./data/data_041100.csv', header=4)
df_hicp_041200=pd.read_csv('./data/data_041200.csv', header=4)
df_hicp_043100=pd.read_csv('./data/data_043100.csv', header=4)
df_hicp_043200=pd.read_csv('./data/data_043200.csv', header=4)
df_hicp_044000=pd.read_csv('./data/data_044000.csv', header=4)
df_hicp_044100=pd.read_csv('./data/data_044100.csv', header=4)
df_hicp_044200=pd.read_csv('./data/data_044200.csv', header=4)
df_hicp_044300=pd.read_csv('./data/data_044300.csv', header=4)
df_hicp_044400=pd.read_csv('./data/data_044400.csv', header=4)
df_hicp_045100=pd.read_csv('./data/data_045100.csv', header=4)
df_hicp_045200=pd.read_csv('./data/data_045200.csv', header=4)
df_hicp_045300=pd.read_csv('./data/data_045300.csv', header=4)
df_hicp_045400=pd.read_csv('./data/data_045400.csv', header=4)
df_hicp_045500=pd.read_csv('./data/data_045500.csv', header=4)
# Codes of the subindices downloaded (04XX00 = HOUSING, WATER, ELECTRICITY,
# GAS AND OTHER FUELS category).
list_of_subindices=['041100', '041200', '043100', '043200', '044000', '044100', '044200', '044300', '044400', '045100', '045200', '045300', '045400', '045500']
# Read every subindex file again (first column as index, ECB preamble skipped)
# and combine them column-wise into a single dataframe.
list_of_dfs = [pd.read_csv(f'data/data_{sind}.csv', index_col=0, header=4)
               for sind in list_of_subindices]
df_hicp_all = pd.concat(list_of_dfs, axis=1)
df_hicp_all
# make other changes to the dataframe - set datetime index, rename the columns
#Renaming the columns
df_hicp_all.columns = list_of_subindices #replace the verbose ECB column names with the short subindex codes
#The next two steps rename the date column, originally named "Period\Unit".
#The backslash in that name broke attribute-style access, hence this workaround.
df_hicp_all.reset_index(inplace=True)
df_hicp_all.columns.values[0]="date"
#Set the datetime index (this discards the auxiliary integer index created by reset_index above, which is fine)
new_index = pd.to_datetime(df_hicp_all.date, format='%Y%b') #format='%Y%b' because dates look like e.g. "2022Fev"
df_hicp_all.index=new_index #setting the index leaves a redundant "date" column behind, so delete it:
df_hicp_all.drop('date', axis=1, inplace=True)
#Check how the dataframe looks now
df_hicp_all
# Print the dtype of the dataframe's (datetime) index.
idx = df_hicp_all.index
print(idx.dtype)
# Print the dtype of every column.
print(df_hicp_all.dtypes)
# Save the combined series under the name of the item group.
df_hicp_all.to_csv(f'data/HOUSING, WATER, ELECTRICITY, GAS AND OTHER FUELS.csv', index=True)
# Full-period mean and standard deviation of each series.
display('Means for each series for the full period:')
print(df_hicp_all.mean())
display('Standard deviations for each series for the full period:')
print(df_hicp_all.std())
# Mean and standard deviation per calendar month for each series.
df_hicp_all.groupby(df_hicp_all.index.month).agg(['mean', 'std'])
# Same monthly statistics, restricted to January 2017 - December 2021.
recent = df_hicp_all.loc['2017-01':'2021-12']
recent.groupby(recent.index.month).agg(['mean', 'std'])
###Output
_____no_output_____
###Markdown
Part II. GDP data from EUROSTAT
###Code
# import the data file you downloaded from EUROSTAT
# skiprows/nrows/usecols isolate the 108-quarter GDP series from the Excel layout
df_gdp=pd.read_excel('./data/gdp_euroarea.xlsx', sheet_name='Sheet 1', index_col=0, skiprows=10, nrows=108, usecols="A:B")
# create a dataframe with a datetime index matching the timing of the series you downloaded
#The next two steps rename the first column to "date"; its original name contained
#a backslash which broke attribute-style access, hence this workaround.
df_gdp.reset_index(inplace=True) #the dataframe came in without a usable index
df_gdp.columns.values[0]="date"
df_gdp.columns.values[1]="gdp"
#Set the datetime index (this discards the auxiliary index from reset_index above, which is fine)
new_index = pd.to_datetime(df_gdp.date) #build the new datetime index from the date column
df_gdp.index=new_index #setting the index leaves a redundant "date" column behind, so delete it:
df_gdp.drop('date', axis=1, inplace=True)
df_gdp
#additional step: to_datetime labelled each quarter at its first day; relabel at quarter end:
df_gdp=df_gdp.resample('Q').last()
df_gdp
#a better looking dataframe could be achieved with:
df_gdp.to_period('Q')
#that result is intentionally NOT saved back — it only shows readers this is quarterly data
# save as a csv file under the name "Real-GDP-EA.csv"
df_gdp.to_csv(f'data/Real-GDP-EA.csv', index=True)
###Output
_____no_output_____
###Markdown
PART III Combine GDP and inflation data
###Code
# Create a new dataframe combining the GDP data with the inflation indices
# from the first task.
# Re-label the HICP observations at month-end so the dates line up with the
# end-of-quarter GDP labels.
df_hicp_all_end=df_hicp_all.resample('M').last()
dfs_merged = pd.concat([df_hicp_all_end, df_gdp], axis=1)
dfs_merged
# Save as a csv file named "merged-GDP-inflation.csv".
# (Plain string literals: the original used f-strings with no placeholders.)
dfs_merged.to_csv('data/merged-GDP-inflation.csv', index=True)
# Alternatively, create a file with only quarterly data:
# first, aggregate the task-1 monthly data to quarterly means...
df_hicp_all_quarter=df_hicp_all.resample('Q').mean()
# ...the GDP data from task 2 is already labelled at end of quarter,
# so the two can be merged directly into a new quarterly dataframe.
dfs_merged_q=pd.concat([df_hicp_all_quarter, df_gdp], axis=1)
dfs_merged_q
# Save the quarterly file as well.
dfs_merged_q.to_csv('data/merged-GDP-inflation-quarter-data.csv', index=True)
###Output
_____no_output_____ |
extractive_summarization/french.ipynb | ###Markdown
# *Extractive summarization* en francés

El objetivo del presente proyecto es crear un modelo capaz de producir resúmenes del conjunto de noticias en **lengua francesa** de Le Monde. Los resúmenes serán obtenidos utilizando la metodología de extracción (*extractive summarization*), es decir, el resumen generado será a partir de las frases del texto original que sean más relevantes. El proyecto constará de distintas secciones:

- Preparación del entorno
- Análisis de los datos
- Preprocesamiento de los datos
- Análisis de la extensión de los datos
- Construcción del modelo
- Generar nuevos resúmenes

## Preparación del entorno
###Code
# Required libraries
import tensorflow as tf
import tensorflow_datasets as tfds
import pandas as pd
import math as m
import re
from itertools import chain, groupby
from bs4 import BeautifulSoup
from collections import Counter
import nltk
nltk.download('stopwords')  # stop-word lists (French is used below)
from nltk.corpus import stopwords
nltk.download('punkt')  # tokenizer models for nltk.sent_tokenize / word_tokenize
import heapq
from google.colab import drive
drive.mount('/content/drive')  # Colab only: mount Google Drive to reach the dataset
###Output
Mounted at /content/drive
###Markdown
Análisis de los datos
###Code
# Load the French (Le Monde) test split; fields are ';'-separated, no header row.
data = pd.read_csv('/content/drive/MyDrive/TFM/data/fr_test.csv', header = None, sep = ';')
def dataframe_ok(df):
    """Keep only the article body and its summary, modifying *df* in place."""
    # Label the seven raw columns, then discard everything except Text/Summary.
    raw_columns = ['url', 'date', 'Text', 'Summary', 'title', 'topic', 'unknown']
    df.columns = raw_columns
    df.drop(columns=['unknown', 'url', 'date', 'title', 'topic'], inplace=True)
dataframe_ok(data)  # keep only the Text and Summary columns
data.shape # data dimensions: rows (news items) x 2 columns (text, summary)
data.head() # peek at the first five rows
# Inspect the full text of the first three rows
for i in range(3):
    print("Noticia #",i+1)
    print(data.Summary[i])
    print(data.Text[i])
    print()
# Count missing values per column
data.isnull().sum()
def duplicates_missing(df):
    """Drop duplicated rows, then rows with missing values — both in place."""
    df.drop_duplicates(inplace=True)
    df.dropna(inplace=True)
duplicates_missing(data)  # drop duplicated and incomplete news items in place
###Output
_____no_output_____
###Markdown
Preprocesamiento de los datosLa tarea de preprocesamiento de los datos es una de las partes más importantes en un proyecto de procesamiento de lenguaje natural. Para realizar resúmenes de texto por extracción se parte de la hipótesis de que el tema principal del texto viene dado por las palabras que aparezcan con mayor frecuencia. En consecuencia, el resumen se generará a partir de las frases que contengan mayor cantidad de dichas palabras. Es por esta razón que para este tipo de resumen automático de textos no es necesario modificar de forma excesiva los textos originales para que estos sean más naturales. Según la lengua con la que se desee entrenar el modelo, las tareas de limpieza de los datos pueden tener variaciones. Se recuerda que en el presente *notebook* se pretende utilizar textos en lengua francesa. **Preprocesamiento de los datos:**- **Eliminar letras mayúsculas**: Python diferencia entre carácteres en mayúsuclas y en minúsculas, por lo tanto, las palabras *News* y *news* serían interpretadas como diferentes. Sin embargo, para comprender el texto correctamente, esto no debe ser así. Es por ello que se convierte todo el texto a letras minúsculas. - **Eliminar los restos de la importación de los datos** - **Eliminar los cambios de línea ./n**- **Eliminar el texto entre paréntesis**: generalmente, entre paréntesis no se pone información relevante. Por ello, se puede prescindir de esta para reducir la información que debe ser analizada por el modelo.- **Eliminar caracteres especiales**: se debe tener en cuenta que el francés es una lengua que utiliza caracteres distintos, como las vocales con tilde o la letra ç. Además, hay palabras en francés que utilizan los guiones como parte de ellas, por ello, también se deben conservar.
###Code
# Stop words: words carrying no meaning on their own (articles, pronouns,
# prepositions, adverbs, auxiliary verbs) — French list from NLTK.
stop_words = set(stopwords.words('french'))
def clean_text(text):
    """Normalise a French news text.

    Lowercases, removes parenthesised/dash-enclosed asides and characters
    outside the allowed set, and pads punctuation with spaces so that
    '.', ',' and '?' tokenize as separate words.
    """
    clean = text.lower()
    # Drop literal ".\n" leftovers from the export.
    clean = clean.replace('.\\n','')
    # Remove parenthesised asides and spans enclosed in dashes.
    clean = re.sub(r'\([^)]*\)', '', clean)
    clean = re.sub(r'-[^)]*-', '', clean)
    clean = ' '.join(clean.split(" "))
    # Keep only letters (incl. French accented ones), digits and basic punctuation.
    clean = re.sub("[^a-zA-Z, ç, à, â, é, è, ê, î, ô, ù, û, ., ,, -, ?,%, 0-9]", " ", clean)
    # Surround each punctuation mark with spaces.
    for mark, padded in ((".", " . "), (",", " , "), ("?", " ? ")):
        clean = clean.replace(mark, padded)
    # Collapse runs of whitespace into single spaces.
    return " ".join(clean.split()).strip()
# Clean every summary and every article body.
clean_summaries = [clean_text(summary) for summary in data.Summary]
print("Sumarios completados.")
clean_texts = [clean_text(text) for text in data.Text]
print("Textos completados.")
# Inspect the first cleaned pairs to confirm the cleaning worked as intended.
for i in range(3):
    print("Noticia #",i+1)
    print('Sumario: ', clean_summaries[i])
    print('Texto: ',clean_texts[i])
    print()
###Output
Noticia # 1
Sumario: mohamed salah et divock origi ont permis à liverpool de remporter son sixième trophée en c1 au terme d une finale décevante .
Texto: le défenseur de liverpool virgil van dijk célèbre sa victoire à l issue de la finale de la ligue de champions . carl recine reuters liverpool au sommet de l europe et de l ennui . les reds de mohamed salah , buteur sur penalty après 1 min 48 s , ont étouffé tottenham lors d une finale 100 % anglaise et 0 % flamboyante , samedi 1er juin , en ligue des champions , décrochant , à madrid , leur sixième couronne continentale . alors que le spectacle avait été époustouflant au tour précédent , la finale la plus apathique de la décennie s est décantée après 23 s de jeu , sur un penalty concédé du bras par le français moussa sissoko , et transformé par l egyptien salah dans la torpeur du stade metropolitano . puis , au bout de la purge , une frappe croisée du belge divock origi , à la 87e minute , a plié ce match somnolent . article réservé à nos abonnés lire aussi ligue des champions à liverpool , le football en héritage qu importe l ennui , la revanche est belle pour salah , héros malheureux de la finale perdue l année précédente par liverpool face au real madrid . ce maigre avantage a suffi au bonheur de son entraîneur , j rgen klopp , enfin titré en c1 à sa troisième tentative . au coup de sifflet final , alors que le kop de liverpool entonnait le fameux you ll never walk alone , le bouillant klopp a enlacé calmement les gens de son staff avant de communier avec ses joueurs , casquette vissée sur la tête . triste finale on ne retiendra ni le score ni le scénario , digne des plus cyniques prestations de l atlético madrid , habitué aux scores étriqués dans son antre du metropolitano . on ne retiendra que le vainqueur , qui a fait parler son expérience de la c1 pour s installer à la troisième marche du palmarès de l épreuve reine européenne derrière le real madrid et l ac milan . madrid , c était un mauvais souvenir pour les reds l an passé battus 1finales . l ennui était fini , la nuit pouvait commencer . #totliv #rmcsport1 bfmtv 87 le but d origi !! 
contre le cours du jeu , liverpool enfonce le clou ! https t . co azlpa0djlv rmcsport
Noticia # 2
Sumario: le philosophe , mort samedi à 88 ans , a su faire vivre une longue tradition française alliant les charmes de la plume , le travail heureux de la pensée et les générosités du c ur .
Texto: michel serres , en mai 2012 . manuel cohen parmi les figures multiples de michel serres , mort samedi 1er juin , à l âge de 88 ans , il en est une qui réunit , peutil pas encore signé , en 2018 , avec michel polacco , une défense et illustration de la langue française aujourd hui ? un vagabond ami de la terre cette universalité à la française implique à l évidence un cheminement individualiste les mousquetaires ne veulent pas faire école . ils conduisent des révolutions solitaires que l histoire tend à oublier , comme l ont montré les dizaines et les dizaines de volumes publiés par le corpus des uvres de philosophie en langue française . en dirigeant cette vaste entreprise , que tout paraissait devoir rendre impossible , michel serres ne faisait pas seulement uvre de mémoire et de reviviscence . il rendait hommage à toutes ces intelligences isolées qui ont peuplé notre histoire intellectuelle en laissant dans les archives , d où il faut les exhumer , des pages vivaces et fortes . de ces trajets multiples entre les sciences et les littératures , les vocables et les natures , l image même du philosophe se trouve modifiée . ce n est plus un roi austère contemplant de haut un paysage dominé . c est au contraire comme un vagabond ami de la terre , arpenteur inventif , ouvert aux fécondités du hasard . michel serres aura sans doute rendu à la philosophie française son sens de la rencontre , de l imprévu , du jeu . il a maintenu dans ses textes cette spécialité nationale une jubilation du savoir que l écriture offre à goûter .
Noticia # 3
Sumario: l auteur des best sellers les cinq sens , petite poucette , le gaucher boiteux , s est éteint à l âge de 88 ans , entouré de sa famille .
Texto: a vincennes , en septembre 2018 . serge picard agence vu c était un philosophe comme on en fait trop peu , un bon vivant doublé d un mauvais caractère , un amoureux des sciences et des saveurs , un esprit encyclopédique , un prodigieux manieur de mots , un grand penseur de tradition orale , un touchepol droit
###Markdown
Análisis de la extensión de los textos
###Code
# Distribution of article length, in words.
text_lengths = [len(text.split()) for text in clean_texts]
import matplotlib.pyplot as plt
plt.title('Número de palabras de los textos')
plt.hist(text_lengths, bins=30)
# Distribution of article length, in sentences (crude '.'-based split).
text_sentences = [len(text.split(".")) for text in clean_texts]
import matplotlib.pyplot as plt
plt.title('Número de frases de los textos')
plt.hist(text_sentences, bins=30)
# Distribution of summary length, in words.
summaries_lengths = [len(summary.split()) for summary in clean_summaries]
import matplotlib.pyplot as plt
plt.title('Número de palabras de los sumarios')
plt.hist(summaries_lengths, bins=30)
# Distribution of summary length, in sentences.
summaries_sentences = [len(summary.split(".")) for summary in clean_summaries]
import matplotlib.pyplot as plt
plt.title('Número de frases de los sumarios')
plt.hist(summaries_sentences, bins=30)
# Returns how often every word appears across the given texts.
def count_words(count_dict, text):
    """Accumulate per-word occurrence counts from *text* into *count_dict*."""
    for sentence in text:
        for word in sentence.split():
            count_dict[word] = count_dict.get(word, 0) + 1
# Count the full vocabulary over summaries and texts combined.
# NOTE(review): this dict is later shadowed by a *function* of the same name
# ("def word_frequency" further down); renaming one of them would be safer.
word_frequency = {}
count_words(word_frequency, clean_summaries)
count_words(word_frequency, clean_texts)
print("Vocabulario total:", len(word_frequency))
# Look for conversion leftovers ('x99', 'x99s', 'x98', etc.) among the most
# frequent tokens, to add them to clean_text if needed.
import operator
sorted(word_frequency.items(), key=operator.itemgetter(1), reverse=True )
## No such character sequences were found in this dataset
###Output
_____no_output_____
###Markdown
Construcción del modeloPara generar resúmenes de texto por extracción, es necesario conocer qué frases del texto original son las que mayor información relevante contienen. Para ello, se seguirán los siguientes pasos para cada uno de las noticias del conjunto de datos:- Calcular la frecuencia de aparición de las palabras .- Calcular la frecuencia ponderada de cada una de las palabras, siendo la frecuencia ponderada la división entre la frecuencia de aparición de la palabra en cuestión y la frecuencia de la palabra que aparece más veces en el texto. - Calcular la puntuación de cada una de las frases del texto, siendo la puntuación la suma ponderada de cada palabra que conforma dicha frase.- Seleccionar las N frases con mayor puntuación para generar el resumen a partir de estas.
###Code
def word_frequency (word_frequencies, text):
    """Count the occurrences of each content word of *text* into *word_frequencies*.

    Stop words and punctuation tokens are skipped; counts of words already
    present are incremented.
    """
    punctuations = {".",":",",","[","]", "“", "|", "”", "?"}
    for token in nltk.word_tokenize(text):
        if token in stop_words or token in punctuations:
            continue
        word_frequencies[token] = word_frequencies.get(token, 0) + 1
word_freq_per_text = [] # One word-frequency dict per text, in corpus order
for text in clean_texts:
    word_frequencies = {}
    word_frequency(word_frequencies, text) # Fills the dict with this text's word counts
    word_freq_per_text.append(word_frequencies)
def word_score(index):
    """Normalise the word counts of text *index* in place.

    Each count in word_freq_per_text[index] is divided by the count of the
    most frequent word of that text (frequency / max_frequency), so scores
    end up in (0, 1].
    """
    # The original also tokenised the text into sentences and aliased the
    # frequency dict under a name shadowing the word_frequency() function;
    # both locals were unused and have been removed.
    counts = word_freq_per_text[index]
    maximum_frequency = max(counts.values())  # count of the most repeated word
    for word in counts:
        counts[word] = counts[word] / maximum_frequency
# Normalise the word counts of every text in place.
for i in range(0, len(clean_texts)):
    word_score(i)
def sentence_score(sentence_scores, index):
    """ Score each sentence of text *index*: a sentence's score is the sum of
    the normalised frequencies of every (known) word it contains.
    Results are stored into *sentence_scores* (sentence -> score).
    Only sentences shorter than 30 space-separated words are scored."""
    sentence_list = nltk.sent_tokenize(clean_texts[index]) # sentence tokenisation
    for sent in sentence_list:
        for word in nltk.word_tokenize(sent.lower()):
            if word in word_freq_per_text[index].keys():
                if len(sent.split(' ')) < 30:  # ignore very long sentences
                    if sent not in sentence_scores.keys():
                        sentence_scores[sent] = word_freq_per_text[index][word]
                    else:
                        sentence_scores[sent] += word_freq_per_text[index][word]
sent_sc_per_text = [] # One {sentence: score} dict per text, in corpus order
for i in range(0, len(clean_texts)):
    sentence_scores = {}
    sentence_score(sentence_scores, i) # Fills the dict with this text's sentence scores
    sent_sc_per_text.append(sentence_scores)
###Output
_____no_output_____
###Markdown
Generar nuevos resúmenesEn el apartado anterior *Análisis de la extensión de los textos* se ha examinado el número de palabras y frases de las noticias y sus respectivos resúmenes que forman el conjunto de datos. En los gráficos presentados se ha podido observar que la extensión de los textos es muy variable, variando entre 1 y 50 frases. En cuanto a los sumarios, estos tienen entre 1 y 5 frases. Al igual que en el caso de las noticias en lengua española, el ratio entre el número de frases del texto original y el del resumen es, en general, muy pequeño. Esto se debe que a lo que aquí se considera Summary no es en realidad un resumen del texto, si no el subtítulo de la noticia o frases destacadas, por lo que incluye un menor número de frases. Esto es una característica del conjunto de datos MLSUM cuyos datos son los utilizados en este proyecto.El número de frases con las que se desea generar el resumen por extracción debe ser indicado de forma avanzada. No se ha creído oportuno especificar un número concreto de frases para producir el resumen de todos los textos del conjunto debido a que las extensiones de estos son muy variables. Por ello, se ha establecido que el número de frases a escoger debe ser de un 25% del total de frases del texto original.
###Code
def generate_summary(index):
    """Build an extractive summary of text *index* from its best-scored
    sentences — 25% of the original sentence count, rounded up."""
    scores = sent_sc_per_text[index]
    total_sentences = len(nltk.sent_tokenize(clean_texts[index]))
    n_sentences = m.ceil(total_sentences*25/100)
    top_sentences = heapq.nlargest(n_sentences, scores, key=scores.get)
    return ' '.join(top_sentences)
# Generate a summary for every cleaned text.
generated_summaries = [generate_summary(i) for i in range(0, len(clean_texts))]
# Inspect a few originals side by side with their generated summaries.
for i in range(10,14):
    print("\nNoticia #",i+1)
    print('\nTexto original: ', clean_texts[i])
    print('\nResumen original: ', clean_summaries[i])
    print('\nResumen generado: ', generated_summaries[i])
    print()
###Output
_____no_output_____ |
Sean_Inventory_Simulation_22SEP2020.ipynb | ###Markdown
# Inventory Simulation

Authors: Sean Conway + Yanzhe Ma

Summer 2020 - Fall 2020 Semester

Last Modified: 22SEP2020

> Implementation of Inventory Simulation Using Classes
###Code
import numpy as np
import pandas as pd
import pprint
from scipy.stats import norm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from collections.abc import Iterable
def iterable(obj):
    """Return True if *obj* is a collections.abc.Iterable (defines __iter__)."""
    return isinstance(obj, Iterable)
###Output
_____no_output_____
###Markdown
Inventory Simulation Class
###Code
# "Game" class that can be created to run the whole simulation
class InvSimulation:
def __init__(self, periodsToSimulate=1000):
self.periodsToSimulate = periodsToSimulate
# Contains all of the nodes in our simulation (reference by ID)
self.nodeDict = {}
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Node - related methods
    def createNode(self, nodeID, h, p, nodeType="retailer", demandMean=0, demandStDev=0):
        '''
        Creates a node to be used in our supply network and registers it in nodeDict
        In: InvSim object, nodeID, h, p, nodeType, demandMean, demandStDev
        '''
        # Node is defined elsewhere in the notebook; h/p presumably holding and
        # penalty costs per unit — TODO confirm against the Node class.
        self.nodeDict[nodeID] = Node(nodeID, h, p, nodeType,demandMean, demandStDev)
#links together two nodes in preDict and recDict; could add a boolean later for linking both ways
def linkNode(self,startNode,endNode,relationshipType="or"):
'''
Create a unidirectional link between nodes
In: InvSim object, starting Node index, ending Node index
recDict[] maps one starting node to all of its receiving nodes
preDict[] maps one ending node to all of its predecessor nodes
'''
if startNode in self.nodeDict[startNode].recDict.keys():
self.nodeDict[startNode].recDict[startNode].append(endNode)
else:
self.nodeDict[startNode].recDict[startNode] = []
self.nodeDict[startNode].recDict[startNode].append(endNode)
if endNode in self.nodeDict[endNode].preDict.keys():
self.nodeDict[endNode].preDict[endNode].append(startNode)
else:
self.nodeDict[endNode].preDict[endNode] = []
self.nodeDict[endNode].preDict[endNode].append(startNode)
    #This method adds in all of the combinations of nodeID and the number of units needed to produce one unit for "thisNode"
    def addAndRelationship(self,upstreamNodeID,downstreamNodeID,numNeeded):
        '''
        Record an AND (bill-of-materials) relationship on the upstream node:
        ANDDict maps the (upstream, downstream) pair to a list of quantities,
        each being the number of units needed per finished unit.
        '''
        thisNode = self.nodeDict[upstreamNodeID]
        if (upstreamNodeID,downstreamNodeID) not in thisNode.ANDDict.keys():
            thisNode.ANDDict[(upstreamNodeID,downstreamNodeID)] = []
        thisNode.ANDDict[(upstreamNodeID,downstreamNodeID)].append(numNeeded)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def playSimulation(self, gameType="multiNodeVerify", BSLevel=60, demandMethod="useFileDemand", fileDemand=df["IO"], seed='N/A', connectionCase="or", printOut=True):
'''
Play the simulation, given the following:
- game type (string) (default="multiNodeVerify)
- base stock level (integer for multiNodeVerify game), single value for all nodes (default=60)
'''
if gameType == "multiNodeVerify":
if demandMethod == "useFileDemand":
self.multiNodeVerify(demandArray=fileDemand, BSLevel=BSLevel, connectionCase=connectionCase, demandMethod=demandMethod, printOut=printOut)
elif demandMethod == "useRandomDemand":
self.multiNodeVerify(BSLevel=BSLevel, connectionCase=connectionCase, demandMethod=demandMethod, seed=seed, printOut=printOut)
else:
self.playOptimalBaseStockGame()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    def printEndStats(self, nodeID, thisNode, period):
        '''
        Print the resulting statistics for a node in the game
        In:
        - nodeID (integer)
        - thisNode (node Object)
        - period (integer) — NOTE(review): currently unused by this method
        '''
        print("Node " + str(nodeID))
        print("(IL) Starting Inventory record for node " + str(nodeID) + ":" + str(thisNode.startingInventoryRecord))
        print("(IS) Inbound supply record for node " + str(nodeID) + ":" + str(thisNode.receivedMats))
        print("(IO) Demand for node " + str(nodeID) + ":" + str(thisNode.demandArray))
        print("(OQ) Order for node " + str(nodeID) + ":" + str(thisNode.orderArray))
        print("(DMFS) Supply for node " + str(nodeID) + ":" + str(thisNode.supplyArray))
        print("(EIL) Ending Inventory record for node " + str(nodeID) + ":" + str(thisNode.endingInventoryRecord))
        print("(BO) Backorders for node " + str(nodeID) + ":" + str(thisNode.backorderRecord))
        print("(TC) Total Cost for node " + str(nodeID) + ":" + str(thisNode.costRecord))
        print()
        # Leftover debug code comparing node 2's inbound orders against a
        # reference column in the global df — kept for future debugging.
        #diffIS = []
        #if nodeID == 2:
        #    for i in range(0,min(len(thisNode.inBoundOrders),len(df["IS 3 Node"]))):
        #        diffIS.append(thisNode.inBoundOrders[i]-df["IS 3 Node"][i])
        #    print(diffIS)
        #    print(len(thisNode.inBoundOrders))
    def getReceivedMaterialsOrCase(self, nodeID, thisNode, period):
        '''
        Get the number of inbound materials from what the preceding node was able to supply
        (OR case: supplies from multiple upstream nodes are summed)
        Example:
        Flow of material
        o -> o
        Suppose the left node represents a wholesaler, and the right node represents a retailer
        How much material was the wholesaler able to supply to the retailer?
        In:
        - nodeID (dictionary key)
        - thisNode (node Object)
        - period (integer)
        Out:
        - number of received materials (numeric); also appended to thisNode.receivedMats
        '''
        # Get the number of inbound materials from what the previous node was able to supply
        receivedMatsNum = 0
        upstreamNum= 0  # NOTE(review): unused local
        upstreamList = []
        # For every node that's not the end supplier (period 0 receives nothing)
        if len(self.nodeDict[nodeID].recDict) != 0 and period != 0:
            # Step 1: get the upstream nodes of this node
            upstreamList = self.nodeDict[nodeID].recDict[nodeID]
            for upstreamNode in upstreamList:
                # Step 2: for every upstream node, find out its total number of downstream nodes
                downstreamNum = len(self.nodeDict[upstreamNode].preDict[upstreamNode])
                # Step 3: finished materials the upstream node could deliver (from last period's supply)
                totFinishedMatsNum = self.nodeDict[upstreamNode].supplyArray[period-1]
                # Goods delivered to this node = total finished goods split evenly over downstream nodes
                finishedMatsNum = totFinishedMatsNum/downstreamNum
                # Sum across upstream nodes to get the total received (OR semantics)
                receivedMatsNum += finishedMatsNum
        # If a node has no receivers, we assume that it'll always be supplied the qty it wants
        elif len(self.nodeDict[nodeID].recDict) == 0 and period != 0:
            # orderArray entries may be scalar or array-like; .sum() handles the latter
            if iterable(self.nodeDict[nodeID].orderArray[period - 1]):
                receivedMatsNum = max(self.nodeDict[nodeID].orderArray[period - 1].sum(), 0)
            else:
                receivedMatsNum = max(self.nodeDict[nodeID].orderArray[period - 1], 0)
        thisNode.receivedMats.append(receivedMatsNum)
        return receivedMatsNum
    def getReceivedMaterialsANDCase(self, nodeID, thisNode, period):
        '''
        Get the number of inbound materials from what the preceding node was able to supply
        (AND case: the binding constraint is the *minimum* delivery over upstream nodes)
        Example:
        Flow of material
        o -> o
        Suppose the left node represents a wholesaler, and the right node represents a retailer
        How much material was the wholesaler able to supply to the retailer?
        In:
        - nodeID (dictionary key)
        - thisNode (node Object)
        - period (integer)
        Out:
        - number of received materials (numeric); also appended to thisNode.receivedMats
        '''
        # Get the number of inbound materials from what the previous node was able to supply
        receivedMatsNum = 0
        upstreamNum= 0  # NOTE(review): unused local
        upstreamList = []
        finishedMatsList = []
        # For every node that's not the end supplier (period 0 receives nothing)
        if len(self.nodeDict[nodeID].recDict) != 0 and period != 0:
            # Step 1: get the upstream nodes of this node
            upstreamList = self.nodeDict[nodeID].recDict[nodeID]
            for upstreamNode in upstreamList:
                # Step 2: for every upstream node, find out its total number of downstream nodes
                downstreamNum = len(self.nodeDict[upstreamNode].preDict[upstreamNode])
                # Step 3: finished materials the upstream node could deliver (from last period's supply)
                totFinishedMatsNum = self.nodeDict[upstreamNode].supplyArray[period-1]
                # Goods deliverable to this node = total finished goods split evenly over downstream nodes
                finishedMatsNum = totFinishedMatsNum/downstreamNum
                # Collect each upstream node's deliverable quantity
                finishedMatsList.append(finishedMatsNum)
            # AND relationship: receipts are limited by the scarcest component
            # NOTE(review): raises ValueError if upstreamList is empty — presumably recDict
            # entries are never empty lists; confirm against linkNode usage.
            receivedMatsNum = min(finishedMatsList)
        elif len(self.nodeDict[nodeID].recDict) == 0 and period != 0:
            # orderArray entries may be scalar or array-like; .sum() handles the latter
            if iterable(self.nodeDict[nodeID].orderArray[period - 1]):
                receivedMatsNum = max(self.nodeDict[nodeID].orderArray[period - 1].sum(), 0)
            else:
                receivedMatsNum = max(self.nodeDict[nodeID].orderArray[period - 1], 0)
        thisNode.receivedMats.append(receivedMatsNum)
        return receivedMatsNum
def computeDemandOrCase(self, nodeID, thisNode, demandArray, demandMethod, period):
'''
Compute the demand for a given node (using a demand array as reference)
In:
- nodeID (dictionary Key)
- thisNode (Node)
- demandArray (array of numerics)
- period (integer)
'''
downstreamNum = 0
downstreamList = []
downstreamNodeDemand = 0
totDemand = 0
demandUpstreamNum = 0
# Pull demand from the demand array if it's the retailer
# Upstream nodes look at what the previous node's order was (that is in turn their demand)
### THIS WILL ALSO NEED TO CHANGE TO BE GENERALIZABLE TO THE MULTI-RETAILER CASE
# if nodeID == 0:
if len(thisNode.preDict) == 0:
if demandMethod == "useFileDemand":
totDemand = demandArray[period]
else:
totDemand = np.random.normal(loc=thisNode.demandMean, scale=thisNode.demandStDev)
else:
#Step 1: get the number of downstream nodes this node has
downstreamList = thisNode.preDict[nodeID]
downstreamNum = len(downstreamList)
#Step 2: Calculate the number of units demanded for each downstream node
for downstreamNode in downstreamList:
#Step 3: find out the number of upstream nodes for the current downstream node
demandUpstreamNum = len(self.nodeDict[downstreamNode].recDict[downstreamNode])
#Find out this downstream node's total demand for this period (assuming no delay in placing orders to upstream nodes)
downstreamNodeDemand = self.nodeDict[downstreamNode].orderArray[period]
#This node's demand from the current downstream node = total demand/#upstream nodes for this downstream node
thisNodeDemand = downstreamNodeDemand/demandUpstreamNum
#Sum up all demands to get the total demand for this node
totDemand += thisNodeDemand
#print("Node " + str(thisNode.id))
#print("Demand " + str(totDemand))
#print()
# Incur the demand by appending it to the node's demand array (this is basically just being pulled from the file)
thisNode.demandArray.append(totDemand)
return totDemand
def computeDemandAndCase(self, nodeID, thisNode, demandArray, demandMethod, period):
'''
Compute the demand for a given node (using a demand array as reference)
In:
- nodeID (dictionary Key)
- thisNode (Node)
- demandArray (array of numerics)
- period (integer)
'''
downstreamNum = 0
downstreamList = []
downstreamNodeDemand = 0
totDemand = 0
demandUpstreamNum = 0
# Pull demand from the demand array if it's the retailer
# Upstream nodes look at what the previous node's order was (that is in turn their demand)
# THIS IS CHANGED
# if nodeID == 0
if len(thisNode.preDict) == 0:
if demandMethod == "useFileDemand":
totDemand = demandArray[period]
else:
totDemand = np.random.normal(loc=thisNode.demandMean, scale=thisNode.demandStDev)
else:
#Step 1: get the number of downstream nodes this node has
downstreamList = thisNode.preDict[nodeID]
downstreamNum = len(downstreamList)
#Step 2: Calculate the number of units demanded for each downstream node
for downstreamNode in downstreamList:
#Find out this downstream node's total demand for this period (assuming no delay in placing orders to upstream nodes)
downstreamNodeDemand = self.nodeDict[downstreamNode].orderArray[period]
#Sum up all demands to get the total demand for this node
totDemand += downstreamNodeDemand
#print("Node " + str(thisNode.id))
#print("Demand " + str(totDemand))
#print()
# Incur the demand by appending it to the node's demand array (this is basically just being pulled from the file)
thisNode.demandArray.append(totDemand)
return totDemand
def satisfyDemand(self, receivedMats, thisNode, thisPeriodDemand, backordersFulfilled):
'''
Given the demand, as well as the supply for a node for a current period, compute
how much of this node's demand can be supplied (and how many backorders result)
Record this information in the node
In:
- Received materials (numeric)
- node object (Node)
- demand for this period (numeric)
'''
availableSupply = receivedMats + max(thisNode.startingInventory,0)
# Record demand that can be supplied, along with the backorders that were fulfilled
if iterable(thisPeriodDemand):
totDemand = thisPeriodDemand.sum()
totDemand = thisPeriodDemand
suppliableDemand = min(availableSupply, totDemand)
thisNode.supplyArray.append(suppliableDemand + backordersFulfilled)
thisNode.backordersFulfilledArray.append(backordersFulfilled)
def computeEIAndCosts(self, thisNode, thisPeriodDemand, receivedMats, backordersFulfilled):
if iterable(thisPeriodDemand):
thisNode.endingInventory = thisNode.startingInventory - thisPeriodDemand.sum() + receivedMats + backordersFulfilled
else:
thisNode.endingInventory = thisNode.startingInventory - thisPeriodDemand + receivedMats + backordersFulfilled
thisNode.backorders = -1 * min(0, thisNode.endingInventory)
thisNode.backorderRecord.append(thisNode.backorders)
thisPeriodCost = max(0,thisNode.endingInventory*thisNode.holdingCost)+max(0,-1*thisNode.endingInventory*thisNode.stockoutCost)
thisNode.endingInventoryRecord.append(thisNode.endingInventory)
thisNode.costRecord.append(thisPeriodCost)
def getPdToSimulate(self, demandArray):
availablePdToSimulate = 0
if len(demandArray) == 0:
availablePdToSimulate = self.periodsToSimulate + 1
else:
availablePdToSimulate = len(demandArray)
return availablePdToSimulate
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def multiNodeVerify(self, demandArray=[], BSLevel=60, connectionCase="or", demandMethod="useRandomDemand", seed="N/A", printOut=True):
'''
Run instance of the game using a preset demand
In:
- demandArray: array of numerics, containing demand for each node
- BSLevel: (optional, default=60) Base stock level (numeric)
'''
# Apply an RN seed if we need one
if seed != "N/A":
np.random.seed(seed)
if type(BSLevel) == dict:
# Read the base stock level in from the dictionary
for nodeID in sorted(self.nodeDict.keys()): self.nodeDict[nodeID].initGame(BSLevel[nodeID])
else:
# Reset lists from previous runs
for nodeID in sorted(self.nodeDict.keys()): self.nodeDict[nodeID].initGame(BSLevel)
availablePdToSimulate = self.getPdToSimulate(demandArray)
# Determine order quantity for the current period for all nodes (prior to demand determination)
for period in range(0, min(availablePdToSimulate, self.periodsToSimulate)):
for nodeID in sorted(self.nodeDict.keys()):
thisNode = self.nodeDict[nodeID]
# Record starting inventory, and get materials from upstream node
thisNode.startingInventoryRecord.append(thisNode.startingInventory)
if connectionCase == "and":
receivedMats = self.getReceivedMaterialsANDCase(nodeID, thisNode, period)
else:
receivedMats = self.getReceivedMaterialsOrCase(nodeID, thisNode, period)
# Determine how many backorders can be fulfilled
backordersFulfilled, receivedMats = thisNode.getBackorders(receivedMats)
# Compute the demand, and satisfy as much of it as we are able (during this period)
if connectionCase == "and":
thisPeriodDemand = self.computeDemandAndCase(nodeID, thisNode, demandArray, demandMethod, period)
else:
thisPeriodDemand = self.computeDemandOrCase(nodeID, thisNode, demandArray, demandMethod, period)
self.satisfyDemand(receivedMats, thisNode, thisPeriodDemand, backordersFulfilled)
# Order the same quantity as the demand from this period
qtyToOrder = thisPeriodDemand
thisNode.orderArray.append(qtyToOrder)
# Compute the ending inventory and costs for this period (and print resulting statistics)
self.computeEIAndCosts(thisNode, thisPeriodDemand, receivedMats, backordersFulfilled)
if printOut == True and (period == self.periodsToSimulate - 1):
self.printEndStats(nodeID, thisNode, period)
# Make the starting inventory equal to ending inventory from previous period
thisNode.startingInventory = thisNode.endingInventory
class Node:
    def __init__(self, id, h=3, p=100, nodeType = "retailer",demandMean=50, demandStDev=10):
        '''
        Node represents a single node on our supply network
        In:
            - NodeID (required, we recommend using integers 0-inf)
            - h (unit holding cost) (numeric), default = 3
            - p (unit stockout cost) (numeric), default = 100
            - nodeType (description of node type) (string), default = "retailer"
            - demandMean (mean of the demand function) (numeric), default = 50
            - demandStDev (standard deviation of the demand function) (numeric), default = 10
        Note that we are currently assuming normal demands (perhaps specify other distributions if you want)
        '''
        self.id = id
        self.holdingCost = h
        self.stockoutCost = p
        self.baseStockLevel = 0
        self.startingInventory = self.baseStockLevel
        self.endingInventory = 0
        self.nodeType = nodeType
        self.backorders = 0
        # This is assuming that in this game, we have an idea of the distribution params for demand
        self.demandMean = demandMean
        self.demandStDev = demandStDev
        # Add 2 dictionaries, one for recording recipients and one for predecessors, to each node for cross-node implementations
        self.preDict = {}
        self.recDict = {}
        # Create a dictionary that stores the nodes that have "AND" relationships with the current node
        self.ANDDict = {}
    def initGame(self, BSLevel):
        '''
        Reset all per-run record arrays and inventory state for a fresh
        simulation, using BSLevel as both the base stock level and the
        starting inventory for period 0.
        '''
        self.demandArray = []
        self.orderArray = []
        self.supplyArray = []
        self.backordersFulfilledArray = []
        self.startingInventoryRecord = []
        self.endingInventoryRecord = []
        self.backorderRecord = []
        self.receivedMats = []
        self.costRecord = []
        # Initialize the base stock level for period 0 and beyond, also starting inventory for period 0
        self.startingInventory = BSLevel
        self.baseStockLevel = BSLevel
        self.backorders = 0
        self.endingInventory = 0
    def getBackorders(self, receivedMats):
        '''
        Serve outstanding backorders from the newly received materials first.
        In:
            - receivedMats: materials received this period (numeric)
        Out:
            - (backordersFulfilled, receivedMats): units of backorders served,
              and the materials left over after serving them
        '''
        backordersFulfilled = 0
        # Serve backorders with the new supply first
        if self.backorders > 0:
            if receivedMats >= self.backorders:
                # Enough supply to clear all outstanding backorders
                backordersFulfilled = self.backorders
                self.backorders = 0
                receivedMats = receivedMats - backordersFulfilled
            else:
                # BUG FIX: only receivedMats units can actually be fulfilled.
                # The original reported self.backorders - receivedMats (the
                # *unfulfilled* remainder) as the fulfilled quantity, which
                # double-counted shortage in downstream inventory accounting.
                backordersFulfilled = receivedMats
                self.backorders = self.backorders - receivedMats
                receivedMats = 0
        return backordersFulfilled, receivedMats
    # New code!!! - Written by Sean (print method for Node class)
    def __str__(self):
        # Print method for the node class (be able to print out all node fields we're interested in)
        myString = "Node " + str(self.id) + "\n"
        myString += "nodeType = " + str(self.nodeType) + "\n"
        myString += "holdingCost = " + str(self.holdingCost) + "\n"
        myString += "stockoutCost = " + str(self.stockoutCost) + "\n"
        myString += "baseStockLevel = " + str(self.baseStockLevel) + "\n"
        myString += "\nNode Links\n"
        myString += "Predecessor Node IDs = " + str(self.preDict) + "\n"
        myString += "Recipient Node IDs = " + str(self.recDict) + "\n"
        myString += "AND Relationships this Node has = " + str(self.ANDDict) + "\n"
        myString += "\nDemand and Supply\n"
        myString += "demandArray = " + str(self.demandArray) + "\n"
        myString += "orderArray = " + str(self.orderArray) + "\n"
        myString += "supplyArray = " + str(self.supplyArray) + "\n"
        myString += "\nInventory Statistics\n"
        myString += "startingInventoryRecord = " + str(self.startingInventoryRecord) + "\n"
        myString += "endingInventoryRecord = " + str(self.endingInventoryRecord) + "\n"
        myString += "backorderRecord = " + str(self.backorderRecord) + "\n"
        myString += "receivedMats = " + str(self.receivedMats) + "\n"
        myString += "costRecord = " + str(self.costRecord) + "\n"
        myString += "backordersFulfilledArray = " + str(self.backordersFulfilledArray) + "\n\n"
        return myString
# Driver script: load the verification demand data, build a 3-node network,
# and run the multi-node verification simulation.
df = pd.read_csv("3_node_60_60_60.csv")
# Sanity check: print the last recorded inventory value from the reference run
print(df["IS 3 Node"][len(df["IS 3 Node"])-1])
myInvSim = InvSimulation()
# Node creation: Key (mandatory), holding cost, stockout cost, and fixed order cost
myInvSim.createNode(nodeID = 0, h = 10, p = 100, demandMean=50, demandStDev=10)
myInvSim.createNode(nodeID = 1, h = 10, p = 25, demandMean=50, demandStDev=10)
myInvSim.createNode(nodeID = 2, h = 10, p = 25, demandMean=50, demandStDev=10)
#myInvSim.createNode(nodeID = 3, h = 10, p = 0, demandMean=50, demandStDev=10)
# Node linkage: start Node Key, end Node Key (node 0 is the retailer, fed by nodes 1 and 2)
myInvSim.linkNode(startNode = 0,endNode = 1)
myInvSim.linkNode(startNode = 0,endNode = 2)
#myInvSim.linkNode(startNode = 1,endNode = 3)
#myInvSim.linkNode(startNode = 2,endNode = 3)
# Node 0 requires 3 units from node 1 per unit produced (AND relationship)
myInvSim.addAndRelationship(upstreamNodeID = 1,downstreamNodeID = 0, numNeeded=3)
#for i in myInvSim.nodeDict.values():
#    pprint.pprint(i.preDict)
#ANDRelationIDList = [2,3]
#myInvSim.addAndRelationship(4,ANDRelationList,)
#print(myInvSim.nodeDict[0].recDict[0])
#print(myInvSim.nodeDict[2].preDict[2])
# Put node ID: base stock level, nextNodeID...
startInvDict = {0: 60, 1: 60, 2: 60}
# Currently assume that everyone plays with the same policy
myInvSim.playSimulation(gameType = "multiNodeVerify", BSLevel=startInvDict, demandMethod="useFileDemand", fileDemand=df["IO"], connectionCase="and", printOut=True)
#myInvSim.playSimulation(gameType = "multiNodeVerify", BSLevel=startInvDict, demandMethod="useRandomDemand", seed=60, connectionCase="and", printOut=True)
###Output
Node 0
(IL) Starting Inventory record for node 0:[60, 19.176249730000002, 7.852610339999998, -19.19040905, -21.04642566999999, 21.44287896000001, 8.46862917, 1.1208494700000031, 17.298769710000002, 14.300057680000002, 8.831042189999998, 16.512934379999997, -1.384173969999999, 0.15576768000001096, 1.863777259999999, 23.54079935, -1.7492203400000008, 30.684211230000006, 23.496188269999998, -0.36212937999999895, -7.002773519999991, 1.4978545100000105, 20.34797524000001, -1.9206440599999866, -6.419065689999982, -16.66335404999998, -2.7133895199999927, -1.2274013599999947, -11.465920780000005, -17.54402458, 1.985553319999994, 10.333739789999996, 8.348442799999987, 20.95334325999999, -0.36735494000000557, -1.9418782800000045, 26.86334140999999, 7.002471499999984, 14.033030059999987, 9.321330359999983, 22.940767519999987, 2.725434529999987, 32.190332449999985, 1.3268761699999843, 32.49268793999998, 17.007861189999982, 9.51674417999999, 2.9750997399999832, 0.22282443999998236, -1.573100370000013, 16.165690879999985, 19.884305189999985, 18.85877845999999, -3.7039548600000174, 4.491466379999984, -4.195951240000014, 10.713261419999988, -2.771963250000013, 8.17334867999999, 12.545431629999989, -8.582602010000016, 0.36934460999998464, 8.390963589999984, -1.6062320700000114, -10.757886830000018, -3.901473270000018, -3.1973687100000276, -7.178830880000014, 16.195016059999986, 3.52572078999998, 32.849411519999975, 19.995467829999978, 16.737441709999974, 5.01100040999998, 28.59944179999998, -0.8401078600000247, 28.484151649999983, 8.087037189999975, 31.53482735999998, 19.89759233999998, -2.6783857200000227, 7.202930279999983, 0.527932679999978, 16.42561370999998, 16.490380709999975, 3.681185169999978, 17.09870318999998, -0.867690730000021, -12.626835230000005, -13.692268450000007, 15.845675169999986, 3.0532829899999783, 5.911533249999977, 10.173230809999978, 15.87024821999998, 6.365083409999976, 14.19551312999998, 25.62426690999998, 7.1241760999999855, 17.90595835999998, 
0.4034744299999815, 6.61806070999998, 14.934259149999981, 23.627936009999978, 5.34583499999998, -1.3494959900000225, 6.073190459999985, 9.130492609999976, 11.638327339999975, 37.54976909999998, -0.3944421700000227, 21.808191379999986, 10.025726639999977, 8.625481929999978, 7.8510074799999785, 28.14784680999998, -2.0859241100000183, -5.475513820000007, 5.764734360000009, 22.451936099999994, 1.2037093199999944, 6.126590919999991, 13.775793579999991, 13.220228429999992, 2.458690869999991, -2.1464623900000106, -2.52566560999999, 23.209446450000016, 25.849945529999992, 14.068671139999992, 17.884255589999995, 6.049647999999991, 11.655637369999994, 13.73019038999999, 18.398320179999992, 8.765742339999996, 22.564585769999994, 4.152287089999994, 12.734524779999994, 5.028357819999989, 3.7550982699999906, 15.276584889999995, 18.133994489999992, 17.56038783999999, -0.6229133600000054, -1.7323779299999842, 7.9568462400000115, 12.54504218999999, 5.714961079999995, -4.877898370000004, -16.335289489999987, 10.548598450000007, 4.289440039999995, 4.253844019999995, 23.95761096999999, 20.662548779999995, 14.640800769999991, 21.378535339999992, 23.184749909999994, 6.832110159999992, 17.029387709999995, 4.74522490999999, -0.36938514000000566, 14.486509790000007, 4.072091329999992, 4.502498079999995, 30.102652649999992, 21.15986328999999, 4.696725159999993, 9.465379529999993, 9.628454269999992, -17.063805470000013, -15.967433689999993, -3.4620371000000176, 3.94060609000001, 14.447234619999989, 20.38653302999999, 19.626834379999984, 26.006606349999984, -5.244719680000017, -3.503993489999992, -5.648439240000009, -2.4164381399999755, 5.921299639999994, -8.011653250000002, -7.001490469999965, 9.642559310000003, 13.84792153, 10.086146149999998, 13.172099029999998, 24.57928434, 20.914755149999998, 9.839728639999997, 6.788790740000003, 5.656272459999997, -2.8488880900000026, 2.1876001800000253, 17.653025919999997, 4.600463159999997, 0.24345799999999684, 12.55171353, 21.11789332, 
-0.07572130999999871, 7.538584590000028, 2.826779639999998, -4.8624724999999955, -1.354012209999965, 12.470185630000003, 15.78547958, 7.31189770000001, 0.9510193999999998, 7.530359220000001, -21.1810379, -0.5849486799999681, -9.801421079999997, -4.70580396999997, 7.1241091899999915, 17.062531639999996, 12.20188156999999, -4.594826420000011, -1.0262168099999727, 0.9036269099999927, -13.83764865000002, 0.045991860000015095, 29.15221128999998, 8.651049709999981, -21.16365689000002, -20.483739849999978, 8.14639603999997, -6.895934330000031, -13.89005934999998, 13.809841490000025, 9.218102789999968, 0.3817318499999658, 15.108813459999965, 17.66481086999996, 7.514928879999964, 5.218426619999967, 10.607248289999966, 14.62914089999996, 8.591295039999963, 3.3612793199999658, -11.256932360000029, 11.373454300000027, 27.586223189999956, 18.554208219999957, 12.71976808999996, 9.142393689999956, -0.7440720600000432, -1.8167875399999787, 14.313324170000023, 6.6896334599999605, 5.28050350999996, 26.823597949999957, 22.843168679999955, 15.209125169999957, 0.7044171399999541, 0.5032445799999579, 13.265647639999955, 14.155095989999957, 16.893958939999955, 17.609357269999954, 4.139417809999955, 17.094950259999955, 8.732987489999957, -9.354029920000045, -11.285123769999977, 0.7467072200000331, 16.17674089999997, 9.18279275999997, 11.78773456999997, 18.10694203999997, 15.36018994999997, 17.49930401999997, 6.612203799999968, 5.261306629999972, -12.250607610000024, -0.9776758699999633, 28.086068779999977, 15.056707389999975, 17.798626819999978, 0.04568955999997826, 7.337809699999987, -4.560163310000014, 17.232872290000053, 3.034415459999977, 9.198909069999978, 16.221413349999978, 12.823742009999975, 18.36667802999998, -2.6297052200000195, 8.937741140000043, 6.0199462999999795, 4.0030665999999755, -0.8360531800000217, 2.330501090000041, 22.98068669999998, 10.399540659999978, 2.9523063499999793, 19.087837929999978, 12.374420479999976, 21.141086279999982, 16.198160469999976, 
3.684817119999977, 6.961212729999978, 22.17517324999998, -7.506512290000025, 20.10486932000004, 0.474494469999982, 1.2989023999999745, 15.494125699999984, 17.230677319999977, 7.940122129999978, 11.498150979999977, 5.555953779999982, 10.44715344999998, 12.23829370999998, 16.476339779999982, -4.678048030000021, -4.440328509999965, 13.383132019999977, 19.516792019999976, 4.715017829999979, 3.92041288999998, 14.40297538999998, -1.7062169500000195, 0.3393663800000368, 1.4569891599999778, -0.1954458400000192, -5.767167559999962, 25.35077531000004, 5.16585268999998, 12.55477081999998, -5.397902920000028, 7.828135480000029, 18.226252549999977, 6.03864498999998, 12.848536519999982, 10.393747279999978, 19.84353359999998, 14.95740232999998, 25.549101829999977, 0.5975733199999809, 17.57140521999998, 3.9688312599999804, 0.37975948999997655, 7.5057890099999796, 15.134442249999978, 4.438814989999976, 19.02119931999998, 13.088026699999986, -0.6634130000000198, 1.935338170000044, 7.139124769999981, 11.135873239999981, 4.658500609999976, 16.25773721999998, 4.718685509999979, 3.7680184999999824, 14.115260759999977, 11.691111819999982, 4.677062729999982, 2.56562281999998, 5.644931719999981, 13.798743879999982, 15.682428219999977, 20.622217659999976, 22.40582827999998, 15.536092619999977, 33.627127999999985, 17.410615949999983, 7.37111153999998, 22.93988308999998, 20.147757899999974, 11.881606829999981, 5.7543375799999765, 4.815038889999975, 30.970259859999977, 5.9107446099999805, 20.17533195999998, 13.817790989999978, 4.797839279999977, 17.87268840999998, -2.778396000000015, 9.54921659000005, 2.2268667599999787, 16.08829153999998, 17.765030269999976, 13.937773829999976, 18.290447139999976, 15.015219879999975, 7.07937906999998, 13.557671819999982, 25.933414939999977, 22.51199921999998, 27.311764609999976, 14.359320239999981, 9.94979378999998, 23.559662699999983, 24.815616329999976, 13.16159039999998, 20.357585209999982, 8.661921469999982, 12.37371900999998, 13.978120099999977, 
4.7281591199999795, 7.891580299999973, 11.803994769999981, 5.218850099999983, 3.5813478099999756, 32.53298384999998, 17.02583473999998, 5.817659559999981, -1.1503125700000183, 27.553158240000045, -3.363832200000026, 26.10167239000004, 16.53643341999998, 8.246334629999978, 8.504194789999978, -2.285934520000019, 29.14057169000005, 0.3170118199999763, 8.620008689999977, 9.120589439999982, 0.3470980599999791, 9.04191867999998, -1.7006035300000235, -3.2828804199999553, 19.526930880000045, 7.946080109999976, 26.05204346999998, 15.313677679999977, 9.655158459999981, 3.928909969999978, 18.936449629999977, 11.000666739999978, 20.33015350999998, -0.9485371700000229, 31.206198060000048, 8.215423229999978, 29.47414677999998, -18.061421610000014, 3.8378731600000613, 15.376722609999987, 22.708288839999987, -0.7678086000000164, 6.7847842300000565, 17.37041681999999, 23.644317309999984, -8.331812470000017, -13.425534249999941, -5.345055289999934, 8.450852040000065, 0.9153640699999883, 19.795977939999986, 22.008725129999988, 24.287219849999985, 13.580388659999983, 30.986754879999985, 18.584528359999986, 9.850536209999987, -8.893708390000015, -13.142776629999936, -3.757978769999937, 14.768003769999993, 0.63370531999999, 17.765502559999995, 24.170664439999996, 18.735257389999994, 11.51104140999999, 0.5846697199999937, 2.5242930799999925, 12.396706569999992, -3.656106490000006, 10.552424440000067, 15.16486995999999, 8.134829599999996, -6.177922480000014, -8.46680241999995, -5.972480609999934, 15.875625490000068, 7.579690040000003, 19.15531239, 1.9611611300000007, 3.1561035000000004, 6.990578720000002, 17.77062375, 8.14806359, 8.214542100000003, 9.204792750000003, 21.978549289999997, 8.134504980000003, 8.06396377, 17.94765504, 7.766139580000001, 23.1817263, 9.942647139999998, 25.64925532, 0.2324147200000013, 2.940352449999999, 26.402896300000002, 12.427350299999993, 13.980982079999997, 27.321758199999998, 29.06857407, 5.7795671399999975, 15.62905576, 16.529641050000002, 
3.946888549999997, 3.9149550699999978, 12.89135598, 17.750608149999998, 10.095381840000002, -7.016529090000006, 0.6297688000000576, 7.1384651099999985, 4.584311159999999, -2.3877012399999984, 25.300818370000066, 7.7525326299999975, 8.880169359999996, 21.279079269999997, 6.642865239999999, 8.43744383, 28.07082426, 12.926733650000003, 5.042452949999998, 5.74937224, 9.06574345, 31.40097618, 7.427464489999998, 17.70912081, 19.29648968, 23.99865782, 4.143939920000001, 10.657851489999999, 16.817816649999997, 23.54984776, 5.005763430000002, 10.649061770000003, -23.683556609999997, 2.5971043200000707, 22.324701549999993, 7.832782769999994, -7.521028010000002, -5.484747079999934, -2.870763030000006, 10.865609220000074, 21.86719949999999, -4.743603010000008, 11.82425257000007, 5.6227649199999945, 23.415357719999996, -10.790887660000003, 0.351591610000078, 18.68744371999999, 19.622779069999993, 0.9693591299999937, 24.64435681999999, 12.234856179999994, 19.83246015999999, 11.655557199999997, -3.439401470000007, 10.11335371000007, 10.259240559999995, 0.17994707999999093, 11.92206706999999, 11.730296029999991, 1.904544009999995, 16.91787382999999, 7.1998646699999895, 9.77068581999999, 17.247770229999993, 4.978997509999992, -1.6275721900000093, 15.037044380000069, 6.692882889999993, 5.89658785999999, 21.566426189999994, 5.380136079999993, -0.7038586300000063, 15.11763618000007, 9.001275529999994, 7.2250022599999895, 30.068319409999994, 13.405907849999991, 16.60453155999999, -3.3157481500000046, 11.736187470000075, 7.621786879999995, 20.976934619999994, 11.941002559999994, 3.705297829999992, 4.080940719999994, 24.883878239999994, 17.561395059999995, 26.75633583999999, 12.32797743999999, 43.02228996999999, -0.5000651200000128, 24.96818500000006, 17.762151849999984, 12.980315229999984, 10.735685749999988, 15.303610029999987, 9.296754499999984, 22.078651059999984, 26.416721509999988, 19.445519389999987, 2.016220039999986, -0.1886408600000138, 9.020073580000059, 14.86701200999999, 
16.634106649999985, 4.896883929999987, 10.686536989999986, 17.62345048999999, 17.191765479999987, -5.282238120000017, 6.720037430000055, 17.049014089999986, 7.8267793899999845, 2.094774639999983, 21.160603829999985, 15.484741549999988, -2.773863300000002, 0.8936064300000695, 4.1451547199999865, -0.6438260600000163, 9.292672660000058, 19.201733389999987, 17.114767259999986, 5.380780589999986, 15.634721709999987, 12.971941139999984, 14.427487379999988, 2.1128961799999857, 8.365898379999983, 0.31424013999998834, 0.04331662999998542, 4.8514889499999825, 18.069622069999987, 16.838427309999986, 12.533337429999989, 10.413579369999987, -4.631209290000015, -2.037293019999943, 27.865963269999988, 4.400742639999983, 17.577111249999987, -3.632260520000017, 15.991021740000058, 11.160216309999988, 6.230239639999986, 22.896914299999985, 3.870689089999985, -11.280296580000012, -17.271029419999948, 23.393870040000053, 16.042110209999983, 28.962856679999987, 11.420910019999987, 11.543351959999988, 7.7211208299999825, 7.187438809999989, 20.82209909999999, 2.408135279999989, 47.01742480999999, 2.4864407599999883, 18.039973289999985, 22.044582969999986, 28.121737089999986, 23.485632169999988, 14.268703499999987, 13.352259419999989, 16.592884019999985, 38.03410703999998, 14.012114719999982, 15.607213829999981, -8.432877750000024, 9.017544020000045, 23.785281939999983, 7.092227359999988, 16.623695639999987, 17.95065402999999, 10.258737999999987, -0.6337544900000154, 13.36644604000005, 3.1928286399999877, 12.436003679999985, 0.45946824999998626, 8.371062069999986, 3.0819798799999845, 8.660814749999986, 26.726350739999987, -15.647238080000008, -0.4054541299999457, 13.349006409999987, 10.198783809999988, 13.762121659999984, 22.37459581999999, 7.866697789999989, 20.685443699999986, 18.126954479999988, 15.56546489999998, 10.837763619999983, 3.920814639999989, 2.672081509999984, 2.8761885699999823, 13.171929109999986, 9.224771519999983, 6.280583369999988, 17.65417397999999, 9.729641309999984, 
12.977270579999988, 17.79937634999999, 5.827384259999988, 19.412444489999984, 13.519597179999984, 4.305749609999985, 1.8476174899999833, 8.942415539999985, 19.14605978999999, 5.808276479999989, 7.278090459999987, -5.43384776000002, 6.409974060000046, 10.303513969999983, 23.44123238999999, 4.784711059999985, 18.991671369999985, 13.855040219999985, 6.782563209999985, 13.193177959999986, 17.377241959999985, -8.375749670000019, -12.118546299999956, 16.696254250000052, 17.197810429999976, 16.15579633999998, -8.705464360000015, -10.789110869999945, 7.831036590000046, 20.98329104999997, 4.473625469999973, 9.01540539999997, 14.029741429999973, 29.57645789999997, 7.796288299999972, 2.3873109599999722, 16.31889637999997, 9.212967129999974, 15.719781549999972, 26.153873909999973, 6.967282639999972, 14.035248909999972, 3.184238979999975, 14.426549689999973, 23.10739776999997, 0.8611200199999729, 23.001932769999975, 1.639180849999974, 9.966113179999972, -0.8915598800000311, -13.098249629999948, 17.06635388000005, 7.091926289999961, 11.334845589999958, 22.84314929999995, 31.990966259999958, 10.365074159999956, -16.49614975000005, -0.8455086999999679, 22.863800189999942, 4.755496649999941, 11.736605309999945, 16.348175039999944, -1.358111260000058, -0.01937934999996571, 8.978455629999942, 10.121620119999946, 23.001190359999946, 9.72668715999994, 12.890643149999946, 4.4140915799999405, 3.268203809999946, 13.978165669999946, 8.461878549999945, 14.241990449999946, -6.500655850000058, 6.256188520000038, -0.6181367200000594, -8.05755667999997, 1.9110863600000343, 3.997243179999934, 7.5902320999999375, 5.588317979999935, 23.887914889999934, 13.57940184999994, 13.482193119999934, -5.437612660000063, -0.8650773099999682, 13.40060685999994, -2.142017470000063, -7.970837979999963, 7.916342040000039, 5.569342719999952, 17.05967465999995, 14.210151769999953, 16.24675344999995, 10.513171559999954, 9.477816249999947, 5.423494509999948, 13.521818079999953, 22.66572924999995, -4.051994950000044, 
17.85395205000004, 2.486512699999949, 25.49060460999995, 9.530861989999948, 17.33165150999995, 14.175881909999951, 3.0922212099999484, 1.305680469999949, 7.708067499999949, 27.15799766999995, 25.840355629999948, 7.925789589999951, 27.43294295999995, 7.481384819999953, 7.404527689999952, 1.782848079999951, 12.513459489999953, 10.382287549999951, -6.7155106600000565, -14.745017649999966, 11.814201320000038, 19.67684414999996, 5.118581269999957, 22.33816032999996, 15.890082699999965, 5.896897849999959, -1.8854813800000443, 23.71170296000004, 13.591022069999958, 22.89373192999996, 17.12036338999996, 9.55733346999996, 10.069126629999957, 9.242023549999956, 6.838190789999956, 22.77762286999996, 25.397184019999955, 13.347744619999958, 0.8304557699999577, -0.07518006000004362, 12.611102440000046, 14.631211519999958, 20.41143143999996, 16.739527209999956, 15.025772129999957, 14.133595019999959, 2.398356169999957, 16.274279609999958, 7.845849039999955, 9.134059989999955, 10.988029369999957, 8.422250439999956, 12.214015309999958, 14.904721489999957, 10.474641109999958, -10.051987310000044, -7.273864689999954, 6.870670039999972, 19.804053479999972, 20.48755128999997, 6.397680899999969, 13.481915849999972, 6.678717549999973, 29.05751110999997, 6.212801779999971, 14.42867998999997, 0.606535029999975, 18.15321194999997, 8.796888249999974, 2.7878053599999717, 20.174291599999975, 7.853275749999973, 34.872113059999975, 8.280112659999975, 7.742328699999973, -6.910178440000024, -15.61484650999995, -8.77708503999996, 0.07345815000003597, 1.2825314799999603, 8.520886169999955, 15.40497236999996, 12.653667289999959, 6.7140977699999596, 2.6522461599999545, -11.725374210000034, 1.7199214200000412, 21.128835799999962, -2.3423981000000396, 9.323075220000035, 14.464216689999965, -4.426065780000037, 3.1922262800000425, -3.8667710600000333, 4.923220780000044, 27.515817229999968, 12.829383049999961, 20.174090609999965, 5.987230059999966, 1.3620093599999663, 9.623738139999965, 15.837219199999971, 
23.21874497999996, -3.405143710000033, 12.247465100000042, 13.666963059999965, 14.33197359999997, -2.037878030000037, 18.401670500000044, 14.236904189999962, 20.268174729999963, 2.638732059999967, 1.4428615599999617, 19.317440059999967, 21.592717289999968, 23.908968499999965, 17.506891379999963, 15.005966129999962, -0.580753480000034, 9.264412330000042, 15.992804099999965, 26.471756089999964, 0.006696819999966408, 16.234953559999965, 12.029009199999962, 8.419745399999961, -9.59901349000004, 7.274934220000041, -0.6730179100000342, -13.264467499999945, 10.292200210000047, 20.362607529999963, 7.3592103399999615, -1.329325690000033, 32.59268151000005, -3.7207357700000365, 22.474035680000046, 31.70129549999996, 5.101974629999962, 8.465151259999956, 19.02626422999996, 9.66725333999996, -6.187709080000047, 15.275089000000037, 13.302390699999961, 6.987941079999956, 16.517635329999955, -1.3024405000000456, 13.77132835000004, 3.3903450799999604, -12.653481720000045, 14.714736840000043, -9.140633080000043, -8.872111119999957, -6.476751050000047, 16.392865930000042, 5.230860149999955, -6.771491840000039, 5.166410240000047, 1.3056357999999548, 1.0462978599999602, 3.3006739399999603, 14.499600289999954, 27.784779919999956, 4.915992869999954, 12.75594532999996, 30.099039479999956, 9.21536255999996, 10.206760989999957, 9.39304021999996, 3.452675509999956, 31.15017092999996, 6.956269869999957, 2.955596239999956, 21.32166697999996, 20.307904409999956, 6.195153639999958, 10.622365199999955, 5.270282069999958, 17.189754429999958, 22.13958841999996, 0.9115381099999595, 15.508509889999956]
(IS) Inbound supply record for node 0:[0, 40.82375027, 52.14738966, 60.0, 79.19040905, 38.55712103999999, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 60.00000000000001, 59.84423231999999, 58.13622274, 36.45920065, 60.00000000000001, 29.315788769999994, 36.50381173, 60.000000000000014, 60.36212938, 58.502145490000004, 39.65202476, 60.0, 61.92064406, 64.49842163, 62.71338952, 60.0, 61.22740136, 70.23851942, 58.01444668, 49.66626021, 51.651557200000006, 39.04665674, 60.0, 60.36735494, 33.136658589999996, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 60.0, 43.83430912, 40.11569481, 41.14122154, 60.0, 55.50853362, 60.0, 49.28673858, 60.0, 51.826651319999996, 47.45456837, 60.0, 59.63065539, 51.60903641, 60.0, 61.60623207, 63.90147326999999, 60.00000000000001, 63.19736871, 43.80498393999999, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.00000000000001, 31.515848349999995, 51.91296281, 28.46517264, 40.10240766, 60.00000000000001, 52.797069719999996, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.00000000000001, 60.86769073, 71.75914449999999, 44.15432482999999, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 60.00000000000001, 53.926809539999994, 50.86950739, 48.36167266, 22.450230899999998, 60.0, 38.19180861999999, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 60.00000000000001, 62.08592411000001, 54.23526563999998, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 60.00000000000002, 62.14646239, 36.79055354999998, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 
41.86600551, 42.43961216, 60.00000000000002, 60.62291336, 52.04315375999998, 47.45495781, 54.28503892, 60.00000000000002, 64.87789837, 49.451401549999986, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.000000000000014, 45.513490209999986, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 60.000000000000014, 75.96743368999998, 60.00000000000003, 56.059393909999976, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 60.00000000000003, 63.50399348999998, 60.00000000000003, 62.41643813999997, 54.07870036, 60.00000000000003, 67.00149046999996, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 60.00000000000003, 57.812399819999975, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.00000000000003, 52.46141540999997, 57.17322036, 60.00000000000003, 61.354012209999965, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 60.00000000000003, 60.58494867999997, 60.000000000000036, 64.70580396999996, 52.87589081, 42.93746836, 47.79811843, 60.000000000000036, 61.026216809999966, 59.09637309, 60.000000000000036, 59.954008139999964, 30.84778871, 51.34895029, 60.00000000000004, 80.48373984999995, 51.85360396, 60.00000000000005, 66.89593433, 46.19015850999994, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 60.00000000000006, 48.62654569999993, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.000000000000064, 60.74407206, 45.686675829999935, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 60.000000000000064, 69.35402992, 59.25329277999994, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 
54.73869337, 60.000000000000064, 60.97767586999994, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 60.000000000000064, 42.767127709999926, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 60.000000000000064, 51.062258859999936, 53.9800537, 55.9969334, 60.000000000000064, 57.66949890999994, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 56.31518288, 53.03878727, 37.82482675, 60.000000000000064, 39.89513067999994, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 60.000000000000064, 64.44032850999994, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 60.00000000000006, 59.66063361999994, 58.54301084, 60.00000000000006, 60.19544584, 34.64922468999994, 54.83414731, 47.44522918, 60.000000000000064, 52.17186451999995, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.000000000000064, 58.064661829999935, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 60.000000000000064, 50.45078340999993, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 
60.000000000000064, 32.446841759999934, 60.000000000000064, 33.89832760999994, 43.46356658, 51.75366537, 51.49580521, 60.00000000000007, 30.859428309999927, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 60.00000000000007, 61.70060353, 40.473069119999934, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.00000000000007, 28.79380193999993, 51.78457677, 30.52585322, 60.00000000000007, 56.16212683999993, 44.62327739, 37.29171116, 60.00000000000007, 53.21521576999993, 42.62958318, 36.35568269, 60.00000000000007, 68.33181247, 65.09372178, 51.54914795999992, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 60.00000000000008, 68.89370839, 63.75797876999993, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 60.00000000000007, 49.447575559999926, 44.83513004, 51.8651704, 60.00000000000007, 66.17792248, 62.28887994, 44.12437450999993, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 60.000000000000064, 59.37023119999994, 52.86153489, 55.41568884, 60.000000000000064, 34.699181629999934, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 60.000000000000064, 57.40289567999992, 37.67529845, 52.16721723, 60.00000000000007, 65.48474707999992, 60.00000000000008, 49.13439077999992, 38.1328005, 
60.00000000000008, 48.17574742999992, 54.37723508, 36.58464228, 60.00000000000008, 59.648408389999915, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 60.00000000000008, 49.88664628999992, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 60.00000000000008, 44.962955619999924, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.00000000000008, 44.882363819999924, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 60.00000000000008, 48.26381252999992, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.00000000000007, 35.03181499999992, 42.23784815, 47.01968477, 49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.00000000000007, 50.97992641999993, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 60.00000000000007, 53.27996256999993, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 60.00000000000007, 59.106393569999916, 55.85484528, 60.00000000000007, 50.70732733999993, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 60.00000000000007, 62.03729301999993, 32.13403673, 55.59925736, 42.42288875, 60.00000000000007, 44.00897825999993, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 60.00000000000007, 71.28029658, 36.60612995999993, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 60.00000000000007, 50.98245597999994, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.000000000000064, 
46.63355395999994, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 60.000000000000064, 60.40545412999993, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 60.000000000000064, 53.59002593999994, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 60.000000000000064, 68.37574967, 43.303745749999926, 42.80218957, 43.84420366, 60.00000000000007, 68.70546436, 52.168963409999925, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.00000000000008, 60.89155988, 42.933646119999906, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 60.000000000000085, 60.84550869999991, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 60.00000000000009, 60.01937934999991, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 60.00000000000009, 53.743811479999906, 60.00000000000009, 60.61813672, 58.0889136399999, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 60.00000000000009, 60.865077309999904, 46.59939314, 60.00000000000009, 62.14201747, 52.08365795999991, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 60.000000000000085, 42.14604794999991, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 
52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 60.000000000000085, 66.71551066, 48.18579867999992, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 60.000000000000085, 36.28829703999992, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.000000000000085, 47.38889755999991, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 60.000000000000085, 67.27386468999993, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 60.00000000000007, 66.91017844, 68.70466807, 59.92654184999992, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 60.00000000000008, 58.28007857999992, 38.8711642, 60.00000000000008, 50.67692477999993, 45.53578331, 60.00000000000008, 56.80777371999992, 60.00000000000008, 55.07677921999992, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 60.00000000000008, 47.75253489999992, 46.33303694, 45.668026399999995, 60.00000000000008, 41.59832949999992, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.00000000000008, 50.73558766999992, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 60.00000000000008, 52.72506577999992, 60.00000000000008, 60.67301791, 49.70779978999992, 39.63739247, 52.64078966, 60.000000000000085, 27.407318489999913, 60.000000000000085, 37.525964319999915, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 60.000000000000085, 44.72491099999992, 46.697609299999996, 53.01205892, 
43.48236467, 60.000000000000085, 46.22867164999992, 56.60965492, 60.000000000000085, 45.285263159999914, 60.000000000000085, 68.87211111999991, 60.000000000000085, 43.607134069999915, 54.76913985, 60.000000000000085, 54.83358975999991, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011]
(IO) Demand for node 0:[40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 64.87789837, 71.45739112, 
37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 
56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 36.35568269, 68.33181247, 
65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 42.23784815, 47.01968477, 
49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 
56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 48.33452668, 45.53578331, 
64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(OQ) Order for node 0:[40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 64.87789837, 71.45739112, 
37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 
56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 36.35568269, 68.33181247, 
65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 42.23784815, 47.01968477, 
49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 
56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 48.33452668, 45.53578331, 
64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(DMFS) Supply for node 0:[40.82375027, 52.14738966, 60.0, 60.0, 57.74753008999999, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 60.0, 59.844232319999996, 58.13622274, 36.45920065, 60.0, 29.31578877, 36.50381173, 60.0, 60.000000000000014, 58.86427486999999, 39.65202476, 60.000000000000014, 60.0, 61.92064406, 64.49842163, 62.71338952, 60.0, 61.22740136, 68.25296610000001, 49.66626021, 51.651557200000006, 39.04665674, 59.99999999999999, 60.0, 33.50401353000001, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 59.999999999999986, 43.834309120000015, 40.11569481, 41.14122154, 59.999999999999986, 55.508533620000016, 59.999999999999986, 49.28673858000001, 59.999999999999986, 51.82665132000001, 47.45456837, 59.999999999999986, 59.630655390000015, 51.60903641, 59.999999999999986, 60.0, 61.60623207, 63.90147326999999, 60.00000000000001, 47.00235265000001, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 59.99999999999998, 31.515848350000024, 51.91296281, 28.46517264, 40.10240766, 59.99999999999998, 52.797069720000025, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 59.99999999999998, 60.00000000000001, 60.86769073, 55.913469330000005, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 59.99999999999998, 53.92680954000002, 50.86950739, 48.36167266, 22.450230899999998, 59.99999999999997, 38.19180862000002, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 59.99999999999998, 60.000000000000014, 56.32118975, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 59.99999999999999, 60.00000000000002, 38.93701593999999, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 
55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 59.99999999999999, 60.00000000000002, 52.66606711999999, 47.45495781, 54.28503892, 59.99999999999999, 60.00000000000002, 54.32929991999999, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 59.99999999999999, 45.51349021000001, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 59.99999999999999, 60.000000000000014, 75.96743368999998, 56.05939391000002, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 59.999999999999986, 60.00000000000003, 63.50399348999998, 60.00000000000003, 56.495138499999975, 59.99999999999999, 60.00000000000003, 57.35893115999996, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 60.0, 57.81239982, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.0, 52.46141541, 57.17322036, 60.0, 60.00000000000003, 48.88382657999996, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 60.0, 60.00000000000003, 60.58494867999997, 60.000000000000036, 57.58169477999997, 42.93746836, 47.79811843, 59.99999999999999, 60.000000000000036, 60.12258989999997, 59.99999999999999, 59.95400814000002, 30.84778871, 51.34895029, 59.99999999999998, 60.00000000000004, 72.33734380999998, 59.99999999999997, 60.00000000000005, 53.08609283999998, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 59.999999999999964, 48.62654570000003, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 59.99999999999996, 60.000000000000064, 46.43074788999998, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 59.99999999999996, 60.000000000000064, 68.60732269999997, 43.8232591, 50.81720724, 
48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 59.99999999999997, 60.000000000000064, 32.891607089999965, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 59.99999999999998, 42.76712771000001, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 59.99999999999998, 51.06225886000002, 53.9800537, 55.9969334, 59.99999999999998, 57.66949891000002, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 56.31518288, 53.03878727, 37.82482675, 59.99999999999998, 39.89513068000002, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 59.99999999999998, 60.000000000000064, 51.05719648999997, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 59.99999999999998, 59.66063362000002, 58.54301084, 59.99999999999998, 60.00000000000006, 34.84467052999996, 54.83414731, 47.44522918, 59.99999999999998, 52.171864520000035, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 59.99999999999998, 58.06466183000002, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 59.99999999999998, 50.450783410000014, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 
48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 59.99999999999998, 32.44684176000002, 59.99999999999998, 33.898327610000024, 43.46356658, 51.75366537, 51.49580521, 59.99999999999998, 30.85942831000002, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 59.99999999999998, 60.00000000000007, 42.17367264999996, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 59.99999999999998, 28.793801940000023, 51.78457677, 30.52585322, 59.99999999999998, 56.16212684000001, 44.62327739, 37.29171116, 59.999999999999986, 53.215215770000015, 42.62958318, 36.35568269, 59.999999999999986, 60.00000000000007, 68.33181247, 56.64286973999993, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 59.999999999999986, 60.00000000000008, 68.89370839, 48.98997499999994, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 59.99999999999999, 49.447575560000004, 44.83513004, 51.8651704, 59.99999999999999, 60.00000000000007, 66.17792248, 46.41325444999993, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 60.0, 59.370231200000006, 52.86153489, 55.41568884, 60.0, 34.69918163, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 60.0, 57.40289567999999, 37.67529845, 52.16721723, 59.99999999999999, 60.00000000000007, 
65.48474707999992, 49.134390780000004, 38.1328005, 59.99999999999999, 48.17574743000001, 54.37723508, 36.58464228, 59.99999999999999, 59.64840839, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 59.99999999999999, 49.88664629000001, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 59.99999999999999, 44.96295562000001, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 59.99999999999999, 44.88236382000001, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 59.99999999999999, 48.26381253, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 59.999999999999986, 35.03181500000001, 42.23784815, 47.01968477, 49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 59.999999999999986, 50.97992642000001, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 59.999999999999986, 53.279962570000016, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 59.999999999999986, 59.10639357, 55.85484528, 59.999999999999986, 50.70732734000001, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 59.999999999999986, 60.00000000000007, 34.17132974999994, 55.59925736, 42.42288875, 59.999999999999986, 44.00897826000001, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 59.999999999999986, 60.00000000000007, 47.886426539999945, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 59.999999999999986, 50.982455980000026, 36.21471806, 52.90777264, 43.37630436, 
42.04934597, 49.741262, 59.999999999999986, 46.633553960000015, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 59.999999999999986, 60.000000000000064, 47.056447719999944, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 59.999999999999986, 53.59002594000002, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 59.999999999999986, 60.000000000000064, 51.67949541999995, 42.80218957, 43.84420366, 59.99999999999998, 60.00000000000007, 60.87442776999995, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 59.99999999999997, 60.00000000000008, 43.82520599999995, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 59.99999999999996, 60.000000000000085, 37.98170850999997, 55.24450335, 48.26339469, 43.65182496, 59.99999999999994, 60.00000000000009, 51.04092371999997, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 59.99999999999994, 53.743811480000055, 59.99999999999994, 60.00000000000009, 58.70705035999997, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 59.999999999999936, 60.00000000000009, 47.464470449999965, 59.999999999999936, 60.00000000000009, 54.22567542999996, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 59.99999999999995, 42.146047950000046, 57.5134873, 34.50939539, 
50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 59.99999999999995, 60.000000000000085, 54.90130933999997, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 59.99999999999996, 36.288297040000046, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 59.99999999999996, 47.38889756000004, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 59.99999999999996, 60.000000000000085, 60.40319464999995, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 59.99999999999997, 60.00000000000007, 66.91017844, 68.63120991999996, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 59.99999999999996, 58.28007858000004, 38.8711642, 59.999999999999964, 50.67692478000004, 45.53578331, 59.999999999999964, 56.807773720000036, 59.999999999999964, 55.076779220000034, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 59.999999999999964, 47.752534900000036, 46.33303694, 45.668026399999995, 59.999999999999964, 41.598329500000034, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 59.999999999999964, 50.73558767000004, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 59.999999999999964, 52.72506578000004, 59.999999999999964, 60.00000000000008, 50.38081769999995, 39.63739247, 52.64078966, 59.999999999999964, 27.407318490000034, 59.99999999999996, 37.52596432000004, 
28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 59.99999999999996, 44.72491100000005, 46.697609299999996, 53.01205892, 43.48236467, 59.99999999999996, 46.228671650000045, 56.60965492, 59.99999999999996, 45.28526316000004, 59.99999999999996, 60.000000000000085, 68.87211111999991, 43.60713407000004, 54.76913985, 59.99999999999996, 54.83358976000004, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(EIL) Ending Inventory record for node 0:[19.176249730000002, 7.852610339999998, -19.19040905, -21.04642566999999, 21.44287896000001, 8.46862917, 1.1208494700000031, 17.298769710000002, 14.300057680000002, 8.831042189999998, 16.512934379999997, -1.384173969999999, 0.15576768000001096, 1.863777259999999, 23.54079935, -1.7492203400000008, 30.684211230000006, 23.496188269999998, -0.36212937999999895, -7.002773519999991, 1.4978545100000105, 20.34797524000001, -1.9206440599999866, -6.419065689999982, -16.66335404999998, -2.7133895199999927, -1.2274013599999947, -11.465920780000005, -17.54402458, 1.985553319999994, 10.333739789999996, 8.348442799999987, 20.95334325999999, -0.36735494000000557, -1.9418782800000045, 26.86334140999999, 7.002471499999984, 14.033030059999987, 9.321330359999983, 22.940767519999987, 2.725434529999987, 32.190332449999985, 1.3268761699999843, 32.49268793999998, 17.007861189999982, 9.51674417999999, 2.9750997399999832, 0.22282443999998236, -1.573100370000013, 16.165690879999985, 19.884305189999985, 18.85877845999999, -3.7039548600000174, 4.491466379999984, -4.195951240000014, 10.713261419999988, -2.771963250000013, 8.17334867999999, 12.545431629999989, -8.582602010000016, 0.36934460999998464, 8.390963589999984, -1.6062320700000114, -10.757886830000018, -3.901473270000018, -3.1973687100000276, -7.178830880000014, 16.195016059999986, 3.52572078999998, 32.849411519999975, 19.995467829999978, 16.737441709999974, 5.01100040999998, 28.59944179999998, -0.8401078600000247, 28.484151649999983, 8.087037189999975, 31.53482735999998, 19.89759233999998, -2.6783857200000227, 7.202930279999983, 0.527932679999978, 16.42561370999998, 16.490380709999975, 3.681185169999978, 17.09870318999998, -0.867690730000021, -12.626835230000005, -13.692268450000007, 15.845675169999986, 3.0532829899999783, 5.911533249999977, 10.173230809999978, 15.87024821999998, 6.365083409999976, 14.19551312999998, 25.62426690999998, 7.1241760999999855, 17.90595835999998, 0.4034744299999815, 
6.61806070999998, 14.934259149999981, 23.627936009999978, 5.34583499999998, -1.3494959900000225, 6.073190459999985, 9.130492609999976, 11.638327339999975, 37.54976909999998, -0.3944421700000227, 21.808191379999986, 10.025726639999977, 8.625481929999978, 7.8510074799999785, 28.14784680999998, -2.0859241100000183, -5.475513820000007, 5.764734360000009, 22.451936099999994, 1.2037093199999944, 6.126590919999991, 13.775793579999991, 13.220228429999992, 2.458690869999991, -2.1464623900000106, -2.52566560999999, 23.209446450000016, 25.849945529999992, 14.068671139999992, 17.884255589999995, 6.049647999999991, 11.655637369999994, 13.73019038999999, 18.398320179999992, 8.765742339999996, 22.564585769999994, 4.152287089999994, 12.734524779999994, 5.028357819999989, 3.7550982699999906, 15.276584889999995, 18.133994489999992, 17.56038783999999, -0.6229133600000054, -1.7323779299999842, 7.9568462400000115, 12.54504218999999, 5.714961079999995, -4.877898370000004, -16.335289489999987, 10.548598450000007, 4.289440039999995, 4.253844019999995, 23.95761096999999, 20.662548779999995, 14.640800769999991, 21.378535339999992, 23.184749909999994, 6.832110159999992, 17.029387709999995, 4.74522490999999, -0.36938514000000566, 14.486509790000007, 4.072091329999992, 4.502498079999995, 30.102652649999992, 21.15986328999999, 4.696725159999993, 9.465379529999993, 9.628454269999992, -17.063805470000013, -15.967433689999993, -3.4620371000000176, 3.94060609000001, 14.447234619999989, 20.38653302999999, 19.626834379999984, 26.006606349999984, -5.244719680000017, -3.503993489999992, -5.648439240000009, -2.4164381399999755, 5.921299639999994, -8.011653250000002, -7.001490469999965, 9.642559310000003, 13.84792153, 10.086146149999998, 13.172099029999998, 24.57928434, 20.914755149999998, 9.839728639999997, 6.788790740000003, 5.656272459999997, -2.8488880900000026, 2.1876001800000253, 17.653025919999997, 4.600463159999997, 0.24345799999999684, 12.55171353, 21.11789332, -0.07572130999999871, 
7.538584590000028, 2.826779639999998, -4.8624724999999955, -1.354012209999965, 12.470185630000003, 15.78547958, 7.31189770000001, 0.9510193999999998, 7.530359220000001, -21.1810379, -0.5849486799999681, -9.801421079999997, -4.70580396999997, 7.1241091899999915, 17.062531639999996, 12.20188156999999, -4.594826420000011, -1.0262168099999727, 0.9036269099999927, -13.83764865000002, 0.045991860000015095, 29.15221128999998, 8.651049709999981, -21.16365689000002, -20.483739849999978, 8.14639603999997, -6.895934330000031, -13.89005934999998, 13.809841490000025, 9.218102789999968, 0.3817318499999658, 15.108813459999965, 17.66481086999996, 7.514928879999964, 5.218426619999967, 10.607248289999966, 14.62914089999996, 8.591295039999963, 3.3612793199999658, -11.256932360000029, 11.373454300000027, 27.586223189999956, 18.554208219999957, 12.71976808999996, 9.142393689999956, -0.7440720600000432, -1.8167875399999787, 14.313324170000023, 6.6896334599999605, 5.28050350999996, 26.823597949999957, 22.843168679999955, 15.209125169999957, 0.7044171399999541, 0.5032445799999579, 13.265647639999955, 14.155095989999957, 16.893958939999955, 17.609357269999954, 4.139417809999955, 17.094950259999955, 8.732987489999957, -9.354029920000045, -11.285123769999977, 0.7467072200000331, 16.17674089999997, 9.18279275999997, 11.78773456999997, 18.10694203999997, 15.36018994999997, 17.49930401999997, 6.612203799999968, 5.261306629999972, -12.250607610000024, -0.9776758699999633, 28.086068779999977, 15.056707389999975, 17.798626819999978, 0.04568955999997826, 7.337809699999987, -4.560163310000014, 17.232872290000053, 3.034415459999977, 9.198909069999978, 16.221413349999978, 12.823742009999975, 18.36667802999998, -2.6297052200000195, 8.937741140000043, 6.0199462999999795, 4.0030665999999755, -0.8360531800000217, 2.330501090000041, 22.98068669999998, 10.399540659999978, 2.9523063499999793, 19.087837929999978, 12.374420479999976, 21.141086279999982, 16.198160469999976, 3.684817119999977, 6.961212729999978, 
22.17517324999998, -7.506512290000025, 20.10486932000004, 0.474494469999982, 1.2989023999999745, 15.494125699999984, 17.230677319999977, 7.940122129999978, 11.498150979999977, 5.555953779999982, 10.44715344999998, 12.23829370999998, 16.476339779999982, -4.678048030000021, -4.440328509999965, 13.383132019999977, 19.516792019999976, 4.715017829999979, 3.92041288999998, 14.40297538999998, -1.7062169500000195, 0.3393663800000368, 1.4569891599999778, -0.1954458400000192, -5.767167559999962, 25.35077531000004, 5.16585268999998, 12.55477081999998, -5.397902920000028, 7.828135480000029, 18.226252549999977, 6.03864498999998, 12.848536519999982, 10.393747279999978, 19.84353359999998, 14.95740232999998, 25.549101829999977, 0.5975733199999809, 17.57140521999998, 3.9688312599999804, 0.37975948999997655, 7.5057890099999796, 15.134442249999978, 4.438814989999976, 19.02119931999998, 13.088026699999986, -0.6634130000000198, 1.935338170000044, 7.139124769999981, 11.135873239999981, 4.658500609999976, 16.25773721999998, 4.718685509999979, 3.7680184999999824, 14.115260759999977, 11.691111819999982, 4.677062729999982, 2.56562281999998, 5.644931719999981, 13.798743879999982, 15.682428219999977, 20.622217659999976, 22.40582827999998, 15.536092619999977, 33.627127999999985, 17.410615949999983, 7.37111153999998, 22.93988308999998, 20.147757899999974, 11.881606829999981, 5.7543375799999765, 4.815038889999975, 30.970259859999977, 5.9107446099999805, 20.17533195999998, 13.817790989999978, 4.797839279999977, 17.87268840999998, -2.778396000000015, 9.54921659000005, 2.2268667599999787, 16.08829153999998, 17.765030269999976, 13.937773829999976, 18.290447139999976, 15.015219879999975, 7.07937906999998, 13.557671819999982, 25.933414939999977, 22.51199921999998, 27.311764609999976, 14.359320239999981, 9.94979378999998, 23.559662699999983, 24.815616329999976, 13.16159039999998, 20.357585209999982, 8.661921469999982, 12.37371900999998, 13.978120099999977, 4.7281591199999795, 7.891580299999973, 
11.803994769999981, 5.218850099999983, 3.5813478099999756, 32.53298384999998, 17.02583473999998, 5.817659559999981, -1.1503125700000183, 27.553158240000045, -3.363832200000026, 26.10167239000004, 16.53643341999998, 8.246334629999978, 8.504194789999978, -2.285934520000019, 29.14057169000005, 0.3170118199999763, 8.620008689999977, 9.120589439999982, 0.3470980599999791, 9.04191867999998, -1.7006035300000235, -3.2828804199999553, 19.526930880000045, 7.946080109999976, 26.05204346999998, 15.313677679999977, 9.655158459999981, 3.928909969999978, 18.936449629999977, 11.000666739999978, 20.33015350999998, -0.9485371700000229, 31.206198060000048, 8.215423229999978, 29.47414677999998, -18.061421610000014, 3.8378731600000613, 15.376722609999987, 22.708288839999987, -0.7678086000000164, 6.7847842300000565, 17.37041681999999, 23.644317309999984, -8.331812470000017, -13.425534249999941, -5.345055289999934, 8.450852040000065, 0.9153640699999883, 19.795977939999986, 22.008725129999988, 24.287219849999985, 13.580388659999983, 30.986754879999985, 18.584528359999986, 9.850536209999987, -8.893708390000015, -13.142776629999936, -3.757978769999937, 14.768003769999993, 0.63370531999999, 17.765502559999995, 24.170664439999996, 18.735257389999994, 11.51104140999999, 0.5846697199999937, 2.5242930799999925, 12.396706569999992, -3.656106490000006, 10.552424440000067, 15.16486995999999, 8.134829599999996, -6.177922480000014, -8.46680241999995, -5.972480609999934, 15.875625490000068, 7.579690040000003, 19.15531239, 1.9611611300000007, 3.1561035000000004, 6.990578720000002, 17.77062375, 8.14806359, 8.214542100000003, 9.204792750000003, 21.978549289999997, 8.134504980000003, 8.06396377, 17.94765504, 7.766139580000001, 23.1817263, 9.942647139999998, 25.64925532, 0.2324147200000013, 2.940352449999999, 26.402896300000002, 12.427350299999993, 13.980982079999997, 27.321758199999998, 29.06857407, 5.7795671399999975, 15.62905576, 16.529641050000002, 3.946888549999997, 3.9149550699999978, 12.89135598, 
17.750608149999998, 10.095381840000002, -7.016529090000006, 0.6297688000000576, 7.1384651099999985, 4.584311159999999, -2.3877012399999984, 25.300818370000066, 7.7525326299999975, 8.880169359999996, 21.279079269999997, 6.642865239999999, 8.43744383, 28.07082426, 12.926733650000003, 5.042452949999998, 5.74937224, 9.06574345, 31.40097618, 7.427464489999998, 17.70912081, 19.29648968, 23.99865782, 4.143939920000001, 10.657851489999999, 16.817816649999997, 23.54984776, 5.005763430000002, 10.649061770000003, -23.683556609999997, 2.5971043200000707, 22.324701549999993, 7.832782769999994, -7.521028010000002, -5.484747079999934, -2.870763030000006, 10.865609220000074, 21.86719949999999, -4.743603010000008, 11.82425257000007, 5.6227649199999945, 23.415357719999996, -10.790887660000003, 0.351591610000078, 18.68744371999999, 19.622779069999993, 0.9693591299999937, 24.64435681999999, 12.234856179999994, 19.83246015999999, 11.655557199999997, -3.439401470000007, 10.11335371000007, 10.259240559999995, 0.17994707999999093, 11.92206706999999, 11.730296029999991, 1.904544009999995, 16.91787382999999, 7.1998646699999895, 9.77068581999999, 17.247770229999993, 4.978997509999992, -1.6275721900000093, 15.037044380000069, 6.692882889999993, 5.89658785999999, 21.566426189999994, 5.380136079999993, -0.7038586300000063, 15.11763618000007, 9.001275529999994, 7.2250022599999895, 30.068319409999994, 13.405907849999991, 16.60453155999999, -3.3157481500000046, 11.736187470000075, 7.621786879999995, 20.976934619999994, 11.941002559999994, 3.705297829999992, 4.080940719999994, 24.883878239999994, 17.561395059999995, 26.75633583999999, 12.32797743999999, 43.02228996999999, -0.5000651200000128, 24.96818500000006, 17.762151849999984, 12.980315229999984, 10.735685749999988, 15.303610029999987, 9.296754499999984, 22.078651059999984, 26.416721509999988, 19.445519389999987, 2.016220039999986, -0.1886408600000138, 9.020073580000059, 14.86701200999999, 16.634106649999985, 4.896883929999987, 
10.686536989999986, 17.62345048999999, 17.191765479999987, -5.282238120000017, 6.720037430000055, 17.049014089999986, 7.8267793899999845, 2.094774639999983, 21.160603829999985, 15.484741549999988, -2.773863300000002, 0.8936064300000695, 4.1451547199999865, -0.6438260600000163, 9.292672660000058, 19.201733389999987, 17.114767259999986, 5.380780589999986, 15.634721709999987, 12.971941139999984, 14.427487379999988, 2.1128961799999857, 8.365898379999983, 0.31424013999998834, 0.04331662999998542, 4.8514889499999825, 18.069622069999987, 16.838427309999986, 12.533337429999989, 10.413579369999987, -4.631209290000015, -2.037293019999943, 27.865963269999988, 4.400742639999983, 17.577111249999987, -3.632260520000017, 15.991021740000058, 11.160216309999988, 6.230239639999986, 22.896914299999985, 3.870689089999985, -11.280296580000012, -17.271029419999948, 23.393870040000053, 16.042110209999983, 28.962856679999987, 11.420910019999987, 11.543351959999988, 7.7211208299999825, 7.187438809999989, 20.82209909999999, 2.408135279999989, 47.01742480999999, 2.4864407599999883, 18.039973289999985, 22.044582969999986, 28.121737089999986, 23.485632169999988, 14.268703499999987, 13.352259419999989, 16.592884019999985, 38.03410703999998, 14.012114719999982, 15.607213829999981, -8.432877750000024, 9.017544020000045, 23.785281939999983, 7.092227359999988, 16.623695639999987, 17.95065402999999, 10.258737999999987, -0.6337544900000154, 13.36644604000005, 3.1928286399999877, 12.436003679999985, 0.45946824999998626, 8.371062069999986, 3.0819798799999845, 8.660814749999986, 26.726350739999987, -15.647238080000008, -0.4054541299999457, 13.349006409999987, 10.198783809999988, 13.762121659999984, 22.37459581999999, 7.866697789999989, 20.685443699999986, 18.126954479999988, 15.56546489999998, 10.837763619999983, 3.920814639999989, 2.672081509999984, 2.8761885699999823, 13.171929109999986, 9.224771519999983, 6.280583369999988, 17.65417397999999, 9.729641309999984, 12.977270579999988, 17.79937634999999, 
5.827384259999988, 19.412444489999984, 13.519597179999984, 4.305749609999985, 1.8476174899999833, 8.942415539999985, 19.14605978999999, 5.808276479999989, 7.278090459999987, -5.43384776000002, 6.409974060000046, 10.303513969999983, 23.44123238999999, 4.784711059999985, 18.991671369999985, 13.855040219999985, 6.782563209999985, 13.193177959999986, 17.377241959999985, -8.375749670000019, -12.118546299999956, 16.696254250000052, 17.197810429999976, 16.15579633999998, -8.705464360000015, -10.789110869999945, 7.831036590000046, 20.98329104999997, 4.473625469999973, 9.01540539999997, 14.029741429999973, 29.57645789999997, 7.796288299999972, 2.3873109599999722, 16.31889637999997, 9.212967129999974, 15.719781549999972, 26.153873909999973, 6.967282639999972, 14.035248909999972, 3.184238979999975, 14.426549689999973, 23.10739776999997, 0.8611200199999729, 23.001932769999975, 1.639180849999974, 9.966113179999972, -0.8915598800000311, -13.098249629999948, 17.06635388000005, 7.091926289999961, 11.334845589999958, 22.84314929999995, 31.990966259999958, 10.365074159999956, -16.49614975000005, -0.8455086999999679, 22.863800189999942, 4.755496649999941, 11.736605309999945, 16.348175039999944, -1.358111260000058, -0.01937934999996571, 8.978455629999942, 10.121620119999946, 23.001190359999946, 9.72668715999994, 12.890643149999946, 4.4140915799999405, 3.268203809999946, 13.978165669999946, 8.461878549999945, 14.241990449999946, -6.500655850000058, 6.256188520000038, -0.6181367200000594, -8.05755667999997, 1.9110863600000343, 3.997243179999934, 7.5902320999999375, 5.588317979999935, 23.887914889999934, 13.57940184999994, 13.482193119999934, -5.437612660000063, -0.8650773099999682, 13.40060685999994, -2.142017470000063, -7.970837979999963, 7.916342040000039, 5.569342719999952, 17.05967465999995, 14.210151769999953, 16.24675344999995, 10.513171559999954, 9.477816249999947, 5.423494509999948, 13.521818079999953, 22.66572924999995, -4.051994950000044, 17.85395205000004, 2.486512699999949, 
25.49060460999995, 9.530861989999948, 17.33165150999995, 14.175881909999951, 3.0922212099999484, 1.305680469999949, 7.708067499999949, 27.15799766999995, 25.840355629999948, 7.925789589999951, 27.43294295999995, 7.481384819999953, 7.404527689999952, 1.782848079999951, 12.513459489999953, 10.382287549999951, -6.7155106600000565, -14.745017649999966, 11.814201320000038, 19.67684414999996, 5.118581269999957, 22.33816032999996, 15.890082699999965, 5.896897849999959, -1.8854813800000443, 23.71170296000004, 13.591022069999958, 22.89373192999996, 17.12036338999996, 9.55733346999996, 10.069126629999957, 9.242023549999956, 6.838190789999956, 22.77762286999996, 25.397184019999955, 13.347744619999958, 0.8304557699999577, -0.07518006000004362, 12.611102440000046, 14.631211519999958, 20.41143143999996, 16.739527209999956, 15.025772129999957, 14.133595019999959, 2.398356169999957, 16.274279609999958, 7.845849039999955, 9.134059989999955, 10.988029369999957, 8.422250439999956, 12.214015309999958, 14.904721489999957, 10.474641109999958, -10.051987310000044, -7.273864689999954, 6.870670039999972, 19.804053479999972, 20.48755128999997, 6.397680899999969, 13.481915849999972, 6.678717549999973, 29.05751110999997, 6.212801779999971, 14.42867998999997, 0.606535029999975, 18.15321194999997, 8.796888249999974, 2.7878053599999717, 20.174291599999975, 7.853275749999973, 34.872113059999975, 8.280112659999975, 7.742328699999973, -6.910178440000024, -15.61484650999995, -8.77708503999996, 0.07345815000003597, 1.2825314799999603, 8.520886169999955, 15.40497236999996, 12.653667289999959, 6.7140977699999596, 2.6522461599999545, -11.725374210000034, 1.7199214200000412, 21.128835799999962, -2.3423981000000396, 9.323075220000035, 14.464216689999965, -4.426065780000037, 3.1922262800000425, -3.8667710600000333, 4.923220780000044, 27.515817229999968, 12.829383049999961, 20.174090609999965, 5.987230059999966, 1.3620093599999663, 9.623738139999965, 15.837219199999971, 23.21874497999996, 
-3.405143710000033, 12.247465100000042, 13.666963059999965, 14.33197359999997, -2.037878030000037, 18.401670500000044, 14.236904189999962, 20.268174729999963, 2.638732059999967, 1.4428615599999617, 19.317440059999967, 21.592717289999968, 23.908968499999965, 17.506891379999963, 15.005966129999962, -0.580753480000034, 9.264412330000042, 15.992804099999965, 26.471756089999964, 0.006696819999966408, 16.234953559999965, 12.029009199999962, 8.419745399999961, -9.59901349000004, 7.274934220000041, -0.6730179100000342, -13.264467499999945, 10.292200210000047, 20.362607529999963, 7.3592103399999615, -1.329325690000033, 32.59268151000005, -3.7207357700000365, 22.474035680000046, 31.70129549999996, 5.101974629999962, 8.465151259999956, 19.02626422999996, 9.66725333999996, -6.187709080000047, 15.275089000000037, 13.302390699999961, 6.987941079999956, 16.517635329999955, -1.3024405000000456, 13.77132835000004, 3.3903450799999604, -12.653481720000045, 14.714736840000043, -9.140633080000043, -8.872111119999957, -6.476751050000047, 16.392865930000042, 5.230860149999955, -6.771491840000039, 5.166410240000047, 1.3056357999999548, 1.0462978599999602, 3.3006739399999603, 14.499600289999954, 27.784779919999956, 4.915992869999954, 12.75594532999996, 30.099039479999956, 9.21536255999996, 10.206760989999957, 9.39304021999996, 3.452675509999956, 31.15017092999996, 6.956269869999957, 2.955596239999956, 21.32166697999996, 20.307904409999956, 6.195153639999958, 10.622365199999955, 5.270282069999958, 17.189754429999958, 22.13958841999996, 0.9115381099999595, 15.508509889999956, 8.573967149999959]
(BO) Backorders for node 0:[0, 0, 19.19040905, 21.04642566999999, 0, 0, 0, 0, 0, 0, 0, 1.384173969999999, 0, 0, 0, 1.7492203400000008, 0, 0, 0.36212937999999895, 7.002773519999991, 0, 0, 1.9206440599999866, 6.419065689999982, 16.66335404999998, 2.7133895199999927, 1.2274013599999947, 11.465920780000005, 17.54402458, 0, 0, 0, 0, 0.36735494000000557, 1.9418782800000045, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.573100370000013, 0, 0, 0, 3.7039548600000174, 0, 4.195951240000014, 0, 2.771963250000013, 0, 0, 8.582602010000016, 0, 0, 1.6062320700000114, 10.757886830000018, 3.901473270000018, 3.1973687100000276, 7.178830880000014, 0, 0, 0, 0, 0, 0, 0, 0.8401078600000247, 0, 0, 0, 0, 2.6783857200000227, 0, 0, 0, 0, 0, 0, 0.867690730000021, 12.626835230000005, 13.692268450000007, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.3494959900000225, 0, 0, 0, 0, 0.3944421700000227, 0, 0, 0, 0, 0, 2.0859241100000183, 5.475513820000007, 0, 0, 0, 0, 0, 0, 0, 2.1464623900000106, 2.52566560999999, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.6229133600000054, 1.7323779299999842, 0, 0, 0, 4.877898370000004, 16.335289489999987, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.36938514000000566, 0, 0, 0, 0, 0, 0, 0, 0, 17.063805470000013, 15.967433689999993, 3.4620371000000176, 0, 0, 0, 0, 0, 5.244719680000017, 3.503993489999992, 5.648439240000009, 2.4164381399999755, 0, 8.011653250000002, 7.001490469999965, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.8488880900000026, 0, 0, 0, 0, 0, 0, 0.07572130999999871, 0, 0, 4.8624724999999955, 1.354012209999965, 0, 0, 0, 0, 0, 21.1810379, 0.5849486799999681, 9.801421079999997, 4.70580396999997, 0, 0, 0, 4.594826420000011, 1.0262168099999727, 0, 13.83764865000002, 0, 0, 0, 21.16365689000002, 20.483739849999978, 0, 6.895934330000031, 13.89005934999998, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11.256932360000029, 0, 0, 0, 0, 0, 0.7440720600000432, 1.8167875399999787, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9.354029920000045, 11.285123769999977, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
12.250607610000024, 0.9776758699999633, 0, 0, 0, 0, 0, 4.560163310000014, 0, 0, 0, 0, 0, 0, 2.6297052200000195, 0, 0, 0, 0.8360531800000217, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7.506512290000025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.678048030000021, 4.440328509999965, 0, 0, 0, 0, 0, 1.7062169500000195, 0, 0, 0.1954458400000192, 5.767167559999962, 0, 0, 0, 5.397902920000028, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.6634130000000198, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.778396000000015, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.1503125700000183, 0, 3.363832200000026, 0, 0, 0, 0, 2.285934520000019, 0, 0, 0, 0, 0, 0, 1.7006035300000235, 3.2828804199999553, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.9485371700000229, 0, 0, 0, 18.061421610000014, 0, 0, 0, 0.7678086000000164, 0, 0, 0, 8.331812470000017, 13.425534249999941, 5.345055289999934, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.893708390000015, 13.142776629999936, 3.757978769999937, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.656106490000006, 0, 0, 0, 6.177922480000014, 8.46680241999995, 5.972480609999934, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7.016529090000006, 0, 0, 0, 2.3877012399999984, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23.683556609999997, 0, 0, 0, 7.521028010000002, 5.484747079999934, 2.870763030000006, 0, 0, 4.743603010000008, 0, 0, 0, 10.790887660000003, 0, 0, 0, 0, 0, 0, 0, 0, 3.439401470000007, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.6275721900000093, 0, 0, 0, 0, 0, 0.7038586300000063, 0, 0, 0, 0, 0, 0, 3.3157481500000046, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5000651200000128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1886408600000138, 0, 0, 0, 0, 0, 0, 0, 5.282238120000017, 0, 0, 0, 0, 0, 0, 2.773863300000002, 0, 0, 0.6438260600000163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.631209290000015, 2.037293019999943, 0, 0, 0, 3.632260520000017, 0, 0, 0, 0, 0, 
11.280296580000012, 17.271029419999948, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.432877750000024, 0, 0, 0, 0, 0, 0, 0.6337544900000154, 0, 0, 0, 0, 0, 0, 0, 0, 15.647238080000008, 0.4054541299999457, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5.43384776000002, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.375749670000019, 12.118546299999956, 0, 0, 0, 8.705464360000015, 10.789110869999945, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.8915598800000311, 13.098249629999948, 0, 0, 0, 0, 0, 0, 16.49614975000005, 0.8455086999999679, 0, 0, 0, 0, 1.358111260000058, 0.01937934999996571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.500655850000058, 0, 0.6181367200000594, 8.05755667999997, 0, 0, 0, 0, 0, 0, 0, 5.437612660000063, 0.8650773099999682, 0, 2.142017470000063, 7.970837979999963, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.051994950000044, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.7155106600000565, 14.745017649999966, 0, 0, 0, 0, 0, 0, 1.8854813800000443, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07518006000004362, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10.051987310000044, 7.273864689999954, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.910178440000024, 15.61484650999995, 8.77708503999996, 0, 0, 0, 0, 0, 0, 0, 11.725374210000034, 0, 0, 2.3423981000000396, 0, 0, 4.426065780000037, 0, 3.8667710600000333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.405143710000033, 0, 0, 0, 2.037878030000037, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.580753480000034, 0, 0, 0, 0, 0, 0, 0, 9.59901349000004, 0, 0.6730179100000342, 13.264467499999945, 0, 0, 0, 1.329325690000033, 0, 3.7207357700000365, 0, 0, 0, 0, 0, 0, 6.187709080000047, 0, 0, 0, 0, 1.3024405000000456, 0, 0, 12.653481720000045, 0, 9.140633080000043, 8.872111119999957, 6.476751050000047, 0, 0, 6.771491840000039, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
(TC) Total Cost for node 0:[191.7624973, 78.52610339999998, 1919.0409049999998, 2104.642566999999, 214.42878960000007, 84.6862917, 11.208494700000031, 172.98769710000002, 143.00057680000003, 88.31042189999998, 165.12934379999996, 138.4173969999999, 1.5576768000001096, 18.63777259999999, 235.4079935, 174.92203400000008, 306.84211230000005, 234.9618827, 36.212937999999895, 700.2773519999992, 14.978545100000105, 203.4797524000001, 192.06440599999866, 641.9065689999982, 1666.335404999998, 271.33895199999927, 122.74013599999947, 1146.5920780000006, 1754.4024579999998, 19.85553319999994, 103.33739789999996, 83.48442799999987, 209.5334325999999, 36.73549400000056, 194.18782800000045, 268.63341409999987, 70.02471499999984, 140.33030059999987, 93.21330359999983, 229.40767519999986, 27.25434529999987, 321.9033244999998, 13.268761699999843, 324.92687939999985, 170.07861189999983, 95.16744179999989, 29.750997399999832, 2.2282443999998236, 157.3100370000013, 161.65690879999985, 198.84305189999986, 188.5877845999999, 370.3954860000017, 44.91466379999984, 419.59512400000136, 107.13261419999988, 277.1963250000013, 81.7334867999999, 125.45431629999989, 858.2602010000016, 3.6934460999998464, 83.90963589999984, 160.62320700000114, 1075.7886830000018, 390.14732700000184, 319.7368710000028, 717.8830880000014, 161.95016059999986, 35.2572078999998, 328.4941151999998, 199.95467829999978, 167.37441709999973, 50.1100040999998, 285.99441799999977, 84.01078600000247, 284.84151649999984, 80.87037189999975, 315.3482735999998, 198.97592339999983, 267.8385720000023, 72.02930279999983, 5.2793267999997795, 164.2561370999998, 164.90380709999977, 36.81185169999978, 170.9870318999998, 86.7690730000021, 1262.6835230000004, 1369.2268450000008, 158.45675169999987, 30.532829899999783, 59.11533249999977, 101.73230809999978, 158.7024821999998, 63.650834099999756, 141.9551312999998, 256.24266909999983, 71.24176099999985, 179.05958359999977, 4.034744299999815, 66.1806070999998, 149.3425914999998, 
236.2793600999998, 53.4583499999998, 134.94959900000225, 60.73190459999985, 91.30492609999976, 116.38327339999975, 375.4976909999998, 39.44421700000227, 218.08191379999985, 100.25726639999976, 86.25481929999978, 78.51007479999979, 281.4784680999998, 208.59241100000185, 547.5513820000007, 57.64734360000009, 224.51936099999995, 12.037093199999944, 61.26590919999991, 137.75793579999993, 132.20228429999992, 24.58690869999991, 214.64623900000106, 252.566560999999, 232.09446450000016, 258.4994552999999, 140.68671139999992, 178.84255589999995, 60.496479999999906, 116.55637369999994, 137.3019038999999, 183.9832017999999, 87.65742339999996, 225.64585769999994, 41.522870899999944, 127.34524779999994, 50.283578199999894, 37.550982699999906, 152.76584889999995, 181.33994489999992, 175.6038783999999, 62.29133600000054, 173.23779299999842, 79.56846240000012, 125.4504218999999, 57.14961079999995, 487.7898370000004, 1633.5289489999986, 105.48598450000007, 42.89440039999995, 42.538440199999954, 239.5761096999999, 206.62548779999995, 146.40800769999993, 213.7853533999999, 231.84749909999994, 68.32110159999992, 170.29387709999995, 47.452249099999904, 36.938514000000566, 144.86509790000008, 40.72091329999992, 45.02498079999995, 301.02652649999993, 211.5986328999999, 46.967251599999926, 94.65379529999993, 96.28454269999992, 1706.3805470000013, 1596.7433689999993, 346.20371000000176, 39.4060609000001, 144.4723461999999, 203.86533029999987, 196.26834379999985, 260.0660634999998, 524.4719680000017, 350.39934899999923, 564.8439240000009, 241.64381399999755, 59.21299639999994, 801.1653250000002, 700.1490469999965, 96.42559310000003, 138.47921530000002, 100.86146149999998, 131.72099029999998, 245.7928434, 209.14755149999996, 98.39728639999997, 67.88790740000003, 56.56272459999997, 284.88880900000026, 21.876001800000253, 176.5302592, 46.00463159999997, 2.4345799999999684, 125.5171353, 211.17893320000002, 7.572130999999871, 75.38584590000028, 28.26779639999998, 486.24724999999955, 
135.4012209999965, 124.70185630000003, 157.8547958, 73.1189770000001, 9.510193999999998, 75.30359220000001, 2118.10379, 58.49486799999681, 980.1421079999998, 470.580396999997, 71.24109189999992, 170.62531639999997, 122.0188156999999, 459.4826420000011, 102.62168099999727, 9.036269099999927, 1383.764865000002, 0.45991860000015095, 291.5221128999998, 86.51049709999981, 2116.365689000002, 2048.373984999998, 81.4639603999997, 689.5934330000032, 1389.005934999998, 138.09841490000025, 92.18102789999968, 3.817318499999658, 151.08813459999965, 176.64810869999963, 75.14928879999964, 52.18426619999967, 106.07248289999966, 146.2914089999996, 85.91295039999963, 33.61279319999966, 1125.6932360000028, 113.73454300000027, 275.86223189999953, 185.54208219999958, 127.1976808999996, 91.42393689999956, 74.40720600000432, 181.67875399999787, 143.1332417000002, 66.8963345999996, 52.8050350999996, 268.2359794999996, 228.43168679999957, 152.0912516999996, 7.0441713999995414, 5.032445799999579, 132.65647639999955, 141.55095989999955, 168.93958939999953, 176.09357269999953, 41.39417809999955, 170.94950259999956, 87.32987489999957, 935.4029920000045, 1128.5123769999977, 7.467072200000331, 161.7674089999997, 91.8279275999997, 117.8773456999997, 181.06942039999973, 153.60189949999972, 174.99304019999968, 66.12203799999968, 52.613066299999716, 1225.0607610000025, 97.76758699999633, 280.86068779999977, 150.56707389999974, 177.98626819999978, 0.4568955999997826, 73.37809699999987, 456.0163310000014, 172.3287229000005, 30.34415459999977, 91.98909069999978, 162.21413349999978, 128.23742009999975, 183.6667802999998, 262.97052200000195, 89.37741140000043, 60.199462999999795, 40.030665999999755, 83.60531800000217, 23.30501090000041, 229.80686699999978, 103.99540659999978, 29.523063499999793, 190.87837929999978, 123.74420479999976, 211.41086279999982, 161.98160469999976, 36.84817119999977, 69.61212729999978, 221.75173249999978, 750.6512290000024, 201.0486932000004, 4.74494469999982, 
12.989023999999745, 154.94125699999984, 172.30677319999978, 79.40122129999978, 114.98150979999977, 55.559537799999816, 104.4715344999998, 122.3829370999998, 164.76339779999984, 467.80480300000204, 444.0328509999965, 133.83132019999977, 195.16792019999974, 47.150178299999794, 39.2041288999998, 144.0297538999998, 170.62169500000195, 3.3936638000003683, 14.569891599999778, 19.54458400000192, 576.7167559999962, 253.5077531000004, 51.6585268999998, 125.5477081999998, 539.7902920000029, 78.28135480000029, 182.26252549999975, 60.3864498999998, 128.48536519999982, 103.93747279999978, 198.43533599999978, 149.5740232999998, 255.49101829999978, 5.975733199999809, 175.7140521999998, 39.688312599999804, 3.7975948999997655, 75.0578900999998, 151.34442249999978, 44.38814989999976, 190.2119931999998, 130.88026699999986, 66.34130000000198, 19.35338170000044, 71.39124769999981, 111.35873239999981, 46.58500609999976, 162.57737219999981, 47.18685509999979, 37.680184999999824, 141.15260759999978, 116.91111819999982, 46.770627299999816, 25.656228199999802, 56.44931719999981, 137.98743879999984, 156.82428219999977, 206.22217659999976, 224.0582827999998, 155.36092619999977, 336.2712799999998, 174.10615949999982, 73.7111153999998, 229.3988308999998, 201.47757899999974, 118.81606829999981, 57.543375799999765, 48.150388899999754, 309.70259859999976, 59.107446099999805, 201.7533195999998, 138.17790989999978, 47.97839279999977, 178.7268840999998, 277.8396000000015, 95.4921659000005, 22.268667599999787, 160.88291539999977, 177.65030269999977, 139.37773829999975, 182.90447139999975, 150.15219879999975, 70.7937906999998, 135.57671819999982, 259.3341493999998, 225.11999219999979, 273.1176460999998, 143.59320239999983, 99.4979378999998, 235.59662699999984, 248.15616329999978, 131.6159039999998, 203.57585209999982, 86.61921469999982, 123.73719009999981, 139.78120099999978, 47.281591199999795, 78.91580299999973, 118.03994769999981, 52.18850099999983, 35.813478099999756, 325.3298384999998, 
170.2583473999998, 58.176595599999814, 115.03125700000183, 275.53158240000045, 336.38322000000255, 261.0167239000004, 165.3643341999998, 82.46334629999978, 85.04194789999978, 228.59345200000192, 291.4057169000005, 3.170118199999763, 86.20008689999977, 91.20589439999982, 3.470980599999791, 90.4191867999998, 170.06035300000235, 328.2880419999955, 195.26930880000043, 79.46080109999976, 260.5204346999998, 153.13677679999978, 96.55158459999981, 39.28909969999978, 189.36449629999976, 110.00666739999978, 203.3015350999998, 94.85371700000229, 312.0619806000005, 82.15423229999978, 294.7414677999998, 1806.1421610000014, 38.37873160000061, 153.76722609999987, 227.0828883999999, 76.78086000000164, 67.84784230000056, 173.70416819999988, 236.44317309999985, 833.1812470000017, 1342.5534249999941, 534.5055289999934, 84.50852040000065, 9.153640699999883, 197.95977939999986, 220.08725129999988, 242.87219849999985, 135.80388659999983, 309.86754879999984, 185.84528359999985, 98.50536209999987, 889.3708390000015, 1314.2776629999937, 375.7978769999937, 147.68003769999993, 6.3370531999999, 177.65502559999993, 241.70664439999996, 187.35257389999992, 115.1104140999999, 5.846697199999937, 25.242930799999925, 123.96706569999992, 365.61064900000065, 105.52424440000067, 151.6486995999999, 81.34829599999996, 617.7922480000013, 846.6802419999951, 597.2480609999934, 158.75625490000067, 75.79690040000003, 191.5531239, 19.611611300000007, 31.561035000000004, 69.90578720000002, 177.7062375, 81.4806359, 82.14542100000003, 92.04792750000003, 219.78549289999995, 81.34504980000003, 80.63963770000001, 179.4765504, 77.66139580000001, 231.81726300000003, 99.42647139999998, 256.49255320000003, 2.324147200000013, 29.40352449999999, 264.02896300000003, 124.27350299999993, 139.80982079999995, 273.217582, 290.6857407, 57.795671399999975, 156.2905576, 165.29641050000004, 39.46888549999997, 39.14955069999998, 128.9135598, 177.5060815, 100.95381840000002, 701.6529090000006, 6.297688000000576, 71.38465109999999, 
45.843111599999986, 238.77012399999984, 253.00818370000064, 77.52532629999997, 88.80169359999996, 212.79079269999997, 66.42865239999999, 84.3744383, 280.7082426, 129.26733650000003, 50.42452949999998, 57.493722399999996, 90.6574345, 314.0097618, 74.27464489999998, 177.09120810000002, 192.96489680000002, 239.9865782, 41.43939920000001, 106.57851489999999, 168.17816649999997, 235.4984776, 50.05763430000002, 106.49061770000003, 2368.3556609999996, 25.971043200000707, 223.24701549999992, 78.32782769999994, 752.1028010000002, 548.4747079999934, 287.0763030000006, 108.65609220000074, 218.67199499999992, 474.36030100000085, 118.2425257000007, 56.227649199999945, 234.15357719999997, 1079.0887660000003, 3.51591610000078, 186.8744371999999, 196.22779069999993, 9.693591299999937, 246.4435681999999, 122.34856179999994, 198.32460159999988, 116.55557199999997, 343.9401470000007, 101.1335371000007, 102.59240559999995, 1.7994707999999093, 119.2206706999999, 117.30296029999991, 19.04544009999995, 169.1787382999999, 71.9986466999999, 97.7068581999999, 172.47770229999992, 49.78997509999992, 162.75721900000093, 150.3704438000007, 66.92882889999993, 58.9658785999999, 215.66426189999993, 53.80136079999993, 70.38586300000063, 151.17636180000068, 90.01275529999994, 72.2500225999999, 300.6831940999999, 134.0590784999999, 166.04531559999992, 331.57481500000046, 117.36187470000075, 76.21786879999995, 209.76934619999994, 119.41002559999994, 37.05297829999992, 40.80940719999994, 248.83878239999996, 175.61395059999995, 267.5633583999999, 123.27977439999991, 430.22289969999986, 50.00651200000128, 249.6818500000006, 177.62151849999984, 129.80315229999985, 107.35685749999988, 153.03610029999987, 92.96754499999984, 220.78651059999984, 264.1672150999999, 194.45519389999987, 20.16220039999986, 18.86408600000138, 90.20073580000059, 148.6701200999999, 166.34106649999984, 48.96883929999987, 106.86536989999986, 176.23450489999988, 171.91765479999987, 528.2238120000017, 67.20037430000055, 
170.49014089999986, 78.26779389999984, 20.94774639999983, 211.60603829999985, 154.8474154999999, 277.3863300000002, 8.936064300000695, 41.451547199999865, 64.38260600000163, 92.92672660000058, 192.01733389999987, 171.14767259999985, 53.80780589999986, 156.34721709999985, 129.71941139999984, 144.27487379999988, 21.128961799999857, 83.65898379999983, 3.1424013999998834, 0.43316629999985423, 48.514889499999825, 180.69622069999986, 168.38427309999986, 125.33337429999989, 104.13579369999987, 463.1209290000015, 203.7293019999943, 278.65963269999986, 44.00742639999983, 175.77111249999987, 363.2260520000017, 159.9102174000006, 111.60216309999988, 62.302396399999864, 228.96914299999986, 38.70689089999985, 1128.0296580000013, 1727.1029419999948, 233.93870040000053, 160.4211020999998, 289.6285667999999, 114.20910019999987, 115.43351959999988, 77.21120829999983, 71.87438809999989, 208.22099099999988, 24.08135279999989, 470.1742480999999, 24.864407599999883, 180.39973289999983, 220.44582969999988, 281.2173708999999, 234.85632169999988, 142.68703499999987, 133.5225941999999, 165.92884019999985, 380.3410703999998, 140.12114719999983, 156.0721382999998, 843.2877750000024, 90.17544020000045, 237.85281939999982, 70.92227359999988, 166.23695639999988, 179.50654029999987, 102.58737999999987, 63.37544900000154, 133.6644604000005, 31.928286399999877, 124.36003679999985, 4.594682499999863, 83.71062069999986, 30.819798799999845, 86.60814749999986, 267.26350739999987, 1564.7238080000009, 40.54541299999457, 133.49006409999987, 101.98783809999988, 137.62121659999985, 223.7459581999999, 78.66697789999989, 206.85443699999985, 181.2695447999999, 155.6546489999998, 108.37763619999983, 39.20814639999989, 26.72081509999984, 28.761885699999823, 131.71929109999985, 92.24771519999983, 62.80583369999988, 176.5417397999999, 97.29641309999984, 129.77270579999987, 177.9937634999999, 58.27384259999988, 194.12444489999984, 135.19597179999982, 43.05749609999985, 18.476174899999833, 89.42415539999985, 
191.46059789999987, 58.08276479999989, 72.78090459999987, 543.384776000002, 64.09974060000046, 103.03513969999983, 234.41232389999988, 47.84711059999985, 189.91671369999983, 138.55040219999984, 67.82563209999985, 131.93177959999986, 173.77241959999986, 837.5749670000018, 1211.8546299999955, 166.96254250000052, 171.97810429999976, 161.5579633999998, 870.5464360000014, 1078.9110869999945, 78.31036590000046, 209.83291049999968, 44.73625469999973, 90.1540539999997, 140.29741429999973, 295.7645789999997, 77.96288299999972, 23.873109599999722, 163.1889637999997, 92.12967129999974, 157.1978154999997, 261.5387390999997, 69.67282639999972, 140.35248909999973, 31.84238979999975, 144.26549689999973, 231.07397769999972, 8.611200199999729, 230.01932769999974, 16.39180849999974, 99.66113179999972, 89.1559880000031, 1309.8249629999948, 170.6635388000005, 70.91926289999961, 113.34845589999958, 228.4314929999995, 319.9096625999996, 103.65074159999956, 1649.614975000005, 84.55086999999679, 228.63800189999944, 47.55496649999941, 117.36605309999945, 163.48175039999944, 135.8111260000058, 1.9379349999965712, 89.78455629999942, 101.21620119999946, 230.01190359999947, 97.2668715999994, 128.90643149999946, 44.140915799999405, 32.68203809999946, 139.78165669999947, 84.61878549999945, 142.41990449999946, 650.0655850000057, 62.561885200000376, 61.81367200000594, 805.755667999997, 19.110863600000343, 39.97243179999934, 75.90232099999938, 55.883179799999354, 238.87914889999934, 135.7940184999994, 134.82193119999934, 543.7612660000063, 86.50773099999682, 134.0060685999994, 214.2017470000063, 797.0837979999964, 79.16342040000039, 55.69342719999952, 170.5967465999995, 142.10151769999953, 162.4675344999995, 105.13171559999954, 94.77816249999947, 54.23494509999948, 135.21818079999952, 226.65729249999947, 405.1994950000044, 178.53952050000038, 24.86512699999949, 254.9060460999995, 95.30861989999948, 173.3165150999995, 141.75881909999953, 30.922212099999484, 13.05680469999949, 77.08067499999949, 
271.5799766999995, 258.4035562999995, 79.25789589999951, 274.32942959999946, 74.81384819999953, 74.04527689999952, 17.82848079999951, 125.13459489999953, 103.82287549999951, 671.5510660000057, 1474.5017649999966, 118.14201320000038, 196.7684414999996, 51.18581269999957, 223.3816032999996, 158.90082699999965, 58.968978499999594, 188.54813800000443, 237.1170296000004, 135.91022069999957, 228.9373192999996, 171.2036338999996, 95.5733346999996, 100.69126629999957, 92.42023549999956, 68.38190789999956, 227.7762286999996, 253.97184019999955, 133.47744619999958, 8.304557699999577, 7.5180060000043625, 126.11102440000046, 146.3121151999996, 204.11431439999959, 167.39527209999954, 150.25772129999956, 141.3359501999996, 23.98356169999957, 162.74279609999957, 78.45849039999955, 91.34059989999955, 109.88029369999957, 84.22250439999956, 122.14015309999958, 149.04721489999957, 104.74641109999958, 1005.1987310000044, 727.3864689999954, 68.70670039999972, 198.0405347999997, 204.8755128999997, 63.97680899999969, 134.81915849999973, 66.78717549999973, 290.57511109999973, 62.12801779999971, 144.28679989999972, 6.06535029999975, 181.5321194999997, 87.96888249999974, 27.878053599999717, 201.74291599999975, 78.53275749999973, 348.72113059999975, 82.80112659999975, 77.42328699999973, 691.0178440000025, 1561.484650999995, 877.7085039999961, 0.7345815000003597, 12.825314799999603, 85.20886169999955, 154.04972369999962, 126.53667289999959, 67.1409776999996, 26.522461599999545, 1172.5374210000034, 17.199214200000412, 211.28835799999962, 234.23981000000396, 93.23075220000035, 144.64216689999967, 442.60657800000376, 31.922262800000425, 386.67710600000333, 49.23220780000044, 275.1581722999997, 128.29383049999961, 201.74090609999965, 59.87230059999966, 13.620093599999663, 96.23738139999965, 158.3721919999997, 232.18744979999963, 340.5143710000033, 122.47465100000042, 136.66963059999966, 143.3197359999997, 203.78780300000372, 184.01670500000046, 142.36904189999962, 202.68174729999964, 
26.38732059999967, 14.428615599999617, 193.17440059999967, 215.92717289999968, 239.08968499999963, 175.06891379999962, 150.05966129999962, 58.0753480000034, 92.64412330000042, 159.92804099999967, 264.7175608999996, 0.06696819999966408, 162.34953559999965, 120.29009199999962, 84.19745399999961, 959.901349000004, 72.74934220000041, 67.30179100000342, 1326.4467499999946, 102.92200210000047, 203.62607529999963, 73.59210339999962, 132.9325690000033, 325.92681510000045, 372.07357700000364, 224.74035680000046, 317.01295499999964, 51.01974629999962, 84.65151259999956, 190.26264229999958, 96.6725333999996, 618.7709080000047, 152.75089000000037, 133.0239069999996, 69.87941079999956, 165.17635329999956, 130.24405000000456, 137.7132835000004, 33.903450799999604, 1265.3481720000045, 147.14736840000043, 914.0633080000043, 887.2111119999956, 647.6751050000047, 163.92865930000042, 52.308601499999554, 677.1491840000039, 51.66410240000047, 13.056357999999548, 10.462978599999602, 33.0067393999996, 144.99600289999955, 277.84779919999954, 49.15992869999954, 127.5594532999996, 300.99039479999954, 92.1536255999996, 102.06760989999957, 93.9304021999996, 34.52675509999956, 311.50170929999956, 69.56269869999957, 29.555962399999558, 213.2166697999996, 203.07904409999958, 61.95153639999958, 106.22365199999955, 52.70282069999958, 171.8975442999996, 221.3958841999996, 9.115381099999595, 155.08509889999956, 85.73967149999959]
Node 1
(IL) Starting Inventory record for node 1:[60, 19.176249730000002, 7.852610339999998, -19.19040905, -1.8560166199999912, 23.298895580000007, 8.468629170000007, 1.1208494700000102, 17.29876971000001, 14.300057680000009, 8.831042190000005, 16.512934380000004, -1.384173969999992, 1.53994165000001, 1.8637772600000062, 23.540799350000007, -1.7492203399999937, 32.43343157000001, 23.496188270000008, -0.36212937999999184, -6.640644140000006, 8.138498650000002, 20.347975239999997, -1.9206440600000008, -4.498421629999996, -12.16493242, 9.4515429, -1.2274013600000018, -10.238519420000003, -7.305505159999996, 9.291058479999997, 10.333739790000003, 8.348442799999994, 20.953343259999997, -0.36735493999999846, -1.574523339999999, 28.437864750000003, 7.002471500000002, 14.033030060000002, 9.321330359999997, 22.94076752, 2.725434530000001, 32.19033245, 1.3268761699999985, 32.492687939999996, 17.007861189999996, 9.516744180000003, 2.9750997399999974, 0.22282443999999657, -1.5731003699999988, 17.73879125, 19.88430519, 18.858778460000003, -3.703954860000003, 8.195421240000002, -4.195951239999999, 14.909212660000001, -2.771963249999999, 10.945311930000003, 12.545431630000003, -8.582602010000002, 8.951946620000001, 8.390963589999998, -1.6062320699999972, -9.151654759999992, 5.25018149000001, -3.197368709999992, -3.9814621699999933, 20.176478230000008, 3.5257207900000083, 32.849411520000004, 19.995467830000006, 16.737441710000002, 5.011000410000008, 28.599441800000008, -0.8401078599999963, 29.324259510000008, 8.087037190000004, 31.53482736000001, 19.89759234000001, -2.6783857199999943, 9.881316000000005, 0.5279326800000064, 16.425613710000007, 16.490380710000004, 3.6811851700000062, 17.09870319000001, -0.8676907299999925, -11.759144499999984, -1.9331239499999953, 17.77879912000001, 3.0532829900000067, 5.911533250000005, 10.173230810000007, 15.870248220000008, 6.365083410000004, 14.195513130000009, 25.62426691000001, 7.124176100000014, 17.905958360000007, 0.40347443000000993, 
6.618060710000009, 14.93425915000001, 23.627936010000006, 5.345835000000008, -1.349495989999994, 7.422686450000008, 9.130492610000005, 11.638327340000004, 37.549769100000006, -0.39444216999999426, 22.202633550000005, 10.025726640000006, 8.625481930000007, 7.851007480000007, 28.147846810000008, -2.08592410999999, -3.389589709999985, 9.154324070000019, 22.451936100000022, 1.2037093200000228, 6.1265909200000195, 13.77579358000002, 13.22022843000002, 2.4586908700000194, -2.146462389999982, -0.3792032199999795, 23.588649670000024, 25.84994553000002, 14.06867114000002, 17.884255590000024, 6.049648000000019, 11.655637370000022, 13.730190390000018, 18.39832018000002, 8.765742340000024, 22.564585770000022, 4.152287090000023, 12.734524780000022, 5.028357820000018, 3.755098270000019, 15.276584890000024, 18.13399449000002, 17.56038784000002, -0.622913359999977, -1.1094645699999788, 9.066310810000019, 12.545042190000018, 5.714961080000023, -4.877898369999976, -11.457391119999983, 22.00598957000001, 4.2894400400000166, 4.253844020000017, 23.95761097000001, 20.662548780000016, 14.640800770000013, 21.378535340000013, 23.184749910000015, 6.832110160000013, 17.029387710000016, 4.745224910000012, -0.36938513999998435, 14.855894930000012, 4.072091330000013, 4.5024980800000165, 30.102652650000014, 21.15986329000001, 4.696725160000014, 9.465379530000014, 9.628454270000013, -17.063805469999984, 1.0963717800000268, -3.462037099999975, 7.402643190000028, 14.447234620000032, 20.38653303000003, 19.626834380000027, 26.006606350000027, -5.2447196799999745, 1.740726190000025, -5.648439239999973, 3.2320011000000335, 5.921299640000029, -8.011653249999966, 1.0101627800000301, 9.642559310000031, 13.84792153000003, 10.086146150000026, 13.172099030000027, 24.57928434000003, 20.914755150000026, 9.839728640000025, 6.788790740000032, 5.656272460000025, -2.8488880899999742, 5.036488270000028, 17.653025920000026, 4.600463160000025, 0.24345800000002527, 12.551713530000029, 21.11789332000003, 
-0.07572130999997029, 7.614305900000026, 2.8267796400000265, -4.862472499999967, 3.5084602900000306, 12.470185630000032, 15.785479580000029, 7.3118977000000385, 0.9510194000000283, 7.5303592200000296, -21.18103789999997, 20.59608922000004, -9.801421079999962, 5.095617110000035, 7.124109190000034, 17.06253164000004, 12.201881570000033, -4.594826419999968, 3.5686096100000384, 0.9036269100000354, -13.837648649999963, 13.883640510000042, 29.152211290000043, 8.651049710000045, -21.16365688999995, 0.6799170400000492, 8.146396040000049, -6.895934329999953, -6.994125019999942, 20.80396651000006, 9.21810279000006, 0.38173185000005816, 15.108813460000057, 17.664810870000053, 7.514928880000056, 5.218426620000059, 10.607248290000058, 14.629140900000053, 8.591295040000055, 3.361279320000058, -11.25693235999993, 22.630386660000063, 27.586223190000062, 18.554208220000064, 12.719768090000066, 9.142393690000063, -0.7440720599999366, -1.0727154799999354, 15.386039650000065, 6.689633460000067, 5.280503510000067, 26.823597950000064, 22.843168680000062, 15.209125170000064, 0.7044171400000607, 0.5032445800000644, 13.265647640000061, 14.155095990000063, 16.89395894000006, 17.60935727000006, 4.139417810000062, 17.09495026000006, 8.732987490000063, -9.354029919999938, -1.9310938499999395, 2.677801070000065, 16.176740900000063, 9.182792760000062, 11.787734570000062, 18.106942040000064, 15.360189950000063, 17.49930402000006, 6.61220380000006, 5.261306630000064, -12.250607609999939, 11.272931740000061, 28.086068780000065, 15.056707390000064, 17.798626820000067, 0.04568956000006352, 7.337809700000072, -4.560163309999929, 21.793035600000067, 3.034415460000062, 9.198909070000063, 16.221413350000063, 12.82374201000006, 18.366678030000067, -2.6297052199999342, 11.567446360000062, 6.019946300000065, 4.003066600000061, -0.8360531799999364, 3.166554270000063, 22.980686700000064, 10.399540660000063, 2.9523063500000646, 19.087837930000063, 12.374420480000062, 21.141086280000067, 16.19816047000006, 
3.684817120000062, 6.961212730000064, 22.175173250000064, -7.5065122899999395, 27.611381610000066, 0.4744944700000673, 1.2989024000000597, 15.494125700000069, 17.230677320000062, 7.940122130000063, 11.498150980000062, 5.555953780000067, 10.447153450000066, 12.238293710000065, 16.476339780000067, -4.678048029999935, 0.2377195200000557, 13.383132020000055, 19.516792020000054, 4.715017830000058, 3.9204128900000583, 14.402975390000059, -1.7062169499999413, 2.0455833300000563, 1.456989160000056, -0.19544583999994103, -5.571721719999935, 30.92249703000006, 5.165852690000062, 12.554770820000066, -5.397902919999943, 13.226038400000057, 18.226252550000062, 6.038644990000066, 12.848536520000067, 10.393747280000063, 19.843533600000065, 14.957402330000065, 25.549101830000062, 0.5975733200000661, 17.571405220000067, 3.9688312600000657, 0.3797594900000618, 7.505789010000065, 15.134442250000063, 4.438814990000061, 19.021199320000065, 13.088026700000071, -0.6634129999999345, 2.598751170000064, 7.139124770000066, 11.135873240000066, 4.658500610000061, 16.257737220000067, 4.718685510000064, 3.7680185000000677, 14.115260760000062, 11.691111820000067, 4.677062730000067, 2.5656228200000655, 5.644931720000066, 13.798743880000067, 15.682428220000062, 20.62221766000006, 22.405828280000065, 15.536092620000062, 33.62712800000007, 17.410615950000068, 7.371111540000065, 22.939883090000066, 20.14775790000006, 11.881606830000067, 5.754337580000062, 4.815038890000061, 30.970259860000063, 5.910744610000066, 20.175331960000065, 13.817790990000063, 4.797839280000062, 17.872688410000066, -2.7783959999999297, 12.327612590000065, 2.226866760000064, 16.088291540000064, 17.76503027000006, 13.937773830000062, 18.29044714000006, 15.01521988000006, 7.079379070000066, 13.557671820000067, 25.933414940000063, 22.511999220000064, 27.31176461000006, 14.359320240000066, 9.949793790000065, 23.559662700000068, 24.81561633000006, 13.161590400000065, 20.357585210000067, 8.661921470000067, 12.373719010000066, 
13.978120100000062, 4.728159120000065, 7.891580300000058, 11.803994770000067, 5.218850100000068, 3.581347810000061, 32.532983850000065, 17.025834740000064, 5.817659560000067, -1.150312569999933, 28.703470810000063, -3.3638321999999405, 29.465504590000066, 16.53643342000007, 8.24633463000007, 8.50419479000007, -2.285934519999927, 31.42650621000007, 0.31701182000006867, 8.62000869000007, 9.120589440000074, 0.3470980600000715, 9.041918680000073, -1.7006035299999311, -1.5822768899999318, 21.10920777000007, 7.946080110000068, 26.052043470000072, 15.313677680000069, 9.655158460000074, 3.9289099700000705, 18.93644963000007, 11.00066674000007, 20.330153510000073, -0.9485371699999305, 32.15473523000007, 8.21542323000007, 29.474146780000073, -18.06142160999993, 21.899294770000072, 15.376722610000076, 22.708288840000076, -0.7678085999999311, 7.552592830000073, 17.370416820000074, 23.64431731000007, -8.331812469999932, -5.093721779999925, -0.2513335099999239, 8.702185550000081, 0.9153640700000807, 19.79597794000008, 22.00872513000008, 24.287219850000078, 13.580388660000075, 30.986754880000078, 18.58452836000008, 9.85053621000008, -8.893708389999922, -4.249068239999929, 0.49108947000006964, 14.768003770000071, 0.6337053200000682, 17.765502560000073, 24.170664440000074, 18.735257390000072, 11.511041410000068, 0.5846697200000719, 2.5242930800000707, 12.39670657000007, -3.656106489999928, 14.208530930000073, 15.164869960000068, 8.134829600000074, -6.1779224799999355, -2.28887993999993, -3.683600669999933, 19.559226160000065, 7.579690040000067, 19.155312390000063, 1.9611611300000646, 3.1561035000000643, 6.990578720000066, 17.770623750000063, 8.148063590000064, 8.214542100000067, 9.204792750000067, 21.97854929000006, 8.134504980000067, 8.063963770000065, 17.947655040000065, 7.766139580000065, 23.181726300000065, 9.942647140000062, 25.649255320000066, 0.23241472000006524, 2.940352450000063, 26.402896300000066, 12.427350300000057, 13.98098208000006, 27.321758200000062, 
29.068574070000064, 5.779567140000061, 15.629055760000064, 16.529641050000066, 3.946888550000061, 3.9149550700000617, 12.891355980000064, 17.750608150000062, 10.095381840000066, -7.016529089999942, 7.6462978900000635, 7.1384651100000625, 4.584311160000063, -2.3877012399999344, 27.688519610000064, 7.752532630000061, 8.88016936000006, 21.27907927000006, 6.642865240000063, 8.437443830000063, 28.070824260000062, 12.926733650000067, 5.042452950000062, 5.7493722400000635, 9.065743450000063, 31.400976180000065, 7.427464490000062, 17.709120810000066, 19.296489680000064, 23.998657820000062, 4.143939920000065, 10.657851490000063, 16.81781665000006, 23.549847760000063, 5.005763430000066, 10.649061770000067, -23.683556609999926, 26.280660930000074, 22.32470155000007, 7.8327827700000725, -7.521028009999924, 2.036280930000075, -2.870763029999921, 13.73637225000008, 21.867199500000076, -4.743603009999923, 16.567855580000078, 5.62276492000008, 23.41535772000008, -10.790887659999918, 11.14247927000008, 18.687443720000076, 19.622779070000078, 0.969359130000079, 24.644356820000077, 12.23485618000008, 19.832460160000075, 11.655557200000082, -3.439401469999922, 13.552755180000077, 10.25924056000008, 0.1799470800000762, 11.922067070000075, 11.730296030000076, 1.9045440100000803, 16.917873830000076, 7.199864670000075, 9.770685820000075, 17.24777023000008, 4.978997510000077, -1.627572189999924, 16.664616570000078, 6.692882890000078, 5.896587860000075, 21.56642619000008, 5.380136080000078, -0.703858629999921, 15.821494810000075, 9.001275530000079, 7.225002260000075, 30.06831941000008, 13.405907850000077, 16.604531560000076, -3.3157481499999193, 15.05193562000008, 7.62178688000008, 20.97693462000008, 11.94100256000008, 3.7052978300000774, 4.080940720000079, 24.88387824000008, 17.56139506000008, 26.756335840000077, 12.327977440000076, 43.022289970000074, -0.5000651199999275, 25.468250120000075, 17.762151850000077, 12.980315230000077, 10.735685750000073, 15.303610030000073, 9.29675450000007, 
22.07865106000007, 26.416721510000073, 19.445519390000072, 2.0162200400000714, -0.18864085999992852, 9.208714440000072, 14.867012010000074, 16.63410665000007, 4.896883930000072, 10.686536990000072, 17.623450490000074, 17.191765480000072, -5.282238119999931, 12.002275550000071, 17.04901409000007, 7.82677939000007, 2.094774640000068, 21.16060383000007, 15.484741550000074, -2.773863299999917, 3.6674697300000716, 4.145154720000072, -0.643826059999931, 9.936498720000074, 19.201733390000072, 17.11476726000007, 5.380780590000072, 15.634721710000072, 12.97194114000007, 14.427487380000073, 2.112896180000071, 8.365898380000068, 0.3142401400000736, 0.04331663000007069, 4.851488950000068, 18.069622070000072, 16.83842731000007, 12.533337430000074, 10.413579370000072, -4.63120928999993, 2.593916270000072, 27.865963270000073, 4.400742640000068, 17.577111250000073, -3.6322605199999316, 19.623282260000074, 11.160216310000074, 6.230239640000072, 22.89691430000007, 3.87068909000007, -11.280296579999927, -5.990732839999936, 29.384602880000074, 16.04211021000007, 28.962856680000076, 11.420910020000075, 11.543351960000074, 7.721120830000068, 7.187438810000074, 20.822099100000074, 2.4081352800000744, 47.01742481000007, 2.4864407600000735, 18.03997329000007, 22.04458297000007, 28.12173709000007, 23.485632170000073, 14.268703500000072, 13.352259420000074, 16.59288402000007, 38.034107040000066, 14.012114720000067, 15.607213830000067, -8.432877749999939, 17.450421770000062, 23.78528194000006, 7.0922273600000665, 16.623695640000065, 17.950654030000067, 10.258738000000065, -0.6337544899999372, 14.000200530000065, 3.192828640000066, 12.436003680000063, 0.4594682500000644, 8.371062070000065, 3.0819798800000626, 8.660814750000064, 26.726350740000065, -15.64723807999993, 15.241783950000062, 13.349006410000065, 10.198783810000066, 13.762121660000062, 22.374595820000067, 7.866697790000067, 20.685443700000064, 18.126954480000066, 15.565464900000059, 10.83776362000006, 3.920814640000067, 
2.672081510000062, 2.8761885700000605, 13.171929110000065, 9.224771520000061, 6.280583370000066, 17.654173980000067, 9.729641310000062, 12.977270580000067, 17.799376350000067, 5.827384260000066, 19.412444490000063, 13.519597180000062, 4.305749610000063, 1.8476174900000615, 8.942415540000063, 19.146059790000066, 5.8082764800000675, 7.278090460000065, -5.433847759999942, 11.843821820000066, 10.303513970000061, 23.441232390000067, 4.784711060000063, 18.991671370000063, 13.855040220000063, 6.782563210000063, 13.193177960000064, 17.377241960000063, -8.37574966999994, -3.74279662999993, 20.439050880000075, 17.197810430000068, 16.155796340000073, -8.705464359999922, -2.083646509999923, 9.914683100000076, 20.983291050000076, 4.47362547000008, 9.015405400000077, 14.02974143000008, 29.576457900000076, 7.796288300000079, 2.387310960000079, 16.318896380000076, 9.21296713000008, 15.719781550000079, 26.15387391000008, 6.967282640000079, 14.035248910000078, 3.1842389800000817, 14.42654969000008, 23.107397770000077, 0.8611200200000795, 23.00193277000008, 1.6391808500000806, 9.966113180000079, -0.8915598799999245, -12.20668974999991, 29.27304363000009, 7.091926290000092, 11.334845590000086, 22.84314930000008, 31.990966260000086, 10.365074160000084, -16.49614974999991, 15.65064105000009, 22.86380019000009, 4.75549665000009, 11.736605310000094, 16.348175040000093, -1.3581112599999088, 1.3387319100000923, 8.978455630000092, 10.121620120000095, 23.001190360000095, 9.72668716000009, 12.890643150000095, 4.41409158000009, 3.268203810000095, 13.978165670000095, 8.461878550000094, 14.241990450000095, -6.500655849999909, 12.756844370000096, -0.6181367199999102, -7.439419959999903, 9.350506320000093, 3.9972431800000905, 7.590232100000094, 5.588317980000092, 23.88791489000009, 13.579401850000096, 13.48219312000009, -5.437612659999907, 4.572535350000095, 13.400606860000096, -2.1420174699999066, -5.828820509999915, 13.745162550000089, 5.569342720000087, 17.059674660000084, 14.210151770000088, 
16.246753450000085, 10.513171560000089, 9.477816250000082, 5.423494510000083, 13.521818080000088, 22.665729250000084, -4.051994949999909, 21.905947000000083, 2.486512700000084, 25.490604610000084, 9.530861990000083, 17.331651510000086, 14.175881910000086, 3.0922212100000834, 1.305680470000084, 7.708067500000084, 27.157997670000086, 25.840355630000083, 7.925789590000086, 27.432942960000084, 7.481384820000088, 7.404527690000087, 1.782848080000086, 12.513459490000088, 10.382287550000086, -6.7155106599999215, -8.029506989999916, 19.843708310000082, 19.676844150000086, 5.118581270000085, 22.338160330000086, 15.890082700000093, 5.896897850000087, -1.8854813799999164, 25.597184340000084, 13.591022070000086, 22.893731930000087, 17.120363390000087, 9.557333470000088, 10.069126630000085, 9.242023550000084, 6.838190790000084, 22.777622870000087, 25.397184020000083, 13.347744620000086, 0.8304557700000856, -0.07518005999991573, 12.68628250000009, 14.631211520000086, 20.411431440000086, 16.739527210000084, 15.025772130000085, 14.133595020000087, 2.398356170000085, 16.274279610000086, 7.845849040000083, 9.134059990000083, 10.988029370000085, 8.422250440000084, 12.214015310000086, 14.904721490000085, 10.474641110000086, -10.051987309999916, 2.778122620000076, 6.870670040000078, 19.80405348000008, 20.487551290000077, 6.397680900000076, 13.481915850000078, 6.67871755000008, 29.05751111000008, 6.212801780000078, 14.428679990000077, 0.6065350300000816, 18.153211950000077, 8.79688825000008, 2.7878053600000783, 20.174291600000082, 7.85327575000008, 34.872113060000075, 8.280112660000075, 7.742328700000073, -6.910178439999925, -8.704668069999926, -0.07241696999992087, 0.14587512000007763, 1.282531480000081, 8.520886170000075, 15.404972370000081, 12.65366729000008, 6.71409777000008, 2.6522461600000753, -11.72537420999992, 13.445295630000075, 21.128835800000076, -2.342398099999926, 11.665473320000075, 14.464216690000079, -4.426065779999924, 7.61829206000008, -3.8667710599999197, 
8.789991840000077, 27.51581723000008, 12.829383050000075, 20.17409061000008, 5.98723006000008, 1.36200936000008, 9.623738140000079, 15.837219200000085, 23.218744980000075, -3.405143709999919, 15.652608810000075, 13.666963060000079, 14.331973600000083, -2.0378780299999235, 20.43954853000008, 14.236904190000075, 20.268174730000077, 2.6387320600000805, 1.4428615600000754, 19.31744006000008, 21.59271729000008, 23.90896850000008, 17.506891380000077, 15.005966130000076, -0.5807534799999203, 9.845165810000076, 15.992804100000079, 26.471756090000078, 0.006696820000080095, 16.23495356000008, 12.029009200000075, 8.419745400000075, -9.599013489999926, 16.87394771000008, -0.6730179099999205, -12.591449589999911, 22.88364980000008, 20.362607530000083, 7.359210340000082, -1.3293256899999122, 33.92200720000009, -3.7207357699999086, 26.19477145000008, 31.701295500000082, 5.1019746300000826, 8.465151260000084, 19.026264230000088, 9.667253340000087, -6.1877090799999195, 21.462798080000084, 13.302390700000089, 6.987941080000084, 16.517635330000083, -1.3024404999999177, 15.073768850000086, 3.3903450800000883, -12.653481719999917, 27.368218560000088, -9.140633079999915, 0.2685219600000863, -6.476751049999919, 22.86961698000009, 5.230860150000083, -6.771491839999911, 11.937902080000086, 1.3056358000000827, 1.046297860000088, 3.300673940000088, 14.499600290000082, 27.784779920000084, 4.915992870000082, 12.755945330000088, 30.099039480000084, 9.215362560000088, 10.206760990000085, 9.393040220000088, 3.452675510000084, 31.150170930000087, 6.956269870000085, 2.9555962400000837, 21.32166698000009, 20.307904410000084, 6.195153640000086, 10.622365200000083, 5.270282070000086, 17.189754430000086, 22.139588420000088, 0.9115381100000874, 15.508509890000084]
(IS) Inbound supply record for node 1:[0, 40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 
64.87789837, 71.45739112, 37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 
38.85891372, 43.80183953, 56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 
36.35568269, 68.33181247, 65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 
42.23784815, 47.01968477, 49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 
53.03271736, 45.96475109, 56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 
48.33452668, 45.53578331, 64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011]
(IO) Demand for node 1:[40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 64.87789837, 71.45739112, 
37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 
56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 36.35568269, 68.33181247, 
65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 42.23784815, 47.01968477, 
49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 
56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 48.33452668, 45.53578331, 
64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(OQ) Order for node 1:[40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 64.87789837, 71.45739112, 
37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 
56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 36.35568269, 68.33181247, 
65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 42.23784815, 47.01968477, 
49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 
56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 48.33452668, 45.53578331, 
64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(DMFS) Supply for node 1:[40.82375027, 52.14738966, 60.0, 79.19040905, 38.55712103999999, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 60.00000000000001, 59.84423231999999, 58.13622274, 36.45920065, 60.00000000000001, 29.315788769999994, 36.50381173, 60.000000000000014, 60.36212938, 58.502145490000004, 39.65202476, 60.0, 61.92064406, 64.49842163, 62.71338952, 60.0, 61.22740136, 70.23851942, 58.01444668, 49.66626021, 51.651557200000006, 39.04665674, 60.0, 60.36735494, 33.136658589999996, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 60.0, 43.83430912, 40.11569481, 41.14122154, 60.0, 55.50853362, 60.0, 49.28673858, 60.0, 51.826651319999996, 47.45456837, 60.0, 59.63065539, 51.60903641, 60.0, 61.60623207, 63.90147326999999, 60.00000000000001, 63.19736871, 43.80498393999999, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.00000000000001, 31.515848349999995, 51.91296281, 28.46517264, 40.10240766, 60.00000000000001, 52.797069719999996, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.00000000000001, 60.86769073, 71.75914449999999, 44.15432482999999, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 60.00000000000001, 53.926809539999994, 50.86950739, 48.36167266, 22.450230899999998, 60.0, 38.19180861999999, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 60.00000000000001, 62.08592411000001, 54.23526563999998, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 60.00000000000002, 62.14646239, 36.79055354999998, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 
60.00000000000002, 60.62291336, 52.04315375999998, 47.45495781, 54.28503892, 60.00000000000002, 64.87789837, 49.451401549999986, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.000000000000014, 45.513490209999986, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 60.000000000000014, 75.96743368999998, 60.00000000000003, 56.059393909999976, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 60.00000000000003, 63.50399348999998, 60.00000000000003, 62.41643813999997, 54.07870036, 60.00000000000003, 67.00149046999996, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 60.00000000000003, 57.812399819999975, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.00000000000003, 52.46141540999997, 57.17322036, 60.00000000000003, 61.354012209999965, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 60.00000000000003, 60.58494867999997, 60.000000000000036, 64.70580396999996, 52.87589081, 42.93746836, 47.79811843, 60.000000000000036, 61.026216809999966, 59.09637309, 60.000000000000036, 59.954008139999964, 30.84778871, 51.34895029, 60.00000000000004, 80.48373984999995, 51.85360396, 60.00000000000005, 66.89593433, 46.19015850999994, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 60.00000000000006, 48.62654569999993, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.000000000000064, 60.74407206, 45.686675829999935, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 60.000000000000064, 69.35402992, 59.25329277999994, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 
60.000000000000064, 60.97767586999994, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 60.000000000000064, 42.767127709999926, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 60.000000000000064, 51.062258859999936, 53.9800537, 55.9969334, 60.000000000000064, 57.66949890999994, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 56.31518288, 53.03878727, 37.82482675, 60.000000000000064, 39.89513067999994, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 60.000000000000064, 64.44032850999994, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 60.00000000000006, 59.66063361999994, 58.54301084, 60.00000000000006, 60.19544584, 34.64922468999994, 54.83414731, 47.44522918, 60.000000000000064, 52.17186451999995, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.000000000000064, 58.064661829999935, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 60.000000000000064, 50.45078340999993, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 
60.000000000000064, 32.446841759999934, 60.000000000000064, 33.89832760999994, 43.46356658, 51.75366537, 51.49580521, 60.00000000000007, 30.859428309999927, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 60.00000000000007, 61.70060353, 40.473069119999934, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.00000000000007, 28.79380193999993, 51.78457677, 30.52585322, 60.00000000000007, 56.16212683999993, 44.62327739, 37.29171116, 60.00000000000007, 53.21521576999993, 42.62958318, 36.35568269, 60.00000000000007, 68.33181247, 65.09372178, 51.54914795999992, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 60.00000000000008, 68.89370839, 63.75797876999993, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 60.00000000000007, 49.447575559999926, 44.83513004, 51.8651704, 60.00000000000007, 66.17792248, 62.28887994, 44.12437450999993, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 60.000000000000064, 59.37023119999994, 52.86153489, 55.41568884, 60.000000000000064, 34.699181629999934, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 60.000000000000064, 57.40289567999992, 37.67529845, 52.16721723, 60.00000000000007, 65.48474707999992, 60.00000000000008, 49.13439077999992, 38.1328005, 
60.00000000000008, 48.17574742999992, 54.37723508, 36.58464228, 60.00000000000008, 59.648408389999915, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 60.00000000000008, 49.88664628999992, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 60.00000000000008, 44.962955619999924, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.00000000000008, 44.882363819999924, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 60.00000000000008, 48.26381252999992, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.00000000000007, 35.03181499999992, 42.23784815, 47.01968477, 49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.00000000000007, 50.97992641999993, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 60.00000000000007, 53.27996256999993, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 60.00000000000007, 59.106393569999916, 55.85484528, 60.00000000000007, 50.70732733999993, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 60.00000000000007, 62.03729301999993, 32.13403673, 55.59925736, 42.42288875, 60.00000000000007, 44.00897825999993, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 60.00000000000007, 71.28029658, 36.60612995999993, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 60.00000000000007, 50.98245597999994, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.000000000000064, 
46.63355395999994, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 60.000000000000064, 60.40545412999993, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 60.000000000000064, 53.59002593999994, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 60.000000000000064, 68.37574967, 43.303745749999926, 42.80218957, 43.84420366, 60.00000000000007, 68.70546436, 52.168963409999925, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.00000000000008, 60.89155988, 42.933646119999906, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 60.000000000000085, 60.84550869999991, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 60.00000000000009, 60.01937934999991, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 60.00000000000009, 53.743811479999906, 60.00000000000009, 60.61813672, 58.0889136399999, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 60.00000000000009, 60.865077309999904, 46.59939314, 60.00000000000009, 62.14201747, 52.08365795999991, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 60.000000000000085, 42.14604794999991, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 
52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 60.000000000000085, 66.71551066, 48.18579867999992, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 60.000000000000085, 36.28829703999992, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.000000000000085, 47.38889755999991, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 60.000000000000085, 67.27386468999993, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 60.00000000000007, 66.91017844, 68.70466807, 59.92654184999992, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 60.00000000000008, 58.28007857999992, 38.8711642, 60.00000000000008, 50.67692477999993, 45.53578331, 60.00000000000008, 56.80777371999992, 60.00000000000008, 55.07677921999992, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 60.00000000000008, 47.75253489999992, 46.33303694, 45.668026399999995, 60.00000000000008, 41.59832949999992, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.00000000000008, 50.73558766999992, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 60.00000000000008, 52.72506577999992, 60.00000000000008, 60.67301791, 49.70779978999992, 39.63739247, 52.64078966, 60.000000000000085, 27.407318489999913, 60.000000000000085, 37.525964319999915, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 60.000000000000085, 44.72491099999992, 46.697609299999996, 53.01205892, 
43.48236467, 60.000000000000085, 46.22867164999992, 56.60965492, 60.000000000000085, 45.285263159999914, 60.000000000000085, 68.87211111999991, 60.000000000000085, 43.607134069999915, 54.76913985, 60.000000000000085, 54.83358975999991, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(EIL) Ending Inventory record for node 1:[19.176249730000002, 7.852610339999998, -19.19040905, -1.8560166199999912, 23.298895580000007, 8.468629170000007, 1.1208494700000102, 17.29876971000001, 14.300057680000009, 8.831042190000005, 16.512934380000004, -1.384173969999992, 1.53994165000001, 1.8637772600000062, 23.540799350000007, -1.7492203399999937, 32.43343157000001, 23.496188270000008, -0.36212937999999184, -6.640644140000006, 8.138498650000002, 20.347975239999997, -1.9206440600000008, -4.498421629999996, -12.16493242, 9.4515429, -1.2274013600000018, -10.238519420000003, -7.305505159999996, 9.291058479999997, 10.333739790000003, 8.348442799999994, 20.953343259999997, -0.36735493999999846, -1.574523339999999, 28.437864750000003, 7.002471500000002, 14.033030060000002, 9.321330359999997, 22.94076752, 2.725434530000001, 32.19033245, 1.3268761699999985, 32.492687939999996, 17.007861189999996, 9.516744180000003, 2.9750997399999974, 0.22282443999999657, -1.5731003699999988, 17.73879125, 19.88430519, 18.858778460000003, -3.703954860000003, 8.195421240000002, -4.195951239999999, 14.909212660000001, -2.771963249999999, 10.945311930000003, 12.545431630000003, -8.582602010000002, 8.951946620000001, 8.390963589999998, -1.6062320699999972, -9.151654759999992, 5.25018149000001, -3.197368709999992, -3.9814621699999933, 20.176478230000008, 3.5257207900000083, 32.849411520000004, 19.995467830000006, 16.737441710000002, 5.011000410000008, 28.599441800000008, -0.8401078599999963, 29.324259510000008, 8.087037190000004, 31.53482736000001, 19.89759234000001, -2.6783857199999943, 9.881316000000005, 0.5279326800000064, 16.425613710000007, 16.490380710000004, 3.6811851700000062, 17.09870319000001, -0.8676907299999925, -11.759144499999984, -1.9331239499999953, 17.77879912000001, 3.0532829900000067, 5.911533250000005, 10.173230810000007, 15.870248220000008, 6.365083410000004, 14.195513130000009, 25.62426691000001, 7.124176100000014, 17.905958360000007, 0.40347443000000993, 
6.618060710000009, 14.93425915000001, 23.627936010000006, 5.345835000000008, -1.349495989999994, 7.422686450000008, 9.130492610000005, 11.638327340000004, 37.549769100000006, -0.39444216999999426, 22.202633550000005, 10.025726640000006, 8.625481930000007, 7.851007480000007, 28.147846810000008, -2.08592410999999, -3.389589709999985, 9.154324070000019, 22.451936100000022, 1.2037093200000228, 6.1265909200000195, 13.77579358000002, 13.22022843000002, 2.4586908700000194, -2.146462389999982, -0.3792032199999795, 23.588649670000024, 25.84994553000002, 14.06867114000002, 17.884255590000024, 6.049648000000019, 11.655637370000022, 13.730190390000018, 18.39832018000002, 8.765742340000024, 22.564585770000022, 4.152287090000023, 12.734524780000022, 5.028357820000018, 3.755098270000019, 15.276584890000024, 18.13399449000002, 17.56038784000002, -0.622913359999977, -1.1094645699999788, 9.066310810000019, 12.545042190000018, 5.714961080000023, -4.877898369999976, -11.457391119999983, 22.00598957000001, 4.2894400400000166, 4.253844020000017, 23.95761097000001, 20.662548780000016, 14.640800770000013, 21.378535340000013, 23.184749910000015, 6.832110160000013, 17.029387710000016, 4.745224910000012, -0.36938513999998435, 14.855894930000012, 4.072091330000013, 4.5024980800000165, 30.102652650000014, 21.15986329000001, 4.696725160000014, 9.465379530000014, 9.628454270000013, -17.063805469999984, 1.0963717800000268, -3.462037099999975, 7.402643190000028, 14.447234620000032, 20.38653303000003, 19.626834380000027, 26.006606350000027, -5.2447196799999745, 1.740726190000025, -5.648439239999973, 3.2320011000000335, 5.921299640000029, -8.011653249999966, 1.0101627800000301, 9.642559310000031, 13.84792153000003, 10.086146150000026, 13.172099030000027, 24.57928434000003, 20.914755150000026, 9.839728640000025, 6.788790740000032, 5.656272460000025, -2.8488880899999742, 5.036488270000028, 17.653025920000026, 4.600463160000025, 0.24345800000002527, 12.551713530000029, 21.11789332000003, 
-0.07572130999997029, 7.614305900000026, 2.8267796400000265, -4.862472499999967, 3.5084602900000306, 12.470185630000032, 15.785479580000029, 7.3118977000000385, 0.9510194000000283, 7.5303592200000296, -21.18103789999997, 20.59608922000004, -9.801421079999962, 5.095617110000035, 7.124109190000034, 17.06253164000004, 12.201881570000033, -4.594826419999968, 3.5686096100000384, 0.9036269100000354, -13.837648649999963, 13.883640510000042, 29.152211290000043, 8.651049710000045, -21.16365688999995, 0.6799170400000492, 8.146396040000049, -6.895934329999953, -6.994125019999942, 20.80396651000006, 9.21810279000006, 0.38173185000005816, 15.108813460000057, 17.664810870000053, 7.514928880000056, 5.218426620000059, 10.607248290000058, 14.629140900000053, 8.591295040000055, 3.361279320000058, -11.25693235999993, 22.630386660000063, 27.586223190000062, 18.554208220000064, 12.719768090000066, 9.142393690000063, -0.7440720599999366, -1.0727154799999354, 15.386039650000065, 6.689633460000067, 5.280503510000067, 26.823597950000064, 22.843168680000062, 15.209125170000064, 0.7044171400000607, 0.5032445800000644, 13.265647640000061, 14.155095990000063, 16.89395894000006, 17.60935727000006, 4.139417810000062, 17.09495026000006, 8.732987490000063, -9.354029919999938, -1.9310938499999395, 2.677801070000065, 16.176740900000063, 9.182792760000062, 11.787734570000062, 18.106942040000064, 15.360189950000063, 17.49930402000006, 6.61220380000006, 5.261306630000064, -12.250607609999939, 11.272931740000061, 28.086068780000065, 15.056707390000064, 17.798626820000067, 0.04568956000006352, 7.337809700000072, -4.560163309999929, 21.793035600000067, 3.034415460000062, 9.198909070000063, 16.221413350000063, 12.82374201000006, 18.366678030000067, -2.6297052199999342, 11.567446360000062, 6.019946300000065, 4.003066600000061, -0.8360531799999364, 3.166554270000063, 22.980686700000064, 10.399540660000063, 2.9523063500000646, 19.087837930000063, 12.374420480000062, 21.141086280000067, 16.19816047000006, 
3.684817120000062, 6.961212730000064, 22.175173250000064, -7.5065122899999395, 27.611381610000066, 0.4744944700000673, 1.2989024000000597, 15.494125700000069, 17.230677320000062, 7.940122130000063, 11.498150980000062, 5.555953780000067, 10.447153450000066, 12.238293710000065, 16.476339780000067, -4.678048029999935, 0.2377195200000557, 13.383132020000055, 19.516792020000054, 4.715017830000058, 3.9204128900000583, 14.402975390000059, -1.7062169499999413, 2.0455833300000563, 1.456989160000056, -0.19544583999994103, -5.571721719999935, 30.92249703000006, 5.165852690000062, 12.554770820000066, -5.397902919999943, 13.226038400000057, 18.226252550000062, 6.038644990000066, 12.848536520000067, 10.393747280000063, 19.843533600000065, 14.957402330000065, 25.549101830000062, 0.5975733200000661, 17.571405220000067, 3.9688312600000657, 0.3797594900000618, 7.505789010000065, 15.134442250000063, 4.438814990000061, 19.021199320000065, 13.088026700000071, -0.6634129999999345, 2.598751170000064, 7.139124770000066, 11.135873240000066, 4.658500610000061, 16.257737220000067, 4.718685510000064, 3.7680185000000677, 14.115260760000062, 11.691111820000067, 4.677062730000067, 2.5656228200000655, 5.644931720000066, 13.798743880000067, 15.682428220000062, 20.62221766000006, 22.405828280000065, 15.536092620000062, 33.62712800000007, 17.410615950000068, 7.371111540000065, 22.939883090000066, 20.14775790000006, 11.881606830000067, 5.754337580000062, 4.815038890000061, 30.970259860000063, 5.910744610000066, 20.175331960000065, 13.817790990000063, 4.797839280000062, 17.872688410000066, -2.7783959999999297, 12.327612590000065, 2.226866760000064, 16.088291540000064, 17.76503027000006, 13.937773830000062, 18.29044714000006, 15.01521988000006, 7.079379070000066, 13.557671820000067, 25.933414940000063, 22.511999220000064, 27.31176461000006, 14.359320240000066, 9.949793790000065, 23.559662700000068, 24.81561633000006, 13.161590400000065, 20.357585210000067, 8.661921470000067, 12.373719010000066, 
13.978120100000062, 4.728159120000065, 7.891580300000058, 11.803994770000067, 5.218850100000068, 3.581347810000061, 32.532983850000065, 17.025834740000064, 5.817659560000067, -1.150312569999933, 28.703470810000063, -3.3638321999999405, 29.465504590000066, 16.53643342000007, 8.24633463000007, 8.50419479000007, -2.285934519999927, 31.42650621000007, 0.31701182000006867, 8.62000869000007, 9.120589440000074, 0.3470980600000715, 9.041918680000073, -1.7006035299999311, -1.5822768899999318, 21.10920777000007, 7.946080110000068, 26.052043470000072, 15.313677680000069, 9.655158460000074, 3.9289099700000705, 18.93644963000007, 11.00066674000007, 20.330153510000073, -0.9485371699999305, 32.15473523000007, 8.21542323000007, 29.474146780000073, -18.06142160999993, 21.899294770000072, 15.376722610000076, 22.708288840000076, -0.7678085999999311, 7.552592830000073, 17.370416820000074, 23.64431731000007, -8.331812469999932, -5.093721779999925, -0.2513335099999239, 8.702185550000081, 0.9153640700000807, 19.79597794000008, 22.00872513000008, 24.287219850000078, 13.580388660000075, 30.986754880000078, 18.58452836000008, 9.85053621000008, -8.893708389999922, -4.249068239999929, 0.49108947000006964, 14.768003770000071, 0.6337053200000682, 17.765502560000073, 24.170664440000074, 18.735257390000072, 11.511041410000068, 0.5846697200000719, 2.5242930800000707, 12.39670657000007, -3.656106489999928, 14.208530930000073, 15.164869960000068, 8.134829600000074, -6.1779224799999355, -2.28887993999993, -3.683600669999933, 19.559226160000065, 7.579690040000067, 19.155312390000063, 1.9611611300000646, 3.1561035000000643, 6.990578720000066, 17.770623750000063, 8.148063590000064, 8.214542100000067, 9.204792750000067, 21.97854929000006, 8.134504980000067, 8.063963770000065, 17.947655040000065, 7.766139580000065, 23.181726300000065, 9.942647140000062, 25.649255320000066, 0.23241472000006524, 2.940352450000063, 26.402896300000066, 12.427350300000057, 13.98098208000006, 27.321758200000062, 
29.068574070000064, 5.779567140000061, 15.629055760000064, 16.529641050000066, 3.946888550000061, 3.9149550700000617, 12.891355980000064, 17.750608150000062, 10.095381840000066, -7.016529089999942, 7.6462978900000635, 7.1384651100000625, 4.584311160000063, -2.3877012399999344, 27.688519610000064, 7.752532630000061, 8.88016936000006, 21.27907927000006, 6.642865240000063, 8.437443830000063, 28.070824260000062, 12.926733650000067, 5.042452950000062, 5.7493722400000635, 9.065743450000063, 31.400976180000065, 7.427464490000062, 17.709120810000066, 19.296489680000064, 23.998657820000062, 4.143939920000065, 10.657851490000063, 16.81781665000006, 23.549847760000063, 5.005763430000066, 10.649061770000067, -23.683556609999926, 26.280660930000074, 22.32470155000007, 7.8327827700000725, -7.521028009999924, 2.036280930000075, -2.870763029999921, 13.73637225000008, 21.867199500000076, -4.743603009999923, 16.567855580000078, 5.62276492000008, 23.41535772000008, -10.790887659999918, 11.14247927000008, 18.687443720000076, 19.622779070000078, 0.969359130000079, 24.644356820000077, 12.23485618000008, 19.832460160000075, 11.655557200000082, -3.439401469999922, 13.552755180000077, 10.25924056000008, 0.1799470800000762, 11.922067070000075, 11.730296030000076, 1.9045440100000803, 16.917873830000076, 7.199864670000075, 9.770685820000075, 17.24777023000008, 4.978997510000077, -1.627572189999924, 16.664616570000078, 6.692882890000078, 5.896587860000075, 21.56642619000008, 5.380136080000078, -0.703858629999921, 15.821494810000075, 9.001275530000079, 7.225002260000075, 30.06831941000008, 13.405907850000077, 16.604531560000076, -3.3157481499999193, 15.05193562000008, 7.62178688000008, 20.97693462000008, 11.94100256000008, 3.7052978300000774, 4.080940720000079, 24.88387824000008, 17.56139506000008, 26.756335840000077, 12.327977440000076, 43.022289970000074, -0.5000651199999275, 25.468250120000075, 17.762151850000077, 12.980315230000077, 10.735685750000073, 15.303610030000073, 9.29675450000007, 
22.07865106000007, 26.416721510000073, 19.445519390000072, 2.0162200400000714, -0.18864085999992852, 9.208714440000072, 14.867012010000074, 16.63410665000007, 4.896883930000072, 10.686536990000072, 17.623450490000074, 17.191765480000072, -5.282238119999931, 12.002275550000071, 17.04901409000007, 7.82677939000007, 2.094774640000068, 21.16060383000007, 15.484741550000074, -2.773863299999917, 3.6674697300000716, 4.145154720000072, -0.643826059999931, 9.936498720000074, 19.201733390000072, 17.11476726000007, 5.380780590000072, 15.634721710000072, 12.97194114000007, 14.427487380000073, 2.112896180000071, 8.365898380000068, 0.3142401400000736, 0.04331663000007069, 4.851488950000068, 18.069622070000072, 16.83842731000007, 12.533337430000074, 10.413579370000072, -4.63120928999993, 2.593916270000072, 27.865963270000073, 4.400742640000068, 17.577111250000073, -3.6322605199999316, 19.623282260000074, 11.160216310000074, 6.230239640000072, 22.89691430000007, 3.87068909000007, -11.280296579999927, -5.990732839999936, 29.384602880000074, 16.04211021000007, 28.962856680000076, 11.420910020000075, 11.543351960000074, 7.721120830000068, 7.187438810000074, 20.822099100000074, 2.4081352800000744, 47.01742481000007, 2.4864407600000735, 18.03997329000007, 22.04458297000007, 28.12173709000007, 23.485632170000073, 14.268703500000072, 13.352259420000074, 16.59288402000007, 38.034107040000066, 14.012114720000067, 15.607213830000067, -8.432877749999939, 17.450421770000062, 23.78528194000006, 7.0922273600000665, 16.623695640000065, 17.950654030000067, 10.258738000000065, -0.6337544899999372, 14.000200530000065, 3.192828640000066, 12.436003680000063, 0.4594682500000644, 8.371062070000065, 3.0819798800000626, 8.660814750000064, 26.726350740000065, -15.64723807999993, 15.241783950000062, 13.349006410000065, 10.198783810000066, 13.762121660000062, 22.374595820000067, 7.866697790000067, 20.685443700000064, 18.126954480000066, 15.565464900000059, 10.83776362000006, 3.920814640000067, 
2.672081510000062, 2.8761885700000605, 13.171929110000065, 9.224771520000061, 6.280583370000066, 17.654173980000067, 9.729641310000062, 12.977270580000067, 17.799376350000067, 5.827384260000066, 19.412444490000063, 13.519597180000062, 4.305749610000063, 1.8476174900000615, 8.942415540000063, 19.146059790000066, 5.8082764800000675, 7.278090460000065, -5.433847759999942, 11.843821820000066, 10.303513970000061, 23.441232390000067, 4.784711060000063, 18.991671370000063, 13.855040220000063, 6.782563210000063, 13.193177960000064, 17.377241960000063, -8.37574966999994, -3.74279662999993, 20.439050880000075, 17.197810430000068, 16.155796340000073, -8.705464359999922, -2.083646509999923, 9.914683100000076, 20.983291050000076, 4.47362547000008, 9.015405400000077, 14.02974143000008, 29.576457900000076, 7.796288300000079, 2.387310960000079, 16.318896380000076, 9.21296713000008, 15.719781550000079, 26.15387391000008, 6.967282640000079, 14.035248910000078, 3.1842389800000817, 14.42654969000008, 23.107397770000077, 0.8611200200000795, 23.00193277000008, 1.6391808500000806, 9.966113180000079, -0.8915598799999245, -12.20668974999991, 29.27304363000009, 7.091926290000092, 11.334845590000086, 22.84314930000008, 31.990966260000086, 10.365074160000084, -16.49614974999991, 15.65064105000009, 22.86380019000009, 4.75549665000009, 11.736605310000094, 16.348175040000093, -1.3581112599999088, 1.3387319100000923, 8.978455630000092, 10.121620120000095, 23.001190360000095, 9.72668716000009, 12.890643150000095, 4.41409158000009, 3.268203810000095, 13.978165670000095, 8.461878550000094, 14.241990450000095, -6.500655849999909, 12.756844370000096, -0.6181367199999102, -7.439419959999903, 9.350506320000093, 3.9972431800000905, 7.590232100000094, 5.588317980000092, 23.88791489000009, 13.579401850000096, 13.48219312000009, -5.437612659999907, 4.572535350000095, 13.400606860000096, -2.1420174699999066, -5.828820509999915, 13.745162550000089, 5.569342720000087, 17.059674660000084, 14.210151770000088, 
16.246753450000085, 10.513171560000089, 9.477816250000082, 5.423494510000083, 13.521818080000088, 22.665729250000084, -4.051994949999909, 21.905947000000083, 2.486512700000084, 25.490604610000084, 9.530861990000083, 17.331651510000086, 14.175881910000086, 3.0922212100000834, 1.305680470000084, 7.708067500000084, 27.157997670000086, 25.840355630000083, 7.925789590000086, 27.432942960000084, 7.481384820000088, 7.404527690000087, 1.782848080000086, 12.513459490000088, 10.382287550000086, -6.7155106599999215, -8.029506989999916, 19.843708310000082, 19.676844150000086, 5.118581270000085, 22.338160330000086, 15.890082700000093, 5.896897850000087, -1.8854813799999164, 25.597184340000084, 13.591022070000086, 22.893731930000087, 17.120363390000087, 9.557333470000088, 10.069126630000085, 9.242023550000084, 6.838190790000084, 22.777622870000087, 25.397184020000083, 13.347744620000086, 0.8304557700000856, -0.07518005999991573, 12.68628250000009, 14.631211520000086, 20.411431440000086, 16.739527210000084, 15.025772130000085, 14.133595020000087, 2.398356170000085, 16.274279610000086, 7.845849040000083, 9.134059990000083, 10.988029370000085, 8.422250440000084, 12.214015310000086, 14.904721490000085, 10.474641110000086, -10.051987309999916, 2.778122620000076, 6.870670040000078, 19.80405348000008, 20.487551290000077, 6.397680900000076, 13.481915850000078, 6.67871755000008, 29.05751111000008, 6.212801780000078, 14.428679990000077, 0.6065350300000816, 18.153211950000077, 8.79688825000008, 2.7878053600000783, 20.174291600000082, 7.85327575000008, 34.872113060000075, 8.280112660000075, 7.742328700000073, -6.910178439999925, -8.704668069999926, -0.07241696999992087, 0.14587512000007763, 1.282531480000081, 8.520886170000075, 15.404972370000081, 12.65366729000008, 6.71409777000008, 2.6522461600000753, -11.72537420999992, 13.445295630000075, 21.128835800000076, -2.342398099999926, 11.665473320000075, 14.464216690000079, -4.426065779999924, 7.61829206000008, -3.8667710599999197, 
8.789991840000077, 27.51581723000008, 12.829383050000075, 20.17409061000008, 5.98723006000008, 1.36200936000008, 9.623738140000079, 15.837219200000085, 23.218744980000075, -3.405143709999919, 15.652608810000075, 13.666963060000079, 14.331973600000083, -2.0378780299999235, 20.43954853000008, 14.236904190000075, 20.268174730000077, 2.6387320600000805, 1.4428615600000754, 19.31744006000008, 21.59271729000008, 23.90896850000008, 17.506891380000077, 15.005966130000076, -0.5807534799999203, 9.845165810000076, 15.992804100000079, 26.471756090000078, 0.006696820000080095, 16.23495356000008, 12.029009200000075, 8.419745400000075, -9.599013489999926, 16.87394771000008, -0.6730179099999205, -12.591449589999911, 22.88364980000008, 20.362607530000083, 7.359210340000082, -1.3293256899999122, 33.92200720000009, -3.7207357699999086, 26.19477145000008, 31.701295500000082, 5.1019746300000826, 8.465151260000084, 19.026264230000088, 9.667253340000087, -6.1877090799999195, 21.462798080000084, 13.302390700000089, 6.987941080000084, 16.517635330000083, -1.3024404999999177, 15.073768850000086, 3.3903450800000883, -12.653481719999917, 27.368218560000088, -9.140633079999915, 0.2685219600000863, -6.476751049999919, 22.86961698000009, 5.230860150000083, -6.771491839999911, 11.937902080000086, 1.3056358000000827, 1.046297860000088, 3.300673940000088, 14.499600290000082, 27.784779920000084, 4.915992870000082, 12.755945330000088, 30.099039480000084, 9.215362560000088, 10.206760990000085, 9.393040220000088, 3.452675510000084, 31.150170930000087, 6.956269870000085, 2.9555962400000837, 21.32166698000009, 20.307904410000084, 6.195153640000086, 10.622365200000083, 5.270282070000086, 17.189754430000086, 22.139588420000088, 0.9115381100000874, 15.508509890000084, 8.573967150000087]
(BO) Backorders for node 1:[0, 0, 19.19040905, 1.8560166199999912, 0, 0, 0, 0, 0, 0, 0, 1.384173969999992, 0, 0, 0, 1.7492203399999937, 0, 0, 0.36212937999999184, 6.640644140000006, 0, 0, 1.9206440600000008, 4.498421629999996, 12.16493242, 0, 1.2274013600000018, 10.238519420000003, 7.305505159999996, 0, 0, 0, 0, 0.36735493999999846, 1.574523339999999, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.5731003699999988, 0, 0, 0, 3.703954860000003, 0, 4.195951239999999, 0, 2.771963249999999, 0, 0, 8.582602010000002, 0, 0, 1.6062320699999972, 9.151654759999992, 0, 3.197368709999992, 3.9814621699999933, 0, 0, 0, 0, 0, 0, 0, 0.8401078599999963, 0, 0, 0, 0, 2.6783857199999943, 0, 0, 0, 0, 0, 0, 0.8676907299999925, 11.759144499999984, 1.9331239499999953, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.349495989999994, 0, 0, 0, 0, 0.39444216999999426, 0, 0, 0, 0, 0, 2.08592410999999, 3.389589709999985, 0, 0, 0, 0, 0, 0, 0, 2.146462389999982, 0.3792032199999795, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.622913359999977, 1.1094645699999788, 0, 0, 0, 4.877898369999976, 11.457391119999983, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.36938513999998435, 0, 0, 0, 0, 0, 0, 0, 0, 17.063805469999984, 0, 3.462037099999975, 0, 0, 0, 0, 0, 5.2447196799999745, 0, 5.648439239999973, 0, 0, 8.011653249999966, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.8488880899999742, 0, 0, 0, 0, 0, 0, 0.07572130999997029, 0, 0, 4.862472499999967, 0, 0, 0, 0, 0, 0, 21.18103789999997, 0, 9.801421079999962, 0, 0, 0, 0, 4.594826419999968, 0, 0, 13.837648649999963, 0, 0, 0, 21.16365688999995, 0, 0, 6.895934329999953, 6.994125019999942, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11.25693235999993, 0, 0, 0, 0, 0, 0.7440720599999366, 1.0727154799999354, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9.354029919999938, 1.9310938499999395, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12.250607609999939, 0, 0, 0, 0, 0, 0, 4.560163309999929, 0, 0, 0, 0, 0, 0, 2.6297052199999342, 0, 0, 0, 0.8360531799999364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7.5065122899999395, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 4.678048029999935, 0, 0, 0, 0, 0, 0, 1.7062169499999413, 0, 0, 0.19544583999994103, 5.571721719999935, 0, 0, 0, 5.397902919999943, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.6634129999999345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.7783959999999297, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.150312569999933, 0, 3.3638321999999405, 0, 0, 0, 0, 2.285934519999927, 0, 0, 0, 0, 0, 0, 1.7006035299999311, 1.5822768899999318, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.9485371699999305, 0, 0, 0, 18.06142160999993, 0, 0, 0, 0.7678085999999311, 0, 0, 0, 8.331812469999932, 5.093721779999925, 0.2513335099999239, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.893708389999922, 4.249068239999929, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.656106489999928, 0, 0, 0, 6.1779224799999355, 2.28887993999993, 3.683600669999933, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7.016529089999942, 0, 0, 0, 2.3877012399999344, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23.683556609999926, 0, 0, 0, 7.521028009999924, 0, 2.870763029999921, 0, 0, 4.743603009999923, 0, 0, 0, 10.790887659999918, 0, 0, 0, 0, 0, 0, 0, 0, 3.439401469999922, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.627572189999924, 0, 0, 0, 0, 0, 0.703858629999921, 0, 0, 0, 0, 0, 0, 3.3157481499999193, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5000651199999275, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.18864085999992852, 0, 0, 0, 0, 0, 0, 0, 5.282238119999931, 0, 0, 0, 0, 0, 0, 2.773863299999917, 0, 0, 0.643826059999931, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.63120928999993, 0, 0, 0, 0, 3.6322605199999316, 0, 0, 0, 0, 0, 11.280296579999927, 5.990732839999936, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.432877749999939, 0, 0, 0, 0, 0, 0, 0.6337544899999372, 0, 0, 0, 0, 0, 0, 0, 0, 15.64723807999993, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 5.433847759999942, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.37574966999994, 3.74279662999993, 0, 0, 0, 8.705464359999922, 2.083646509999923, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.8915598799999245, 12.20668974999991, 0, 0, 0, 0, 0, 0, 16.49614974999991, 0, 0, 0, 0, 0, 1.3581112599999088, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.500655849999909, 0, 0.6181367199999102, 7.439419959999903, 0, 0, 0, 0, 0, 0, 0, 5.437612659999907, 0, 0, 2.1420174699999066, 5.828820509999915, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.051994949999909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.7155106599999215, 8.029506989999916, 0, 0, 0, 0, 0, 0, 1.8854813799999164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07518005999991573, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10.051987309999916, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.910178439999925, 8.704668069999926, 0.07241696999992087, 0, 0, 0, 0, 0, 0, 0, 11.72537420999992, 0, 0, 2.342398099999926, 0, 0, 4.426065779999924, 0, 3.8667710599999197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.405143709999919, 0, 0, 0, 2.0378780299999235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5807534799999203, 0, 0, 0, 0, 0, 0, 0, 9.599013489999926, 0, 0.6730179099999205, 12.591449589999911, 0, 0, 0, 1.3293256899999122, 0, 3.7207357699999086, 0, 0, 0, 0, 0, 0, 6.1877090799999195, 0, 0, 0, 0, 1.3024404999999177, 0, 0, 12.653481719999917, 0, 9.140633079999915, 0, 6.476751049999919, 0, 0, 6.771491839999911, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
(TC) Total Cost for node 1:[191.7624973, 78.52610339999998, 479.76022624999996, 46.40041549999978, 232.98895580000007, 84.68629170000007, 11.208494700000102, 172.9876971000001, 143.0005768000001, 88.31042190000005, 165.12934380000004, 34.6043492499998, 15.3994165000001, 18.637772600000062, 235.4079935000001, 43.73050849999984, 324.3343157000001, 234.96188270000007, 9.053234499999796, 166.01610350000016, 81.38498650000002, 203.47975239999997, 48.01610150000002, 112.4605407499999, 304.1233105, 94.515429, 30.685034000000044, 255.96298550000006, 182.6376289999999, 92.91058479999997, 103.33739790000003, 83.48442799999994, 209.53343259999997, 9.183873499999962, 39.36308349999997, 284.37864750000006, 70.02471500000001, 140.33030060000002, 93.21330359999997, 229.40767520000003, 27.25434530000001, 321.9033245, 13.268761699999985, 324.92687939999996, 170.07861189999997, 95.16744180000003, 29.750997399999974, 2.2282443999999657, 39.32750924999997, 177.38791249999997, 198.84305189999998, 188.58778460000002, 92.59887150000009, 81.95421240000002, 104.89878099999999, 149.09212660000003, 69.29908124999997, 109.45311930000003, 125.45431630000003, 214.56505025000007, 89.51946620000001, 83.90963589999998, 40.15580174999993, 228.7913689999998, 52.5018149000001, 79.9342177499998, 99.53655424999982, 201.7647823000001, 35.25720790000008, 328.4941152, 199.95467830000007, 167.37441710000002, 50.11000410000008, 285.9944180000001, 21.002696499999907, 293.2425951000001, 80.87037190000004, 315.3482736000001, 198.9759234000001, 66.95964299999986, 98.81316000000005, 5.279326800000064, 164.25613710000007, 164.90380710000005, 36.81185170000006, 170.9870319000001, 21.692268249999813, 293.9786124999996, 48.32809874999988, 177.78799120000008, 30.532829900000067, 59.11533250000005, 101.73230810000007, 158.70248220000008, 63.65083410000004, 141.9551313000001, 256.2426691000001, 71.24176100000014, 179.05958360000005, 4.034744300000099, 66.18060710000009, 149.34259150000008, 236.27936010000008, 
53.45835000000008, 33.73739974999985, 74.22686450000008, 91.30492610000005, 116.38327340000004, 375.49769100000003, 9.861054249999857, 222.02633550000004, 100.25726640000005, 86.25481930000007, 78.51007480000007, 281.4784681000001, 52.14810274999975, 84.73974274999962, 91.54324070000018, 224.51936100000023, 12.037093200000228, 61.265909200000195, 137.7579358000002, 132.2022843000002, 24.586908700000194, 53.661559749999554, 9.480080499999488, 235.88649670000024, 258.4994553000002, 140.6867114000002, 178.84255590000024, 60.49648000000019, 116.55637370000022, 137.30190390000018, 183.9832018000002, 87.65742340000024, 225.64585770000022, 41.52287090000023, 127.34524780000022, 50.28357820000018, 37.55098270000019, 152.76584890000024, 181.3399449000002, 175.60387840000018, 15.572833999999425, 27.73661424999947, 90.66310810000019, 125.45042190000018, 57.14961080000023, 121.94745924999938, 286.4347779999996, 220.0598957000001, 42.894400400000166, 42.53844020000017, 239.57610970000013, 206.62548780000014, 146.40800770000013, 213.78535340000013, 231.84749910000016, 68.32110160000013, 170.29387710000015, 47.45224910000012, 9.234628499999609, 148.5589493000001, 40.720913300000134, 45.024980800000165, 301.02652650000016, 211.5986329000001, 46.96725160000014, 94.65379530000014, 96.28454270000013, 426.5951367499996, 10.963717800000268, 86.55092749999937, 74.02643190000028, 144.47234620000032, 203.86533030000032, 196.26834380000025, 260.06606350000027, 131.11799199999936, 17.40726190000025, 141.21098099999932, 32.320011000000335, 59.21299640000029, 200.29133124999916, 10.101627800000301, 96.42559310000031, 138.4792153000003, 100.86146150000026, 131.72099030000027, 245.7928434000003, 209.14755150000025, 98.39728640000025, 67.88790740000032, 56.56272460000025, 71.22220224999936, 50.36488270000028, 176.53025920000027, 46.00463160000025, 2.4345800000002527, 125.51713530000029, 211.1789332000003, 1.8930327499992572, 76.14305900000026, 28.267796400000265, 121.56181249999918, 
35.084602900000306, 124.70185630000032, 157.8547958000003, 73.11897700000038, 9.510194000000283, 75.3035922000003, 529.5259474999992, 205.9608922000004, 245.03552699999904, 50.95617110000035, 71.24109190000034, 170.62531640000037, 122.01881570000033, 114.87066049999922, 35.686096100000384, 9.036269100000354, 345.94121624999906, 138.83640510000043, 291.5221129000004, 86.51049710000045, 529.0914222499987, 6.7991704000004916, 81.46396040000049, 172.39835824999884, 174.85312549999855, 208.0396651000006, 92.1810279000006, 3.8173185000005816, 151.08813460000056, 176.64810870000053, 75.14928880000056, 52.18426620000059, 106.07248290000058, 146.29140900000053, 85.91295040000055, 33.61279320000058, 281.42330899999826, 226.30386660000062, 275.8622319000006, 185.54208220000064, 127.19768090000066, 91.42393690000063, 18.601801499998416, 26.817886999998386, 153.86039650000066, 66.89633460000067, 52.805035100000666, 268.23597950000067, 228.43168680000062, 152.09125170000064, 7.044171400000607, 5.0324458000006445, 132.6564764000006, 141.55095990000063, 168.9395894000006, 176.0935727000006, 41.394178100000616, 170.9495026000006, 87.32987490000063, 233.85074799999845, 48.27734624999849, 26.77801070000065, 161.7674090000006, 91.82792760000062, 117.87734570000062, 181.06942040000064, 153.60189950000063, 174.99304020000062, 66.1220380000006, 52.61306630000064, 306.26519024999845, 112.72931740000061, 280.8606878000007, 150.56707390000065, 177.98626820000067, 0.4568956000006352, 73.37809700000072, 114.00408274999822, 217.93035600000067, 30.34415460000062, 91.98909070000063, 162.21413350000063, 128.2374201000006, 183.66678030000065, 65.74263049999836, 115.67446360000062, 60.19946300000065, 40.03066600000061, 20.90132949999841, 31.66554270000063, 229.80686700000064, 103.99540660000063, 29.523063500000646, 190.87837930000063, 123.74420480000062, 211.41086280000067, 161.98160470000062, 36.84817120000062, 69.61212730000064, 221.75173250000063, 187.66280724999848, 276.1138161000007, 
4.744944700000673, 12.989024000000597, 154.9412570000007, 172.30677320000063, 79.40122130000063, 114.98150980000062, 55.55953780000067, 104.47153450000066, 122.38293710000065, 164.7633978000007, 116.95120074999838, 2.377195200000557, 133.83132020000056, 195.16792020000054, 47.150178300000576, 39.20412890000058, 144.02975390000057, 42.65542374999853, 20.455833300000563, 14.56989160000056, 4.886145999998526, 139.2930429999984, 309.22497030000056, 51.658526900000616, 125.54770820000066, 134.94757299999858, 132.26038400000056, 182.2625255000006, 60.386449900000656, 128.48536520000067, 103.93747280000063, 198.43533600000063, 149.57402330000065, 255.49101830000063, 5.975733200000661, 175.71405220000065, 39.68831260000066, 3.797594900000618, 75.05789010000065, 151.34442250000063, 44.38814990000061, 190.21199320000065, 130.8802670000007, 16.585324999998363, 25.98751170000064, 71.39124770000066, 111.35873240000066, 46.58500610000061, 162.57737220000067, 47.18685510000064, 37.68018500000068, 141.15260760000064, 116.91111820000067, 46.77062730000067, 25.656228200000655, 56.449317200000664, 137.9874388000007, 156.82428220000062, 206.2221766000006, 224.05828280000065, 155.36092620000062, 336.27128000000073, 174.10615950000067, 73.71111540000065, 229.39883090000066, 201.4775790000006, 118.81606830000067, 57.54337580000062, 48.15038890000061, 309.7025986000006, 59.10744610000066, 201.75331960000065, 138.17790990000063, 47.97839280000062, 178.72688410000066, 69.45989999999824, 123.27612590000065, 22.26866760000064, 160.88291540000063, 177.65030270000062, 139.3777383000006, 182.9044714000006, 150.1521988000006, 70.79379070000066, 135.57671820000067, 259.3341494000006, 225.11999220000064, 273.1176461000006, 143.59320240000068, 99.49793790000065, 235.5966270000007, 248.15616330000063, 131.61590400000065, 203.57585210000067, 86.61921470000067, 123.73719010000066, 139.78120100000064, 47.28159120000065, 78.91580300000058, 118.03994770000067, 52.188501000000684, 35.81347810000061, 
325.3298385000006, 170.25834740000064, 58.17659560000067, 28.757814249998326, 287.0347081000006, 84.0958049999985, 294.6550459000007, 165.36433420000068, 82.46334630000071, 85.04194790000071, 57.14836299999817, 314.2650621000007, 3.1701182000006867, 86.2000869000007, 91.20589440000074, 3.470980600000715, 90.41918680000073, 42.51508824999828, 39.556922249998294, 211.0920777000007, 79.46080110000068, 260.5204347000007, 153.1367768000007, 96.55158460000074, 39.289099700000705, 189.3644963000007, 110.00666740000071, 203.30153510000073, 23.713429249998264, 321.54735230000074, 82.1542323000007, 294.74146780000075, 451.53554024999823, 218.9929477000007, 153.76722610000076, 227.08288840000077, 19.195214999998278, 75.52592830000073, 173.70416820000074, 236.4431731000007, 208.2953117499983, 127.34304449999811, 6.283337749998097, 87.02185550000081, 9.153640700000807, 197.9597794000008, 220.0872513000008, 242.87219850000076, 135.80388660000074, 309.8675488000008, 185.84528360000078, 98.5053621000008, 222.34270974999805, 106.22670599999822, 4.910894700000696, 147.6800377000007, 6.337053200000682, 177.65502560000073, 241.70664440000076, 187.35257390000072, 115.11041410000068, 5.846697200000719, 25.242930800000707, 123.9670657000007, 91.4026622499982, 142.08530930000074, 151.64869960000067, 81.34829600000074, 154.4480619999984, 57.22199849999825, 92.09001674999833, 195.59226160000065, 75.79690040000067, 191.55312390000063, 19.611611300000646, 31.561035000000643, 69.90578720000066, 177.7062375000006, 81.48063590000064, 82.14542100000067, 92.04792750000067, 219.7854929000006, 81.34504980000067, 80.63963770000065, 179.47655040000063, 77.66139580000065, 231.81726300000065, 99.42647140000062, 256.49255320000066, 2.3241472000006524, 29.40352450000063, 264.02896300000066, 124.27350300000057, 139.8098208000006, 273.2175820000006, 290.6857407000006, 57.795671400000614, 156.29055760000062, 165.29641050000066, 39.46888550000061, 39.14955070000062, 128.91355980000066, 177.50608150000062, 
100.95381840000066, 175.41322724999856, 76.46297890000064, 71.38465110000062, 45.843111600000626, 59.69253099999836, 276.88519610000066, 77.52532630000061, 88.8016936000006, 212.79079270000062, 66.42865240000063, 84.37443830000063, 280.70824260000063, 129.26733650000068, 50.42452950000062, 57.493722400000635, 90.65743450000063, 314.00976180000066, 74.27464490000062, 177.09120810000064, 192.96489680000064, 239.98657820000062, 41.43939920000065, 106.57851490000063, 168.1781665000006, 235.49847760000063, 50.05763430000066, 106.49061770000067, 592.0889152499981, 262.80660930000073, 223.2470155000007, 78.32782770000073, 188.0257002499981, 20.362809300000748, 71.76907574999802, 137.3637225000008, 218.67199500000078, 118.59007524999808, 165.67855580000077, 56.2276492000008, 234.15357720000083, 269.7721914999979, 111.42479270000081, 186.87443720000076, 196.22779070000078, 9.69359130000079, 246.44356820000075, 122.3485618000008, 198.32460160000073, 116.55557200000082, 85.98503674999805, 135.52755180000077, 102.5924056000008, 1.799470800000762, 119.22067070000075, 117.30296030000076, 19.045440100000803, 169.17873830000076, 71.99864670000075, 97.70685820000075, 172.47770230000077, 49.789975100000774, 40.6893047499981, 166.64616570000078, 66.92882890000078, 58.96587860000075, 215.66426190000078, 53.80136080000078, 17.596465749998025, 158.21494810000075, 90.01275530000079, 72.25002260000075, 300.6831941000008, 134.05907850000077, 166.04531560000078, 82.89370374999798, 150.5193562000008, 76.2178688000008, 209.7693462000008, 119.4100256000008, 37.052978300000774, 40.80940720000079, 248.8387824000008, 175.6139506000008, 267.56335840000077, 123.27977440000076, 430.22289970000077, 12.501627999998188, 254.68250120000076, 177.62151850000078, 129.80315230000076, 107.35685750000073, 153.03610030000073, 92.9675450000007, 220.7865106000007, 264.1672151000007, 194.45519390000072, 20.162200400000714, 4.716021499998213, 92.08714440000072, 148.67012010000076, 166.3410665000007, 
48.968839300000724, 106.86536990000072, 176.23450490000073, 171.91765480000072, 132.05595299999828, 120.02275550000071, 170.4901409000007, 78.2677939000007, 20.947746400000682, 211.6060383000007, 154.84741550000075, 69.34658249999792, 36.674697300000716, 41.45154720000072, 16.095651499998276, 99.36498720000074, 192.01733390000072, 171.1476726000007, 53.807805900000716, 156.3472171000007, 129.7194114000007, 144.27487380000073, 21.12896180000071, 83.65898380000068, 3.142401400000736, 0.4331663000007069, 48.51488950000068, 180.6962207000007, 168.3842731000007, 125.33337430000074, 104.13579370000072, 115.78023224999825, 25.939162700000722, 278.6596327000007, 44.00742640000068, 175.77111250000073, 90.80651299999829, 196.23282260000076, 111.60216310000074, 62.30239640000072, 228.9691430000007, 38.7068909000007, 282.00741449999816, 149.7683209999984, 293.84602880000074, 160.42110210000072, 289.6285668000008, 114.20910020000076, 115.43351960000074, 77.21120830000068, 71.87438810000074, 208.22099100000074, 24.081352800000744, 470.17424810000074, 24.864407600000735, 180.39973290000069, 220.44582970000073, 281.21737090000073, 234.85632170000073, 142.68703500000072, 133.52259420000075, 165.9288402000007, 380.34107040000066, 140.12114720000068, 156.07213830000066, 210.82194374999847, 174.50421770000062, 237.8528194000006, 70.92227360000066, 166.23695640000065, 179.50654030000067, 102.58738000000065, 15.84386224999843, 140.00200530000063, 31.92828640000066, 124.36003680000063, 4.594682500000644, 83.71062070000065, 30.819798800000626, 86.60814750000064, 267.26350740000066, 391.18095199999823, 152.41783950000064, 133.49006410000067, 101.98783810000066, 137.62121660000062, 223.74595820000067, 78.66697790000067, 206.85443700000064, 181.26954480000066, 155.65464900000057, 108.37763620000061, 39.20814640000067, 26.72081510000062, 28.761885700000605, 131.71929110000065, 92.24771520000061, 62.80583370000066, 176.54173980000067, 97.29641310000062, 129.77270580000067, 177.99376350000068, 
58.27384260000066, 194.12444490000064, 135.19597180000062, 43.057496100000634, 18.476174900000615, 89.42415540000063, 191.46059790000066, 58.082764800000675, 72.78090460000065, 135.84619399999855, 118.43821820000066, 103.03513970000061, 234.41232390000067, 47.84711060000063, 189.91671370000063, 138.55040220000063, 67.82563210000063, 131.93177960000065, 173.77241960000063, 209.39374174999853, 93.56991574999824, 204.39050880000076, 171.97810430000067, 161.55796340000074, 217.63660899999806, 52.09116274999808, 99.14683100000076, 209.83291050000076, 44.7362547000008, 90.15405400000077, 140.2974143000008, 295.76457900000077, 77.96288300000079, 23.873109600000788, 163.18896380000075, 92.12967130000081, 157.1978155000008, 261.5387391000008, 69.67282640000079, 140.35248910000078, 31.842389800000817, 144.2654969000008, 231.07397770000077, 8.611200200000795, 230.01932770000082, 16.391808500000806, 99.66113180000079, 22.288996999998112, 305.16724374999774, 292.73043630000086, 70.91926290000092, 113.34845590000086, 228.43149300000078, 319.90966260000084, 103.65074160000084, 412.4037437499977, 156.5064105000009, 228.63800190000092, 47.5549665000009, 117.36605310000094, 163.48175040000092, 33.95278149999772, 13.387319100000923, 89.78455630000092, 101.21620120000095, 230.01190360000095, 97.2668716000009, 128.90643150000096, 44.1409158000009, 32.68203810000095, 139.78165670000095, 84.61878550000094, 142.41990450000094, 162.5163962499977, 127.56844370000096, 15.453417999997754, 185.98549899999756, 93.50506320000093, 39.972431800000905, 75.90232100000094, 55.88317980000092, 238.8791489000009, 135.79401850000096, 134.8219312000009, 135.94031649999766, 45.72535350000095, 134.00606860000096, 53.550436749997665, 145.72051274999785, 137.4516255000009, 55.69342720000087, 170.59674660000084, 142.1015177000009, 162.46753450000085, 105.13171560000089, 94.77816250000082, 54.23494510000083, 135.21818080000088, 226.65729250000084, 101.29987374999772, 219.05947000000083, 24.86512700000084, 
254.90604610000082, 95.30861990000083, 173.31651510000086, 141.75881910000086, 30.922212100000834, 13.05680470000084, 77.08067500000084, 271.5799767000009, 258.40355630000084, 79.25789590000086, 274.3294296000008, 74.81384820000088, 74.04527690000087, 17.82848080000086, 125.13459490000088, 103.82287550000086, 167.88776649999804, 200.7376747499979, 198.43708310000082, 196.76844150000085, 51.18581270000085, 223.38160330000085, 158.90082700000093, 58.96897850000087, 47.13703449999791, 255.97184340000084, 135.91022070000088, 228.93731930000087, 171.20363390000085, 95.57333470000088, 100.69126630000085, 92.42023550000084, 68.38190790000084, 227.77622870000087, 253.97184020000083, 133.47744620000086, 8.304557700000856, 1.8795014999978932, 126.8628250000009, 146.31211520000085, 204.11431440000086, 167.39527210000085, 150.25772130000087, 141.33595020000087, 23.98356170000085, 162.74279610000087, 78.45849040000083, 91.34059990000083, 109.88029370000085, 84.22250440000084, 122.14015310000086, 149.04721490000085, 104.74641110000086, 251.29968274999788, 27.78122620000076, 68.70670040000078, 198.04053480000078, 204.87551290000079, 63.976809000000756, 134.81915850000078, 66.7871755000008, 290.57511110000075, 62.12801780000078, 144.28679990000077, 6.065350300000816, 181.53211950000076, 87.9688825000008, 27.878053600000783, 201.74291600000083, 78.5327575000008, 348.7211306000007, 82.80112660000074, 77.42328700000073, 172.75446099999812, 217.61670174999813, 1.8104242499980217, 1.4587512000007763, 12.825314800000811, 85.20886170000075, 154.0497237000008, 126.5366729000008, 67.1409777000008, 26.522461600000753, 293.134355249998, 134.45295630000075, 211.28835800000076, 58.55995249999815, 116.65473320000075, 144.6421669000008, 110.6516444999981, 76.1829206000008, 96.66927649999799, 87.89991840000077, 275.15817230000084, 128.29383050000075, 201.74090610000079, 59.8723006000008, 13.6200936000008, 96.23738140000079, 158.37219200000084, 232.18744980000076, 85.12859274999798, 
156.52608810000075, 136.6696306000008, 143.31973600000083, 50.94695074999809, 204.39548530000081, 142.36904190000075, 202.68174730000078, 26.387320600000805, 14.428615600000754, 193.1744006000008, 215.9271729000008, 239.08968500000077, 175.06891380000076, 150.05966130000076, 14.518836999998008, 98.45165810000076, 159.9280410000008, 264.71756090000076, 0.06696820000080095, 162.3495356000008, 120.29009200000075, 84.19745400000075, 239.97533724999815, 168.7394771000008, 16.825447749998013, 314.7862397499978, 228.8364980000008, 203.62607530000082, 73.59210340000082, 33.233142249997805, 339.22007200000087, 93.01839424999771, 261.9477145000008, 317.01295500000083, 51.019746300000826, 84.65151260000084, 190.2626423000009, 96.67253340000087, 154.692726999998, 214.62798080000084, 133.0239070000009, 69.87941080000084, 165.1763533000008, 32.56101249999794, 150.73768850000084, 33.90345080000088, 316.33704299999795, 273.6821856000009, 228.5158269999979, 2.6852196000008632, 161.91877624999796, 228.6961698000009, 52.30860150000083, 169.28729599999775, 119.37902080000086, 13.056358000000827, 10.46297860000088, 33.00673940000088, 144.9960029000008, 277.84779920000085, 49.15992870000082, 127.55945330000088, 300.99039480000084, 92.15362560000088, 102.06760990000085, 93.93040220000088, 34.52675510000084, 311.50170930000087, 69.56269870000085, 29.555962400000837, 213.2166698000009, 203.07904410000083, 61.95153640000086, 106.22365200000083, 52.702820700000856, 171.89754430000085, 221.39588420000086, 9.115381100000874, 155.08509890000084, 85.73967150000087]
Node 2
(IL) Starting Inventory record for node 2:[60, 19.176249730000002, 7.852610339999998, -19.19040905, -1.8560166199999912, 23.298895580000007, 8.468629170000007, 1.1208494700000102, 17.29876971000001, 14.300057680000009, 8.831042190000005, 16.512934380000004, -1.384173969999992, 1.53994165000001, 1.8637772600000062, 23.540799350000007, -1.7492203399999937, 32.43343157000001, 23.496188270000008, -0.36212937999999184, -6.640644140000006, 8.138498650000002, 20.347975239999997, -1.9206440600000008, -4.498421629999996, -12.16493242, 9.4515429, -1.2274013600000018, -10.238519420000003, -7.305505159999996, 9.291058479999997, 10.333739790000003, 8.348442799999994, 20.953343259999997, -0.36735493999999846, -1.574523339999999, 28.437864750000003, 7.002471500000002, 14.033030060000002, 9.321330359999997, 22.94076752, 2.725434530000001, 32.19033245, 1.3268761699999985, 32.492687939999996, 17.007861189999996, 9.516744180000003, 2.9750997399999974, 0.22282443999999657, -1.5731003699999988, 17.73879125, 19.88430519, 18.858778460000003, -3.703954860000003, 8.195421240000002, -4.195951239999999, 14.909212660000001, -2.771963249999999, 10.945311930000003, 12.545431630000003, -8.582602010000002, 8.951946620000001, 8.390963589999998, -1.6062320699999972, -9.151654759999992, 5.25018149000001, -3.197368709999992, -3.9814621699999933, 20.176478230000008, 3.5257207900000083, 32.849411520000004, 19.995467830000006, 16.737441710000002, 5.011000410000008, 28.599441800000008, -0.8401078599999963, 29.324259510000008, 8.087037190000004, 31.53482736000001, 19.89759234000001, -2.6783857199999943, 9.881316000000005, 0.5279326800000064, 16.425613710000007, 16.490380710000004, 3.6811851700000062, 17.09870319000001, -0.8676907299999925, -11.759144499999984, -1.9331239499999953, 17.77879912000001, 3.0532829900000067, 5.911533250000005, 10.173230810000007, 15.870248220000008, 6.365083410000004, 14.195513130000009, 25.62426691000001, 7.124176100000014, 17.905958360000007, 0.40347443000000993, 
6.618060710000009, 14.93425915000001, 23.627936010000006, 5.345835000000008, -1.349495989999994, 7.422686450000008, 9.130492610000005, 11.638327340000004, 37.549769100000006, -0.39444216999999426, 22.202633550000005, 10.025726640000006, 8.625481930000007, 7.851007480000007, 28.147846810000008, -2.08592410999999, -3.389589709999985, 9.154324070000019, 22.451936100000022, 1.2037093200000228, 6.1265909200000195, 13.77579358000002, 13.22022843000002, 2.4586908700000194, -2.146462389999982, -0.3792032199999795, 23.588649670000024, 25.84994553000002, 14.06867114000002, 17.884255590000024, 6.049648000000019, 11.655637370000022, 13.730190390000018, 18.39832018000002, 8.765742340000024, 22.564585770000022, 4.152287090000023, 12.734524780000022, 5.028357820000018, 3.755098270000019, 15.276584890000024, 18.13399449000002, 17.56038784000002, -0.622913359999977, -1.1094645699999788, 9.066310810000019, 12.545042190000018, 5.714961080000023, -4.877898369999976, -11.457391119999983, 22.00598957000001, 4.2894400400000166, 4.253844020000017, 23.95761097000001, 20.662548780000016, 14.640800770000013, 21.378535340000013, 23.184749910000015, 6.832110160000013, 17.029387710000016, 4.745224910000012, -0.36938513999998435, 14.855894930000012, 4.072091330000013, 4.5024980800000165, 30.102652650000014, 21.15986329000001, 4.696725160000014, 9.465379530000014, 9.628454270000013, -17.063805469999984, 1.0963717800000268, -3.462037099999975, 7.402643190000028, 14.447234620000032, 20.38653303000003, 19.626834380000027, 26.006606350000027, -5.2447196799999745, 1.740726190000025, -5.648439239999973, 3.2320011000000335, 5.921299640000029, -8.011653249999966, 1.0101627800000301, 9.642559310000031, 13.84792153000003, 10.086146150000026, 13.172099030000027, 24.57928434000003, 20.914755150000026, 9.839728640000025, 6.788790740000032, 5.656272460000025, -2.8488880899999742, 5.036488270000028, 17.653025920000026, 4.600463160000025, 0.24345800000002527, 12.551713530000029, 21.11789332000003, 
-0.07572130999997029, 7.614305900000026, 2.8267796400000265, -4.862472499999967, 3.5084602900000306, 12.470185630000032, 15.785479580000029, 7.3118977000000385, 0.9510194000000283, 7.5303592200000296, -21.18103789999997, 20.59608922000004, -9.801421079999962, 5.095617110000035, 7.124109190000034, 17.06253164000004, 12.201881570000033, -4.594826419999968, 3.5686096100000384, 0.9036269100000354, -13.837648649999963, 13.883640510000042, 29.152211290000043, 8.651049710000045, -21.16365688999995, 0.6799170400000492, 8.146396040000049, -6.895934329999953, -6.994125019999942, 20.80396651000006, 9.21810279000006, 0.38173185000005816, 15.108813460000057, 17.664810870000053, 7.514928880000056, 5.218426620000059, 10.607248290000058, 14.629140900000053, 8.591295040000055, 3.361279320000058, -11.25693235999993, 22.630386660000063, 27.586223190000062, 18.554208220000064, 12.719768090000066, 9.142393690000063, -0.7440720599999366, -1.0727154799999354, 15.386039650000065, 6.689633460000067, 5.280503510000067, 26.823597950000064, 22.843168680000062, 15.209125170000064, 0.7044171400000607, 0.5032445800000644, 13.265647640000061, 14.155095990000063, 16.89395894000006, 17.60935727000006, 4.139417810000062, 17.09495026000006, 8.732987490000063, -9.354029919999938, -1.9310938499999395, 2.677801070000065, 16.176740900000063, 9.182792760000062, 11.787734570000062, 18.106942040000064, 15.360189950000063, 17.49930402000006, 6.61220380000006, 5.261306630000064, -12.250607609999939, 11.272931740000061, 28.086068780000065, 15.056707390000064, 17.798626820000067, 0.04568956000006352, 7.337809700000072, -4.560163309999929, 21.793035600000067, 3.034415460000062, 9.198909070000063, 16.221413350000063, 12.82374201000006, 18.366678030000067, -2.6297052199999342, 11.567446360000062, 6.019946300000065, 4.003066600000061, -0.8360531799999364, 3.166554270000063, 22.980686700000064, 10.399540660000063, 2.9523063500000646, 19.087837930000063, 12.374420480000062, 21.141086280000067, 16.19816047000006, 
3.684817120000062, 6.961212730000064, 22.175173250000064, -7.5065122899999395, 27.611381610000066, 0.4744944700000673, 1.2989024000000597, 15.494125700000069, 17.230677320000062, 7.940122130000063, 11.498150980000062, 5.555953780000067, 10.447153450000066, 12.238293710000065, 16.476339780000067, -4.678048029999935, 0.2377195200000557, 13.383132020000055, 19.516792020000054, 4.715017830000058, 3.9204128900000583, 14.402975390000059, -1.7062169499999413, 2.0455833300000563, 1.456989160000056, -0.19544583999994103, -5.571721719999935, 30.92249703000006, 5.165852690000062, 12.554770820000066, -5.397902919999943, 13.226038400000057, 18.226252550000062, 6.038644990000066, 12.848536520000067, 10.393747280000063, 19.843533600000065, 14.957402330000065, 25.549101830000062, 0.5975733200000661, 17.571405220000067, 3.9688312600000657, 0.3797594900000618, 7.505789010000065, 15.134442250000063, 4.438814990000061, 19.021199320000065, 13.088026700000071, -0.6634129999999345, 2.598751170000064, 7.139124770000066, 11.135873240000066, 4.658500610000061, 16.257737220000067, 4.718685510000064, 3.7680185000000677, 14.115260760000062, 11.691111820000067, 4.677062730000067, 2.5656228200000655, 5.644931720000066, 13.798743880000067, 15.682428220000062, 20.62221766000006, 22.405828280000065, 15.536092620000062, 33.62712800000007, 17.410615950000068, 7.371111540000065, 22.939883090000066, 20.14775790000006, 11.881606830000067, 5.754337580000062, 4.815038890000061, 30.970259860000063, 5.910744610000066, 20.175331960000065, 13.817790990000063, 4.797839280000062, 17.872688410000066, -2.7783959999999297, 12.327612590000065, 2.226866760000064, 16.088291540000064, 17.76503027000006, 13.937773830000062, 18.29044714000006, 15.01521988000006, 7.079379070000066, 13.557671820000067, 25.933414940000063, 22.511999220000064, 27.31176461000006, 14.359320240000066, 9.949793790000065, 23.559662700000068, 24.81561633000006, 13.161590400000065, 20.357585210000067, 8.661921470000067, 12.373719010000066, 
13.978120100000062, 4.728159120000065, 7.891580300000058, 11.803994770000067, 5.218850100000068, 3.581347810000061, 32.532983850000065, 17.025834740000064, 5.817659560000067, -1.150312569999933, 28.703470810000063, -3.3638321999999405, 29.465504590000066, 16.53643342000007, 8.24633463000007, 8.50419479000007, -2.285934519999927, 31.42650621000007, 0.31701182000006867, 8.62000869000007, 9.120589440000074, 0.3470980600000715, 9.041918680000073, -1.7006035299999311, -1.5822768899999318, 21.10920777000007, 7.946080110000068, 26.052043470000072, 15.313677680000069, 9.655158460000074, 3.9289099700000705, 18.93644963000007, 11.00066674000007, 20.330153510000073, -0.9485371699999305, 32.15473523000007, 8.21542323000007, 29.474146780000073, -18.06142160999993, 21.899294770000072, 15.376722610000076, 22.708288840000076, -0.7678085999999311, 7.552592830000073, 17.370416820000074, 23.64431731000007, -8.331812469999932, -5.093721779999925, -0.2513335099999239, 8.702185550000081, 0.9153640700000807, 19.79597794000008, 22.00872513000008, 24.287219850000078, 13.580388660000075, 30.986754880000078, 18.58452836000008, 9.85053621000008, -8.893708389999922, -4.249068239999929, 0.49108947000006964, 14.768003770000071, 0.6337053200000682, 17.765502560000073, 24.170664440000074, 18.735257390000072, 11.511041410000068, 0.5846697200000719, 2.5242930800000707, 12.39670657000007, -3.656106489999928, 14.208530930000073, 15.164869960000068, 8.134829600000074, -6.1779224799999355, -2.28887993999993, -3.683600669999933, 19.559226160000065, 7.579690040000067, 19.155312390000063, 1.9611611300000646, 3.1561035000000643, 6.990578720000066, 17.770623750000063, 8.148063590000064, 8.214542100000067, 9.204792750000067, 21.97854929000006, 8.134504980000067, 8.063963770000065, 17.947655040000065, 7.766139580000065, 23.181726300000065, 9.942647140000062, 25.649255320000066, 0.23241472000006524, 2.940352450000063, 26.402896300000066, 12.427350300000057, 13.98098208000006, 27.321758200000062, 
29.068574070000064, 5.779567140000061, 15.629055760000064, 16.529641050000066, 3.946888550000061, 3.9149550700000617, 12.891355980000064, 17.750608150000062, 10.095381840000066, -7.016529089999942, 7.6462978900000635, 7.1384651100000625, 4.584311160000063, -2.3877012399999344, 27.688519610000064, 7.752532630000061, 8.88016936000006, 21.27907927000006, 6.642865240000063, 8.437443830000063, 28.070824260000062, 12.926733650000067, 5.042452950000062, 5.7493722400000635, 9.065743450000063, 31.400976180000065, 7.427464490000062, 17.709120810000066, 19.296489680000064, 23.998657820000062, 4.143939920000065, 10.657851490000063, 16.81781665000006, 23.549847760000063, 5.005763430000066, 10.649061770000067, -23.683556609999926, 26.280660930000074, 22.32470155000007, 7.8327827700000725, -7.521028009999924, 2.036280930000075, -2.870763029999921, 13.73637225000008, 21.867199500000076, -4.743603009999923, 16.567855580000078, 5.62276492000008, 23.41535772000008, -10.790887659999918, 11.14247927000008, 18.687443720000076, 19.622779070000078, 0.969359130000079, 24.644356820000077, 12.23485618000008, 19.832460160000075, 11.655557200000082, -3.439401469999922, 13.552755180000077, 10.25924056000008, 0.1799470800000762, 11.922067070000075, 11.730296030000076, 1.9045440100000803, 16.917873830000076, 7.199864670000075, 9.770685820000075, 17.24777023000008, 4.978997510000077, -1.627572189999924, 16.664616570000078, 6.692882890000078, 5.896587860000075, 21.56642619000008, 5.380136080000078, -0.703858629999921, 15.821494810000075, 9.001275530000079, 7.225002260000075, 30.06831941000008, 13.405907850000077, 16.604531560000076, -3.3157481499999193, 15.05193562000008, 7.62178688000008, 20.97693462000008, 11.94100256000008, 3.7052978300000774, 4.080940720000079, 24.88387824000008, 17.56139506000008, 26.756335840000077, 12.327977440000076, 43.022289970000074, -0.5000651199999275, 25.468250120000075, 17.762151850000077, 12.980315230000077, 10.735685750000073, 15.303610030000073, 9.29675450000007, 
22.07865106000007, 26.416721510000073, 19.445519390000072, 2.0162200400000714, -0.18864085999992852, 9.208714440000072, 14.867012010000074, 16.63410665000007, 4.896883930000072, 10.686536990000072, 17.623450490000074, 17.191765480000072, -5.282238119999931, 12.002275550000071, 17.04901409000007, 7.82677939000007, 2.094774640000068, 21.16060383000007, 15.484741550000074, -2.773863299999917, 3.6674697300000716, 4.145154720000072, -0.643826059999931, 9.936498720000074, 19.201733390000072, 17.11476726000007, 5.380780590000072, 15.634721710000072, 12.97194114000007, 14.427487380000073, 2.112896180000071, 8.365898380000068, 0.3142401400000736, 0.04331663000007069, 4.851488950000068, 18.069622070000072, 16.83842731000007, 12.533337430000074, 10.413579370000072, -4.63120928999993, 2.593916270000072, 27.865963270000073, 4.400742640000068, 17.577111250000073, -3.6322605199999316, 19.623282260000074, 11.160216310000074, 6.230239640000072, 22.89691430000007, 3.87068909000007, -11.280296579999927, -5.990732839999936, 29.384602880000074, 16.04211021000007, 28.962856680000076, 11.420910020000075, 11.543351960000074, 7.721120830000068, 7.187438810000074, 20.822099100000074, 2.4081352800000744, 47.01742481000007, 2.4864407600000735, 18.03997329000007, 22.04458297000007, 28.12173709000007, 23.485632170000073, 14.268703500000072, 13.352259420000074, 16.59288402000007, 38.034107040000066, 14.012114720000067, 15.607213830000067, -8.432877749999939, 17.450421770000062, 23.78528194000006, 7.0922273600000665, 16.623695640000065, 17.950654030000067, 10.258738000000065, -0.6337544899999372, 14.000200530000065, 3.192828640000066, 12.436003680000063, 0.4594682500000644, 8.371062070000065, 3.0819798800000626, 8.660814750000064, 26.726350740000065, -15.64723807999993, 15.241783950000062, 13.349006410000065, 10.198783810000066, 13.762121660000062, 22.374595820000067, 7.866697790000067, 20.685443700000064, 18.126954480000066, 15.565464900000059, 10.83776362000006, 3.920814640000067, 
2.672081510000062, 2.8761885700000605, 13.171929110000065, 9.224771520000061, 6.280583370000066, 17.654173980000067, 9.729641310000062, 12.977270580000067, 17.799376350000067, 5.827384260000066, 19.412444490000063, 13.519597180000062, 4.305749610000063, 1.8476174900000615, 8.942415540000063, 19.146059790000066, 5.8082764800000675, 7.278090460000065, -5.433847759999942, 11.843821820000066, 10.303513970000061, 23.441232390000067, 4.784711060000063, 18.991671370000063, 13.855040220000063, 6.782563210000063, 13.193177960000064, 17.377241960000063, -8.37574966999994, -3.74279662999993, 20.439050880000075, 17.197810430000068, 16.155796340000073, -8.705464359999922, -2.083646509999923, 9.914683100000076, 20.983291050000076, 4.47362547000008, 9.015405400000077, 14.02974143000008, 29.576457900000076, 7.796288300000079, 2.387310960000079, 16.318896380000076, 9.21296713000008, 15.719781550000079, 26.15387391000008, 6.967282640000079, 14.035248910000078, 3.1842389800000817, 14.42654969000008, 23.107397770000077, 0.8611200200000795, 23.00193277000008, 1.6391808500000806, 9.966113180000079, -0.8915598799999245, -12.20668974999991, 29.27304363000009, 7.091926290000092, 11.334845590000086, 22.84314930000008, 31.990966260000086, 10.365074160000084, -16.49614974999991, 15.65064105000009, 22.86380019000009, 4.75549665000009, 11.736605310000094, 16.348175040000093, -1.3581112599999088, 1.3387319100000923, 8.978455630000092, 10.121620120000095, 23.001190360000095, 9.72668716000009, 12.890643150000095, 4.41409158000009, 3.268203810000095, 13.978165670000095, 8.461878550000094, 14.241990450000095, -6.500655849999909, 12.756844370000096, -0.6181367199999102, -7.439419959999903, 9.350506320000093, 3.9972431800000905, 7.590232100000094, 5.588317980000092, 23.88791489000009, 13.579401850000096, 13.48219312000009, -5.437612659999907, 4.572535350000095, 13.400606860000096, -2.1420174699999066, -5.828820509999915, 13.745162550000089, 5.569342720000087, 17.059674660000084, 14.210151770000088, 
16.246753450000085, 10.513171560000089, 9.477816250000082, 5.423494510000083, 13.521818080000088, 22.665729250000084, -4.051994949999909, 21.905947000000083, 2.486512700000084, 25.490604610000084, 9.530861990000083, 17.331651510000086, 14.175881910000086, 3.0922212100000834, 1.305680470000084, 7.708067500000084, 27.157997670000086, 25.840355630000083, 7.925789590000086, 27.432942960000084, 7.481384820000088, 7.404527690000087, 1.782848080000086, 12.513459490000088, 10.382287550000086, -6.7155106599999215, -8.029506989999916, 19.843708310000082, 19.676844150000086, 5.118581270000085, 22.338160330000086, 15.890082700000093, 5.896897850000087, -1.8854813799999164, 25.597184340000084, 13.591022070000086, 22.893731930000087, 17.120363390000087, 9.557333470000088, 10.069126630000085, 9.242023550000084, 6.838190790000084, 22.777622870000087, 25.397184020000083, 13.347744620000086, 0.8304557700000856, -0.07518005999991573, 12.68628250000009, 14.631211520000086, 20.411431440000086, 16.739527210000084, 15.025772130000085, 14.133595020000087, 2.398356170000085, 16.274279610000086, 7.845849040000083, 9.134059990000083, 10.988029370000085, 8.422250440000084, 12.214015310000086, 14.904721490000085, 10.474641110000086, -10.051987309999916, 2.778122620000076, 6.870670040000078, 19.80405348000008, 20.487551290000077, 6.397680900000076, 13.481915850000078, 6.67871755000008, 29.05751111000008, 6.212801780000078, 14.428679990000077, 0.6065350300000816, 18.153211950000077, 8.79688825000008, 2.7878053600000783, 20.174291600000082, 7.85327575000008, 34.872113060000075, 8.280112660000075, 7.742328700000073, -6.910178439999925, -8.704668069999926, -0.07241696999992087, 0.14587512000007763, 1.282531480000081, 8.520886170000075, 15.404972370000081, 12.65366729000008, 6.71409777000008, 2.6522461600000753, -11.72537420999992, 13.445295630000075, 21.128835800000076, -2.342398099999926, 11.665473320000075, 14.464216690000079, -4.426065779999924, 7.61829206000008, -3.8667710599999197, 
8.789991840000077, 27.51581723000008, 12.829383050000075, 20.17409061000008, 5.98723006000008, 1.36200936000008, 9.623738140000079, 15.837219200000085, 23.218744980000075, -3.405143709999919, 15.652608810000075, 13.666963060000079, 14.331973600000083, -2.0378780299999235, 20.43954853000008, 14.236904190000075, 20.268174730000077, 2.6387320600000805, 1.4428615600000754, 19.31744006000008, 21.59271729000008, 23.90896850000008, 17.506891380000077, 15.005966130000076, -0.5807534799999203, 9.845165810000076, 15.992804100000079, 26.471756090000078, 0.006696820000080095, 16.23495356000008, 12.029009200000075, 8.419745400000075, -9.599013489999926, 16.87394771000008, -0.6730179099999205, -12.591449589999911, 22.88364980000008, 20.362607530000083, 7.359210340000082, -1.3293256899999122, 33.92200720000009, -3.7207357699999086, 26.19477145000008, 31.701295500000082, 5.1019746300000826, 8.465151260000084, 19.026264230000088, 9.667253340000087, -6.1877090799999195, 21.462798080000084, 13.302390700000089, 6.987941080000084, 16.517635330000083, -1.3024404999999177, 15.073768850000086, 3.3903450800000883, -12.653481719999917, 27.368218560000088, -9.140633079999915, 0.2685219600000863, -6.476751049999919, 22.86961698000009, 5.230860150000083, -6.771491839999911, 11.937902080000086, 1.3056358000000827, 1.046297860000088, 3.300673940000088, 14.499600290000082, 27.784779920000084, 4.915992870000082, 12.755945330000088, 30.099039480000084, 9.215362560000088, 10.206760990000085, 9.393040220000088, 3.452675510000084, 31.150170930000087, 6.956269870000085, 2.9555962400000837, 21.32166698000009, 20.307904410000084, 6.195153640000086, 10.622365200000083, 5.270282070000086, 17.189754430000086, 22.139588420000088, 0.9115381100000874, 15.508509890000084]
(IS) Inbound supply record for node 2:[0, 40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 
64.87789837, 71.45739112, 37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 
38.85891372, 43.80183953, 56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 
36.35568269, 68.33181247, 65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 
42.23784815, 47.01968477, 49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 
53.03271736, 45.96475109, 56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 
48.33452668, 45.53578331, 64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011]
(IO) Demand for node 2:[40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 64.87789837, 71.45739112, 
37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 
56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 36.35568269, 68.33181247, 
65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 42.23784815, 47.01968477, 
49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 
56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 48.33452668, 45.53578331, 
64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(OQ) Order for node 2:[40.82375027, 52.14738966, 79.19040905, 61.85601662, 36.70110442, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 61.38417397, 58.46005835, 58.13622274, 36.45920065, 61.74922034, 27.56656843, 36.50381173, 60.36212938, 66.64064414, 51.86150135, 39.65202476, 61.92064406, 64.49842163, 72.16493242, 50.5484571, 61.22740136, 70.23851942, 67.30550516, 50.70894152, 49.66626021, 51.651557200000006, 39.04665674, 60.36735494, 61.57452334, 31.56213525, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 61.57310037, 42.26120875, 40.11569481, 41.14122154, 63.70395486, 51.80457876, 64.19595124, 45.09078734, 62.77196325, 49.05468807, 47.45456837, 68.58260201, 51.04805338, 51.60903641, 61.60623207, 69.15165476, 54.74981851, 63.19736871, 63.98146217, 39.82352177, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.84010786, 30.67574049, 51.91296281, 28.46517264, 40.10240766, 62.67838572, 50.118684, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.86769073, 71.75914449999999, 61.93312395, 42.22120088, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 61.34949599, 52.57731355, 50.86950739, 48.36167266, 22.450230899999998, 60.39444217, 37.79736645, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 62.08592411, 63.38958971, 50.84567593, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 62.14646239, 60.37920322, 36.41135033, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 60.62291336, 61.10946457, 50.93368919, 47.45495781, 54.28503892, 64.87789837, 71.45739112, 
37.99401043, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.36938514, 45.14410507, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 77.06380547, 58.90362822, 63.4620371, 52.59735681, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 65.24471968, 58.25927381, 65.64843924, 56.767998899999995, 54.07870036, 68.01165325, 58.98983722, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 62.84888809, 54.96351173, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.07572131, 52.3856941, 57.17322036, 64.8624725, 56.49153971, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 81.1810379, 39.40391078, 69.80142108, 54.90438289, 52.87589081, 42.93746836, 47.79811843, 64.59482642, 56.43139039, 59.09637309, 73.83764865, 46.11635949, 30.84778871, 51.34895029, 81.16365689, 59.32008296, 51.85360396, 66.89593433, 66.99412502, 39.19603349, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 71.25693236, 37.36961334, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.74407206, 61.07271548, 44.61396035, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 69.35402992, 61.93109385, 57.32219893, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 72.25060761, 48.72706826, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 64.56016331, 38.2069644, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 62.62970522, 48.43255364, 53.9800537, 55.9969334, 60.83605318, 56.83344573, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 
56.31518288, 53.03878727, 37.82482675, 67.50651229, 32.38861839, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 64.67804803, 59.76228048, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 61.70621695, 57.95441667, 58.54301084, 60.19544584, 65.57172172, 29.07750297, 54.83414731, 47.44522918, 65.39790292, 46.77396160000001, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.663413, 57.40124883, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 62.778395999999994, 47.67238741, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 61.15031257, 31.29652919, 63.363832200000004, 30.53449541, 43.46356658, 51.75366537, 51.49580521, 62.28593452, 28.57349379, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 61.70060353, 61.58227689, 38.89079223, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.94853717, 27.84526477, 51.78457677, 30.52585322, 78.06142161, 38.10070523, 44.62327739, 37.29171116, 60.7678086, 52.44740717, 42.62958318, 36.35568269, 68.33181247, 
65.09372178, 60.25133351, 51.29781445, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 68.89370839, 64.24906824, 59.50891053, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 63.65610649, 45.79146907, 44.83513004, 51.8651704, 66.17792248, 62.28887994, 63.68360067, 40.44077384, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 67.01652909, 52.35370211, 52.86153489, 55.41568884, 62.38770124, 32.31148039, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 83.68355661, 33.71933907, 37.67529845, 52.16721723, 67.52102801, 57.96371907, 62.87076303, 46.26362775, 38.1328005, 64.74360301, 43.43214442, 54.37723508, 36.58464228, 70.79088766, 48.85752073, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 63.43940147, 46.44724482, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 61.62757219, 43.33538343, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.70385863, 44.17850519, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 63.31574815, 44.94806438, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.50006512, 34.53174988, 42.23784815, 47.01968477, 
49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.18864086, 50.79128556, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 65.28223812, 47.99772445, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 62.77386329999999, 56.33253027, 55.85484528, 60.64382606, 50.06350128, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 64.63120929, 57.40608373, 32.13403673, 55.59925736, 42.42288875, 63.63226052, 40.37671774, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 71.28029658, 65.99073284, 30.61539712, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 68.43287775, 42.54957823, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.63375449, 45.99979947, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 75.64723808, 44.75821605, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 65.43384776, 48.15617818, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 68.37574967, 63.74279663, 39.56094912, 42.80218957, 43.84420366, 68.70546436, 62.08364651, 50.0853169, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 
56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.89155988, 72.20668975, 30.72695637, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 76.49614975, 44.34935895, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 61.35811126, 58.66126809, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 66.50065585, 47.24315563, 60.61813672, 67.43941996, 50.64949368, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 65.43761266, 55.42746465, 46.59939314, 62.14201747, 65.82882051, 46.25483745, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 64.05199495, 38.094053, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 66.71551066, 68.02950699, 40.15629169, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 61.88548138, 34.40281566, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.07518006, 47.313717499999996, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 70.05198731, 57.22187738, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 66.91017844, 68.70466807, 60.07241697, 59.85412488, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 71.72537421, 46.55470437, 38.8711642, 62.342398100000004, 48.33452668, 45.53578331, 
64.42606578, 52.38170794, 63.86677106, 51.21000816, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 63.40514371, 44.34739119, 46.33303694, 45.668026399999995, 62.03787803, 39.56045147, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.58075348, 50.15483419, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 69.59901349, 43.12605229, 60.67301791, 72.59144959, 37.11635020000001, 39.63739247, 52.64078966, 61.32932569, 26.0779928, 63.72073577, 33.80522855, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 66.18770908, 38.53720192, 46.697609299999996, 53.01205892, 43.48236467, 61.3024405, 44.92623115, 56.60965492, 72.65348172, 32.63178144, 69.14063308, 59.73147804, 66.47675105, 37.13038302, 54.76913985, 66.77149184, 48.06209792, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(DMFS) Supply for node 2:[40.82375027, 52.14738966, 60.0, 79.19040905, 38.55712103999999, 51.53137083, 58.87915053, 42.70123029, 45.69994232, 51.16895781, 43.48706562, 60.00000000000001, 59.84423231999999, 58.13622274, 36.45920065, 60.00000000000001, 29.315788769999994, 36.50381173, 60.000000000000014, 60.36212938, 58.502145490000004, 39.65202476, 60.0, 61.92064406, 64.49842163, 62.71338952, 60.0, 61.22740136, 70.23851942, 58.01444668, 49.66626021, 51.651557200000006, 39.04665674, 60.0, 60.36735494, 33.136658589999996, 52.9975285, 45.96696994, 50.67866964, 37.05923248, 57.27456547, 27.80966755, 58.67312383, 27.50731206, 42.99213881, 50.48325582, 57.02490026, 59.77717556, 60.0, 43.83430912, 40.11569481, 41.14122154, 60.0, 55.50853362, 60.0, 49.28673858, 60.0, 51.826651319999996, 47.45456837, 60.0, 59.63065539, 51.60903641, 60.0, 61.60623207, 63.90147326999999, 60.00000000000001, 63.19736871, 43.80498393999999, 56.47427921, 27.15058848, 40.00453217, 43.26255829, 54.98899959, 31.4005582, 60.00000000000001, 31.515848349999995, 51.91296281, 28.46517264, 40.10240766, 60.00000000000001, 52.797069719999996, 59.47206732, 43.57438629, 43.50961929, 56.31881483, 42.90129681, 60.00000000000001, 60.86769073, 71.75914449999999, 44.15432482999999, 56.94671701, 54.08846675, 49.82676919, 44.12975178, 53.63491659, 45.80448687, 34.37573309, 52.87582389999999, 42.09404164, 59.59652557, 53.38193929, 45.06574085, 36.37206399, 54.654165, 60.00000000000001, 53.926809539999994, 50.86950739, 48.36167266, 22.450230899999998, 60.0, 38.19180861999999, 49.97427336, 51.37451807, 52.14899252, 31.85215319, 60.00000000000001, 62.08592411000001, 54.23526563999998, 37.548063899999995, 58.79629068, 53.87340908, 46.22420642, 46.77977157, 57.54130913, 60.00000000000002, 62.14646239, 36.79055354999998, 34.15005447, 45.93132886, 42.11574441, 53.950352, 48.34436263, 46.26980961, 41.60167982, 51.23425766, 37.43541423, 55.84771291, 47.26547522, 54.97164218, 56.24490173, 44.72341511, 41.86600551, 42.43961216, 
60.00000000000002, 60.62291336, 52.04315375999998, 47.45495781, 54.28503892, 60.00000000000002, 64.87789837, 49.451401549999986, 55.71055996, 55.74615598, 36.04238903, 39.33745122, 45.35919923, 38.62146466, 36.81525009, 53.16788984, 42.97061229, 55.25477509, 60.000000000000014, 45.513490209999986, 55.92790867, 55.49750192, 29.89734735, 38.84013671, 55.30327484, 50.53462047, 50.37154573, 60.000000000000014, 75.96743368999998, 60.00000000000003, 56.059393909999976, 45.55276538, 39.61346697, 40.37316562, 33.99339365, 60.00000000000003, 63.50399348999998, 60.00000000000003, 62.41643813999997, 54.07870036, 60.00000000000003, 67.00149046999996, 50.35744069, 46.15207847, 49.91385385, 46.82790097, 35.42071566, 39.08524485, 50.16027136, 53.21120926, 54.34372754, 60.00000000000003, 57.812399819999975, 42.34697408, 55.39953684, 59.756542, 47.44828647, 38.88210668, 60.00000000000003, 52.46141540999997, 57.17322036, 60.00000000000003, 61.354012209999965, 47.52981437, 44.21452042, 52.68810229999999, 59.0489806, 52.46964078, 60.00000000000003, 60.58494867999997, 60.000000000000036, 64.70580396999996, 52.87589081, 42.93746836, 47.79811843, 60.000000000000036, 61.026216809999966, 59.09637309, 60.000000000000036, 59.954008139999964, 30.84778871, 51.34895029, 60.00000000000004, 80.48373984999995, 51.85360396, 60.00000000000005, 66.89593433, 46.19015850999994, 50.78189721, 59.61826815, 44.89118654, 42.33518913, 52.48507112, 54.78157338, 49.39275171, 45.370859100000004, 51.40870496, 56.63872068, 60.00000000000006, 48.62654569999993, 32.41377681, 41.44579178, 47.28023191, 50.85760631, 60.000000000000064, 60.74407206, 45.686675829999935, 53.31036654, 54.71949649, 33.17640205, 37.15683132, 44.79087483, 59.29558286, 59.49675542, 46.73435236, 45.84490401, 43.10604106, 42.39064273, 55.86058219, 42.90504974, 51.26701251, 60.000000000000064, 69.35402992, 59.25329277999994, 43.8232591, 50.81720724, 48.21226543, 41.89305796, 44.63981005, 42.50069598, 53.387796200000004, 54.73869337, 
60.000000000000064, 60.97767586999994, 31.91393122, 44.94329261, 42.20137318, 59.95431044, 52.66219029999999, 60.000000000000064, 42.767127709999926, 56.96558454, 50.80109093, 43.77858665, 47.17625799, 41.63332197, 60.000000000000064, 51.062258859999936, 53.9800537, 55.9969334, 60.000000000000064, 57.66949890999994, 37.0193133, 49.60045934, 57.04769365, 40.91216207, 47.62557952, 38.85891372, 43.80183953, 56.31518288, 53.03878727, 37.82482675, 60.000000000000064, 39.89513067999994, 59.52550553, 58.701097600000004, 44.505874299999995, 42.76932268, 52.05987787, 48.50184902, 54.44404622, 49.55284655, 47.76170629, 43.52366022, 60.000000000000064, 64.44032850999994, 46.61686798, 40.48320798, 55.28498217, 56.07958711, 45.59702461, 60.00000000000006, 59.66063361999994, 58.54301084, 60.00000000000006, 60.19544584, 34.64922468999994, 54.83414731, 47.44522918, 60.000000000000064, 52.17186451999995, 41.77374745, 53.96135501, 47.15146348, 49.60625272, 40.1564664, 45.04259767, 34.45089817, 59.40242668, 42.42859478, 56.03116874, 59.62024051, 52.49421099, 44.86555775, 55.56118501, 40.97880068, 46.91197329999999, 60.000000000000064, 58.064661829999935, 52.86087523, 48.86412676, 55.34149939, 43.74226278, 55.28131449, 56.231981499999996, 45.88473924, 48.30888818, 55.32293727, 57.43437718, 54.35506828, 46.20125612, 44.31757178, 39.37778234, 37.59417172, 44.46390738, 26.372871999999997, 42.58938405, 52.62888846, 37.06011691, 39.852242100000005, 48.11839317, 54.24566242, 55.18496111, 29.02974014, 54.08925539, 39.82466804, 46.18220901, 55.20216072, 42.12731159, 60.000000000000064, 50.45078340999993, 57.77313324, 43.91170846, 42.23496973, 46.06222617, 41.70955286, 44.98478012, 52.92062093, 46.44232818, 34.06658506, 37.48800078, 32.68823539, 45.64067976, 50.05020621, 36.440337299999996, 35.18438367, 46.8384096, 39.64241479, 51.33807853, 47.62628099, 46.0218799, 55.27184088, 52.108419700000006, 48.19600523, 54.781149899999996, 56.41865219, 27.46701615, 42.97416526, 54.18234044, 
60.000000000000064, 32.446841759999934, 60.000000000000064, 33.89832760999994, 43.46356658, 51.75366537, 51.49580521, 60.00000000000007, 30.859428309999927, 59.68298818, 51.37999131, 50.87941056, 59.65290194, 50.95808132, 60.00000000000007, 61.70060353, 40.473069119999934, 52.05391989, 33.94795653, 44.68632232, 50.34484154, 56.07109003, 41.06355037, 48.99933326, 39.66984649, 60.00000000000007, 28.79380193999993, 51.78457677, 30.52585322, 60.00000000000007, 56.16212683999993, 44.62327739, 37.29171116, 60.00000000000007, 53.21521576999993, 42.62958318, 36.35568269, 60.00000000000007, 68.33181247, 65.09372178, 51.54914795999992, 59.08463593, 40.20402206, 37.99127487, 35.71278015, 46.41961134, 29.01324512, 41.41547164, 50.14946379, 60.00000000000008, 68.89370839, 63.75797876999993, 45.23199623, 59.36629468, 42.23449744, 35.82933556, 41.26474261, 48.48895859, 59.41533028, 57.47570692, 47.60329343, 60.00000000000007, 49.447575559999926, 44.83513004, 51.8651704, 60.00000000000007, 66.17792248, 62.28887994, 44.12437450999993, 52.42030996, 40.84468761, 58.03883887, 56.8438965, 53.00942128, 42.22937625, 51.85193641, 51.7854579, 50.79520725, 38.02145071, 51.86549502, 51.93603623, 42.05234496, 52.23386042, 36.8182737, 50.05735286, 34.35074468, 59.76758528, 57.05964755, 33.5971037, 47.57264970000001, 46.01901792, 32.6782418, 30.93142593, 54.22043286, 44.37094424, 43.47035895, 56.05311145, 56.08504493, 47.10864402, 42.24939185, 49.90461816, 60.000000000000064, 59.37023119999994, 52.86153489, 55.41568884, 60.000000000000064, 34.699181629999934, 52.24746737, 51.11983064, 38.72092073, 53.35713476, 51.56255617, 31.92917574, 47.07326635, 54.95754705, 54.25062776, 50.93425655, 28.59902382, 52.57253551, 42.29087919, 40.70351032, 36.00134218, 55.85606008, 49.34214851, 43.18218335, 36.45015224, 54.99423657, 49.35093823, 60.000000000000064, 57.40289567999992, 37.67529845, 52.16721723, 60.00000000000007, 65.48474707999992, 60.00000000000008, 49.13439077999992, 38.1328005, 
60.00000000000008, 48.17574742999992, 54.37723508, 36.58464228, 60.00000000000008, 59.648408389999915, 41.31255628, 40.37722093, 59.03064087, 35.35564318, 47.76514382, 40.16753984, 48.344442799999996, 60.00000000000008, 49.88664628999992, 49.74075944, 59.82005292, 48.07793293, 48.26970397, 58.09545599, 43.08212617, 52.80013533, 50.22931418, 42.75222977, 55.02100249, 60.00000000000008, 44.962955619999924, 53.30711711, 54.10341214, 38.43357381, 54.61986392, 60.00000000000008, 44.882363819999924, 50.99872447, 52.77499774, 29.93168059, 46.59409215, 43.39546844, 60.00000000000008, 48.26381252999992, 52.37821312, 39.02306538, 48.05899744, 56.29470217, 55.91905928, 35.11612176, 42.43860494, 33.24366416, 47.67202256, 16.97771003, 60.00000000000007, 35.03181499999992, 42.23784815, 47.01968477, 49.26431425, 44.69638997, 50.7032455, 37.92134894, 33.58327849, 40.55448061, 57.98377996, 60.00000000000007, 50.97992641999993, 45.13298799, 43.36589335, 55.10311607, 49.31346301, 42.37654951, 42.80823452, 60.00000000000007, 53.27996256999993, 42.95098591, 52.17322061, 57.90522536, 38.83939617, 44.51525845, 60.00000000000007, 59.106393569999916, 55.85484528, 60.00000000000007, 50.70732733999993, 40.79826661, 42.88523274, 54.61921941, 44.36527829, 47.02805886, 45.57251262, 57.88710382, 51.63410162, 59.68575986, 59.95668337, 55.14851105, 41.93037793, 43.16157269, 47.46666257, 49.58642063, 60.00000000000007, 62.03729301999993, 32.13403673, 55.59925736, 42.42288875, 60.00000000000007, 44.00897825999993, 48.83978369, 53.76976036, 37.1030857, 56.12931091, 60.00000000000007, 71.28029658, 36.60612995999993, 43.95788979, 31.03714332, 48.57908998, 48.45664804, 52.27887917, 52.81256119, 39.1779009, 57.59186472, 12.98257519, 57.51355924, 41.96002671, 37.95541703, 31.87826291, 36.51436783, 45.7312965, 46.64774058, 43.40711598, 21.96589296, 45.98788528, 44.39278617, 60.00000000000007, 50.98245597999994, 36.21471806, 52.90777264, 43.37630436, 42.04934597, 49.741262, 60.000000000000064, 
46.63355395999994, 56.80717136, 47.56399632, 59.54053175, 51.62893793, 56.91802012, 51.33918525, 33.27364926, 60.000000000000064, 60.40545412999993, 46.65099359, 49.80121619, 46.23787834, 37.62540418, 52.13330221, 39.3145563, 41.87304552, 44.434535100000005, 49.16223638, 56.07918536, 57.32791849, 57.12381143, 46.82807089, 50.77522848, 53.71941663, 42.34582602, 50.27035869, 47.02272942, 42.20062365, 54.17261574, 40.58755551, 46.48040282, 55.69425039, 58.15238251, 51.05758446, 40.85394021, 54.19172352, 52.72190954, 60.000000000000064, 53.59002593999994, 49.69648603, 36.55876761, 55.21528894, 41.00832863, 46.14495978, 53.21743679, 46.80682204, 42.62275804, 60.000000000000064, 68.37574967, 43.303745749999926, 42.80218957, 43.84420366, 60.00000000000007, 68.70546436, 52.168963409999925, 39.01670895, 55.52637453, 50.9845946, 45.97025857, 30.423542100000002, 52.2037117, 57.61268904, 43.68110362, 50.78703287, 44.28021845, 33.84612609, 53.03271736, 45.96475109, 56.81576102, 45.57345031, 36.89260223, 59.13887998, 36.99806723, 58.36081915, 50.03388682, 60.00000000000008, 60.89155988, 42.933646119999906, 52.90807371, 48.66515441, 37.15685070000001, 28.00903374, 49.63492584, 60.000000000000085, 60.84550869999991, 37.13619981, 55.24450335, 48.26339469, 43.65182496, 60.00000000000009, 60.01937934999991, 51.02154437, 49.87837988, 36.99880964, 50.27331284, 47.10935685, 55.58590842, 56.73179619, 46.02183433, 51.53812145, 45.75800955, 60.00000000000009, 53.743811479999906, 60.00000000000009, 60.61813672, 58.0889136399999, 56.00275682, 52.4097679, 54.41168202, 36.11208511, 46.42059815, 46.51780688, 60.00000000000009, 60.865077309999904, 46.59939314, 60.00000000000009, 62.14201747, 52.08365795999991, 54.43065728, 42.94032534, 45.78984823, 43.75324655, 49.48682844, 50.52218375, 54.57650549, 46.47818192, 37.33427075, 60.000000000000085, 42.14604794999991, 57.5134873, 34.50939539, 50.46913801, 42.66834849, 45.82411809, 56.90777879, 58.69431953, 52.2919325, 32.84200233, 34.15964437, 
52.07421041, 32.56705704, 52.51861518, 52.59547231, 58.21715192, 47.48654051, 49.61771245, 60.000000000000085, 66.71551066, 48.18579867999992, 40.32315585, 54.88141873, 37.66183967, 44.10991729999999, 54.10310215, 60.000000000000085, 36.28829703999992, 46.40897793, 37.10626807, 42.87963661, 50.44266653, 49.93087337, 50.75797645, 53.16180921, 37.22237713, 34.60281598, 46.65225538, 59.16954423, 60.000000000000085, 47.38889755999991, 45.36878848, 39.58856856, 43.26047279, 44.97422787, 45.86640498, 57.60164383, 43.72572039, 52.15415096, 50.86594001, 49.01197063, 51.57774956, 47.78598469, 45.09527851, 49.52535889, 60.000000000000085, 67.27386468999993, 53.12932996, 40.19594652, 39.51244871, 53.6023191, 46.51808415, 53.32128245, 30.94248889, 53.78719822, 45.57132001, 59.39346497, 41.84678805, 51.20311175, 57.21219464, 39.825708399999996, 52.14672425, 25.12788694, 51.71988734, 52.2576713, 60.00000000000007, 66.91017844, 68.70466807, 59.92654184999992, 58.71746852, 51.47911383, 44.59502763, 47.34633271, 53.28590223, 57.34775384, 60.00000000000008, 58.28007857999992, 38.8711642, 60.00000000000008, 50.67692477999993, 45.53578331, 60.00000000000008, 56.80777371999992, 60.00000000000008, 55.07677921999992, 32.48418277, 47.17061695, 39.82590939, 54.01276994, 58.63799064, 50.37626186, 44.16278079999999, 36.78125502, 60.00000000000008, 47.75253489999992, 46.33303694, 45.668026399999995, 60.00000000000008, 41.59832949999992, 45.76309581, 39.73182527, 57.36126794, 58.55713844, 40.68255994, 38.40728271, 36.0910315, 42.49310862, 44.99403387, 60.00000000000008, 50.73558766999992, 44.0071959, 33.52824391, 59.99330318, 43.76504644, 47.9709908, 51.5802546, 60.00000000000008, 52.72506577999992, 60.00000000000008, 60.67301791, 49.70779978999992, 39.63739247, 52.64078966, 60.000000000000085, 27.407318489999913, 60.000000000000085, 37.525964319999915, 28.2987045, 54.89802537, 51.53484874, 40.97373577, 50.33274666, 60.000000000000085, 44.72491099999992, 46.697609299999996, 53.01205892, 
43.48236467, 60.000000000000085, 46.22867164999992, 56.60965492, 60.000000000000085, 45.285263159999914, 60.000000000000085, 68.87211111999991, 60.000000000000085, 43.607134069999915, 54.76913985, 60.000000000000085, 54.83358975999991, 58.6943642, 58.95370214, 56.69932606, 45.50039971, 32.21522008, 55.08400713, 47.24405467, 29.90096052, 50.78463744, 49.79323901, 50.60695978, 56.54732449, 28.84982907, 53.04373013, 57.04440376, 38.67833302, 39.69209559, 53.80484636, 49.3776348, 54.72971793, 42.81024557, 37.86041158, 59.08846189, 44.49149011, 51.42603285]
(EIL) Ending Inventory record for node 2:[19.176249730000002, 7.852610339999998, -19.19040905, -1.8560166199999912, 23.298895580000007, 8.468629170000007, 1.1208494700000102, 17.29876971000001, 14.300057680000009, 8.831042190000005, 16.512934380000004, -1.384173969999992, 1.53994165000001, 1.8637772600000062, 23.540799350000007, -1.7492203399999937, 32.43343157000001, 23.496188270000008, -0.36212937999999184, -6.640644140000006, 8.138498650000002, 20.347975239999997, -1.9206440600000008, -4.498421629999996, -12.16493242, 9.4515429, -1.2274013600000018, -10.238519420000003, -7.305505159999996, 9.291058479999997, 10.333739790000003, 8.348442799999994, 20.953343259999997, -0.36735493999999846, -1.574523339999999, 28.437864750000003, 7.002471500000002, 14.033030060000002, 9.321330359999997, 22.94076752, 2.725434530000001, 32.19033245, 1.3268761699999985, 32.492687939999996, 17.007861189999996, 9.516744180000003, 2.9750997399999974, 0.22282443999999657, -1.5731003699999988, 17.73879125, 19.88430519, 18.858778460000003, -3.703954860000003, 8.195421240000002, -4.195951239999999, 14.909212660000001, -2.771963249999999, 10.945311930000003, 12.545431630000003, -8.582602010000002, 8.951946620000001, 8.390963589999998, -1.6062320699999972, -9.151654759999992, 5.25018149000001, -3.197368709999992, -3.9814621699999933, 20.176478230000008, 3.5257207900000083, 32.849411520000004, 19.995467830000006, 16.737441710000002, 5.011000410000008, 28.599441800000008, -0.8401078599999963, 29.324259510000008, 8.087037190000004, 31.53482736000001, 19.89759234000001, -2.6783857199999943, 9.881316000000005, 0.5279326800000064, 16.425613710000007, 16.490380710000004, 3.6811851700000062, 17.09870319000001, -0.8676907299999925, -11.759144499999984, -1.9331239499999953, 17.77879912000001, 3.0532829900000067, 5.911533250000005, 10.173230810000007, 15.870248220000008, 6.365083410000004, 14.195513130000009, 25.62426691000001, 7.124176100000014, 17.905958360000007, 0.40347443000000993, 
6.618060710000009, 14.93425915000001, 23.627936010000006, 5.345835000000008, -1.349495989999994, 7.422686450000008, 9.130492610000005, 11.638327340000004, 37.549769100000006, -0.39444216999999426, 22.202633550000005, 10.025726640000006, 8.625481930000007, 7.851007480000007, 28.147846810000008, -2.08592410999999, -3.389589709999985, 9.154324070000019, 22.451936100000022, 1.2037093200000228, 6.1265909200000195, 13.77579358000002, 13.22022843000002, 2.4586908700000194, -2.146462389999982, -0.3792032199999795, 23.588649670000024, 25.84994553000002, 14.06867114000002, 17.884255590000024, 6.049648000000019, 11.655637370000022, 13.730190390000018, 18.39832018000002, 8.765742340000024, 22.564585770000022, 4.152287090000023, 12.734524780000022, 5.028357820000018, 3.755098270000019, 15.276584890000024, 18.13399449000002, 17.56038784000002, -0.622913359999977, -1.1094645699999788, 9.066310810000019, 12.545042190000018, 5.714961080000023, -4.877898369999976, -11.457391119999983, 22.00598957000001, 4.2894400400000166, 4.253844020000017, 23.95761097000001, 20.662548780000016, 14.640800770000013, 21.378535340000013, 23.184749910000015, 6.832110160000013, 17.029387710000016, 4.745224910000012, -0.36938513999998435, 14.855894930000012, 4.072091330000013, 4.5024980800000165, 30.102652650000014, 21.15986329000001, 4.696725160000014, 9.465379530000014, 9.628454270000013, -17.063805469999984, 1.0963717800000268, -3.462037099999975, 7.402643190000028, 14.447234620000032, 20.38653303000003, 19.626834380000027, 26.006606350000027, -5.2447196799999745, 1.740726190000025, -5.648439239999973, 3.2320011000000335, 5.921299640000029, -8.011653249999966, 1.0101627800000301, 9.642559310000031, 13.84792153000003, 10.086146150000026, 13.172099030000027, 24.57928434000003, 20.914755150000026, 9.839728640000025, 6.788790740000032, 5.656272460000025, -2.8488880899999742, 5.036488270000028, 17.653025920000026, 4.600463160000025, 0.24345800000002527, 12.551713530000029, 21.11789332000003, 
-0.07572130999997029, 7.614305900000026, 2.8267796400000265, -4.862472499999967, 3.5084602900000306, 12.470185630000032, 15.785479580000029, 7.3118977000000385, 0.9510194000000283, 7.5303592200000296, -21.18103789999997, 20.59608922000004, -9.801421079999962, 5.095617110000035, 7.124109190000034, 17.06253164000004, 12.201881570000033, -4.594826419999968, 3.5686096100000384, 0.9036269100000354, -13.837648649999963, 13.883640510000042, 29.152211290000043, 8.651049710000045, -21.16365688999995, 0.6799170400000492, 8.146396040000049, -6.895934329999953, -6.994125019999942, 20.80396651000006, 9.21810279000006, 0.38173185000005816, 15.108813460000057, 17.664810870000053, 7.514928880000056, 5.218426620000059, 10.607248290000058, 14.629140900000053, 8.591295040000055, 3.361279320000058, -11.25693235999993, 22.630386660000063, 27.586223190000062, 18.554208220000064, 12.719768090000066, 9.142393690000063, -0.7440720599999366, -1.0727154799999354, 15.386039650000065, 6.689633460000067, 5.280503510000067, 26.823597950000064, 22.843168680000062, 15.209125170000064, 0.7044171400000607, 0.5032445800000644, 13.265647640000061, 14.155095990000063, 16.89395894000006, 17.60935727000006, 4.139417810000062, 17.09495026000006, 8.732987490000063, -9.354029919999938, -1.9310938499999395, 2.677801070000065, 16.176740900000063, 9.182792760000062, 11.787734570000062, 18.106942040000064, 15.360189950000063, 17.49930402000006, 6.61220380000006, 5.261306630000064, -12.250607609999939, 11.272931740000061, 28.086068780000065, 15.056707390000064, 17.798626820000067, 0.04568956000006352, 7.337809700000072, -4.560163309999929, 21.793035600000067, 3.034415460000062, 9.198909070000063, 16.221413350000063, 12.82374201000006, 18.366678030000067, -2.6297052199999342, 11.567446360000062, 6.019946300000065, 4.003066600000061, -0.8360531799999364, 3.166554270000063, 22.980686700000064, 10.399540660000063, 2.9523063500000646, 19.087837930000063, 12.374420480000062, 21.141086280000067, 16.19816047000006, 
3.684817120000062, 6.961212730000064, 22.175173250000064, -7.5065122899999395, 27.611381610000066, 0.4744944700000673, 1.2989024000000597, 15.494125700000069, 17.230677320000062, 7.940122130000063, 11.498150980000062, 5.555953780000067, 10.447153450000066, 12.238293710000065, 16.476339780000067, -4.678048029999935, 0.2377195200000557, 13.383132020000055, 19.516792020000054, 4.715017830000058, 3.9204128900000583, 14.402975390000059, -1.7062169499999413, 2.0455833300000563, 1.456989160000056, -0.19544583999994103, -5.571721719999935, 30.92249703000006, 5.165852690000062, 12.554770820000066, -5.397902919999943, 13.226038400000057, 18.226252550000062, 6.038644990000066, 12.848536520000067, 10.393747280000063, 19.843533600000065, 14.957402330000065, 25.549101830000062, 0.5975733200000661, 17.571405220000067, 3.9688312600000657, 0.3797594900000618, 7.505789010000065, 15.134442250000063, 4.438814990000061, 19.021199320000065, 13.088026700000071, -0.6634129999999345, 2.598751170000064, 7.139124770000066, 11.135873240000066, 4.658500610000061, 16.257737220000067, 4.718685510000064, 3.7680185000000677, 14.115260760000062, 11.691111820000067, 4.677062730000067, 2.5656228200000655, 5.644931720000066, 13.798743880000067, 15.682428220000062, 20.62221766000006, 22.405828280000065, 15.536092620000062, 33.62712800000007, 17.410615950000068, 7.371111540000065, 22.939883090000066, 20.14775790000006, 11.881606830000067, 5.754337580000062, 4.815038890000061, 30.970259860000063, 5.910744610000066, 20.175331960000065, 13.817790990000063, 4.797839280000062, 17.872688410000066, -2.7783959999999297, 12.327612590000065, 2.226866760000064, 16.088291540000064, 17.76503027000006, 13.937773830000062, 18.29044714000006, 15.01521988000006, 7.079379070000066, 13.557671820000067, 25.933414940000063, 22.511999220000064, 27.31176461000006, 14.359320240000066, 9.949793790000065, 23.559662700000068, 24.81561633000006, 13.161590400000065, 20.357585210000067, 8.661921470000067, 12.373719010000066, 
13.978120100000062, 4.728159120000065, 7.891580300000058, 11.803994770000067, 5.218850100000068, 3.581347810000061, 32.532983850000065, 17.025834740000064, 5.817659560000067, -1.150312569999933, 28.703470810000063, -3.3638321999999405, 29.465504590000066, 16.53643342000007, 8.24633463000007, 8.50419479000007, -2.285934519999927, 31.42650621000007, 0.31701182000006867, 8.62000869000007, 9.120589440000074, 0.3470980600000715, 9.041918680000073, -1.7006035299999311, -1.5822768899999318, 21.10920777000007, 7.946080110000068, 26.052043470000072, 15.313677680000069, 9.655158460000074, 3.9289099700000705, 18.93644963000007, 11.00066674000007, 20.330153510000073, -0.9485371699999305, 32.15473523000007, 8.21542323000007, 29.474146780000073, -18.06142160999993, 21.899294770000072, 15.376722610000076, 22.708288840000076, -0.7678085999999311, 7.552592830000073, 17.370416820000074, 23.64431731000007, -8.331812469999932, -5.093721779999925, -0.2513335099999239, 8.702185550000081, 0.9153640700000807, 19.79597794000008, 22.00872513000008, 24.287219850000078, 13.580388660000075, 30.986754880000078, 18.58452836000008, 9.85053621000008, -8.893708389999922, -4.249068239999929, 0.49108947000006964, 14.768003770000071, 0.6337053200000682, 17.765502560000073, 24.170664440000074, 18.735257390000072, 11.511041410000068, 0.5846697200000719, 2.5242930800000707, 12.39670657000007, -3.656106489999928, 14.208530930000073, 15.164869960000068, 8.134829600000074, -6.1779224799999355, -2.28887993999993, -3.683600669999933, 19.559226160000065, 7.579690040000067, 19.155312390000063, 1.9611611300000646, 3.1561035000000643, 6.990578720000066, 17.770623750000063, 8.148063590000064, 8.214542100000067, 9.204792750000067, 21.97854929000006, 8.134504980000067, 8.063963770000065, 17.947655040000065, 7.766139580000065, 23.181726300000065, 9.942647140000062, 25.649255320000066, 0.23241472000006524, 2.940352450000063, 26.402896300000066, 12.427350300000057, 13.98098208000006, 27.321758200000062, 
29.068574070000064, 5.779567140000061, 15.629055760000064, 16.529641050000066, 3.946888550000061, 3.9149550700000617, 12.891355980000064, 17.750608150000062, 10.095381840000066, -7.016529089999942, 7.6462978900000635, 7.1384651100000625, 4.584311160000063, -2.3877012399999344, 27.688519610000064, 7.752532630000061, 8.88016936000006, 21.27907927000006, 6.642865240000063, 8.437443830000063, 28.070824260000062, 12.926733650000067, 5.042452950000062, 5.7493722400000635, 9.065743450000063, 31.400976180000065, 7.427464490000062, 17.709120810000066, 19.296489680000064, 23.998657820000062, 4.143939920000065, 10.657851490000063, 16.81781665000006, 23.549847760000063, 5.005763430000066, 10.649061770000067, -23.683556609999926, 26.280660930000074, 22.32470155000007, 7.8327827700000725, -7.521028009999924, 2.036280930000075, -2.870763029999921, 13.73637225000008, 21.867199500000076, -4.743603009999923, 16.567855580000078, 5.62276492000008, 23.41535772000008, -10.790887659999918, 11.14247927000008, 18.687443720000076, 19.622779070000078, 0.969359130000079, 24.644356820000077, 12.23485618000008, 19.832460160000075, 11.655557200000082, -3.439401469999922, 13.552755180000077, 10.25924056000008, 0.1799470800000762, 11.922067070000075, 11.730296030000076, 1.9045440100000803, 16.917873830000076, 7.199864670000075, 9.770685820000075, 17.24777023000008, 4.978997510000077, -1.627572189999924, 16.664616570000078, 6.692882890000078, 5.896587860000075, 21.56642619000008, 5.380136080000078, -0.703858629999921, 15.821494810000075, 9.001275530000079, 7.225002260000075, 30.06831941000008, 13.405907850000077, 16.604531560000076, -3.3157481499999193, 15.05193562000008, 7.62178688000008, 20.97693462000008, 11.94100256000008, 3.7052978300000774, 4.080940720000079, 24.88387824000008, 17.56139506000008, 26.756335840000077, 12.327977440000076, 43.022289970000074, -0.5000651199999275, 25.468250120000075, 17.762151850000077, 12.980315230000077, 10.735685750000073, 15.303610030000073, 9.29675450000007, 
22.07865106000007, 26.416721510000073, 19.445519390000072, 2.0162200400000714, -0.18864085999992852, 9.208714440000072, 14.867012010000074, 16.63410665000007, 4.896883930000072, 10.686536990000072, 17.623450490000074, 17.191765480000072, -5.282238119999931, 12.002275550000071, 17.04901409000007, 7.82677939000007, 2.094774640000068, 21.16060383000007, 15.484741550000074, -2.773863299999917, 3.6674697300000716, 4.145154720000072, -0.643826059999931, 9.936498720000074, 19.201733390000072, 17.11476726000007, 5.380780590000072, 15.634721710000072, 12.97194114000007, 14.427487380000073, 2.112896180000071, 8.365898380000068, 0.3142401400000736, 0.04331663000007069, 4.851488950000068, 18.069622070000072, 16.83842731000007, 12.533337430000074, 10.413579370000072, -4.63120928999993, 2.593916270000072, 27.865963270000073, 4.400742640000068, 17.577111250000073, -3.6322605199999316, 19.623282260000074, 11.160216310000074, 6.230239640000072, 22.89691430000007, 3.87068909000007, -11.280296579999927, -5.990732839999936, 29.384602880000074, 16.04211021000007, 28.962856680000076, 11.420910020000075, 11.543351960000074, 7.721120830000068, 7.187438810000074, 20.822099100000074, 2.4081352800000744, 47.01742481000007, 2.4864407600000735, 18.03997329000007, 22.04458297000007, 28.12173709000007, 23.485632170000073, 14.268703500000072, 13.352259420000074, 16.59288402000007, 38.034107040000066, 14.012114720000067, 15.607213830000067, -8.432877749999939, 17.450421770000062, 23.78528194000006, 7.0922273600000665, 16.623695640000065, 17.950654030000067, 10.258738000000065, -0.6337544899999372, 14.000200530000065, 3.192828640000066, 12.436003680000063, 0.4594682500000644, 8.371062070000065, 3.0819798800000626, 8.660814750000064, 26.726350740000065, -15.64723807999993, 15.241783950000062, 13.349006410000065, 10.198783810000066, 13.762121660000062, 22.374595820000067, 7.866697790000067, 20.685443700000064, 18.126954480000066, 15.565464900000059, 10.83776362000006, 3.920814640000067, 
2.672081510000062, 2.8761885700000605, 13.171929110000065, 9.224771520000061, 6.280583370000066, 17.654173980000067, 9.729641310000062, 12.977270580000067, 17.799376350000067, 5.827384260000066, 19.412444490000063, 13.519597180000062, 4.305749610000063, 1.8476174900000615, 8.942415540000063, 19.146059790000066, 5.8082764800000675, 7.278090460000065, -5.433847759999942, 11.843821820000066, 10.303513970000061, 23.441232390000067, 4.784711060000063, 18.991671370000063, 13.855040220000063, 6.782563210000063, 13.193177960000064, 17.377241960000063, -8.37574966999994, -3.74279662999993, 20.439050880000075, 17.197810430000068, 16.155796340000073, -8.705464359999922, -2.083646509999923, 9.914683100000076, 20.983291050000076, 4.47362547000008, 9.015405400000077, 14.02974143000008, 29.576457900000076, 7.796288300000079, 2.387310960000079, 16.318896380000076, 9.21296713000008, 15.719781550000079, 26.15387391000008, 6.967282640000079, 14.035248910000078, 3.1842389800000817, 14.42654969000008, 23.107397770000077, 0.8611200200000795, 23.00193277000008, 1.6391808500000806, 9.966113180000079, -0.8915598799999245, -12.20668974999991, 29.27304363000009, 7.091926290000092, 11.334845590000086, 22.84314930000008, 31.990966260000086, 10.365074160000084, -16.49614974999991, 15.65064105000009, 22.86380019000009, 4.75549665000009, 11.736605310000094, 16.348175040000093, -1.3581112599999088, 1.3387319100000923, 8.978455630000092, 10.121620120000095, 23.001190360000095, 9.72668716000009, 12.890643150000095, 4.41409158000009, 3.268203810000095, 13.978165670000095, 8.461878550000094, 14.241990450000095, -6.500655849999909, 12.756844370000096, -0.6181367199999102, -7.439419959999903, 9.350506320000093, 3.9972431800000905, 7.590232100000094, 5.588317980000092, 23.88791489000009, 13.579401850000096, 13.48219312000009, -5.437612659999907, 4.572535350000095, 13.400606860000096, -2.1420174699999066, -5.828820509999915, 13.745162550000089, 5.569342720000087, 17.059674660000084, 14.210151770000088, 
16.246753450000085, 10.513171560000089, 9.477816250000082, 5.423494510000083, 13.521818080000088, 22.665729250000084, -4.051994949999909, 21.905947000000083, 2.486512700000084, 25.490604610000084, 9.530861990000083, 17.331651510000086, 14.175881910000086, 3.0922212100000834, 1.305680470000084, 7.708067500000084, 27.157997670000086, 25.840355630000083, 7.925789590000086, 27.432942960000084, 7.481384820000088, 7.404527690000087, 1.782848080000086, 12.513459490000088, 10.382287550000086, -6.7155106599999215, -8.029506989999916, 19.843708310000082, 19.676844150000086, 5.118581270000085, 22.338160330000086, 15.890082700000093, 5.896897850000087, -1.8854813799999164, 25.597184340000084, 13.591022070000086, 22.893731930000087, 17.120363390000087, 9.557333470000088, 10.069126630000085, 9.242023550000084, 6.838190790000084, 22.777622870000087, 25.397184020000083, 13.347744620000086, 0.8304557700000856, -0.07518005999991573, 12.68628250000009, 14.631211520000086, 20.411431440000086, 16.739527210000084, 15.025772130000085, 14.133595020000087, 2.398356170000085, 16.274279610000086, 7.845849040000083, 9.134059990000083, 10.988029370000085, 8.422250440000084, 12.214015310000086, 14.904721490000085, 10.474641110000086, -10.051987309999916, 2.778122620000076, 6.870670040000078, 19.80405348000008, 20.487551290000077, 6.397680900000076, 13.481915850000078, 6.67871755000008, 29.05751111000008, 6.212801780000078, 14.428679990000077, 0.6065350300000816, 18.153211950000077, 8.79688825000008, 2.7878053600000783, 20.174291600000082, 7.85327575000008, 34.872113060000075, 8.280112660000075, 7.742328700000073, -6.910178439999925, -8.704668069999926, -0.07241696999992087, 0.14587512000007763, 1.282531480000081, 8.520886170000075, 15.404972370000081, 12.65366729000008, 6.71409777000008, 2.6522461600000753, -11.72537420999992, 13.445295630000075, 21.128835800000076, -2.342398099999926, 11.665473320000075, 14.464216690000079, -4.426065779999924, 7.61829206000008, -3.8667710599999197, 
8.789991840000077, 27.51581723000008, 12.829383050000075, 20.17409061000008, 5.98723006000008, 1.36200936000008, 9.623738140000079, 15.837219200000085, 23.218744980000075, -3.405143709999919, 15.652608810000075, 13.666963060000079, 14.331973600000083, -2.0378780299999235, 20.43954853000008, 14.236904190000075, 20.268174730000077, 2.6387320600000805, 1.4428615600000754, 19.31744006000008, 21.59271729000008, 23.90896850000008, 17.506891380000077, 15.005966130000076, -0.5807534799999203, 9.845165810000076, 15.992804100000079, 26.471756090000078, 0.006696820000080095, 16.23495356000008, 12.029009200000075, 8.419745400000075, -9.599013489999926, 16.87394771000008, -0.6730179099999205, -12.591449589999911, 22.88364980000008, 20.362607530000083, 7.359210340000082, -1.3293256899999122, 33.92200720000009, -3.7207357699999086, 26.19477145000008, 31.701295500000082, 5.1019746300000826, 8.465151260000084, 19.026264230000088, 9.667253340000087, -6.1877090799999195, 21.462798080000084, 13.302390700000089, 6.987941080000084, 16.517635330000083, -1.3024404999999177, 15.073768850000086, 3.3903450800000883, -12.653481719999917, 27.368218560000088, -9.140633079999915, 0.2685219600000863, -6.476751049999919, 22.86961698000009, 5.230860150000083, -6.771491839999911, 11.937902080000086, 1.3056358000000827, 1.046297860000088, 3.300673940000088, 14.499600290000082, 27.784779920000084, 4.915992870000082, 12.755945330000088, 30.099039480000084, 9.215362560000088, 10.206760990000085, 9.393040220000088, 3.452675510000084, 31.150170930000087, 6.956269870000085, 2.9555962400000837, 21.32166698000009, 20.307904410000084, 6.195153640000086, 10.622365200000083, 5.270282070000086, 17.189754430000086, 22.139588420000088, 0.9115381100000874, 15.508509890000084, 8.573967150000087]
(BO) Backorders for node 2:[0, 0, 19.19040905, 1.8560166199999912, 0, 0, 0, 0, 0, 0, 0, 1.384173969999992, 0, 0, 0, 1.7492203399999937, 0, 0, 0.36212937999999184, 6.640644140000006, 0, 0, 1.9206440600000008, 4.498421629999996, 12.16493242, 0, 1.2274013600000018, 10.238519420000003, 7.305505159999996, 0, 0, 0, 0, 0.36735493999999846, 1.574523339999999, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.5731003699999988, 0, 0, 0, 3.703954860000003, 0, 4.195951239999999, 0, 2.771963249999999, 0, 0, 8.582602010000002, 0, 0, 1.6062320699999972, 9.151654759999992, 0, 3.197368709999992, 3.9814621699999933, 0, 0, 0, 0, 0, 0, 0, 0.8401078599999963, 0, 0, 0, 0, 2.6783857199999943, 0, 0, 0, 0, 0, 0, 0.8676907299999925, 11.759144499999984, 1.9331239499999953, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.349495989999994, 0, 0, 0, 0, 0.39444216999999426, 0, 0, 0, 0, 0, 2.08592410999999, 3.389589709999985, 0, 0, 0, 0, 0, 0, 0, 2.146462389999982, 0.3792032199999795, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.622913359999977, 1.1094645699999788, 0, 0, 0, 4.877898369999976, 11.457391119999983, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.36938513999998435, 0, 0, 0, 0, 0, 0, 0, 0, 17.063805469999984, 0, 3.462037099999975, 0, 0, 0, 0, 0, 5.2447196799999745, 0, 5.648439239999973, 0, 0, 8.011653249999966, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.8488880899999742, 0, 0, 0, 0, 0, 0, 0.07572130999997029, 0, 0, 4.862472499999967, 0, 0, 0, 0, 0, 0, 21.18103789999997, 0, 9.801421079999962, 0, 0, 0, 0, 4.594826419999968, 0, 0, 13.837648649999963, 0, 0, 0, 21.16365688999995, 0, 0, 6.895934329999953, 6.994125019999942, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11.25693235999993, 0, 0, 0, 0, 0, 0.7440720599999366, 1.0727154799999354, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9.354029919999938, 1.9310938499999395, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12.250607609999939, 0, 0, 0, 0, 0, 0, 4.560163309999929, 0, 0, 0, 0, 0, 0, 2.6297052199999342, 0, 0, 0, 0.8360531799999364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7.5065122899999395, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 4.678048029999935, 0, 0, 0, 0, 0, 0, 1.7062169499999413, 0, 0, 0.19544583999994103, 5.571721719999935, 0, 0, 0, 5.397902919999943, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.6634129999999345, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2.7783959999999297, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.150312569999933, 0, 3.3638321999999405, 0, 0, 0, 0, 2.285934519999927, 0, 0, 0, 0, 0, 0, 1.7006035299999311, 1.5822768899999318, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.9485371699999305, 0, 0, 0, 18.06142160999993, 0, 0, 0, 0.7678085999999311, 0, 0, 0, 8.331812469999932, 5.093721779999925, 0.2513335099999239, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.893708389999922, 4.249068239999929, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.656106489999928, 0, 0, 0, 6.1779224799999355, 2.28887993999993, 3.683600669999933, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7.016529089999942, 0, 0, 0, 2.3877012399999344, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23.683556609999926, 0, 0, 0, 7.521028009999924, 0, 2.870763029999921, 0, 0, 4.743603009999923, 0, 0, 0, 10.790887659999918, 0, 0, 0, 0, 0, 0, 0, 0, 3.439401469999922, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.627572189999924, 0, 0, 0, 0, 0, 0.703858629999921, 0, 0, 0, 0, 0, 0, 3.3157481499999193, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5000651199999275, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.18864085999992852, 0, 0, 0, 0, 0, 0, 0, 5.282238119999931, 0, 0, 0, 0, 0, 0, 2.773863299999917, 0, 0, 0.643826059999931, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.63120928999993, 0, 0, 0, 0, 3.6322605199999316, 0, 0, 0, 0, 0, 11.280296579999927, 5.990732839999936, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.432877749999939, 0, 0, 0, 0, 0, 0, 0.6337544899999372, 0, 0, 0, 0, 0, 0, 0, 0, 15.64723807999993, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 5.433847759999942, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8.37574966999994, 3.74279662999993, 0, 0, 0, 8.705464359999922, 2.083646509999923, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.8915598799999245, 12.20668974999991, 0, 0, 0, 0, 0, 0, 16.49614974999991, 0, 0, 0, 0, 0, 1.3581112599999088, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.500655849999909, 0, 0.6181367199999102, 7.439419959999903, 0, 0, 0, 0, 0, 0, 0, 5.437612659999907, 0, 0, 2.1420174699999066, 5.828820509999915, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4.051994949999909, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.7155106599999215, 8.029506989999916, 0, 0, 0, 0, 0, 0, 1.8854813799999164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07518005999991573, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10.051987309999916, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.910178439999925, 8.704668069999926, 0.07241696999992087, 0, 0, 0, 0, 0, 0, 0, 11.72537420999992, 0, 0, 2.342398099999926, 0, 0, 4.426065779999924, 0, 3.8667710599999197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.405143709999919, 0, 0, 0, 2.0378780299999235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5807534799999203, 0, 0, 0, 0, 0, 0, 0, 9.599013489999926, 0, 0.6730179099999205, 12.591449589999911, 0, 0, 0, 1.3293256899999122, 0, 3.7207357699999086, 0, 0, 0, 0, 0, 0, 6.1877090799999195, 0, 0, 0, 0, 1.3024404999999177, 0, 0, 12.653481719999917, 0, 9.140633079999915, 0, 6.476751049999919, 0, 0, 6.771491839999911, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
(TC) Total Cost for node 2:[191.7624973, 78.52610339999998, 479.76022624999996, 46.40041549999978, 232.98895580000007, 84.68629170000007, 11.208494700000102, 172.9876971000001, 143.0005768000001, 88.31042190000005, 165.12934380000004, 34.6043492499998, 15.3994165000001, 18.637772600000062, 235.4079935000001, 43.73050849999984, 324.3343157000001, 234.96188270000007, 9.053234499999796, 166.01610350000016, 81.38498650000002, 203.47975239999997, 48.01610150000002, 112.4605407499999, 304.1233105, 94.515429, 30.685034000000044, 255.96298550000006, 182.6376289999999, 92.91058479999997, 103.33739790000003, 83.48442799999994, 209.53343259999997, 9.183873499999962, 39.36308349999997, 284.37864750000006, 70.02471500000001, 140.33030060000002, 93.21330359999997, 229.40767520000003, 27.25434530000001, 321.9033245, 13.268761699999985, 324.92687939999996, 170.07861189999997, 95.16744180000003, 29.750997399999974, 2.2282443999999657, 39.32750924999997, 177.38791249999997, 198.84305189999998, 188.58778460000002, 92.59887150000009, 81.95421240000002, 104.89878099999999, 149.09212660000003, 69.29908124999997, 109.45311930000003, 125.45431630000003, 214.56505025000007, 89.51946620000001, 83.90963589999998, 40.15580174999993, 228.7913689999998, 52.5018149000001, 79.9342177499998, 99.53655424999982, 201.7647823000001, 35.25720790000008, 328.4941152, 199.95467830000007, 167.37441710000002, 50.11000410000008, 285.9944180000001, 21.002696499999907, 293.2425951000001, 80.87037190000004, 315.3482736000001, 198.9759234000001, 66.95964299999986, 98.81316000000005, 5.279326800000064, 164.25613710000007, 164.90380710000005, 36.81185170000006, 170.9870319000001, 21.692268249999813, 293.9786124999996, 48.32809874999988, 177.78799120000008, 30.532829900000067, 59.11533250000005, 101.73230810000007, 158.70248220000008, 63.65083410000004, 141.9551313000001, 256.2426691000001, 71.24176100000014, 179.05958360000005, 4.034744300000099, 66.18060710000009, 149.34259150000008, 236.27936010000008, 
53.45835000000008, 33.73739974999985, 74.22686450000008, 91.30492610000005, 116.38327340000004, 375.49769100000003, 9.861054249999857, 222.02633550000004, 100.25726640000005, 86.25481930000007, 78.51007480000007, 281.4784681000001, 52.14810274999975, 84.73974274999962, 91.54324070000018, 224.51936100000023, 12.037093200000228, 61.265909200000195, 137.7579358000002, 132.2022843000002, 24.586908700000194, 53.661559749999554, 9.480080499999488, 235.88649670000024, 258.4994553000002, 140.6867114000002, 178.84255590000024, 60.49648000000019, 116.55637370000022, 137.30190390000018, 183.9832018000002, 87.65742340000024, 225.64585770000022, 41.52287090000023, 127.34524780000022, 50.28357820000018, 37.55098270000019, 152.76584890000024, 181.3399449000002, 175.60387840000018, 15.572833999999425, 27.73661424999947, 90.66310810000019, 125.45042190000018, 57.14961080000023, 121.94745924999938, 286.4347779999996, 220.0598957000001, 42.894400400000166, 42.53844020000017, 239.57610970000013, 206.62548780000014, 146.40800770000013, 213.78535340000013, 231.84749910000016, 68.32110160000013, 170.29387710000015, 47.45224910000012, 9.234628499999609, 148.5589493000001, 40.720913300000134, 45.024980800000165, 301.02652650000016, 211.5986329000001, 46.96725160000014, 94.65379530000014, 96.28454270000013, 426.5951367499996, 10.963717800000268, 86.55092749999937, 74.02643190000028, 144.47234620000032, 203.86533030000032, 196.26834380000025, 260.06606350000027, 131.11799199999936, 17.40726190000025, 141.21098099999932, 32.320011000000335, 59.21299640000029, 200.29133124999916, 10.101627800000301, 96.42559310000031, 138.4792153000003, 100.86146150000026, 131.72099030000027, 245.7928434000003, 209.14755150000025, 98.39728640000025, 67.88790740000032, 56.56272460000025, 71.22220224999936, 50.36488270000028, 176.53025920000027, 46.00463160000025, 2.4345800000002527, 125.51713530000029, 211.1789332000003, 1.8930327499992572, 76.14305900000026, 28.267796400000265, 121.56181249999918, 
35.084602900000306, 124.70185630000032, 157.8547958000003, 73.11897700000038, 9.510194000000283, 75.3035922000003, 529.5259474999992, 205.9608922000004, 245.03552699999904, 50.95617110000035, 71.24109190000034, 170.62531640000037, 122.01881570000033, 114.87066049999922, 35.686096100000384, 9.036269100000354, 345.94121624999906, 138.83640510000043, 291.5221129000004, 86.51049710000045, 529.0914222499987, 6.7991704000004916, 81.46396040000049, 172.39835824999884, 174.85312549999855, 208.0396651000006, 92.1810279000006, 3.8173185000005816, 151.08813460000056, 176.64810870000053, 75.14928880000056, 52.18426620000059, 106.07248290000058, 146.29140900000053, 85.91295040000055, 33.61279320000058, 281.42330899999826, 226.30386660000062, 275.8622319000006, 185.54208220000064, 127.19768090000066, 91.42393690000063, 18.601801499998416, 26.817886999998386, 153.86039650000066, 66.89633460000067, 52.805035100000666, 268.23597950000067, 228.43168680000062, 152.09125170000064, 7.044171400000607, 5.0324458000006445, 132.6564764000006, 141.55095990000063, 168.9395894000006, 176.0935727000006, 41.394178100000616, 170.9495026000006, 87.32987490000063, 233.85074799999845, 48.27734624999849, 26.77801070000065, 161.7674090000006, 91.82792760000062, 117.87734570000062, 181.06942040000064, 153.60189950000063, 174.99304020000062, 66.1220380000006, 52.61306630000064, 306.26519024999845, 112.72931740000061, 280.8606878000007, 150.56707390000065, 177.98626820000067, 0.4568956000006352, 73.37809700000072, 114.00408274999822, 217.93035600000067, 30.34415460000062, 91.98909070000063, 162.21413350000063, 128.2374201000006, 183.66678030000065, 65.74263049999836, 115.67446360000062, 60.19946300000065, 40.03066600000061, 20.90132949999841, 31.66554270000063, 229.80686700000064, 103.99540660000063, 29.523063500000646, 190.87837930000063, 123.74420480000062, 211.41086280000067, 161.98160470000062, 36.84817120000062, 69.61212730000064, 221.75173250000063, 187.66280724999848, 276.1138161000007, 
4.744944700000673, 12.989024000000597, 154.9412570000007, 172.30677320000063, 79.40122130000063, 114.98150980000062, 55.55953780000067, 104.47153450000066, 122.38293710000065, 164.7633978000007, 116.95120074999838, 2.377195200000557, 133.83132020000056, 195.16792020000054, 47.150178300000576, 39.20412890000058, 144.02975390000057, 42.65542374999853, 20.455833300000563, 14.56989160000056, 4.886145999998526, 139.2930429999984, 309.22497030000056, 51.658526900000616, 125.54770820000066, 134.94757299999858, 132.26038400000056, 182.2625255000006, 60.386449900000656, 128.48536520000067, 103.93747280000063, 198.43533600000063, 149.57402330000065, 255.49101830000063, 5.975733200000661, 175.71405220000065, 39.68831260000066, 3.797594900000618, 75.05789010000065, 151.34442250000063, 44.38814990000061, 190.21199320000065, 130.8802670000007, 16.585324999998363, 25.98751170000064, 71.39124770000066, 111.35873240000066, 46.58500610000061, 162.57737220000067, 47.18685510000064, 37.68018500000068, 141.15260760000064, 116.91111820000067, 46.77062730000067, 25.656228200000655, 56.449317200000664, 137.9874388000007, 156.82428220000062, 206.2221766000006, 224.05828280000065, 155.36092620000062, 336.27128000000073, 174.10615950000067, 73.71111540000065, 229.39883090000066, 201.4775790000006, 118.81606830000067, 57.54337580000062, 48.15038890000061, 309.7025986000006, 59.10744610000066, 201.75331960000065, 138.17790990000063, 47.97839280000062, 178.72688410000066, 69.45989999999824, 123.27612590000065, 22.26866760000064, 160.88291540000063, 177.65030270000062, 139.3777383000006, 182.9044714000006, 150.1521988000006, 70.79379070000066, 135.57671820000067, 259.3341494000006, 225.11999220000064, 273.1176461000006, 143.59320240000068, 99.49793790000065, 235.5966270000007, 248.15616330000063, 131.61590400000065, 203.57585210000067, 86.61921470000067, 123.73719010000066, 139.78120100000064, 47.28159120000065, 78.91580300000058, 118.03994770000067, 52.188501000000684, 35.81347810000061, 
325.3298385000006, 170.25834740000064, 58.17659560000067, 28.757814249998326, 287.0347081000006, 84.0958049999985, 294.6550459000007, 165.36433420000068, 82.46334630000071, 85.04194790000071, 57.14836299999817, 314.2650621000007, 3.1701182000006867, 86.2000869000007, 91.20589440000074, 3.470980600000715, 90.41918680000073, 42.51508824999828, 39.556922249998294, 211.0920777000007, 79.46080110000068, 260.5204347000007, 153.1367768000007, 96.55158460000074, 39.289099700000705, 189.3644963000007, 110.00666740000071, 203.30153510000073, 23.713429249998264, 321.54735230000074, 82.1542323000007, 294.74146780000075, 451.53554024999823, 218.9929477000007, 153.76722610000076, 227.08288840000077, 19.195214999998278, 75.52592830000073, 173.70416820000074, 236.4431731000007, 208.2953117499983, 127.34304449999811, 6.283337749998097, 87.02185550000081, 9.153640700000807, 197.9597794000008, 220.0872513000008, 242.87219850000076, 135.80388660000074, 309.8675488000008, 185.84528360000078, 98.5053621000008, 222.34270974999805, 106.22670599999822, 4.910894700000696, 147.6800377000007, 6.337053200000682, 177.65502560000073, 241.70664440000076, 187.35257390000072, 115.11041410000068, 5.846697200000719, 25.242930800000707, 123.9670657000007, 91.4026622499982, 142.08530930000074, 151.64869960000067, 81.34829600000074, 154.4480619999984, 57.22199849999825, 92.09001674999833, 195.59226160000065, 75.79690040000067, 191.55312390000063, 19.611611300000646, 31.561035000000643, 69.90578720000066, 177.7062375000006, 81.48063590000064, 82.14542100000067, 92.04792750000067, 219.7854929000006, 81.34504980000067, 80.63963770000065, 179.47655040000063, 77.66139580000065, 231.81726300000065, 99.42647140000062, 256.49255320000066, 2.3241472000006524, 29.40352450000063, 264.02896300000066, 124.27350300000057, 139.8098208000006, 273.2175820000006, 290.6857407000006, 57.795671400000614, 156.29055760000062, 165.29641050000066, 39.46888550000061, 39.14955070000062, 128.91355980000066, 177.50608150000062, 
100.95381840000066, 175.41322724999856, 76.46297890000064, 71.38465110000062, 45.843111600000626, 59.69253099999836, 276.88519610000066, 77.52532630000061, 88.8016936000006, 212.79079270000062, 66.42865240000063, 84.37443830000063, 280.70824260000063, 129.26733650000068, 50.42452950000062, 57.493722400000635, 90.65743450000063, 314.00976180000066, 74.27464490000062, 177.09120810000064, 192.96489680000064, 239.98657820000062, 41.43939920000065, 106.57851490000063, 168.1781665000006, 235.49847760000063, 50.05763430000066, 106.49061770000067, 592.0889152499981, 262.80660930000073, 223.2470155000007, 78.32782770000073, 188.0257002499981, 20.362809300000748, 71.76907574999802, 137.3637225000008, 218.67199500000078, 118.59007524999808, 165.67855580000077, 56.2276492000008, 234.15357720000083, 269.7721914999979, 111.42479270000081, 186.87443720000076, 196.22779070000078, 9.69359130000079, 246.44356820000075, 122.3485618000008, 198.32460160000073, 116.55557200000082, 85.98503674999805, 135.52755180000077, 102.5924056000008, 1.799470800000762, 119.22067070000075, 117.30296030000076, 19.045440100000803, 169.17873830000076, 71.99864670000075, 97.70685820000075, 172.47770230000077, 49.789975100000774, 40.6893047499981, 166.64616570000078, 66.92882890000078, 58.96587860000075, 215.66426190000078, 53.80136080000078, 17.596465749998025, 158.21494810000075, 90.01275530000079, 72.25002260000075, 300.6831941000008, 134.05907850000077, 166.04531560000078, 82.89370374999798, 150.5193562000008, 76.2178688000008, 209.7693462000008, 119.4100256000008, 37.052978300000774, 40.80940720000079, 248.8387824000008, 175.6139506000008, 267.56335840000077, 123.27977440000076, 430.22289970000077, 12.501627999998188, 254.68250120000076, 177.62151850000078, 129.80315230000076, 107.35685750000073, 153.03610030000073, 92.9675450000007, 220.7865106000007, 264.1672151000007, 194.45519390000072, 20.162200400000714, 4.716021499998213, 92.08714440000072, 148.67012010000076, 166.3410665000007, 
48.968839300000724, 106.86536990000072, 176.23450490000073, 171.91765480000072, 132.05595299999828, 120.02275550000071, 170.4901409000007, 78.2677939000007, 20.947746400000682, 211.6060383000007, 154.84741550000075, 69.34658249999792, 36.674697300000716, 41.45154720000072, 16.095651499998276, 99.36498720000074, 192.01733390000072, 171.1476726000007, 53.807805900000716, 156.3472171000007, 129.7194114000007, 144.27487380000073, 21.12896180000071, 83.65898380000068, 3.142401400000736, 0.4331663000007069, 48.51488950000068, 180.6962207000007, 168.3842731000007, 125.33337430000074, 104.13579370000072, 115.78023224999825, 25.939162700000722, 278.6596327000007, 44.00742640000068, 175.77111250000073, 90.80651299999829, 196.23282260000076, 111.60216310000074, 62.30239640000072, 228.9691430000007, 38.7068909000007, 282.00741449999816, 149.7683209999984, 293.84602880000074, 160.42110210000072, 289.6285668000008, 114.20910020000076, 115.43351960000074, 77.21120830000068, 71.87438810000074, 208.22099100000074, 24.081352800000744, 470.17424810000074, 24.864407600000735, 180.39973290000069, 220.44582970000073, 281.21737090000073, 234.85632170000073, 142.68703500000072, 133.52259420000075, 165.9288402000007, 380.34107040000066, 140.12114720000068, 156.07213830000066, 210.82194374999847, 174.50421770000062, 237.8528194000006, 70.92227360000066, 166.23695640000065, 179.50654030000067, 102.58738000000065, 15.84386224999843, 140.00200530000063, 31.92828640000066, 124.36003680000063, 4.594682500000644, 83.71062070000065, 30.819798800000626, 86.60814750000064, 267.26350740000066, 391.18095199999823, 152.41783950000064, 133.49006410000067, 101.98783810000066, 137.62121660000062, 223.74595820000067, 78.66697790000067, 206.85443700000064, 181.26954480000066, 155.65464900000057, 108.37763620000061, 39.20814640000067, 26.72081510000062, 28.761885700000605, 131.71929110000065, 92.24771520000061, 62.80583370000066, 176.54173980000067, 97.29641310000062, 129.77270580000067, 177.99376350000068, 
58.27384260000066, 194.12444490000064, 135.19597180000062, 43.057496100000634, 18.476174900000615, 89.42415540000063, 191.46059790000066, 58.082764800000675, 72.78090460000065, 135.84619399999855, 118.43821820000066, 103.03513970000061, 234.41232390000067, 47.84711060000063, 189.91671370000063, 138.55040220000063, 67.82563210000063, 131.93177960000065, 173.77241960000063, 209.39374174999853, 93.56991574999824, 204.39050880000076, 171.97810430000067, 161.55796340000074, 217.63660899999806, 52.09116274999808, 99.14683100000076, 209.83291050000076, 44.7362547000008, 90.15405400000077, 140.2974143000008, 295.76457900000077, 77.96288300000079, 23.873109600000788, 163.18896380000075, 92.12967130000081, 157.1978155000008, 261.5387391000008, 69.67282640000079, 140.35248910000078, 31.842389800000817, 144.2654969000008, 231.07397770000077, 8.611200200000795, 230.01932770000082, 16.391808500000806, 99.66113180000079, 22.288996999998112, 305.16724374999774, 292.73043630000086, 70.91926290000092, 113.34845590000086, 228.43149300000078, 319.90966260000084, 103.65074160000084, 412.4037437499977, 156.5064105000009, 228.63800190000092, 47.5549665000009, 117.36605310000094, 163.48175040000092, 33.95278149999772, 13.387319100000923, 89.78455630000092, 101.21620120000095, 230.01190360000095, 97.2668716000009, 128.90643150000096, 44.1409158000009, 32.68203810000095, 139.78165670000095, 84.61878550000094, 142.41990450000094, 162.5163962499977, 127.56844370000096, 15.453417999997754, 185.98549899999756, 93.50506320000093, 39.972431800000905, 75.90232100000094, 55.88317980000092, 238.8791489000009, 135.79401850000096, 134.8219312000009, 135.94031649999766, 45.72535350000095, 134.00606860000096, 53.550436749997665, 145.72051274999785, 137.4516255000009, 55.69342720000087, 170.59674660000084, 142.1015177000009, 162.46753450000085, 105.13171560000089, 94.77816250000082, 54.23494510000083, 135.21818080000088, 226.65729250000084, 101.29987374999772, 219.05947000000083, 24.86512700000084, 
254.90604610000082, 95.30861990000083, 173.31651510000086, 141.75881910000086, 30.922212100000834, 13.05680470000084, 77.08067500000084, 271.5799767000009, 258.40355630000084, 79.25789590000086, 274.3294296000008, 74.81384820000088, 74.04527690000087, 17.82848080000086, 125.13459490000088, 103.82287550000086, 167.88776649999804, 200.7376747499979, 198.43708310000082, 196.76844150000085, 51.18581270000085, 223.38160330000085, 158.90082700000093, 58.96897850000087, 47.13703449999791, 255.97184340000084, 135.91022070000088, 228.93731930000087, 171.20363390000085, 95.57333470000088, 100.69126630000085, 92.42023550000084, 68.38190790000084, 227.77622870000087, 253.97184020000083, 133.47744620000086, 8.304557700000856, 1.8795014999978932, 126.8628250000009, 146.31211520000085, 204.11431440000086, 167.39527210000085, 150.25772130000087, 141.33595020000087, 23.98356170000085, 162.74279610000087, 78.45849040000083, 91.34059990000083, 109.88029370000085, 84.22250440000084, 122.14015310000086, 149.04721490000085, 104.74641110000086, 251.29968274999788, 27.78122620000076, 68.70670040000078, 198.04053480000078, 204.87551290000079, 63.976809000000756, 134.81915850000078, 66.7871755000008, 290.57511110000075, 62.12801780000078, 144.28679990000077, 6.065350300000816, 181.53211950000076, 87.9688825000008, 27.878053600000783, 201.74291600000083, 78.5327575000008, 348.7211306000007, 82.80112660000074, 77.42328700000073, 172.75446099999812, 217.61670174999813, 1.8104242499980217, 1.4587512000007763, 12.825314800000811, 85.20886170000075, 154.0497237000008, 126.5366729000008, 67.1409777000008, 26.522461600000753, 293.134355249998, 134.45295630000075, 211.28835800000076, 58.55995249999815, 116.65473320000075, 144.6421669000008, 110.6516444999981, 76.1829206000008, 96.66927649999799, 87.89991840000077, 275.15817230000084, 128.29383050000075, 201.74090610000079, 59.8723006000008, 13.6200936000008, 96.23738140000079, 158.37219200000084, 232.18744980000076, 85.12859274999798, 
156.52608810000075, 136.6696306000008, 143.31973600000083, 50.94695074999809, 204.39548530000081, 142.36904190000075, 202.68174730000078, 26.387320600000805, 14.428615600000754, 193.1744006000008, 215.9271729000008, 239.08968500000077, 175.06891380000076, 150.05966130000076, 14.518836999998008, 98.45165810000076, 159.9280410000008, 264.71756090000076, 0.06696820000080095, 162.3495356000008, 120.29009200000075, 84.19745400000075, 239.97533724999815, 168.7394771000008, 16.825447749998013, 314.7862397499978, 228.8364980000008, 203.62607530000082, 73.59210340000082, 33.233142249997805, 339.22007200000087, 93.01839424999771, 261.9477145000008, 317.01295500000083, 51.019746300000826, 84.65151260000084, 190.2626423000009, 96.67253340000087, 154.692726999998, 214.62798080000084, 133.0239070000009, 69.87941080000084, 165.1763533000008, 32.56101249999794, 150.73768850000084, 33.90345080000088, 316.33704299999795, 273.6821856000009, 228.5158269999979, 2.6852196000008632, 161.91877624999796, 228.6961698000009, 52.30860150000083, 169.28729599999775, 119.37902080000086, 13.056358000000827, 10.46297860000088, 33.00673940000088, 144.9960029000008, 277.84779920000085, 49.15992870000082, 127.55945330000088, 300.99039480000084, 92.15362560000088, 102.06760990000085, 93.93040220000088, 34.52675510000084, 311.50170930000087, 69.56269870000085, 29.555962400000837, 213.2166698000009, 203.07904410000083, 61.95153640000086, 106.22365200000083, 52.702820700000856, 171.89754430000085, 221.39588420000086, 9.115381100000874, 155.08509890000084, 85.73967150000087]
###Markdown
Base Stock Level Plots
###Code
# Sweep node 0's base-stock level from 40 to 100 in steps of 5 while holding
# nodes 1 and 2 fixed at 60, then plot total supply-chain cost per level.
# (A dead initializer of baseStockTestDict was removed: the loop overwrote it
# on every iteration before it was ever read.)
totalCostList = []
baseStockList = []
for i in range(40, 101, 5):
    totalCost = 0
    baseStockTestDict = {0: i, 1: 60, 2: 60}
    # NOTE(review): levels 45 and 50 looked anomalous ("funky") in earlier runs.
    myInvSim.playSimulation(gameType = "multiNodeVerify", BSLevel=baseStockTestDict, demandMethod="useFileDemand", fileDemand=df["IO"], connectionCase="and", printOut=False)
    # Total cost is summed across every node in the network.
    for node in myInvSim.nodeDict.values():
        totalCost += sum(node.costRecord)
    totalCostList.append(totalCost)
    baseStockList.append(i)
plt.scatter(baseStockList, totalCostList)
plt.plot(baseStockList, totalCostList)
plt.title("Total Cost vs Base Stock Level for Changing Node 0 Base Stock Level")
plt.xlabel("Base Stock Level")
plt.ylabel("Total Cost")
plt.show()
# Sweep the shared base-stock level of nodes 1 and 2 from 40 to 100 in steps
# of 5 while holding node 0 fixed at 60; plot total supply-chain cost.
# (A dead initializer of baseStockTestDict was removed: the loop overwrote it
# on every iteration before it was ever read.)
totalCostList = []
baseStockList = []
for i in range(40, 101, 5):
    totalCost = 0
    baseStockTestDict = {0: 60, 1: i, 2: i}
    # NOTE(review): levels 45 and 50 looked anomalous ("funky") in earlier runs.
    myInvSim.playSimulation(gameType = "multiNodeVerify", BSLevel=baseStockTestDict, demandMethod="useFileDemand", fileDemand=df["IO"], connectionCase="and", printOut=False)
    # Total cost is summed across every node in the network.
    for node in myInvSim.nodeDict.values():
        totalCost += sum(node.costRecord)
    totalCostList.append(totalCost)
    baseStockList.append(i)
plt.scatter(baseStockList, totalCostList)
plt.plot(baseStockList, totalCostList)
plt.title("Total Cost vs Base Stock Level for Changing Node 1 and 2 Base Stock Level")
plt.xlabel("Base Stock Level")
plt.ylabel("Total Cost")
plt.show()
# FINISHED SEP 22nd, 2020
# 2-D sweep: retailer (node 0) level i in 20..100, supplier nodes 1 and 2 both
# at j/2 for j in 20..100; total cost is shown as a 3-D scatter.
# (A dead initializer of baseStockTestDict was removed: the loops overwrote it
# before it was ever read.)
totalCostList = []
retailerBaseStockList = []
supplierBaseStockList = []
for j in range(20, 101, 5):
    for i in range(20, 101, 5):
        totalCost = 0
        baseStockTestDict = {0: i, 1: j/2, 2: j/2}
        myInvSim.playSimulation(gameType = "multiNodeVerify", BSLevel=baseStockTestDict, demandMethod="useFileDemand", fileDemand=df["IO"], connectionCase="and", printOut=False)
        for node in myInvSim.nodeDict.values():
            totalCost += sum(node.costRecord)
        totalCostList.append(totalCost)
        retailerBaseStockList.append(i)
        supplierBaseStockList.append(j/2)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# The scatter handle was assigned to an unused `surf` variable; it is not needed.
ax.scatter(np.array(retailerBaseStockList), np.array(supplierBaseStockList), np.array(totalCostList))
ax.set_xlabel('Retailer Base Stock Level')
ax.set_ylabel('Warehouse Base Stock Level')
ax.set_zlabel('Total Supply Chain Cost')
plt.show()
###Output
_____no_output_____
###Markdown
Find the minimum value of basic 3-node simulation (Node 0 retailer, node 1 and 2 wholesaler
###Code
# Find the minimum of the above simulation
# Scan every sweep result, remembering the cheapest configuration seen so far
# together with the base-stock levels that produced it.
minTC = float('inf')
bestRetailerBS = 0
bestWholesalerBS = 0
for idx, cost in enumerate(totalCostList):
    if cost < minTC:
        minTC = cost
        bestRetailerBS = retailerBaseStockList[idx]
        bestWholesalerBS = supplierBaseStockList[idx]
print("Minimum Cost for this simulation")
print("Lowest Cost found: " + str(minTC))
print("Associated Retailer Base Stock Level: " + str(bestRetailerBS))
print("Associated Wholesaler Base Stock Level: " + str(bestWholesalerBS))
###Output
Minimum Cost for this simulation
Lowest Cost found: 511109.6726379993
Associated Retailer Base Stock Level: 70
Associated Wholesaler Base Stock Level: 50.0
###Markdown
PRINTING METHOD DEMO
###Code
# printing method demo:
# Each node renders itself through its own string representation.
allNodes = myInvSim.nodeDict.values()
for currentNode in allNodes:
    print(currentNode)
###Output
_____no_output_____
###Markdown
Excel Export of Last Run of Simulation
###Code
# Export to Excel Method Demo:
import pandas as pd
def createExcelFile(myInvSim, fname):
    """Dump every node's per-period simulation records to an Excel workbook.

    Builds one column per (node, record) pair — starting inventory, backorders
    fulfilled, demand, supply, backorders, ending inventory, and cost — then
    writes the resulting table to *fname* via pandas.

    NOTE(review): assumes every record list has the same length across nodes;
    pandas raises ValueError otherwise — confirm upstream.
    """
    # Use a dedicated dict name instead of rebinding it to the DataFrame later,
    # and hoist the shared "node <id> " column prefix out of the seven lookups.
    columns = {}
    for node in myInvSim.nodeDict.values():
        prefix = "node " + str(node.id) + " "
        columns[prefix + "starting Inventory"] = node.startingInventoryRecord
        columns[prefix + "backorders Fulfilled"] = node.backordersFulfilledArray
        columns[prefix + "demand"] = node.demandArray
        columns[prefix + "supply"] = node.supplyArray
        columns[prefix + "backorders"] = node.backorderRecord
        columns[prefix + "ending Inventory"] = node.endingInventoryRecord
        columns[prefix + "cost"] = node.costRecord
    pd.DataFrame(columns).to_excel(fname)
# Checking Starting Supply over Time
# Sanity check: node 0's starting inventory plus the supply arriving from
# nodes 1 and 2 should reconstruct its base-stock level each period.
from operator import add
import matplotlib.pyplot as plt
startingInv = myInvSim.nodeDict[0].startingInventoryRecord[1:100]
node1Supply = myInvSim.nodeDict[1].supplyArray
node2Supply = myInvSim.nodeDict[2].supplyArray
combinedSupply = list(map(add, node1Supply, node2Supply))[0:99]
shouldbe60 = list(map(add, combinedSupply, startingInv))
# Renamed from `pd`: that name shadowed the pandas alias imported earlier.
periods = list(range(len(shouldbe60)))
# The scatter handle was assigned to an unused `scat` variable; not needed.
plt.scatter(periods, shouldbe60)
plt.title("Starting Inventory Level + Inbound Supply for Node 0 vs. Period")
plt.xlabel("Period")
plt.ylabel("Starting Inventory + Inbound Supply for Node 0")
plt.show()
###Output
_____no_output_____
###Markdown
PLOTS
###Code
# Demand for Node 0 over time
pd0Demand = myInvSim.nodeDict[0].demandArray
print(pd0Demand)
# Renamed from `pd`: that name shadowed the pandas alias imported earlier
# in this notebook.
periods = [i for i in range(len(pd0Demand))]
plt.scatter(periods, pd0Demand)
plt.plot(periods, pd0Demand)
# Fixed copy-pasted title/labels: this figure shows node 0's demand,
# not starting inventory plus inbound supply.
plt.title("Demand for Node 0 vs. Period")
plt.xlabel("Period")
plt.ylabel("Demand for Node 0")
plt.show()
# Pairwise scatter/line plots of per-period demand between the three nodes.
node0Demand = myInvSim.nodeDict[0].demandArray
node1Demand = myInvSim.nodeDict[1].demandArray
node2Demand = myInvSim.nodeDict[2].demandArray
# (x-series, y-series, x-node label, y-node label) for each figure,
# plotted in the same order as before: 0-1, 0-2, 1-2.
demand_pairs = [
    (node0Demand, node1Demand, "Node 0", "Node 1"),
    (node0Demand, node2Demand, "Node 0", "Node 2"),
    (node1Demand, node2Demand, "Node 1", "Node 2"),
]
for x_demand, y_demand, x_name, y_name in demand_pairs:
    plt.scatter(x_demand, y_demand)
    plt.plot(x_demand, y_demand)
    plt.title(y_name + " Qty Demanded vs. " + x_name + " Qty Demanded")
    plt.xlabel(x_name + " Qty Demanded")
    plt.ylabel(y_name + " Qty Demanded")
    plt.show()
# One figure per node: each period's cost against that period's
# ending inventory.
for sim_node in myInvSim.nodeDict.values():
    period_costs = sim_node.costRecord
    ending_inv = sim_node.endingInventoryRecord
    plt.scatter(ending_inv, period_costs)
    plt.xlabel("Ending Inventory")
    plt.ylabel("Cost (this Period)")
    plt.title("Cost as a function of Ending Inventory for node " + str(sim_node.id))
    plt.show()
###Output
_____no_output_____
###Markdown
Notes / Tasks / TODO 
###Code
'''
TODO: 15SEP2020
Things to do:
- Change base stock level for node 1 and node 2 to 50 (actually 30)
- Plot cost as a function of base stock level (see above, change up the nodes)
- AND case issue... How to implement ordering?
'''
'''
Notes:
Total Cost as a function of base stock level
Allow user to specify base stock level for specific nodes
- Change one base stock level, keep other two constant, and plot the total SC cost
-> Rinse and repeat for the other nodes
AND situation:
Node 1 has inventory of 100
Node 2 has inventory of 80
Can retailer see the inventory for both?
Node 0 orders what it needs, if extra stuff arrives, then it has to just let it sit there?
Implement using a dictionary w/ PreDict and RecDict values
Assume that node 1 and 2 have their inventories
If node 0 orders, node 0 doesn't have matching pairs, it needs to hang on to the extra
Right now nobody's selfish
Assembly Supply Chain: ex: 0->1->2
There is an optimal policy
"Diamond" Supply chain (see image)
Distribution nodes screw things up, make the problems harder
-> Because of allocation
'''
###Output
_____no_output_____ |
Lessons/Lesson03_BasicObjectTypes.ipynb | ###Markdown
"Geo Data Science with Python" Notebook Lesson 2 Basic Object Types: Numbers, Booleans and StringsThis lesson discusses more details of the basic Python object types **Numbers**, **Booleans** and **Strings**. In the aftermath, strings will be further deepened by the subsequent reading material *String Fundamentals* (Lutz, 2013), available in the course's reading list on canvas. SourcesSome elements of this notebook source from Chapter 5 and Chapter 7 of Lutz (2013).--- Part A: Numeric Object Types in PythonEffective data-driven science and computation requires understanding how data is stored and manipulated (VanderPlas, 2016).Most of Python's number types are typical and will seem familiar if you have used other programming languages before. However, numbers are not really a single object type but rather a category. Python supports the usual numeric types (integers and floating point) as well as literals for creating numbers, expressions for processing them and some built-in functions and modules. Python also allows to write integers using hexadecimal, octal and binary literals; offers complex number types Python and allows integers to have unlimited precision - they can grow to have as many ditigs as your memory space allows. Lutz (2013) gives the following overview for numeric object types in Python:Table 1: *Numeric literals and constructors (Lutz, 2013, Table 5.1).* Built-in numbers are enough to represent most numeric quantities - from your age to your bank balance - but more types are available from external (third-party) Python packages.Below we briefly introduce the most important ones for this course. These are integer and floating numbers as well as Boolean types. The latter allows for logic operations. IntegersIntegers are written as strings of decimal digits. These numbers have no fractional component. The size of integer numbers is only limited by your computer's memory. 
Python's basic number types support the normal mathematical operations, like addition and substraction with the plus and minus signs ```+/-```, multiplication with the star sign ```*```, and two stars are used for exponentiation ```**```. Try to edit and execute the following example performing substractions, multiplications and divisions. What happens? Are the results of all of these operations also of type integer?
###Code
123 + 222
type(123 + 222)
###Output
_____no_output_____
###Markdown
Indeed, most mathematical operations involving two integer numbers, will also return an integer number. However, divisions do not return an integer number. This is holds even for divisions without remainder. Instead, thanks to the dynamic typing in Python, we get a floating point number:
###Code
type(4/2)
###Output
_____no_output_____
###Markdown
In Python 3 (which we are using here, as you can see from the Kernel type at the top right), if you want to specifically perform an integer division, you have to mark this by using a double division symbol: ```//```.
###Code
4//2 # integer division
type(4//2)
###Output
_____no_output_____
###Markdown
Just as a side note: The integer division ```//``` in Python 3 is actually a floor division, provided by the Python module math. We will discuss Python modules, at a later point in the course.
###Code
import math
math.floor(123/222)
###Output
_____no_output_____
###Markdown
Floating-point NumbersFloating-point numbers have a fractional component. A decimal point and/or an optional signed exponent introduced by an ```e``` or ```E``` and followed by an optional sign are used to write floating-point numbers.
###Code
type(3.14) # literal for a floating-point number
314e-2 # literal for a floating-point number in scientific notation
###Output
_____no_output_____
###Markdown
Floating-point numbers are implemented as C "doubles", therefore they get as much precision as the C compiler used to build the Python interpreter gives to doubles (usually that is 15 decimal digits of precision). For more precision, external Python packages have to be used. In addition, Python 3 automatically handles a user-friendly output of floating-point numbers. For example, if we define the mathematical constant π to a numeric object, the unformatted output on screen will have the same length.
###Code
pi_approximate = 3.14
pi_accurate = 3.141592653589793
print(pi_approximate)
print(pi_accurate)
###Output
3.14
3.141592653589793
###Markdown
However, when printing the variable to the screen, you can also change the precision of the output, by using the modulus operator ```%```. If you want to print out 4 digits after the comma, indicate this with ```%.4f``` in the following way:
###Code
print('%.4f'%pi_accurate) # formated screen output using print() for floating-point numbers
###Output
3.1416
###Markdown
Alternatively, the output can be formatted in scientific notation or as an integer number, through the indicators ```e``` and ```i```:
###Code
print('%.4e'%pi_accurate) # formated screen output using print() for numbers in scientific notation
print('%i'%pi_accurate) # formated screen output using print() for integer numbers
###Output
3
###Markdown
We will discuss further details of formatted output using the print function, further below in the section about strings. Furthermore, since objects are strongly typed in Python, you cannot change the type of an existing object, but you can convert its value into a new object of another type and assign that result to a variable. For example, the function ```int()``` truncates a floating-point number into an integer number:
###Code
int(3.141)
###Output
_____no_output_____
###Markdown
And the function ```float()``` does the opposite:
###Code
float(3)
###Output
_____no_output_____
###Markdown
Take notice what happens, if an operation is performed that involves both number types floating-point and integer. In that case, before the Python intepreter performs the operation, it converts the elements of the operation up to the most complicated type. Hence, the output object type of a mathematical operation that includes integer and floating-point numbers will be of floating-point type:
###Code
type(40 + 3.141)
###Output
_____no_output_____
###Markdown
Built-in Numeric ToolsWe have already mentioned some basic mathematic operations. Now let's discuss more expressions available for processing numeric object types and some built-in functions and modules. We will meet many of these as we go along. *Expression operators:*```+```, ```-```, ```/```, ```*```, ```**```, ```%```, etc.Expressions operators are used for mathematical operations between two numbers. Above listed are the operands of an addition, substraction, division, multiplication, exponent, and modulus. Go to this website to find a comprehensive list of expression operators: https://www.tutorialspoint.com/python/python_basic_operators.htmIt is important to keep in mind that:* Mixed operators follow operator precedence (similar to mathematical operations: multiplications precede additions, hence, ```5+1*10=50```. For a full list of precedence orders see table 6.16 in the Python documentation: https://docs.python.org/3/reference/expressions.html)* Parantheses group subexpressions (exactly like in mathematics: ```(5+1)*10=60``` but ```5+(1*10)=50```)* Mixed types are converted up (as already discussed for the last example in the section about floating-point numbers) *Built-in functions:*Python has some built-in functions and some of them are useful for basic numeric processing. Examples are:```pow()```, ```abs()```, ```round()```, ```int()```, ```hex()```, ```bin()```, etc. The documentation pages of the Python language provides a comprehensive list: https://docs.python.org/3/library/functions.html *Utility modules:*The packages (modules) ```random``` and ```math``` provide further functions useful for mathematical operations. 
The documentation pages of the Python language provides a comprehenisve overview of functions coming with the math module: https://docs.python.org/3/library/math.html Such modules have to be imported before first, and then functions in that module can be accessed by combining their names with a literal ```.``` (similar to the example above using the ```math``` function ```floor()``` ):
###Code
import math
math.floor(3.14)
###Output
_____no_output_____
###Markdown
The ```math``` module contains more advanced numeric tools as functions. Conveniently, the math module comes also with some mathematical constants and trigonometric functions:
###Code
math.sqrt(99)
math.pi, math.e # returns the mathematical constants pi and euler's number e
math.sin(math.pi/2)
###Output
_____no_output_____
###Markdown
After importing the ```random``` module, you can perform random-number generation ...
###Code
import random
random.random()
###Output
_____no_output_____
###Markdown
... and random selections (here, from a Python *list* coded in square brackets - an object type to be indroduced later in this course module):
###Code
random.choice([1,2,3,4]) # choice([L]) chooses a random element from L
###Output
_____no_output_____
###Markdown
Go ahead and use the following code cell to try some of the functions and modules in the examples and/or links above (but be aware that some in the links listed functions request more advanced object types, that we haven't discussed yet).
###Code
math.ceil(3.14) # ceil(x) returns the smallest integer >= x.
###Output
_____no_output_____
###Markdown
And of course, you can do all of discussed and listed numerical operations with variables that have been assigned with a numerical values.
###Code
a = math.pi
b = math.sin(math.pi*5/4)
print(b)
###Output
-0.7071067811865475
###Markdown
Using variable of numeric object type with expressions, the following has to be kept in mind for Python:* Variables are created when they are first assigned values.* Variables are replaced with their values when used in expressions.* Variables must be assigned before they can be used in expressions.* Variables refer to objects and are never declared ahead of time.Now, you have gained the most important knowledge to use and process variables of numeric object type in Python. For even more complex numerical operations, especially involving data tables, one has to refer to separate, external Python packages. We will discuss modules in general and external Python packages in specific during a later course module. Python HELP???!!!If you ever wonder what a function's function is without starting any literature or internet search, you may always consult the very useful built-in function ```help()```, through which you can request the manual entry for any function:
###Code
help(abs)
###Output
Help on built-in function abs in module builtins:
abs(x, /)
Return the absolute value of the argument.
###Markdown
The returned text delivers information about syntax and semantics of the function. This work also for functions of imported modules:
###Code
help(math.ceil)
###Output
Help on built-in function ceil in module math:
ceil(x, /)
Return the ceiling of x as an Integral.
This is the smallest integer >= x.
###Markdown
Part B: Boolean Types: Truth Values, Comparisons & TestsPython's Boolean type and its operators are a bit different from their counterparts in languages like C. In Python, the Boolean type, ```bool```, is numeric in nature because its two values, ```True``` and ```False```, are basically custom versions of 1 and 0. Also Boolean values ```True``` or ```False``` are treated as numeric *constants* in Python (see the Table 1) and their Boolean object type (```bool```) is actually a subtype (subclass) of integers (```int```).
###Code
type(True)
###Output
_____no_output_____
###Markdown
Let's look at some examples to understand how Boolean types and their operators function in Python. Boolean Truth ValuesIn Python all objects have an inherent *Boolean* true or false value. We can define:* Any nonzero number or nonempty object is true.* Zero numbers, empty objects, and a special object ```None``` are considered false.The built-in function ```bool()```, which tests the Boolean value of an argument, is available to request this inherent value for any variable. For example:
###Code
a = 0
b = None
c = 10.0
bool(a), bool(b), bool(c)
###Output
_____no_output_____
###Markdown
Because of Python's customization of the Boolean type, the output of Boolean expressions typed at the interactive prompt prints as the words ```True``` and ```False``` instead of the older and less obvious ```1``` and ```0```. Most programmers had been assigning ```True``` and ```False``` to ```1``` and ```0``` anyway. The ```bool``` type simply makes this standard. It's implementation can lead to curious results, though. Because ```True``` is just the integer ```1``` with a custom display format, ```True + 4``` yields integer ```5``` in Python!
###Code
True + 4
###Output
_____no_output_____
###Markdown
By the way, very much like the Boolean values ```True``` and ```False```, also the value ```None``` is a built-in constant. However the ```None``` value is special, as it basically sets a variable to an empty value (much like a ```NULL``` pointer in C) and it has it's very separate and unique object type:
###Code
type(None), type(True)
###Output
_____no_output_____
###Markdown
See the top of this Python documentation page for explanations of the built-in constants: https://docs.python.org/3/library/constants.html Comparisons & Equality testsAlso comparisons and equality tests return ```True``` or ```False```. Range comparisons can be performed using the expression operators ``````, ```>=```, ```<=```; and equality tests using the expression operators ```==```, ```!=```. For example:
###Code
a < c, a==c, b!=c
###Output
_____no_output_____
###Markdown
Notice how mixed types are allowed in numeric expressions (only). In the first test above, Python compares an integer and a floating-point number with each other as well as a number with the NoneType. Boolean TestsBoolean tests use the logical operators ```and``` and ```or``` and they return a true or false operand object. Such Boolean operators combine the results of other tests in richer ways to produce new truth values. For that, revise also the operator precedence ([Table 6.16 of the Python documentation](https://docs.python.org/3/reference/expressions.html)).More formally, there are three Boolean expression operators in Python, which are typed out as workds in Python (in contrast to other languages):* ```X and Y``` Is true if both ```X``` and ```Y``` are true* ```X or Y``` Is true if either ```X``` or ```Y``` is true* ```not X``` Is true if ```X``` is false (the expression returns ```True``` or ```False```)Here, ```X``` and ```Y``` may be any truth value, or any expression that returns a truth value (e.g., an equality test, range comparison, and so on).Keep in mind, that the Boolean ```and``` and ```or``` operators return a true or false object, not the values ```True``` or ```False```. Let's look at a few examples to see how this works. Compare the following comparison:
###Code
1 < 2, 3 < 1
###Output
_____no_output_____
###Markdown
... with the output of the following Boolean tests:
###Code
1 or 2, 3 or 1
None or 3
0 and 3
###Output
_____no_output_____
###Markdown
You can see, that ```and``` and ```or``` operators always return an object. Either the object on the *left* side of the operator or the object on the *right*. If we test their results, using the built-in function ```bool()``` they will be as expected (remember, every object is inherently true or false), but we won't get back a simple ```True``` or ```False```.Furthermore, Boolean ```or``` tests are done in a so called *short-circuit evaluations*. This means the interpreter evaluates the operand objects from left to right. Once it finds the first true operand, it terminates (short-circuits) the evaluation of the rest of the expression. After the first true operand was found, the values of further operands in the expression won't be able to change the outcome of an ```or``` test: ```true``` or anything is always true.Similarily, the Python ```and``` operands stop as soon as the result is known. However, in this case Python evaluates the operands from left to right and stops if the left operand is a ```false``` object because it determines the result: false ```and``` anything is always false.The concept of *short-circuit evaluations* has to be known, to predict the exact output of a Boolean test. Below some examples to study:
###Code
True or 20 # Evaluation stops after first True object: result is True
10 or 20 # Evaluation stops after first non-zero object: result is 10
False and 20 # Evaluation stops after first False: result is False
10 and False # Evaluation stops after first False: result is False
10 and 20 # Evaluation continues until last object: results is 20
# (no zero or false object)
10 and 20 and 30 # Evaluation continues until last object: results is 30
###Output
_____no_output_____
###Markdown
Chained ComparisonsIn addition to that, Python allows us to chain multiple comparisons together. Chained comparisons are sort of shorthand for larger Boolean expressions. This allows us to perform range tests. For instance, the expression ```(a < b < c)``` tests whether ```b``` is between ```a``` and ```c```; it is equivalent to the Boolean test ```(a < b and b < c)```. But the former is easier on the eyes (and the keyboard).For example:
###Code
a = 20
b = 40
c = 60
###Output
_____no_output_____
###Markdown
Now compare:
###Code
a < b < c
###Output
_____no_output_____
###Markdown
with:
###Code
a < b and b < c
###Output
_____no_output_____
###Markdown
You can build even longer chains or add comparisons into the chained tests.
###Code
1 < 2 < 3 < 4.0 < 5
###Output
_____no_output_____
###Markdown
But the resulting expressions can become nonintuitive, unless you evaluate them the way Python does. The following, for example, is false just because 1 is not equal to 2:
###Code
1 == 2 < 3 # Same as 1 == 2 and 2 < 3 (not same as False < 3)
###Output
_____no_output_____
###Markdown
In this example, Python does not compare the ```1 == 2``` expression's ```False``` result to 3. This would technically mean the same as ```0 < 3```, which would be ```True```. Identity OperatorsLastly, identity operators compare the memory locations of two objects. There are two identity operators: ```is``` and ```is not```.* ```is``` evaluates to true if the variables on either side of the operator point to the same object and false otherwise.* ```is not``` evaluates to false if the variables on either side of the operator point to the same object and true otherwise.For example, remember from the last notebook what we have learned about how Variable names are referenced to objects in Python? From that, it becomes obvious the following identity test has to be true:
###Code
a = 3
b = a
a is b
###Output
_____no_output_____
###Markdown
And with identity tests, we can also show, that the Boolean "number" ```True``` and the integer number ```1``` are of the same value (both are basically an integer number ```1```), but not of the same object:
###Code
True == 1 # Same value
True is 1 # But a different object
###Output
<>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
<ipython-input-48-6e82371b8779>:1: SyntaxWarning: "is" with a literal. Did you mean "=="?
True is 1 # But a different object
###Markdown
Boolean Types: SummarySo let's summarize briefly, what we have discussed about Boolean types and operators:* Any nonzero number or nonempty object is true.* Zero numbers, empty objects, and a special object ```None``` are considered false.* Comparisons and equality tests are applied recursively to data structures.* Comparisons, equality tests and identity operators return ```True``` or ```False``` (which are custom versions of 1 and 0)* Boolean ```and``` and ```or``` operators return a true or false operand object.* Boolean operators stop evaluating ("short circuit") as soon as a result is known.Refer back to this website to find a comprehensive list of expression operators, including those for comparisons and equality test as well as logical operators and identity operators: https://www.tutorialspoint.com/python/python_basic_operators.htm Part C: Strings in Python Strings are used to record both, textual information (your name, for instance) as well as arbritrary collection of bytes (such as image file's contents). They are our first example, of what in Python we call a ***sequence*** - **a positionally ordered collection of other objects**. Sequences maintain a **left-to-right order** among the items they contain: their items are stored and fetched by their relative positions. Strictly speaking, strings are sequences of one-character strings; other, more general sequence types include *lists* and *tuples*, coverd later (Lutz, 2013). But let's first begin with the syntax for generating strings. String LiteralsPython strings are easy to use and several syntax forms can be used to generate them. For example, we can assign the a string "```knight's```" to a variable ```S``` in different ways:
###Code
S1 = 'knight"s' # single quotes
S2 = "knight's" # double quotes
S3 = '''knights''' # triple quotes
S4 = '\nknight\'s' # escape sequence
print(S1 , S2 , S3 , S4, )
###Output
knight"s knight's knights
knight's
###Markdown
Single and double-quote characters are interchangeable and they can be enclosed in either. You can also embed one in the other and vice versa, as seen in the examples above. Triple quotes are an alternative to code entire *block strings*. That is a syntactic convenience for coding mulitiline text data.Escape sequences allow embedding of special characters in string cannot easily be typed on a keyborad. In the string literal, one Backslash ```\``` precedes a character. The character pair is then replaced by a single character to be stored in the string:* ```\n``` stores a newline* ```\t``` stores a horizontal tab* ```\v``` stores a vertical tab* ```\\```,```\'```,```\''``` for special caracters like Backslash, single quotes or double quotes The ```\\``` stores one ```\``` in the string. While the function print replaces the escape characters (see code cell above). However, the interactive echo of the interpreter keeps showing the special characters as escapes:
###Code
S4
###Output
_____no_output_____
###Markdown
String PropertiesBecause strings are sequences, they support operations that assume a positional ordering among its items. For example, one can request the length of a string with the built-in function ```len()```. And one can select and print out certain items of a string, or in other words, fetch its components with *indexing* expressions.
###Code
len(S1) # len returns ength of a string sequence
S1[0] # returns the first item from the left
S1[1] # returns the second item from the left
###Output
_____no_output_____
###Markdown
In Python, indexing is coded as offsets from the front. The first item is at index 0, the second at index 1 and so on. In addition to that, strings allow the following typcial sequence operations.* slicing: general form of indexing - extract an entire section (slice) of a string in a single step* concatenating: joining two strings into a new string* repeating: making a new string by repeating anotherHere some examples:
###Code
S1[1:4] # slicing an index
S2 + S3 # concatenating an index
S3*3 # Repetition
###Output
_____no_output_____
###Markdown
Index operations will be discussed in more detail in the upcoming reading material.Another property of strings in Python is *immutability*. In the previous notebook you have learned about the concepts of mutability and immutability. Now, strings being immutable means they cannot be changed in place after they are created: any operations performed on strings cannot overwrite the values of a string object. But you can always build a new one and assign it to the same name. To illustrate that, let's look at two examples. Immutabilitity means, that you cannot change a single item of a string like this:
###Code
S1[1]='y'
###Output
_____no_output_____
###Markdown
Instead, we get a ```TypeError```, stating that string objects do not support item assignment! But we can run expressions to make new objects and reference them to the same name:
###Code
S1 = 'y' + S1
print(S1)
###Output
_____no_output_____
###Markdown
In this case, the old object and its reference are then deleted. In fact, Python cleans up old objects as you go. You will learn more about that in the upcoming reading material. Formatted output of strings using ```print()```You have already used ```print()``` to quickly print variable to the screen. The function, however, can be fed with syntax that formats the output of strings and numbers. For that, two different flavors are possible. The original technique available since Python's beginning, which is based on the C language and is used widely:* String formatting expressions: ```'...%s...' % (values)```A newer technique added since Python 2.6:* String formatting method calls: ```'...{}...'.format(values)```The second method is syntactically a bit more complex, expecially since it uses object oriented syntax, which we will discuss at a later point in the course. However, it has a clear advantage, as type codes are optional and different object types handled automatically. Both flavors can be used without (as interactive echo of the interpreter) and with the ```print()``` function. Below you can find a list of type codes useful for the second option (string formatting expressions). The list is not complete, but contains all codes relevant for this course.Table 2: *Selected string Formatting Type Codes.*| Code | Meaning / Object Type | :-: | :- || ```%s``` | String | ```%c``` | Character (int or str) | ```%d``` | Decimal (base-10 integer)| ```%i``` | Integer| ```%e``` | Floating-point with exponent, lower case| ```%E``` | Same as ```e``` but uses upper case ```E```| ```%f``` | Floating-point decimal | ```%``` | Literal % (coded as %%) In the following examples, both formatting techniques are adapted. Try to alter them and learn how they work:
###Code
print("The %s robe is green!" % S2) # formatting expression
print('The {} robe is green!'.format(S2)) # formatting method calls
knifes = 2
print("The %s has %i knifes in his hand." % (S2,knifes))
print("The {} has {} knifes in his hand.".format(S2,knifes))
###Output
_____no_output_____
###Markdown
Precision of floating points can be controlled for the second formatting method by entering parameter into the curvy brackets, for example in the following way if you want to print two digits after the comma. Also the positions of the variable replacements can be switched:
###Code
money = 2.222222
print("The {1:.3f} cents in the {0} pockets were stolen.".format(S2,money))
print("The {0:.3} cents in the {1:0.3} pockets were stolen.".format(S2,money))
###Output
_____no_output_____
###Markdown
If you like to get into the details of the very flexible string formatting using method calls, check the following pages:* https://www.digitalocean.com/community/tutorials/how-to-use-string-formatters-in-python-3 * https://pyformat.info/ Type Specific Operations and MethodsLastly, I would like to provide an overview of type specific operations for strings in Python.Table 3: *String Type Specific Operations (after Lutz, 2013, Table 7-1).*| Operation | Interpretation | :----------- | :----------- || ```S1 + S2``` | Concatenate | ```S1 * 3``` | Repeat | ```S[i]``` | Indexing | ```S[i:j]``` | Slicing | ```len(S)``` | Length | ```"The sum of 1 + 2 is %i" % (1+2)``` | String formatting expression | ```"The sum of 1 + 2 is {0}".format(1+2)``` | String formatting method calls| ```.find('pa')``` | String methods: search | ```.strip()``` | Remove all leading and trailing whitespace| ```.rstrip()``` | Remove trailing whitespace| ```.replace('pa','xx')``` | Replacement| ```.split(',')``` | Split on delimiter| ```.splitlines()``` | split string at all ‘\n’ and return a list of the lines| ```.lower()``` | Case conversion (to lower case)| ```.upper()``` | Case conversion (to upper case)| ```.endswith(spam')``` | End testThe first seven entries have been addressed in this notebook. All remaining entries are so called methods. Methods are specific functions that are applied with the following syntax: ```stringname.methodname(arguments)```. The methods in the table are specifically designed to handle strings. These methods may appear to alter the content of strings. However, they are actually not changing the original strings but create new strings as results - because strings are immutable.Investigate and practice the functionality of these methods. You can use the examples below, the Python ```help()``` function or search them in the Python documentation: https://docs.python.org/3/library/stdtypes.html (scroll down to the section "String Methods"). 
Alternatively, study the following external Jupyter Notebook, which discusses the most important string methods: https://www.digitalocean.com/community/tutorials/an-introduction-to-string-functions-in-python-3
###Code
S = 'Hello World ! '# define a string S
S.find('World') # find the substring 'World'
S.replace('World','Class') # replace the substring 'World' with 'Class'
S.rstrip(), S.lower(), S.upper() # check what happened to the spaces and the letters
S.split(' ') # splits the string at a given delimiter (here space)
S # even after the performed operations, the immutable string S remains unchanged
help(str.find) # request help for a method
###Output
_____no_output_____
###Markdown
Now, you can move on to read the book section about "Strings in Action" from Lutz (2013), which you can download on Canvas. The material will strengthen you knowledge about strings sequences, most importantly details about **indexing and slicing**. You can use the code cells below, to practice the examples in the book section.
###Code
# add your code here
# add your code here
# add your code here
# add your code here
###Output
_____no_output_____ |
code/notebooks/Phytoliths_Classifier/Background_images_generator.ipynb | ###Markdown
Generador de recortes del fondo de la imagen*** Este notebook tiene como objetivo la obtención de recortes del fondo de las imágenes de fitolitos. La obtención de estos es fundamental para la utilización de técnicas de clasificación y/o reconocimiento de objetos mediante clasificadores junto a descriptores.Para ello:1. Leemos las imágenes junto a sus coordenadas almacenadas en un fichero *JSON*.2. Obtenemos recortes (de distintos tamaños) de la imágen siempre y cuando sea un area sin un fitolito.Las imágenes generadas se almacenan en "Background2" dentro de "code/rsc/img" para no alterar el conjunto de imágenes del fondo que se aporta inicialmente.
###Code
%matplotlib inline
from __future__ import print_function
from ipywidgets import interact_manual, fixed
import matplotlib.pyplot as plt
import os, os.path
import re
import numpy as np
import math
from math import ceil
from sklearn.feature_extraction.image import PatchExtractor
from skimage import io
from skimage.transform import rescale
import copy
import json
import warnings
import random
def extract_patches(img, coords_list, patch_size, N=math.inf, scale=1.0, random_patch_size = True):
    """Collect (patch, (x1, y1, x2, y2)) crops from ``img`` on a 400-px grid.

    ``patch_size`` is (height, width). When ``random_patch_size`` is on, some
    crops are randomly shrunk to half height and/or half width to add size
    variability. At most ``N + 1`` crops are returned (the cap is checked
    after each append). ``coords_list`` and ``scale`` are accepted but not
    used here; overlap filtering happens in a later step.
    """
    crops = []
    taken = 0
    patch_h, patch_w = patch_size
    img_h, img_w, _ = img.shape
    for top in range(0, img_h, 400):
        if top + patch_h > img_h:
            break
        for left in range(0, img_w, 400):
            bottom = top + patch_h
            right = left + patch_w
            if right > img_w:
                break
            # Randomly halve one or both crop dimensions so the crop
            # sizes vary across the generated background set.
            if random_patch_size == True:
                draw = random.random()
                if draw > 0.85:
                    bottom = top + round(patch_h * 0.5)
                elif draw > 0.7:
                    right = left + round(patch_w * 0.5)
                elif draw > 0.55:
                    bottom = top + round(patch_h * 0.5)
                    right = left + round(patch_w * 0.5)
            crops.append((img[top:bottom, left:right], (left, top, right, bottom)))
            taken += 1
            if taken > N:
                return crops
    return crops
def is_containing_objects(patch_coords, coords):
    """Return True when either rectangle contains a corner of the other.

    Both arguments are (x1, y1, x2, y2) boxes. This corner test detects the
    overlaps relevant here (a pure cross overlap with no corner inside the
    other box is not reported, matching the original behaviour).
    """
    def holds(box, px, py):
        # Point-in-box test, borders inclusive.
        return box[0] <= px <= box[2] and box[1] <= py <= box[3]

    def corners(box):
        x1, y1, x2, y2 = box
        return ((x1, y1), (x2, y1), (x1, y2), (x2, y2))

    if any(holds(patch_coords, cx, cy) for cx, cy in corners(coords)):
        return True
    return any(holds(coords, px, py) for px, py in corners(patch_coords))
def supress_contained_patches(patches, coords_list):
    """Keep only the patches whose region touches no annotated object box.

    ``patches`` is a list of (image_crop, (x1, y1, x2, y2)) pairs and
    ``coords_list`` the phytolith boxes; any patch overlapping one of those
    boxes (per ``is_containing_objects``) is discarded.
    """
    kept = []
    for candidate in patches:
        region = candidate[1]
        overlaps = any(is_containing_objects(region, box) for box in coords_list)
        if not overlaps:
            kept.append(candidate)
    return kept
def save_patches(patches, path, image_name = ''):
    """Write every (crop, (x1, y1, x2, y2)) pair in *patches* as a JPEG.

    The file name encodes the crop's coordinates:
    ``<path><image_name><x1>_<y1>_<x2>_<y2>.jpg`` (JPEG quality 30 to keep
    the background set small).
    """
    for crop, (x1, y1, x2, y2) in patches:
        filename = f"{path}{image_name}{x1}_{y1}_{x2}_{y2}.jpg"
        io.imsave(filename, crop, quality=30)
path="../../rsc/img/Default"
dest_path = "../../rsc/img/Background2/"
pattern = re.compile("^.*\.jpg$", re.IGNORECASE)
def list_images(path='../../rsc/img/Default'):
    """Return paths of labelled images in *path*.

    An image counts as labelled when its name matches the module-level
    ``pattern`` (case-insensitive ``*.jpg``) and a ``.json`` annotation file
    with the same stem sits next to it.
    """
    labelled = []
    for fname in os.listdir(path):
        annotation = path + "/" + fname.split(".")[0] + ".json"
        if pattern.match(fname) and os.path.exists(annotation):
            labelled.append(path + "/" + fname)
    return labelled
def read_coords_conversion(coords_dict):
    """Flatten every box list in *coords_dict* into one list of reordered rows.

    Each row ``[a, b, c, d]`` becomes ``[c, a, d, b]`` (column order
    2, 0, 3, 1) — presumably converting the labeler's layout into the
    (x1, y1, x2, y2) order the patch code expects; TODO confirm.
    """
    flattened = []
    for boxes in coords_dict.values():
        reordered = np.array(boxes)[:, [2, 0, 3, 1]]
        flattened.extend(reordered.tolist())
    return flattened
def background_images_generator(path, number_of_images, dest_path):
    """Generate roughly *number_of_images* background crops from the labelled
    images in *path* and save them as JPEGs under *dest_path*.

    For each labelled image: load it at half scale, read its phytolith boxes
    from the sibling ``.json`` file, take grid crops that avoid those boxes
    (``extract_patches`` + ``supress_contained_patches``) and write them out.
    Stops once the running crop count exceeds ``number_of_images``.

    NOTE(review): the image is rescaled by 0.5 but the JSON coordinates are
    used as-is — this only lines up if the annotations were stored at half
    scale; verify against the labeling tool's output.
    """
    images_names_list = list_images(path)
    initial_value = len(images_names_list)
    if initial_value == 0:
        # NOTE(review): message is misleading — this actually means no
        # labelled images were found under *path*.
        raise ValueError("Number of images must be greater than 0")
    count = 0
    # Spread the requested budget evenly over the available images.
    images_per_image = ceil(number_of_images / initial_value)
    for image_path in images_names_list:
        warnings.filterwarnings("ignore")  # silence skimage precision warnings
        image = rescale(io.imread(image_path), 0.5)
        # Relies on the path starting with "../.." and containing exactly one
        # extension dot, e.g. "../../rsc/img/Default/foo.jpg" -> ".json" path.
        json_path = "../.." + image_path.split(".")[-2] + ".json"
        image_name = os.path.split(image_path)[1].split(".")[0]
        image_with_format = image_name + ".jpg"
        # Load the coordinates if they exist; skip the image when no
        # annotation file is present.
        if os.path.exists(json_path):
            with open(json_path) as jsonfile:
                coords_dict = json.load(jsonfile)
                coords_dict = coords_dict[image_with_format]
                coords_list = read_coords_conversion(coords_dict)
        else:
            continue
        # Generate background crops that do not overlap any phytolith box.
        patches = extract_patches(image, coords_list, patch_size=(250,250), N=images_per_image)
        patches = supress_contained_patches(patches, coords_list)
        save_patches(patches, dest_path, image_name)
        count += len(patches)
        if count > number_of_images:
            break
interact_manual(background_images_generator,
number_of_images=(10,4000,10),
path=fixed(path),
dest_path=fixed(dest_path))
###Output
_____no_output_____ |
lesson3/dataframes.ipynb | ###Markdown
always add the following cell to the start of a notebook when using spark
###Code
# lets start the spark session
# the entry point for an spark app is the SparkSession
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("local[2]").appName("FirstApp").getOrCreate()
# if you don't get an output here it means that jupyter isn't connected to pyspark
spark
###Output
_____no_output_____
###Markdown
use this to debug any errors related to wrong path/file not found
###Code
import os
os.getcwd()
# os.path.abspath(os.getcwd())
###Output
_____no_output_____
###Markdown
Dataframes we can create a dataframe from a list that we parallelize
###Code
data = [
('1', 'JS', 179),
('2', 'CL', 175),
('3', 'AS', 140),
('4', 'LF', 170)
]
df = spark.createDataFrame(
data,
['Id', 'Name', 'Height'] # column list
)
df.printSchema()
df.show(10) # default 20 rows
# we can retrieve a subset of the df using head
df.head(2)
type(df.head(2))
df.head(2)[0][2]
# we can also pass the schema
from pyspark.sql.types import *
schema = StructType([
# StructField("column_name", columnType(), Nullable),
StructField("id", StringType(), False),
StructField("name", StringType(), True),
StructField("height", IntegerType(), False)
])
df = spark.createDataFrame(data=data, schema=schema)
df.printSchema()
###Output
root
|-- id: string (nullable = false)
|-- name: string (nullable = true)
|-- height: integer (nullable = false)
###Markdown
SPARK.READ usually we want to create a df from a data source. Spark can read from the following sources CSVspark.read.csv — useful when reading from delimited files
###Code
csv_path = '../data/airports.text'
df = spark.read.csv(
csv_path,
# header=True,
inferSchema=True # affects performance as data as parsed a second time to inferSchema
)
df.printSchema()
# describe() can be used to glance over the data statics
df.describe().show()
df.show()
###Output
+---+--------------------+--------------+----------------+---+----+---------+----------+----+----+----+--------------------+
|_c0| _c1| _c2| _c3|_c4| _c5| _c6| _c7| _c8| _c9|_c10| _c11|
+---+--------------------+--------------+----------------+---+----+---------+----------+----+----+----+--------------------+
| 1| Goroka| Goroka|Papua New Guinea|GKA|AYGA|-6.081689|145.391881|5282|10.0| U|Pacific/Port_Moresby|
| 2| Madang| Madang|Papua New Guinea|MAG|AYMD|-5.207083| 145.7887| 20|10.0| U|Pacific/Port_Moresby|
| 3| Mount Hagen| Mount Hagen|Papua New Guinea|HGU|AYMH|-5.826789|144.295861|5388|10.0| U|Pacific/Port_Moresby|
| 4| Nadzab| Nadzab|Papua New Guinea|LAE|AYNZ|-6.569828|146.726242| 239|10.0| U|Pacific/Port_Moresby|
| 5|Port Moresby Jack...| Port Moresby|Papua New Guinea|POM|AYPY|-9.443383| 147.22005| 146|10.0| U|Pacific/Port_Moresby|
| 6| Wewak Intl| Wewak|Papua New Guinea|WWK|AYWK|-3.583828|143.669186| 19|10.0| U|Pacific/Port_Moresby|
| 7| Narsarsuaq| Narssarssuaq| Greenland|UAK|BGBW|61.160517|-45.425978| 112|-3.0| E| America/Godthab|
| 8| Nuuk| Godthaab| Greenland|GOH|BGGH|64.190922|-51.678064| 283|-3.0| E| America/Godthab|
| 9| Sondre Stromfjord| Sondrestrom| Greenland|SFJ|BGSF|67.016969|-50.689325| 165|-3.0| E| America/Godthab|
| 10| Thule Air Base| Thule| Greenland|THU|BGTL|76.531203|-68.703161| 251|-4.0| E| America/Thule|
| 11| Akureyri| Akureyri| Iceland|AEY|BIAR|65.659994|-18.072703| 6| 0.0| N| Atlantic/Reykjavik|
| 12| Egilsstadir| Egilsstadir| Iceland|EGS|BIEG|65.283333|-14.401389| 76| 0.0| N| Atlantic/Reykjavik|
| 13| Hornafjordur| Hofn| Iceland|HFN|BIHN|64.295556|-15.227222| 24| 0.0| N| Atlantic/Reykjavik|
| 14| Husavik| Husavik| Iceland|HZK|BIHU|65.952328|-17.425978| 48| 0.0| N| Atlantic/Reykjavik|
| 15| Isafjordur| Isafjordur| Iceland|IFJ|BIIS|66.058056|-23.135278| 8| 0.0| N| Atlantic/Reykjavik|
| 16|Keflavik Internat...| Keflavik| Iceland|KEF|BIKF| 63.985|-22.605556| 171| 0.0| N| Atlantic/Reykjavik|
| 17| Patreksfjordur|Patreksfjordur| Iceland|PFJ|BIPA|65.555833| -23.965| 11| 0.0| N| Atlantic/Reykjavik|
| 18| Reykjavik| Reykjavik| Iceland|RKV|BIRK| 64.13|-21.940556| 48| 0.0| N| Atlantic/Reykjavik|
| 19| Siglufjordur| Siglufjordur| Iceland|SIJ|BISI|66.133333|-18.916667| 10| 0.0| N| Atlantic/Reykjavik|
| 20| Vestmannaeyjar|Vestmannaeyjar| Iceland|VEY|BIVM|63.424303|-20.278875| 326| 0.0| N| Atlantic/Reykjavik|
+---+--------------------+--------------+----------------+---+----+---------+----------+----+----+----+--------------------+
only showing top 20 rows
###Markdown
using the output from the previous 2 cells, build a schema and pass it at read
###Code
csv_schema = StructType([
# StructField("column_name", columnType(), Nullable),
# edit this and add the columns
])
df = spark.read.csv(csv_path, schema=csv_schema)
df.show()
###Output
++
||
++
||
||
||
||
||
||
||
||
||
||
||
||
||
||
||
||
||
||
||
||
++
only showing top 20 rows
###Markdown
TEXTspark.read.text — similar to spark.sparkContext.textFile
###Code
text_path = '../data/word_count.text'
df = spark.read.text(text_path)
df.show()
help(spark.read.text)
###Output
Help on method text in module pyspark.sql.readwriter:
text(paths, wholetext=False, lineSep=None, pathGlobFilter=None, recursiveFileLookup=None) method of pyspark.sql.readwriter.DataFrameReader instance
Loads text files and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
The text files must be encoded as UTF-8.
By default, each line in the text file is a new row in the resulting DataFrame.
:param paths: string, or list of strings, for input path(s).
:param wholetext: if true, read each file from input path(s) as a single row.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\r``, ``\r\n`` and ``\n``.
:param pathGlobFilter: an optional glob pattern to only include files with paths matching
the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.
It does not change the behavior of `partition discovery`_.
:param recursiveFileLookup: recursively scan a directory for files. Using this option
disables `partition discovery`_.
>>> df = spark.read.text('python/test_support/sql/text-test.txt')
>>> df.collect()
[Row(value='hello'), Row(value='this')]
>>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True)
>>> df.collect()
[Row(value='hello\nthis')]
.. versionadded:: 1.6
###Markdown
JSONspark.read.json
###Code
json_path = '../data/resource_hvrh-b6nb.json'
df = spark.read.json(json_path)
df.printSchema()
###Output
root
|-- dropoff_latitude: string (nullable = true)
|-- dropoff_longitude: string (nullable = true)
|-- extra: string (nullable = true)
|-- fare_amount: string (nullable = true)
|-- improvement_surcharge: string (nullable = true)
|-- lpep_dropoff_datetime: string (nullable = true)
|-- lpep_pickup_datetime: string (nullable = true)
|-- mta_tax: string (nullable = true)
|-- passenger_count: string (nullable = true)
|-- payment_type: string (nullable = true)
|-- pickup_latitude: string (nullable = true)
|-- pickup_longitude: string (nullable = true)
|-- ratecodeid: string (nullable = true)
|-- store_and_fwd_flag: string (nullable = true)
|-- tip_amount: string (nullable = true)
|-- tolls_amount: string (nullable = true)
|-- total_amount: string (nullable = true)
|-- trip_distance: string (nullable = true)
|-- trip_type: string (nullable = true)
|-- vendorid: string (nullable = true)
###Markdown
as long as they have a valid schema the json can be different
###Code
jsonStrings = ['{"uploadTimeStamp":"1500618037189","ID":"123ID","data":[{"Data":{"unit":"rpm","value":"0"},"EventID":"E1","Timestamp":1500618037189,"pii":{}},{"Data":{"heading":"N","loc1":"false","loc2":"13.022425","loc3":"77.760587","loc4":"false","speed":"10"},"EventID":"E2","Timestamp":1500618037189,"pii":{}},{"Data":{"x":"1.1","y":"1.2","z":"2.2"},"EventID":"E3","Timestamp":1500618037189,"pii":{}},{"EventID":"E4","Data":{"value":"50","unit":"percentage"},"Timestamp":1500618037189},{"Data":{"unit":"kmph","value":"60"},"EventID":"E5","Timestamp":1500618037189,"pii":{}}]}',
'{"uploadTimeStamp":"1500618045735","ID":"123ID","data":[{"Data":{"unit":"rpm","value":"0"},"EventID":"E1","Timestamp":1500618045735,"pii":{}},{"Data":{"heading":"N","loc1":"false","loc2":"13.022425","loc3":"77.760587","loc4":"false","speed":"10"},"EventID":"E2","Timestamp":1500618045735,"pii":{}},{"Data":{"x":"1.1","y":"1.2","z":"2.2"},"EventID":"E3","Timestamp":1500618045735,"pii":{}},{"EventID":"E4","Data":{"value":"50","unit":"percentage"},"Timestamp":1500618045735},{"Data":{"unit":"kmph","value":"60"},"EventID":"E5","Timestamp":1500618045735,"pii":{}}]}',
'{"REGULAR_DUMMY":"REGULAR_DUMMY", "ID":"123ID", "uploadTimeStamp":1500546893837}',
'{"REGULAR_DUMMY":"text_of_json_per_item_in_list"}'
]
jsonRDD = spark.sparkContext.parallelize(jsonStrings)
df = spark.read.json(jsonRDD)
df.show()
# the schema of the json is merged
df.printSchema()
# Starting with Spark 2.2 you can read a multiline json
# ideally you want to receive the json on a single line
m_json = '../data/multiline.json'
spark.read.json(m_json).printSchema()
spark.read.json(m_json).show()
spark.read.json(m_json, multiLine=True).printSchema()
df = df.filter(df['data'].isNotNull()).drop('REGULAR_DUMMY')
df.select('data').show(20, False) # why did I used False here?!
df.select('data').printSchema()
df.select('data.Data.speed').show(20, False)
df.select('data.Data.speed').printSchema()
# exploding nested jsons fields is a "hard" problem in spark
from pyspark.sql.functions import explode, arrays_zip
df.select(explode(arrays_zip('data'))).show(20, False)
df.select(explode(arrays_zip('data'))).printSchema()
###Output
root
|-- col: struct (nullable = false)
| |-- data: struct (nullable = true)
| | |-- Data: struct (nullable = true)
| | | |-- heading: string (nullable = true)
| | | |-- loc1: string (nullable = true)
| | | |-- loc2: string (nullable = true)
| | | |-- loc3: string (nullable = true)
| | | |-- loc4: string (nullable = true)
| | | |-- speed: string (nullable = true)
| | | |-- unit: string (nullable = true)
| | | |-- value: string (nullable = true)
| | | |-- x: string (nullable = true)
| | | |-- y: string (nullable = true)
| | | |-- z: string (nullable = true)
| | |-- EventID: string (nullable = true)
| | |-- Timestamp: long (nullable = true)
###Markdown
JDBCspark.read.jdbcdepending on the number of partitions, the db will receive multiple connections. This might make the db unresponsive.used less in big projectsthe code below is just an example. read the following article for more details about jdbc readshttps://github.com/awesome-spark/spark-gotchas/blob/master/05_spark_sql_and_dataset_api.mdreading-data-using-jdbc-source
###Code
jdbcDF = spark.read \
.format("jdbc") \
.option("url", "jdbc:postgresql:dbserver") \
.option("dbtable", "schema.tablename") \
.option("user", "username") \
.option("password", "password") \
.load()
jdbcDF2 = spark.read \
.jdbc("jdbc:postgresql:dbserver", "schema.tablename",
properties={"user": "username", "password": "password"})
# Specifying dataframe column data types on read
jdbcDF3 = spark.read \
.format("jdbc") \
.option("url", "jdbc:postgresql:dbserver") \
.option("dbtable", "schema.tablename") \
.option("user", "username") \
.option("password", "password") \
.option("customSchema", "id DECIMAL(38, 0), name STRING") \
.load()
###Output
_____no_output_____
###Markdown
Parquetspark.read.parquethttps://databricks.com/glossary/what-is-parquet
###Code
df = spark.read.parquet(parquet_path)
###Output
_____no_output_____
###Markdown
read more about partition discoveryhttps://spark.apache.org/docs/latest/sql-data-sources-parquet.htmlpartition-discovery FORMAT & LOADgeneric way of reading data from the above data sources
###Code
df = spark.read.format("parquet").load(parquet_path)
df = spark.read.format('jdbc').option().load()
df = spark.read.format('csv').option().load()
###Output
_____no_output_____
###Markdown
useful when developing frameworks (reading metadata and using generic ETL) Writesame as read, with additional options related to number of partitions.assuming df is the final dataframe, you can do something like in the cells belowread the entire list of options athttps://spark.apache.org/docs/latest/api/python/pyspark.sql.htmlpyspark.sql.DataFrameWriter
###Code
df.write.csv(output_csv)
df.write.parquet(output_parquet)
df.write.json(output_json)
df.write.jdbc()
df.format('parquet|jdbc|json').option().save()
###Output
_____no_output_____
###Markdown
Transformations
###Code
csv_file = '../data/uk-postcode.csv'
df = spark.read.csv(csv_file, header = True, inferSchema=True)
df.show()
df.describe().show(5, False)
+-------+---------+------------------+------------------+------------------+-----------------+--------+-----------------+--------+-----------------+-----------------+------------------+------------------+
|summary|Post Code|Latitude |Longitude |Easting |Northing |GridRef |Town/Area |Region |Postcodes |Active postcodes |Population |Households |
+-------+---------+------------------+------------------+------------------+-----------------+--------+-----------------+--------+-----------------+-----------------+------------------+------------------+
|count |3107 |3094 |3094 |3082 |3082 |3082 |3107 |3106 |3086 |3086 |2814 |2814 |
|mean |null |53.034849482870136|-2.051575161550915|399520.80012978584|351774.8997404283|null |null |null |832.2216461438755|564.9565780946209|22437.184434968018|9390.271144278608 |
|stddev |null |1.8865014315147148|1.8334605907478179|121798.85778550198|209187.830957896 |null |null |null |600.2495165657779|397.5467297411277|16578.512623860708|6814.9887522729805|
|min |AB1 |49.1995 |-7.82593 |22681 |8307 |HU390111|Abbey Hey, Gorton|Aberdeen|1 |0 |2 |1 |
|max |ZE3 |60.3156 |1.73337 |653560 |1159304 |TV604994|York City Centre |York |3621 |2644 |153812 |61886 |
+-------+---------+------------------+------------------+------------------+-----------------+--------+-----------------+--------+-----------------+-----------------+------------------+------------------+
# select fields
df.select('Postcode', 'Latitude').show()
# rename column
df = df.withColumnRenamed('Postcode', 'Post Code')
df.show()
df.schema.fieldNames()
from pyspark.sql.functions import col, when
df = df.withColumn('type', when(col('Population') < 10000, 'village').when(df['Population'] < 20000, 'town').otherwise('city').alias('type'))
df.select('type').distinct().show()
# if you write a condition like this, is easier to read it
df = df.withColumn('schema',
when(col('Population').isNull(), None)\
.when(col('Population') < 10000, 'village')\
.when(df['Population'] < 20000, 'town')\
.otherwise('city'))
df.filter(df.schema.isNull()).show(5)
# why do we have an error here?
df.filter(df['schema'].isNull()).show(5)
+---------+--------+---------+-------+--------+--------+--------------------+--------+---------+----------------+----------+----------+----+
|Post Code|Latitude|Longitude|Easting|Northing| GridRef| Town/Area| Region|Postcodes|Active postcodes|Population|Households|type|
+---------+--------+---------+-------+--------+--------+--------------------+--------+---------+----------------+----------+----------+----+
| AB1| 57.1269| -2.13644| 391839| 804005|NJ918040| Aberdeen|Aberdeen| 2655| 0| null| null|city|
| AB2| 57.1713| -2.14152| 391541| 808948|NJ915089| Aberdeen|Aberdeen| 3070| 0| null| null|city|
| AB3| 57.0876| -2.59624| 363963| 799780|NO639997| Aberdeen|Aberdeen| 2168| 0| null| null|city|
| AB4| 57.5343| -2.12713| 392487| 849358|NJ924493|Fraserburgh, Pete...|Aberdeen| 2956| 0| null| null|city|
| AB5| 57.4652| -2.64764| 361248| 841843|NJ612418|Buckie, Huntly, I...|Aberdeen| 3002| 0| null| null|city|
+---------+--------+---------+-------+--------+--------+--------------------+--------+---------+----------------+----------+----------+----+
# replace null
df.na.fill('').filter(df['GridRef'] == 'SJ261898').show()
df.na.fill('ThisWasNULL').filter(df['GridRef'] == 'SJ261898').show()
df.na.drop().filter(df['GridRef'] == 'SJ261898').show()
df.select('schema').na.fill('').replace({'town': 'Town'}).distinct().collect()
df.withColumn('extra_column', 'literal_value').printSchema()
from pyspark.sql.functions import lit
df.withColumn('extra_column', lit('literal_value')).printSchema()
# group by
ag = df.groupby('Region', 'Town/Area')
print(ag.count(), df.count())
df.groupby('Region').count().show()
df.groupby('Region', 'type').sum().show()
ag = df.groupby('Region', 'type').agg({'Region': 'count'}).withColumnRenamed('count(Region)', 'asd')
ag.show(20)
import pyspark.sql.functions as pf
ag = df.groupby('Region', 'type').agg(pf.sum('Population').alias('sum_population'), pf.count('PostCodes'))
ag.show()
###Output
+--------------------+-------+--------------+----------------+
| Region| type|sum_population|count(PostCodes)|
+--------------------+-------+--------------+----------------+
| New Forest| city| 157569| 5|
| North Down| city| 83041| 3|
| Hereford|village| 19355| 3|
| Llandrindod Wells|village| 19566| 6|
| Mole Valley|village| 4313| 1|
| Hambleton| town| 12832| 1|
| South Ayrshire| town| 61814| 4|
| Blaenau Gwent| city| 75512| 3|
| Rotherham| town| 19772| 1|
|Richmond upon Thames| town| 52176| 3|
| Somerset|village| 58212| 14|
| Strabane|village| 7691| 1|
| Leeds|village| 45159| 6|
| Telford and Wrekin| city| 99808| 3|
| Blackburn| city| null| 1|
| Bournemouth| city| 102019| 4|
| Antrim| city| 40205| 1|
| Tendring| town| 25258| 2|
| East Hampshire| town| 23140| 2|
| Rother| town| 59782| 4|
+--------------------+-------+--------------+----------------+
only showing top 20 rows
###Markdown
PartitionsChoose the right partition column. Think about how the cardinality of that column affects how the data gets distributed.When in doubt hash is better (safer)
###Code
# get number of partitions
df.rdd.getNumPartitions()
# you can use repartition to redistribuite data
# triggers a shuffle
# repartition by hash
df = df.repartition(10)
# repartition by columns
df = df.repartition('col1','col2')
# repartition by hash and cols
df = df.repartition(10, 'col1', 'col2')
# you can use coalesce to reduce the number of partitions
# assuming 10 partitions and 5 workers
df = df.coalesce(5)
# will reduce the number of partions without triggering a shuffle
# df.coalesce(20) will not do anything because 20 > 10
df.select('Region').distinct().count()
df_200 = df.repartition('Region')
df_200.rdd.getNumPartitions()
df_200.show(20)
df_200.coalesce(427).rdd.getNumPartitions()
df_200.coalesce(100).rdd.getNumPartitions()
# if you do coalesce(1) only one worker will do the work.
# if you have "unexecuted" transformations repartition(1) is better
# "save" the dataframe in memory or disk to reusse it
df = df.cache() # <== very important to "store" the result in a new variable
###Output
_____no_output_____
###Markdown
UDF
###Code
from pyspark.sql.functions import udf
def my_custom_fct(x, y):
    """Example Python function to wrap as a Spark UDF.

    Adds the two column values; returns None when either input is missing,
    mirroring SQL NULL propagation. (The original sketch had a syntax error —
    ``else`` without a colon — and referenced an undefined ``condition``.)
    """
    if x is None or y is None:
        return None
    return x + y
udf_my_custom_fct = udf(my_custom_fct)
df.show(20)
df.withColumn('calculated_value', udf_my_custom_fct(df['Population']))
###Output
_____no_output_____ |
world_series_prediction-master/notebooks_for_clean_data/FinalProject_DataML.ipynb | ###Markdown
Import csvs for 1905 and 1969 to clean up dataset for machine learning application. Remove columns that are not needed for analysis.
###Code
# Import dependencies.
import pandas as pd
# Open up the 1905.csv and inspect.
df2 = pd.read_csv("../clean_data/1905.csv")
df2 = df2.drop("Unnamed: 0", axis=1)
df2
df3 = pd.read_csv("../clean_data/1969.csv")
df3 = df3.drop("Unnamed: 0", axis=1)
df3
df2 = df2.drop(["teamID", "divID", "Rank", "Ghome", "DivWin", "WCWin", "LgWin", "SF", "ER", "CG", "SHO", "SV",
"name", "park", "attendance", "BPF", "PPF", "teamIDlahman45", "teamIDretro", "AB", "RA", "IPouts",
"index", "L", "teamIDBR", "HBP", "lgID", "2B", "3B", "CS", "DP", "FP", "SO"], axis=1)
df2.head()
df3 = df3.drop(["teamID", "divID", "Rank", "Ghome", "DivWin", "WCWin", "LgWin", "SF", "ER", "CG", "SHO", "SV",
"name", "park", "attendance", "BPF", "PPF", "teamIDlahman45", "teamIDretro", "AB", "RA", "IPouts",
"index", "L", "teamIDBR", "HBP", "lgID", "2B", "3B", "CS", "DP", "FP", "SO"], axis=1)
df3.head()
###Output
_____no_output_____
###Markdown
Determine the win percent (WP) per team. Append WP column with data. Remove the W and G column.
###Code
# Derive a win-percentage (WP) column for the first table, row by row.
win_percent1 = []
for entry in range(len(df2)):
    win = df2["W"][entry]    # wins for this team/season row
    total = df2["G"][entry]  # games played
    percentage = int((win/total) * 100)  # truncated to a whole percent
    win_percent1.append(percentage)
df2["WP"] = win_percent1
# W and G are now redundant — WP encodes the same signal for the model.
df2 = df2.drop(["G", "W"], axis=1)
df2  # notebook display
# Same transformation for the second (1969-era) table.
win_percent2 = []
for entry in range(len(df3)):
    win = df3["W"][entry]
    total = df3["G"][entry]
    percentage = int((win/total) * 100)
    win_percent2.append(percentage)
df3["WP"] = win_percent2
df3 = df3.drop(["W", "G"], axis=1)
df3  # notebook display
###Output
_____no_output_____
###Markdown
Export the two tables.
###Code
df2.to_csv("../clean_data/1905ML.csv")
df3.to_csv("../clean_data/1969ML.csv")
###Output
_____no_output_____ |
practical_introduction_to_NLP_part1.ipynb | ###Markdown
Getting Started with NLTK
###Code
import nltk
nltk.download()
nltk.download('book')
from nltk.book import *
text1
text9
###Output
_____no_output_____
###Markdown
Searching Text A concordance view shows us every occurrence of a given word, together with some context.
###Code
text1.concordance("monstrous")
text9.concordance("Thursday")
text3.concordance("lived")
text2.concordance("affection")
text1.similar("monstrous")
text2.similar("monstrous")
text2.common_contexts(["monstrous","very"])
text4.dispersion_plot(["citizens", "democracy", "freedom", "duties", "America"])
text2.dispersion_plot(["citizens", "democracy", "freedom", "duties", "America"])
# text3.generate()# not font in nltk 3
###Output
_____no_output_____
###Markdown
Counting Vocabulary
###Code
len(text2)
sorted(set(text2))
len(set(text2))
len(set(text3)) / len(text3)
text3.count("smote")
100 * text4.count('a') / len(text4)
def lexical_diversity(text):
    """Fraction of tokens in *text* that are distinct (type/token ratio)."""
    distinct_tokens = set(text)
    return len(distinct_tokens) / len(text)
lexical_diversity(text3)
lexical_diversity(text4)
def percentage(count, total):
    """Express *count* as a percentage of *total*."""
    scaled = 100 * count
    return scaled / total
percentage(4, 5)
percentage(text4.count('a'), len(text4))
###Output
_____no_output_____
###Markdown
Texts as Lists of Words Lists
###Code
sent1 = ['Call', 'me', 'Rashmi', '.']
sent1
len(sent1)
lexical_diversity(sent1)
sent2
sent3
['Monty', 'Python'] + ['and', 'the', 'Holy', 'Grail']
sent4+ sent5
sent1.append("Some")
sent1
###Output
_____no_output_____
###Markdown
Indexing Lists
###Code
text4[13]
text4.index('Among')
text5[16715:16735]
text6[1600:1625]
###Output
_____no_output_____
###Markdown
Frequency Distributions
###Code
fdist1 = FreqDist(text1)
fdist1
fdist1.most_common(50)
fdist1.most_common(5)
fdist1['like']
## Fine-grained Selection of Words
###Output
_____no_output_____
###Markdown
Fine-grained Selection of Words
###Code
V = set(text1)
long_words = [w for w in V if len(w) > 15]
sorted(long_words)
fdist5 = FreqDist(text5)
sorted(w for w in set(text5) if len(w) > 7 and fdist5[w] > 7)
###Output
_____no_output_____
###Markdown
Collocations and Bigrams
###Code
list(bigrams(['more', 'is', 'said', 'than', 'done']))
text4.collocations()
###Output
_____no_output_____
###Markdown
Counting Other Things
###Code
[len(w) for w in text1]
fdist = FreqDist(len(w) for w in text1)
fdist
fdist.most_common()
fdist.max()
sorted(w for w in set(text1) if w.endswith('ableness'))
###Output
_____no_output_____ |
benchmark_notebooks/similar/ood/Similar_OOD.ipynb | ###Markdown
SMI AL Loop
###Code
import h5py
import time
import random
import datetime
import copy
import numpy as np
import os
import csv
import json
import subprocess
import sys
import PIL.Image as Image
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.models as models
from matplotlib import pyplot as plt
from distil.distil.utils.models.resnet import ResNet18
from trust.trust.utils.custom_dataset import load_dataset_custom
from torch.utils.data import Subset
from torch.autograd import Variable
import tqdm
from math import floor
from sklearn.metrics.pairwise import cosine_similarity, pairwise_distances
from distil.distil.active_learning_strategies.scmi import SCMI
from distil.distil.active_learning_strategies.smi import SMI
from distil.distil.active_learning_strategies.badge import BADGE
from distil.distil.active_learning_strategies.entropy_sampling import EntropySampling
from distil.distil.active_learning_strategies.gradmatch_active import GradMatchActive
seed=42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
from distil.distil.utils.utils import *
def model_eval_loss(data_loader, model, criterion):
    """Accumulate *criterion* over every batch of *data_loader*.

    Runs under ``torch.no_grad`` and moves each batch to the module-level
    ``device`` before the forward pass. Returns the sum of per-batch loss
    values (0 for an empty loader).
    """
    total_loss = 0
    with torch.no_grad():
        for inputs, targets in data_loader:
            inputs = inputs.to(device)
            targets = targets.to(device, non_blocking=True)
            batch_loss = criterion(model(inputs), targets)
            total_loss += batch_loss.item()
    return total_loss
def init_weights(m):
    """Xavier-uniform init for conv/linear weights; linear biases get 0.01.

    Intended for ``model.apply(init_weights)`` — modules of other types are
    left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight)
    elif isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
def weight_reset(m):
    """Re-draw the parameters of conv/linear layers; other modules are ignored."""
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        m.reset_parameters()
def create_model(name, num_cls, device, embedding_type):
    """Instantiate a freshly initialised model on *device*.

    Parameters
    ----------
    name : str
        One of 'ResNet18', 'MnistNet' or 'ResNet164'.
        NOTE(review): any other value leaves ``model`` unbound and raises
        UnboundLocalError at ``model.apply``.
    num_cls : int
        Number of output classes (not applied in the torchvision branch).
    device : torch.device or str
        Target device for the returned model.
    embedding_type : str
        When "gradients", the project's ResNet18 is used; otherwise the
        stock torchvision resnet18 — presumably because gradient embeddings
        need the custom head; TODO confirm.
    """
    if name == 'ResNet18':
        if embedding_type == "gradients":
            model = ResNet18(num_cls)
        else:
            # NOTE(review): torchvision resnet18 keeps its default
            # 1000-class head here; num_cls is ignored — verify intended.
            model = models.resnet18()
    elif name == 'MnistNet':
        model = MnistNet()
    elif name == 'ResNet164':
        model = ResNet164(num_cls)
    model.apply(init_weights)  # Xavier init for conv/linear layers
    model = model.to(device)
    return model
def loss_function():
    """Return the training criterion and its per-sample (unreduced) twin."""
    return nn.CrossEntropyLoss(), nn.CrossEntropyLoss(reduction='none')
def optimizer_with_scheduler(model, num_epochs, learning_rate, m=0.9, wd=5e-4):
    """SGD optimiser plus a cosine-annealing LR schedule over *num_epochs*."""
    sgd = optim.SGD(model.parameters(), lr=learning_rate, momentum=m, weight_decay=wd)
    cosine = torch.optim.lr_scheduler.CosineAnnealingLR(sgd, T_max=num_epochs)
    return sgd, cosine
def optimizer_without_scheduler(model, learning_rate, m=0.9, wd=5e-4):
    """Plain SGD optimiser with momentum and weight decay (no LR schedule)."""
    return optim.SGD(model.parameters(), lr=learning_rate, momentum=m, weight_decay=wd)
def generate_cumulative_timing(mod_timing):
    """Convert a per-round timing sequence (seconds) into cumulative hours."""
    running_totals = np.cumsum(np.asarray(mod_timing, dtype=float))
    return running_totals / 3600
def find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications,
final_tst_predictions, saveDir, prefix):
#find queries from the validation set that are erroneous
# saveDir = os.path.join(saveDir, prefix)
# if(not(os.path.exists(saveDir))):
# os.mkdir(saveDir)
val_err_idx = list(np.where(np.array(final_val_classifications) == False)[0])
tst_err_idx = list(np.where(np.array(final_tst_classifications) == False)[0])
val_class_err_idxs = []
tst_err_log = []
val_err_log = []
for i in range(num_cls):
if(feature=="ood"): tst_class_idxs = list(torch.where(torch.Tensor(test_set.targets.float()) == i)[0].cpu().numpy())
if(feature=="classimb"): tst_class_idxs = list(torch.where(torch.Tensor(test_set.targets) == i)[0].cpu().numpy())
val_class_idxs = list(torch.where(torch.Tensor(val_set.targets.float()) == i)[0].cpu().numpy())
#err classifications per class
val_err_class_idx = set(val_err_idx).intersection(set(val_class_idxs))
tst_err_class_idx = set(tst_err_idx).intersection(set(tst_class_idxs))
if(len(val_class_idxs)>0):
val_error_perc = round((len(val_err_class_idx)/len(val_class_idxs))*100,2)
else:
val_error_perc = 0
tst_error_perc = round((len(tst_err_class_idx)/len(tst_class_idxs))*100,2)
print("val, test error% for class ", i, " : ", val_error_perc, tst_error_perc)
val_class_err_idxs.append(val_err_class_idx)
tst_err_log.append(tst_error_perc)
val_err_log.append(val_error_perc)
tst_err_log.append(sum(tst_err_log)/len(tst_err_log))
val_err_log.append(sum(val_err_log)/len(val_err_log))
return tst_err_log, val_err_log, val_class_err_idxs
def aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget, augrandom=False):
all_lake_idx = list(range(len(lake_set)))
if(not(len(subset)==budget) and augrandom):
print("Budget not filled, adding ", str(int(budget) - len(subset)), " randomly.")
remain_budget = int(budget) - len(subset)
remain_lake_idx = list(set(all_lake_idx) - set(subset))
random_subset_idx = list(np.random.choice(np.array(remain_lake_idx), size=int(remain_budget), replace=False))
subset += random_subset_idx
lake_ss = SubsetWithTargets(true_lake_set, subset, torch.Tensor(true_lake_set.targets.float())[subset])
if(feature=="ood"):
ood_lake_idx = list(set(lake_subset_idxs)-set(subset))
private_set = SubsetWithTargets(true_lake_set, ood_lake_idx, torch.Tensor(np.array([split_cfg['num_cls_idc']]*len(ood_lake_idx))).float())
remain_lake_idx = list(set(all_lake_idx) - set(lake_subset_idxs))
remain_lake_set = SubsetWithTargets(lake_set, remain_lake_idx, torch.Tensor(lake_set.targets.float())[remain_lake_idx])
remain_true_lake_set = SubsetWithTargets(true_lake_set, remain_lake_idx, torch.Tensor(true_lake_set.targets.float())[remain_lake_idx])
print(len(lake_ss),len(remain_lake_set),len(lake_set))
if(feature!="ood"): assert((len(lake_ss)+len(remain_lake_set))==len(lake_set))
aug_train_set = torch.utils.data.ConcatDataset([train_set, lake_ss])
if(feature=="ood"):
return aug_train_set, remain_lake_set, remain_true_lake_set, private_set, lake_ss
else:
return aug_train_set, remain_lake_set, remain_true_lake_set, lake_ss
def getQuerySet(val_set, val_class_err_idxs, imb_cls_idx, miscls):
miscls_idx = []
if(miscls):
for i in range(len(val_class_err_idxs)):
if i in imb_cls_idx:
miscls_idx += val_class_err_idxs[i]
print("total misclassified ex from imb classes: ", len(miscls_idx))
else:
for i in imb_cls_idx:
imb_cls_samples = list(torch.where(torch.Tensor(val_set.targets.float()) == i)[0].cpu().numpy())
miscls_idx += imb_cls_samples
print("total samples from imb classes as targets: ", len(miscls_idx))
return Subset(val_set, miscls_idx)
def getPrivateSet(lake_set, subset, private_set):
#augment prev private set and current subset
new_private_set = SubsetWithTargets(lake_set, subset, torch.Tensor(lake_set.targets.float())[subset])
# new_private_set = Subset(lake_set, subset)
total_private_set = torch.utils.data.ConcatDataset([private_set, new_private_set])
return total_private_set
def remove_ood_points(lake_set, subset, idc_idx):
idx_subset = []
subset_cls = torch.Tensor(lake_set.targets.float())[subset]
for i in idc_idx:
idc_subset_idx = list(torch.where(subset_cls == i)[0].cpu().numpy())
idx_subset += list(np.array(subset)[idc_subset_idx])
print(len(idx_subset),"/",len(subset), " idc points.")
return idx_subset
def getPerClassSel(lake_set, subset, num_cls):
perClsSel = []
subset_cls = torch.Tensor(lake_set.targets.float())[subset]
for i in range(num_cls):
cls_subset_idx = list(torch.where(subset_cls == i)[0].cpu().numpy())
perClsSel.append(len(cls_subset_idx))
return perClsSel
feature = "ood"
device_id = 0
run="fkna_3"
datadir = 'data/'
data_name = 'cifar10'
model_name = 'ResNet18'
num_rep = 10
learning_rate = 0.01
num_runs = 1 # number of random runs
computeClassErrorLog = True
magnification = 1
device = "cuda:"+str(device_id) if torch.cuda.is_available() else "cpu"
datkbuildPath = "./datk/build"
exePath = "cifarSubsetSelector"
print("Using Device:", device)
doublePrecision = True
linearLayer = True
miscls = False
# handler = DataHandler_CIFAR10
augTarget = True
embedding_type = "gradients"
if(feature=="ood"):
num_cls=8
budget=250
num_epochs = int(10)
split_cfg = {'num_cls_idc':8, 'per_idc_train':200, 'per_idc_val':10, 'per_idc_lake':500, 'per_ood_train':0, 'per_ood_val':0, 'per_ood_lake':5000}#cifar10
# split_cfg = {'num_cls_idc':50, 'per_idc_train':100, 'per_idc_val':2, 'per_idc_lake':100, 'per_ood_train':0, 'per_ood_val':0, 'per_ood_lake':500}#cifar100
initModelPath = "weights/" + data_name + "_" + feature + "_" + model_name + "_" + str(learning_rate) + "_" + str(split_cfg["per_idc_train"]) + "_" + str(split_cfg["per_idc_val"]) + "_" + str(split_cfg["num_cls_idc"])
###Output
_____no_output_____
###Markdown
AL Like Train Loop
###Code
def train_model_al(datkbuildPath, exePath, num_epochs, dataset_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run,
device, computeErrorLog, strategy="SIM", sf=""):
# torch.manual_seed(42)
# np.random.seed(42)
print(strategy, sf)
#load the dataset based on type of feature
train_set, val_set, test_set, lake_set, sel_cls_idx, num_cls = load_dataset_custom(datadir, dataset_name, feature, split_cfg, False, True)
print("selected classes are: ", sel_cls_idx)
if(feature=="ood"): num_cls+=1 #Add one class for OOD class
N = len(train_set)
trn_batch_size = 20
val_batch_size = 10
tst_batch_size = 100
trainloader = torch.utils.data.DataLoader(train_set, batch_size=trn_batch_size,
shuffle=True, pin_memory=True)
valloader = torch.utils.data.DataLoader(val_set, batch_size=val_batch_size,
shuffle=False, pin_memory=True)
tstloader = torch.utils.data.DataLoader(test_set, batch_size=tst_batch_size,
shuffle=False, pin_memory=True)
lakeloader = torch.utils.data.DataLoader(lake_set, batch_size=tst_batch_size,
shuffle=False, pin_memory=True)
true_lake_set = copy.deepcopy(lake_set)
# Budget for subset selection
bud = budget
# Variables to store accuracies
fulltrn_losses = np.zeros(num_epochs)
val_losses = np.zeros(num_epochs)
tst_losses = np.zeros(num_epochs)
timing = np.zeros(num_epochs)
val_acc = np.zeros(num_epochs)
full_trn_acc = np.zeros(num_epochs)
tst_acc = np.zeros(num_epochs)
final_tst_predictions = []
final_tst_classifications = []
best_val_acc = -1
csvlog = []
val_csvlog = []
# Results logging file
print_every = 3
# all_logs_dir = '/content/drive/MyDrive/research/tdss/SMI_active_learning_results_woVal/' + dataset_name + '/' + feature + '/'+ sf + '/' + str(bud) + '/' + str(run)
all_logs_dir = './SMI_active_learning_results/' + dataset_name + '/' + feature + '/'+ sf + '/' + str(bud) + '/' + str(run)
print("Saving results to: ", all_logs_dir)
subprocess.run(["mkdir", "-p", all_logs_dir])
exp_name = dataset_name + "_" + feature + "_" + strategy + "_" + str(len(sel_cls_idx)) +"_" + sf + '_budget:' + str(bud) + '_epochs:' + str(num_epochs) + '_linear:' + str(linearLayer) + '_runs' + str(run)
print(exp_name)
res_dict = {"dataset":data_name,
"feature":feature,
"sel_func":sf,
"sel_budget":budget,
"num_selections":num_epochs,
"model":model_name,
"learning_rate":learning_rate,
"setting":split_cfg,
"all_class_acc":None,
"test_acc":[],
"sel_per_cls":[],
"sel_cls_idx":sel_cls_idx.tolist()}
# Model Creation
model = create_model(model_name, num_cls, device, embedding_type)
model1 = create_model(model_name, num_cls, device, embedding_type)
# Loss Functions
criterion, criterion_nored = loss_function()
strategy_args = {'batch_size': 20, 'device':'cuda', 'num_partitions':1, 'wrapped_strategy_class': None,
'embedding_type':'gradients', 'keep_embedding':False}
unlabeled_lake_set = LabeledToUnlabeledDataset(lake_set)
if(strategy == "AL"):
if(sf=="badge"):
strategy_sel = BADGE(train_set, unlabeled_lake_set, model, num_cls, strategy_args)
elif(sf=="us"):
strategy_sel = EntropySampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)
elif(sf=="glister" or sf=="glister-tss"):
strategy_sel = GLISTER(train_set, unlabeled_lake_set, model, num_cls, strategy_args, val_set, typeOf='rand', lam=0.1)
elif(sf=="gradmatch-tss"):
strategy_sel = GradMatchActive(train_set, unlabeled_lake_set, model, num_cls, strategy_args, val_set)
elif(sf=="coreset"):
strategy_sel = CoreSet(train_set, unlabeled_lake_set, model, num_cls, strategy_args)
elif(sf=="leastconf"):
strategy_sel = LeastConfidence(train_set, unlabeled_lake_set, model, num_cls, strategy_args)
elif(sf=="margin"):
strategy_sel = MarginSampling(train_set, unlabeled_lake_set, model, num_cls, strategy_args)
if(strategy == "SIM"):
if(sf.endswith("mic")):
strategy_args['scmi_function'] = sf.split("mic")[0] + "cmi"
strategy_sel = SCMI(train_set, unlabeled_lake_set, val_set, val_set, model, num_cls, strategy_args)
if(sf.endswith("mi")):
strategy_args['smi_function'] = sf
strategy_sel = SMI(train_set, unlabeled_lake_set, val_set, model, num_cls, strategy_args)
strategy_args['verbose'] = True
strategy_args['optimizer'] = "LazyGreedy"
# Getting the optimizer and scheduler
# optimizer, scheduler = optimizer_with_scheduler(model, num_epochs, learning_rate)
optimizer = optimizer_without_scheduler(model, learning_rate)
private_set = []
for i in range(num_epochs):
print("AL epoch: ", i)
tst_loss = 0
tst_correct = 0
tst_total = 0
val_loss = 0
val_correct = 0
val_total = 0
if(i==0):
print("initial training epoch")
if(os.path.exists(initModelPath)):
model.load_state_dict(torch.load(initModelPath, map_location=device))
print("Init model loaded from disk, skipping init training: ", initModelPath)
model.eval()
with torch.no_grad():
final_val_predictions = []
final_val_classifications = []
for batch_idx, (inputs, targets) in enumerate(valloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
val_loss += loss.item()
if(feature=="ood"):
_, predicted = outputs[...,:-1].max(1)
else:
_, predicted = outputs.max(1)
val_total += targets.size(0)
val_correct += predicted.eq(targets).sum().item()
final_val_predictions += list(predicted.cpu().numpy())
final_val_classifications += list(predicted.eq(targets).cpu().numpy())
final_tst_predictions = []
final_tst_classifications = []
for batch_idx, (inputs, targets) in enumerate(tstloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
tst_loss += loss.item()
if(feature=="ood"):
_, predicted = outputs[...,:-1].max(1)
else:
_, predicted = outputs.max(1)
tst_total += targets.size(0)
tst_correct += predicted.eq(targets).sum().item()
final_tst_predictions += list(predicted.cpu().numpy())
final_tst_classifications += list(predicted.eq(targets).cpu().numpy())
best_val_acc = (val_correct/val_total)
val_acc[i] = val_correct / val_total
tst_acc[i] = tst_correct / tst_total
val_losses[i] = val_loss
tst_losses[i] = tst_loss
res_dict["test_acc"].append(tst_acc[i])
continue
else:
unlabeled_lake_set = LabeledToUnlabeledDataset(lake_set)
strategy_sel.update_data(train_set, unlabeled_lake_set)
#compute the error log before every selection
if(computeErrorLog):
tst_err_log, val_err_log, val_class_err_idxs = find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, final_tst_predictions, all_logs_dir, sf+"_"+str(bud))
csvlog.append(tst_err_log)
val_csvlog.append(val_err_log)
####SIM####
if(strategy=="SIM" or strategy=="SF"):
if(sf.endswith("mi")):
if(feature=="classimb"):
#make a dataloader for the misclassifications - only for experiments with targets
miscls_set = getQuerySet(val_set, val_class_err_idxs, sel_cls_idx, miscls)
strategy_sel.update_queries(miscls_set)
elif(sf.endswith("mic")): #configured for the OOD setting
print("val set targets: ", val_set.targets)
strategy_sel.update_queries(val_set) #In-dist samples are in Val
if(len(private_set)!=0):
print("private set targets: ", private_set.targets)
strategy_sel.update_privates(private_set)
###AL###
elif(strategy=="AL"):
if(sf=="glister-tss" or sf=="gradmatch-tss"):
miscls_set = getQuerySet(val_set, val_class_err_idxs, sel_cls_idx, miscls)
strategy_sel.update_queries(miscls_set)
print("reinit AL with targeted miscls samples")
elif(strategy=="random"):
subset = np.random.choice(np.array(list(range(len(lake_set)))), size=budget, replace=False)
strategy_sel.update_model(model)
subset = strategy_sel.select(budget)
# print("True targets of subset: ", torch.Tensor(true_lake_set.targets.float())[subset])
# hypothesized_targets = strategy_sel.predict(unlabeled_lake_set)
# print("Hypothesized targets of subset: ", hypothesized_targets)
print("#### SELECTION COMPLETE ####")
lake_subset_idxs = subset #indices wrt to lake that need to be removed from the lake
if(feature=="ood"): #remove ood points from the subset
subset = remove_ood_points(true_lake_set, subset, sel_cls_idx)
print("selEpoch: %d, Selection Ended at:" % (i), str(datetime.datetime.now()))
perClsSel = getPerClassSel(true_lake_set, lake_subset_idxs, num_cls)
res_dict['sel_per_cls'].append(perClsSel)
#augment the train_set with selected indices from the lake
if(feature=="classimb"):
train_set, lake_set, true_lake_set, add_val_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget, True) #aug train with random if budget is not filled
if(augTarget): val_set = ConcatWithTargets(val_set, add_val_set)
elif(feature=="ood"):
train_set, lake_set, true_lake_set, new_private_set, add_val_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget)
train_set = torch.utils.data.ConcatDataset([train_set, new_private_set]) #Add the OOD samples with a common OOD class
val_set = ConcatWithTargets(val_set, add_val_set)
if(len(private_set)!=0):
private_set = ConcatWithTargets(private_set, new_private_set)
else:
private_set = new_private_set
else:
train_set, lake_set, true_lake_set = aug_train_subset(train_set, lake_set, true_lake_set, subset, lake_subset_idxs, budget)
print("After augmentation, size of train_set: ", len(train_set), " lake set: ", len(lake_set), " val set: ", len(val_set))
# Reinit train and lake loaders with new splits and reinit the model
trainloader = torch.utils.data.DataLoader(train_set, batch_size=trn_batch_size, shuffle=True, pin_memory=True)
lakeloader = torch.utils.data.DataLoader(lake_set, batch_size=tst_batch_size, shuffle=False, pin_memory=True)
if(augTarget):
valloader = torch.utils.data.DataLoader(val_set, batch_size=len(val_set), shuffle=False, pin_memory=True)
model = create_model(model_name, num_cls, device, strategy_args['embedding_type'])
optimizer = optimizer_without_scheduler(model, learning_rate)
#Start training
start_time = time.time()
num_ep=1
while(full_trn_acc[i]<0.99 and num_ep<300):
model.train()
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
# Variables in Pytorch are differentiable.
inputs, target = Variable(inputs), Variable(inputs)
# This will zero out the gradients for this batch.
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# scheduler.step()
full_trn_loss = 0
full_trn_correct = 0
full_trn_total = 0
model.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
full_trn_loss += loss.item()
_, predicted = outputs.max(1)
full_trn_total += targets.size(0)
full_trn_correct += predicted.eq(targets).sum().item()
full_trn_acc[i] = full_trn_correct / full_trn_total
print("Selection Epoch ", i, " Training epoch [" , num_ep, "]" , " Training Acc: ", full_trn_acc[i], end="\r")
num_ep+=1
timing[i] = time.time() - start_time
with torch.no_grad():
final_val_predictions = []
final_val_classifications = []
for batch_idx, (inputs, targets) in enumerate(valloader): #Compute Val accuracy
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
val_loss += loss.item()
if(feature=="ood"):
_, predicted = outputs[...,:-1].max(1)
else:
_, predicted = outputs.max(1)
val_total += targets.size(0)
val_correct += predicted.eq(targets).sum().item()
final_val_predictions += list(predicted.cpu().numpy())
final_val_classifications += list(predicted.eq(targets).cpu().numpy())
final_tst_predictions = []
final_tst_classifications = []
for batch_idx, (inputs, targets) in enumerate(tstloader): #Compute test accuracy
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
tst_loss += loss.item()
if(feature=="ood"):
_, predicted = outputs[...,:-1].max(1)
else:
_, predicted = outputs.max(1)
tst_total += targets.size(0)
tst_correct += predicted.eq(targets).sum().item()
final_tst_predictions += list(predicted.cpu().numpy())
final_tst_classifications += list(predicted.eq(targets).cpu().numpy())
val_acc[i] = val_correct / val_total
tst_acc[i] = tst_correct / tst_total
val_losses[i] = val_loss
fulltrn_losses[i] = full_trn_loss
tst_losses[i] = tst_loss
full_val_acc = list(np.array(val_acc))
full_timing = list(np.array(timing))
res_dict["test_acc"].append(tst_acc[i])
print('Epoch:', i + 1, 'FullTrn,TrainAcc,ValLoss,ValAcc,TstLoss,TstAcc,Time:', full_trn_loss, full_trn_acc[i], val_loss, val_acc[i], tst_loss, tst_acc[i], timing[i])
if(i==0):
print("saving initial model")
torch.save(model.state_dict(), initModelPath) #save initial train model if not present
if(computeErrorLog):
tst_err_log, val_err_log, val_class_err_idxs = find_err_per_class(test_set, val_set, final_val_classifications, final_val_predictions, final_tst_classifications, final_tst_predictions, all_logs_dir, sf+"_"+str(bud))
csvlog.append(tst_err_log)
val_csvlog.append(val_err_log)
print(csvlog)
res_dict["all_class_acc"] = csvlog
res_dict["all_val_class_acc"] = val_csvlog
with open(os.path.join(all_logs_dir, exp_name+".csv"), "w") as f:
writer = csv.writer(f)
writer.writerows(csvlog)
#save results dir with test acc and per class selections
with open(os.path.join(all_logs_dir, exp_name+".json"), 'w') as fp:
json.dump(res_dict, fp)
###Output
_____no_output_____
###Markdown
FLCMI
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'flmic')
###Output
_____no_output_____
###Markdown
LOGDETCMI
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'logdetmic')
###Output
_____no_output_____
###Markdown
FL2MI
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'fl2mi')
###Output
_____no_output_____
###Markdown
FL1MI
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'fl1mi')
###Output
_____no_output_____
###Markdown
BADGE
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "AL","badge")
###Output
_____no_output_____
###Markdown
US
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "AL","us")
###Output
_____no_output_____
###Markdown
GLISTER
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "AL","glister-tss")
###Output
_____no_output_____
###Markdown
GCMI+DIV
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'div-gcmi')
###Output
_____no_output_____
###Markdown
GCMI
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'gcmi')
###Output
_____no_output_____
###Markdown
LOGDETMI
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SIM",'logdetmi')
###Output
_____no_output_____
###Markdown
FL
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SF",'fl')
###Output
_____no_output_____
###Markdown
LOGDET
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "SF",'logdet')
###Output
_____no_output_____
###Markdown
Random
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "random",'random')
###Output
_____no_output_____
###Markdown
CORESET
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "AL","coreset")
###Output
_____no_output_____
###Markdown
LEASTCONF
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "AL","leastconf")
###Output
_____no_output_____
###Markdown
MARGIN SAMPLING
###Code
train_model_al(datkbuildPath, exePath, num_epochs, data_name, datadir, feature, model_name, budget, split_cfg, learning_rate, run, device, computeClassErrorLog, "AL","margin")
###Output
_____no_output_____ |
docs/samples/ML Toolbox/Regression/Census/5 Service Predict.ipynb | ###Markdown
Deploying a Model and Predicting with Cloud Machine Learning Engine This notebook is the final step in a series of notebooks for doing machine learning on cloud. The [previous notebook](./4 Service Evaluate.ipynb), demonstrated evaluating a model. In a real-world scenario, it is likely that there are multiple evaluation datasets, as well as multiple models that need to be evaluated, before there is a model suitable for deployment. Workspace SetupThe first step is to setup the workspace that we will use within this notebook - the python libraries, and the Google Cloud Storage bucket that will be used to contain the inputs and outputs produced over the course of the steps.
###Code
import google.datalab as datalab
import google.datalab.ml as ml
import mltoolbox.regression.dnn as regression
import os
import requests
import time
###Output
_____no_output_____
###Markdown
The storage bucket was created earlier. We'll re-declare it here, so we can use it.
###Code
storage_bucket = 'gs://' + datalab.Context.default().project_id + '-datalab-workspace/'
storage_region = 'us-central1'
workspace_path = os.path.join(storage_bucket, 'census')
training_path = os.path.join(workspace_path, 'training')
model_name = 'census'
model_version = 'v1'
###Output
_____no_output_____
###Markdown
ModelLets take a quick look at the model that was previously produced as a result of the training job. This is the model that was evaluated, and is going to be deployed.
###Code
!gsutil ls -r {training_path}/model
###Output
gs://cloud-ml-users-datalab-workspace/census/training/model/:
gs://cloud-ml-users-datalab-workspace/census/training/model/
gs://cloud-ml-users-datalab-workspace/census/training/model/saved_model.pb
gs://cloud-ml-users-datalab-workspace/census/training/model/assets.extra/:
gs://cloud-ml-users-datalab-workspace/census/training/model/assets.extra/
gs://cloud-ml-users-datalab-workspace/census/training/model/assets.extra/features.json
gs://cloud-ml-users-datalab-workspace/census/training/model/assets.extra/schema.json
gs://cloud-ml-users-datalab-workspace/census/training/model/variables/:
gs://cloud-ml-users-datalab-workspace/census/training/model/variables/
gs://cloud-ml-users-datalab-workspace/census/training/model/variables/variables.data-00000-of-00001
gs://cloud-ml-users-datalab-workspace/census/training/model/variables/variables.index
###Markdown
DeploymentCloud Machine Learning Engine provides APIs to deploy and manage models. The first step is to create a named model resource, which can be referred to by name. The second step is to deploy the trained model binaries as a version within the model resource.**NOTE**: These steps can take a few minutes.
###Code
!gcloud ml-engine models create {model_name} --regions {storage_region}
!gcloud ml-engine versions create {model_version} --model {model_name} --origin {training_path}/model
###Output
Creating version (this might take a few minutes)......done.
###Markdown
At this point the model is ready for batch prediction jobs. It is also automatically exposed as an HTTP endpoint for performing online prediction. Online PredictionOnline prediction is accomplished by issuing HTTP requests to the specific model version endpoint. Instances to be predicted are formatted as JSON in the request body. The structure of instances depend on the model. The census model in this sample was trained using data formatted as CSV, and so the model expects inputs as CSV formatted strings.Prediction results are returned as JSON in the response.HTTP requests must contain an OAuth token auth header to succeed. In the Datalab notebook, the OAuth token corresponding to the environment is accessible without a requiring OAuth flow. Actual applications will need to determine the best strategy for acquringing OAuth tokens, generally using [Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).
###Code
api = 'https://ml.googleapis.com/v1/projects/{project}/models/{model}/versions/{version}:predict'
url = api.format(project=datalab.Context.default().project_id,
model=model_name,
version=model_version)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + datalab.Context.default().credentials.get_access_token().access_token
}
body = {
'instances': [
'490,64,2,0,1,0,2,8090,015,01,1,00590,00500,1,18,0,2,1',
'1225,32,5,0,4,5301,2,9680,015,01,1,00100,00100,1,21,2,1,1',
'1226,30,1,0,1,0,2,8680,020,01,1,00100,00100,1,16,0,2,1'
]
}
response = requests.post(url, json=body, headers=headers)
predictions = response.json()['predictions']
predictions
###Output
_____no_output_____
###Markdown
It is quite simple to issue these requests using your HTTP library of choice. Actual applications should include the logic to handle errors, including retries. Batch PredictionWhile online prediction is optimized for low-latency requests over small lists of instances, batch prediction is designed for high-throughput prediction for large datasets. The same model can be used for both.Batch prediction jobs can also be submitted via the API. They are easily submitted via the gcloud tool as well.
###Code
%file /tmp/instances.csv
490,64,2,0,1,0,2,8090,015,01,1,00590,00500,1,18,0,2,1
1225,32,5,0,4,5301,2,9680,015,01,1,00100,00100,1,21,2,1,1
1226,30,1,0,1,0,2,8680,020,01,1,00100,00100,1,16,0,2,1
prediction_data_path = os.path.join(workspace_path, 'data/prediction.csv')
!gsutil -q cp /tmp/instances.csv {prediction_data_path}
###Output
_____no_output_____
###Markdown
Each batch prediction job must have a unique name within the scope of a project. The specified name below may need to be changed if you are re-running this notebook.
###Code
job_name = 'census_prediction_' + str(int(time.time()))
prediction_path = os.path.join(workspace_path, 'predictions')
###Output
_____no_output_____
###Markdown
**NOTE**: A batch prediction job can take a few minutes, due to overhead of provisioning resources, which is reasonable for large jobs, but can far exceed the time to complete a tiny dataset such as the one used in this sample.
###Code
!gcloud ml-engine jobs submit prediction {job_name} --model {model_name} --version {model_version} --data-format TEXT --input-paths {prediction_data_path} --output-path {prediction_path} --region {storage_region}
###Output
createTime: '2017-03-07T20:00:36Z'
jobId: census_prediction_1488916830
predictionInput:
dataFormat: TEXT
inputPaths:
- gs://cloud-ml-users-datalab-workspace/census/data/prediction.csv
outputPath: gs://cloud-ml-users-datalab-workspace/census/predictions
region: us-central1
runtimeVersion: '1.0'
versionName: projects/cloud-ml-users/models/census/versions/v1
predictionOutput:
outputPath: gs://cloud-ml-users-datalab-workspace/census/predictions
state: QUEUED
###Markdown
The status of the job can be inspected in the [Cloud Console](https://console.cloud.google.com/mlengine/jobs). Once it is completed, the outputs should be visible in the specified output path.
###Code
!gsutil ls {prediction_path}
!gsutil cat {prediction_path}/prediction*
###Output
{"SERIALNO": "490", "predicted": 26.395479202270508}
{"SERIALNO": "1225", "predicted": 68.57681274414062}
{"SERIALNO": "1226", "predicted": 13.854779243469238}
|
Model backlog/Inference/110-tweet-inference-5fold-roberta-base-config-drop.ipynb | ###Markdown
Dependencies
###Code
import json, glob
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
###Output
_____no_output_____
###Markdown
Load data
###Code
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
###Output
Test samples: 3534
###Markdown
Model parameters
###Code
input_base_path = '/kaggle/input/110roberta-base/'
with open(input_base_path + 'config.json') as json_file:
config = json.load(json_file)
config
# vocab_path = input_base_path + 'vocab.json'
# merges_path = input_base_path + 'merges.txt'
base_path = '/kaggle/input/qa-transformers/roberta/'
vocab_path = base_path + 'roberta-base-vocab.json'
merges_path = base_path + 'roberta-base-merges.txt'
config['base_model_path'] = base_path + 'roberta-base-tf_model.h5'
config['config_path'] = base_path + 'roberta-base-config.json'
model_path_list = glob.glob(input_base_path + 'model' + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
###Output
Models to predict:
/kaggle/input/110roberta-base/model_fold_1.h5
/kaggle/input/110roberta-base/model_fold_2.h5
/kaggle/input/110roberta-base/model_fold_3.h5
/kaggle/input/110roberta-base/model_fold_4.h5
/kaggle/input/110roberta-base/model_fold_5.h5
###Markdown
Tokenizer
###Code
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
lowercase=True, add_prefix_space=True)
###Output
_____no_output_____
###Markdown
Pre process
###Code
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
test["text"] = test["text"].apply(lambda x: x.strip())
x_test = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
###Output
_____no_output_____
###Markdown
Model
###Code
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False,
attention_probs_dropout_prob=0.2, hidden_dropout_prob=0.2)
def model_fn(MAX_LEN):
    """Build the RoBERTa span-extraction model.

    Two independent heads score every token position; each head's scores are
    softmax-normalized into a distribution over start/end positions.

    Args:
        MAX_LEN: padded sequence length of the tokenized inputs.
    Returns:
        A Keras Model mapping [input_ids, attention_mask] -> [y_start, y_end].
    """
    ids_in = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    mask_in = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')

    backbone = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    hidden, _ = backbone({'input_ids': ids_in, 'attention_mask': mask_in})
    shared = layers.Dropout(.1)(hidden)

    def span_head(name):
        # One independent Dense(1) scorer per head, flattened to per-token logits.
        logits = layers.Flatten()(layers.Dense(1)(shared))
        return layers.Activation('softmax', name=name)(logits)

    start_out = span_head('y_start')
    end_out = span_head('y_end')
    return Model(inputs=[ids_in, mask_in], outputs=[start_out, end_out])
###Output
_____no_output_____
###Markdown
Make predictions
###Code
# NOTE(review): name is a leftover from an image kernel -- these are text
# samples, not images. Renaming would touch other cells, so only flagged here.
NUM_TEST_IMAGES = len(test)
# Accumulators for the fold-averaged start/end position distributions.
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
# Ensemble: rebuild the architecture, load each fold's weights, and average.
for model_path in model_path_list:
    print(model_path)
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    test_preds = model.predict(x_test)
    # dividing by the fold count turns the running sum into a mean
    test_start_preds += test_preds[0] / len(model_path_list)
    test_end_preds += test_preds[1] / len(model_path_list)
###Output
/kaggle/input/110roberta-base/model_fold_1.h5
/kaggle/input/110roberta-base/model_fold_2.h5
/kaggle/input/110roberta-base/model_fold_3.h5
/kaggle/input/110roberta-base/model_fold_4.h5
/kaggle/input/110roberta-base/model_fold_5.h5
###Markdown
Post process
###Code
# Convert the averaged probability distributions into hard start/end indices.
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['text_len'] = test['text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
# NOTE(review): start/end are token indices but text_len is a character count;
# clipping token indices by character length looks inconsistent -- confirm the
# intended bound. It is at most an over-generous upper bound.
test["end"].clip(0, test["text_len"], inplace=True)
test["start"].clip(0, test["end"], inplace=True)
# Decode the token span back into text. question_size presumably offsets the
# sentiment-question tokens prepended during preprocessing -- verify against
# the (not shown) decode() definition.
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
# fall back to the full tweet when decoding produced nothing
test["selected_text"].fillna(test["text"], inplace=True)
###Output
_____no_output_____
###Markdown
Visualize predictions
###Code
display(test.head(10))
###Output
_____no_output_____
###Markdown
Test set predictions
###Code
# Fill the competition's sample submission with our predictions.
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
# assumes `test` row order matches the sample submission -- TODO confirm
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
###Output
_____no_output_____ |
examples/Embedding+Clustering/PCN_EmbeddingClustering.ipynb | ###Markdown
This notebook contains an example of an embedding + clustering algorithm — in this case LaplacianEigenmaps + KMeans — applied to a Protein Contact Network of the SARS-CoV-2 spike protein.
###Code
# Pick the path separator for the current OS.
# NOTE(review): os.sep provides this directly; kept as an explicit variable
# because the rest of the notebook references add_slash_to_path.
from sys import platform

if platform == "win32":
    # Windows
    add_slash_to_path = '\\'
else:
    # Linux ("linux"/"linux2"), macOS ("darwin") and any other POSIX platform.
    # Previously only three platform strings were handled, leaving the
    # variable undefined (NameError later) on anything else.
    add_slash_to_path = '/'
import numpy as np
import subprocess
import networkx as nx
import os
# Import PCN-Miner: prefer the pip-installed package, fall back to a sibling
# git checkout two directories up from the current working directory.
try:
    from pcn.pcn_miner import pcn_miner, pcn_pymol_scripts  # installed with pip
except ImportError:  # was a bare except: only missing modules should trigger the fallback
    try:
        import sys  # git cloned: add <repo-parent>/pcn to the module search path
        cwd = os.getcwd()
        exd = os.path.abspath(os.path.join(cwd, os.pardir))
        pcnd = os.path.abspath(os.path.join(exd, os.pardir)) + add_slash_to_path + "pcn"
        sys.path.append(pcnd)
        from pcn_miner import pcn_miner, pcn_pymol_scripts
    except ImportError as err:
        # chain the original failure so the real cause stays visible
        raise ImportError("PCN-Miner is not correctly installed.") from err
# Output locations and target protein (SARS-CoV-2 spike, PDB id 6vxx).
output_path = ""
# BUGFIX: was hard-coded to the Windows separator ("Adj\\"); use the
# platform-aware separator computed above so the path also works on POSIX.
adj_path = "Adj" + add_slash_to_path
protein = "6vxx"
protein_path = "{}.pdb".format(protein)

atoms = pcn_miner.readPDBFile(protein_path)  # parse the PDB file
coordinates = pcn_miner.getResidueCoordinates(atoms)  # per-residue coordinates
coordinates
dict_residue_name = pcn_miner.associateResidueName(coordinates)
residue_names = np.array(list(dict_residue_name.items()))
residue_names

# Adjacency matrix of the Protein Contact Network: residues are connected when
# their distance falls in the 4-8 Angstrom window.
A = pcn_miner.adjacent_matrix(output_path, coordinates, protein, 4, 8)
A

# Embed the network with Laplacian Eigenmaps (d dimensions), then cluster the
# embedding into k groups with k-means.
k = 14
d = 128
lem_km_labels = pcn_miner.kmeans_laplacianeigenmaps(A, k, d)
lem_km_labels
###Output
Laplacian matrix recon. error (low rank): 57.350107
|
advanced_examples/eofs_package_example.ipynb | ###Markdown
This example uses the eofs [python package](https://ajdawson.github.io/eofs/latest/) designed by AJ Dawson for running EOF analysis on monthly Sea Surface Temperature anomaly data that is only in the central and northern Pacific Ocean. This package can be used on any gridded spatio-temporal gridded data.
###Code
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from eofs.xarray import Eof
from eofs.examples import example_data_path
# Read SST anomalies using the xarray module. The file contains November-March
# averages of SST anomaly in the central and northern Pacific.
filename = example_data_path('sst_ndjfm_anom.nc')
sst = xr.open_dataset(filename)['sst']
sst
# Create an EOF solver to do the EOF analysis. Square-root of cosine of
# latitude weights are applied before the computation of EOFs.
# (sqrt(cos(lat)) so the *squared* weights used internally are proportional
# to grid-cell area.)
coslat = np.cos(np.deg2rad(sst.coords['latitude'].values))
wgts = np.sqrt(coslat)[..., np.newaxis]
solver = Eof(sst, weights=wgts)
solver
# Retrieve the leading EOF, expressed as the correlation between the leading
# PC time series and the input SST anomalies at each grid point, and the
# leading PC time series itself.
# pcscaling=1 scales the PC time series to unit variance.
eof1 = solver.eofsAsCorrelation(neofs=1)
pc1 = solver.pcs(npcs=1, pcscaling=1)
eof1
# Plot the leading EOF expressed as correlation in the Pacific domain.
clevs = np.linspace(-1, 1, 11)
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=190))
fill = eof1[0].plot.contourf(ax=ax, levels=clevs, cmap=plt.cm.RdBu_r,
add_colorbar=False, transform=ccrs.PlateCarree())
ax.add_feature(cfeature.LAND, facecolor='w', edgecolor='k')
cb = plt.colorbar(fill, orientation='horizontal')
cb.set_label('correlation coefficient', fontsize=12)
ax.set_title('EOF1 expressed as correlation', fontsize=16)
plt.show()
# Plot the leading PC time series.
plt.figure()
pc1[:, 0].plot(color='b', linewidth=2)
ax = plt.gca()
ax.axhline(0, color='k')
ax.set_ylim(-3, 3)
ax.set_xlabel('Year')
ax.set_ylabel('Normalized Units')
ax.set_title('PC1 Time Series', fontsize=16)
plt.show()
###Output
_____no_output_____ |
ch00python/050import.ipynb | ###Markdown
Using Libraries Import To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
###Code
math.sin(1.6)
import math
math.sin(1.6)
###Output
_____no_output_____
###Markdown
We call these libraries **modules**:
###Code
type(math)
###Output
_____no_output_____
###Markdown
The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
###Code
dir(math)
###Output
_____no_output_____
###Markdown
They include properties as well as functions:
###Code
math.pi
###Output
_____no_output_____
###Markdown
You can always find out where on your storage medium a library has been imported from:
###Code
print(math.__file__[0:50])
print(math.__file__[50:])
###Output
/usr/local/Cellar/python3/3.5.2_1/Frameworks/Pytho
n.framework/Versions/3.5/lib/python3.5/lib-dynload/math.cpython-35m-darwin.so
###Markdown
Note that `import` does *not* install libraries. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library*. **Supplementary Materials**: Review the list of standard library modules: https://docs.python.org/library/ If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.**Supplementary Materials**: Review the list of modules that are packaged with Anaconda by default on different architectures: https://docs.anaconda.com/anaconda/packages/pkg-docs/ (modules installed by default are shown with ticks)We'll see later how to add more libraries to our setup. Why bother? Why bother with modules? Why not just have everything available all the time?The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this. Importing from modules Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.Things can be imported *from* modules to become part of the current module:
###Code
import math
math.sin(math.pi)
from math import sin
sin(math.pi)
###Output
_____no_output_____
###Markdown
Importing one-by-one like this is a nice compromise between typing and risk of name clashes. It *is* possible to import **everything** from a module, but you risk name clashes.
###Code
from math import *
sin(pi)
###Output
_____no_output_____
###Markdown
Import and rename You can rename things as you import them to avoid clashes or for typing convenience
###Code
import math as m
m.cos(0)
pi = 3
from math import pi as realpi
print(sin(pi), sin(realpi))
###Output
0.1411200080598672 1.2246467991473532e-16
###Markdown
Using Libraries Import To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
###Code
math.sin(1.6)
import math
math.sin(1.6)
###Output
_____no_output_____
###Markdown
We call these libraries **modules**:
###Code
type(math)
###Output
_____no_output_____
###Markdown
The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
###Code
dir(math)
###Output
_____no_output_____
###Markdown
They include properties as well as functions:
###Code
math.pi
###Output
_____no_output_____
###Markdown
You can always find out where on your storage medium a library has been imported from:
###Code
print(math.__file__[0:50])
print(math.__file__[50:])
###Output
/usr/local/Cellar/python3/3.5.2_1/Frameworks/Pytho
n.framework/Versions/3.5/lib/python3.5/lib-dynload/math.cpython-35m-darwin.so
###Markdown
Note that `import` does *not* install libraries from PyPI. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library* **Supplementary Materials**: Review the list of standard library modules: https://docs.python.org/2/library/ If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.**Supplementary Materials**: Review the list of modules that are packaged with Anaconda by default: http://docs.continuum.io/anaconda/pkg-docs.html (The green ticks)We'll see later how to add more libraries to our setup. Why bother? Why bother with modules? Why not just have everything available all the time?The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this. Importing from modules Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.Things can be imported *from* modules to become part of the current module:
###Code
import math
math.sin(math.pi)
from math import sin
sin(math.pi)
###Output
_____no_output_____
###Markdown
Importing one-by-one like this is a nice compromise between typing and risk of name clashes. It *is* possible to import **everything** from a module, but you risk name clashes.
###Code
from math import *
sin(pi)
###Output
_____no_output_____
###Markdown
Import and rename You can rename things as you import them to avoid clashes or for typing convenience
###Code
import math as m
m.cos(0)
pi=3
from math import pi as realpi
print(sin(pi), sin(realpi))
###Output
0.1411200080598672 1.2246467991473532e-16
###Markdown
Using Libraries Import To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
###Code
math.sin(1.6)
import math
math.sin(1.6)
###Output
_____no_output_____
###Markdown
We call these libraries **modules**:
###Code
type(math)
###Output
_____no_output_____
###Markdown
The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
###Code
dir(math)
###Output
_____no_output_____
###Markdown
They include properties as well as functions:
###Code
math.pi
###Output
_____no_output_____
###Markdown
You can always find out where on your storage medium a library has been imported from:
###Code
print(math.__file__[0:50])
print(math.__file__[50:])
###Output
/usr/local/Cellar/python3/3.5.2_1/Frameworks/Pytho
n.framework/Versions/3.5/lib/python3.5/lib-dynload/math.cpython-35m-darwin.so
###Markdown
Note that `import` does *not* install libraries. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library*. **Supplementary Materials**: Review the list of standard library modules: https://docs.python.org/library/ If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.**Supplementary Materials**: Review the list of modules that are packaged with Anaconda by default on different architectures: https://docs.anaconda.com/anaconda/packages/pkg-docs/ (modules installed by default are shown with ticks)We'll see later how to add more libraries to our setup. Why bother? Why bother with modules? Why not just have everything available all the time?The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this. Importing from modules Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.Things can be imported *from* modules to become part of the current module:
###Code
import math
math.sin(math.pi)
from math import sin
sin(math.pi)
###Output
_____no_output_____
###Markdown
Importing one-by-one like this is a nice compromise between typing and risk of name clashes. It *is* possible to import **everything** from a module, but you risk name clashes.
###Code
from math import *
sin(pi)
###Output
_____no_output_____
###Markdown
Import and rename You can rename things as you import them to avoid clashes or for typing convenience
###Code
import math as m
m.cos(0)
pi=3
from math import pi as realpi
print(sin(pi), sin(realpi))
###Output
0.1411200080598672 1.2246467991473532e-16
###Markdown
Using Libraries Import To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
###Code
math.sin(1.6)
import math
math.sin(1.6)
###Output
_____no_output_____
###Markdown
We call these libraries **modules**:
###Code
type(math)
###Output
_____no_output_____
###Markdown
The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
###Code
dir(math)
###Output
_____no_output_____
###Markdown
They include properties as well as functions:
###Code
math.pi
###Output
_____no_output_____
###Markdown
You can always find out where on your storage medium a library has been imported from:
###Code
print(math.__file__[0:50])
print(math.__file__[50:])
###Output
/usr/local/Cellar/python3/3.5.2_1/Frameworks/Pytho
n.framework/Versions/3.5/lib/python3.5/lib-dynload/math.cpython-35m-darwin.so
###Markdown
Note that `import` does *not* install libraries. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library*. **Supplementary Materials**: Review the [list of standard library modules](https://docs.python.org/library/). If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.**Supplementary Materials**: Review the [list of modules that are packaged with Anaconda by default on different architectures](https://docs.anaconda.com/anaconda/packages/pkg-docs/) (modules installed by default are shown with ticks).We'll see later how to add more libraries to our setup. Why bother? Why bother with modules? Why not just have everything available all the time?The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this. Importing from modules Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.Things can be imported *from* modules to become part of the current module:
###Code
import math
math.sin(math.pi)
from math import sin
sin(math.pi)
###Output
_____no_output_____
###Markdown
Importing one-by-one like this is a nice compromise between typing and risk of name clashes. It *is* possible to import **everything** from a module, but you risk name clashes.
###Code
from math import *
sin(pi)
###Output
_____no_output_____
###Markdown
Import and rename You can rename things as you import them to avoid clashes or for typing convenience
###Code
import math as m
m.cos(0)
pi = 3
from math import pi as realpi
print(sin(pi), sin(realpi))
###Output
0.1411200080598672 1.2246467991473532e-16
###Markdown
Using Libraries Import To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
###Code
math.sin(1.6)
import math
math.sin(1.6)
###Output
_____no_output_____
###Markdown
We call these libraries **modules**:
###Code
type(math)
###Output
_____no_output_____
###Markdown
The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
###Code
dir(math)
###Output
_____no_output_____
###Markdown
They include properties as well as functions:
###Code
math.pi
###Output
_____no_output_____
###Markdown
You can always find out where on your storage medium a library has been imported from:
###Code
print(math.__file__[0:50])
print(math.__file__[50:])
###Output
/usr/local/Cellar/python3/3.5.2_1/Frameworks/Pytho
n.framework/Versions/3.5/lib/python3.5/lib-dynload/math.cpython-35m-darwin.so
###Markdown
Note that `import` does *not* install libraries. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library*. **Supplementary Materials**: Review the list of standard library modules: https://docs.python.org/library/ If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.**Supplementary Materials**: Review the list of modules that are packaged with Anaconda by default on different architectures: https://docs.anaconda.com/anaconda/packages/pkg-docs/ (modules installed by default are shown with ticks)We'll see later how to add more libraries to our setup. Why bother? Why bother with modules? Why not just have everything available all the time?The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this. Importing from modules Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.Things can be imported *from* modules to become part of the current module:
###Code
import math
math.sin(math.pi)
from math import sin
sin(math.pi)
###Output
_____no_output_____
###Markdown
Importing one-by-one like this is a nice compromise between typing and risk of name clashes. It *is* possible to import **everything** from a module, but you risk name clashes.
###Code
from math import *
sin(pi)
###Output
_____no_output_____
###Markdown
Import and rename You can rename things as you import them to avoid clashes or for typing convenience
###Code
import math as m
m.cos(0)
pi = 3
from math import pi as realpi
print(sin(pi), sin(realpi))
###Output
0.1411200080598672 1.2246467991473532e-16
###Markdown
Using Libraries Import To use a function or type from a python library, rather than a **built-in** function or type, we have to import the library.
###Code
math.sin(1.6)
import math
math.sin(1.6)
###Output
_____no_output_____
###Markdown
We call these libraries **modules**:
###Code
type(math)
###Output
_____no_output_____
###Markdown
The tools supplied by a module are *attributes* of the module, and as such, are accessed with a dot.
###Code
dir(math)
###Output
_____no_output_____
###Markdown
They include properties as well as functions:
###Code
math.pi
###Output
_____no_output_____
###Markdown
You can always find out where on your storage medium a library has been imported from:
###Code
print(math.__file__[0:50])
print(math.__file__[50:])
###Output
/Users/jroberts/opt/anaconda3/envs/rsd-course/lib/
python3.8/lib-dynload/math.cpython-38-darwin.so
###Markdown
Note that `import` does *not* install libraries. It just makes them available to your current notebook session, assuming they are already installed. Installing libraries is harder, and we'll cover it later.So what libraries are available? Until you install more, you might have just the modules that come with Python, the *standard library*. **Supplementary Materials**: Review the list of standard library modules: https://docs.python.org/library/ If you installed via Anaconda, then you also have access to a bunch of modules that are commonly used in research.**Supplementary Materials**: Review the list of modules that are packaged with Anaconda by default on different architectures: https://docs.anaconda.com/anaconda/packages/pkg-docs/ (modules installed by default are shown with ticks)We'll see later how to add more libraries to our setup. Why bother? Why bother with modules? Why not just have everything available all the time?The answer is that there are only so many names available! Without a module system, every time I made a variable whose name matched a function in a library, I'd lose access to it. In the olden days, people ended up having to make really long variable names, thinking their names would be unique, and they still ended up with "name clashes". The module mechanism avoids this. Importing from modules Still, it can be annoying to have to write `math.sin(math.pi)` instead of `sin(pi)`.Things can be imported *from* modules to become part of the current module:
###Code
import math
math.sin(math.pi)
from math import sin
sin(math.pi)
###Output
_____no_output_____
###Markdown
Importing one-by-one like this is a nice compromise between typing and risk of name clashes. It *is* possible to import **everything** from a module, but you risk name clashes.
###Code
from math import *
sin(pi)
###Output
_____no_output_____
###Markdown
Import and rename You can rename things as you import them to avoid clashes or for typing convenience
###Code
import math as m
m.cos(0)
pi = 3
from math import pi as realpi
print(sin(pi), sin(realpi))
###Output
0.1411200080598672 1.2246467991473532e-16
|
Facial_Expression_Recognition.ipynb | ###Markdown
Facial Expression Recognition* The data consists of 48x48 pixel grayscale images of faces. * The faces have been automatically registered so that the face is more or less centered and occupies about the same amount of space in each image. * The task is to categorize each face based on the emotion shown in the facial expression into one of seven categories:0. `Angry`1. `Disgust` 2. `Fear` 3. `Happy`4. `Sad`5. `Surprise` 6. `Neutral` Download Dataset* For running on Google Colab
###Code
# !pip install -q torchsummary
# from google.colab import files
# import os
# if not os.path.exists(r"/content/fer2018.zip"):
# print("Upload your kaggle.json file containing your API keys")
# uploaded = files.upload()
# for fn in uploaded.keys():
# print('User uploaded file "{name}" with length {length} bytes'.format(
# name=fn, length=len(uploaded[fn])))
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 /root/.kaggle/kaggle.json
# !kaggle datasets download -d ashishpatel26/fer2018
# !unzip -qq fer2018.zip -d datasets/
###Output
Upload your kaggle.json file containing your API keys
###Markdown
Imports
###Code
import warnings
warnings.filterwarnings("ignore")
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from torchsummary import summary
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import torchvision.transforms as T
from torchvision.utils import make_grid
sns.set_style('whitegrid')
plt.style.use("fivethirtyeight")
pd.set_option('display.max_columns', 20)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Dataset Preparation
###Code
# Label-id -> human-readable emotion name for the original 7-class FER2013 set.
emotions = {
    0: 'Angry',
    1: 'Disgust',
    2: 'Fear',
    3: 'Happy',
    4: 'Sad',
    5: 'Surprise',
    6: 'Neutral'
}
# Raw FER2013 CSV: one row per image (emotion, Usage split, space-separated pixels).
dataset = pd.read_csv('/content/datasets/fer20131.csv')
dataset.info()
dataset.head()
dataset.Usage.value_counts()
###Output
_____no_output_____
###Markdown
* We're going to use the `Training` and `PublicTest` rows combined together for training and validation set split into 80-20 proportion* `PrivateTest` will be our final test dataset.
###Code
# Expand the space-separated 'pixels' string column into one numeric column per
# pixel (pixel_0 ... pixel_2303), rescaled to [0, 1].
pixels = np.array([[int(v) for v in pix.split()] for pix in dataset.pixels])
# rescale pixel values from 0-255 to 0-1
pixels = pixels / 255.0
dataset.drop(columns=['pixels'], axis=1, inplace=True)
# column names, kept for later row -> 48x48 image reconstruction
pix_cols = [f'pixel_{i}' for i in range(pixels.shape[1])]
# PERF: adding 2304 columns one at a time fragments the DataFrame (one insert
# per column); a single concat builds the whole block in one shot.
dataset = pd.concat(
    [dataset, pd.DataFrame(pixels, columns=pix_cols, index=dataset.index)],
    axis=1,
)
dataset.head()
###Output
_____no_output_____
###Markdown
Dataset Class
###Code
class FERDataset(Dataset):
    """PyTorch Dataset yielding (image_tensor, emotion_label) pairs.

    Each row of the backing DataFrame holds one 48x48 grayscale image spread
    across the `pix_cols` pixel columns plus an integer 'emotion' label.
    """

    def __init__(self, df, transform=None):
        self.df = df
        self.transform = transform
        # fallback used when no augmentation pipeline is supplied
        self.tensor_transform = T.ToTensor()

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        label = int(row['emotion'])
        # rebuild the 48x48 image from the flat pixel columns (copy -> writable)
        img = np.copy(row[pix_cols].values.reshape(48, 48))
        img.setflags(write=True)
        if self.transform:
            # PIL round-trip: torchvision's geometric transforms expect an Image
            img = self.transform(Image.fromarray(img))
        else:
            img = self.tensor_transform(img)
        return img, label
###Output
_____no_output_____
###Markdown
Data Imbalance* To deal with class imbalance we can try different image transformations* We can also combine the angry and disgust classes into one, as they are closely related.
###Code
# Visualize the class distribution before merging.
plt.figure(figsize=(9, 8))
sns.countplot(x=dataset.emotion)
_ = plt.title('Emotion Distribution')
_ = plt.xticks(ticks=range(0, 7), labels=[emotions[i] for i in range(0, 7)], )
# combine disgust and angry classes:
# 1 -> 0 merges Disgust (rare) into Angry; 2..6 are shifted down by one so the
# labels stay contiguous (6 classes total).
replacements = {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5}
dataset['emotion'] = dataset.emotion.replace(to_replace=replacements.keys(), value=replacements.values())
# update the emotions dictionary to the new 6-class label space
emotions = {
    0: 'Angry',
    1: 'Fear',
    2: 'Happy',
    3: 'Sad',
    4: 'Surprise',
    5: 'Neutral'
}
###Output
_____no_output_____
###Markdown
Data Augmentations* We're going to apply various augmentation techniques.* All available transformations are listed in : [pytorch transforms](https://pytorch.org/docs/stable/torchvision/transforms.html)
###Code
def image_transformations() -> (object, object):
    '''
    Build the torchvision transform pipelines.
    Input:
        None
    Output:
        train_tfms: augmentation pipeline for the training set
        valid_tfms: minimal pipeline (tensor conversion only) for validation/test
    '''
    # training: random rotation/affine/flip/crop augmentations, then tensor
    train_tfms = T.Compose([
        T.RandomRotation(15),
        T.RandomAffine(
            degrees=0,
            translate=(0.01, 0.12),
            shear=(0.01, 0.03),
        ),
        T.RandomHorizontalFlip(),
        T.RandomCrop(48, padding=8, padding_mode='reflect'),
        T.ToTensor(),
    ])
    # validation/test: deterministic, tensor conversion only
    valid_tfms = T.Compose([T.ToTensor()])
    return train_tfms, valid_tfms
###Output
_____no_output_____
###Markdown
Dataset and Dataloader
###Code
def get_train_dataset(dataframe: object, transformation: bool=True) -> (object, object):
    '''
    Split the Training/PublicTest rows into train and validation FERDatasets.
    Input:
        dataframe: object -> DataFrame containing the whole data (needs a Usage column)
        transformation: bool [optional] -> apply augmentation/tensor transforms
    Output:
        train_ds: training FERDataset (~80% of the rows)
        val_ds: validation FERDataset (~20% of the rows)
    '''
    # extracts rows specific to Training, PublicTest
    dataframe = dataframe.loc[dataframe.Usage.isin(['Training', 'PublicTest'])]
    # drop Usage column as it's no longer needed
    dataframe = dataframe.drop('Usage', axis=1)
    # reproducible 80/20 random split
    np.random.seed(42)
    msk = np.random.rand(len(dataframe)) < 0.8
    train_df = dataframe[msk].reset_index()
    val_df = dataframe[~msk].reset_index()
    # get transformations
    if transformation:
        train_tfms, valid_tfms = image_transformations()
    else:
        train_tfms, valid_tfms = None, None
    # BUGFIX: both datasets were previously built from the full `dataframe`,
    # so the 80/20 split computed above was never applied and train/validation
    # overlapped completely.
    train_ds = FERDataset(train_df, transform=train_tfms)
    val_ds = FERDataset(val_df, transform=valid_tfms)
    return train_ds, val_ds
def get_train_dataloader(dataframe: object, transformation=True, batch_size: int=64) -> (object, object):
    '''
    Wrap the train/validation datasets in DataLoaders.
    Input:
        dataframe: dataset DataFrame object
        transformation: [optional] forwarded to get_train_dataset
        batch_size: [optional] int -- training batch size (validation uses 2x)
    Output:
        train_dl: shuffled training dataloader
        valid_dl: validation dataloader
    '''
    train_set, valid_set = get_train_dataset(dataframe, transformation=transformation)
    # training batches are shuffled every epoch
    train_loader = DataLoader(
        train_set,
        batch_size,
        shuffle=True,
        num_workers=3,
        pin_memory=True,
    )
    # no gradients at validation time, so a larger batch fits in memory
    valid_loader = DataLoader(
        valid_set,
        batch_size * 2,
        num_workers=2,
        pin_memory=True,
    )
    return train_loader, valid_loader
def get_test_dataloader(dataframe: object, batch_size: int=128) -> object:
    '''
    Build the dataloader for the held-out PrivateTest split.
    Input:
        dataframe: dataset DataFrame object (needs a Usage column)
        batch_size: [optional] int
    Output:
        test_dl: test dataloader, wrapped to move batches to `device`
    '''
    # BUGFIX: the row mask previously read the global `dataset` instead of the
    # `dataframe` argument, silently ignoring the caller's input.
    test_df = dataframe.loc[dataframe.Usage.isin(['PrivateTest'])]
    # drop Usage column as it's no longer needed
    test_df = test_df.drop('Usage', axis=1)
    # same deterministic transforms as the validation set (tensor conversion only)
    _, valid_tfms = image_transformations()
    test_dataset = FERDataset(test_df, transform=valid_tfms)
    test_dl = DataLoader(test_dataset, batch_size, num_workers=3, pin_memory=True)
    # move loader batches to the compute device (DeviceDataLoader defined below)
    test_dl = DeviceDataLoader(test_dl, device)
    return test_dl
###Output
_____no_output_____
###Markdown
Visualization
###Code
# Show one batch without augmentation and one with, for visual comparison.
train_dl_un, _ = get_train_dataloader(dataset, transformation=False)
train_dl, _ = get_train_dataloader(dataset)
# BUGFIX: the first grid previously iterated the augmented loader as well
# (train_dl_un was built but never used) and both figures were titled
# "Transformed Images".
for images, _ in train_dl_un:
    print('images.shape:', images.shape)
    plt.figure(figsize=(16, 8))
    plt.axis("off")
    plt.imshow(make_grid(images, nrow=8).permute((1, 2, 0)))  # channels-last for imshow
    break
_ = plt.suptitle("Original Images", y=0.92, fontsize=16)
for images, _ in train_dl:
    print('images.shape:', images.shape)
    plt.figure(figsize=(16, 8))
    plt.axis("off")
    plt.imshow(make_grid(images, nrow=8).permute((1, 2, 0)))  # channels-last for imshow
    break
_ = plt.suptitle("Transformed Images", y=0.92, fontsize=16)
###Output
images.shape: torch.Size([64, 1, 48, 48])
###Markdown
Setting up GPU usage
###Code
def get_default_device():
    """Return the preferred torch device: CUDA when available, otherwise CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def to_device(data, device):
    """Recursively move a tensor (or a list/tuple of tensors) onto `device`."""
    if not isinstance(data, (list, tuple)):
        # non_blocking lets host-to-GPU copies overlap compute for pinned memory
        return data.to(device, non_blocking=True)
    return [to_device(element, device) for element in data]
class DeviceDataLoader():
    """Iterable wrapper that ships every batch of a dataloader to a device."""
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device
    def __iter__(self):
        """Return an iterator that yields each batch already on the device."""
        return (to_device(batch, self.device) for batch in self.dl)
    def __len__(self):
        """Number of batches in the wrapped loader."""
        return len(self.dl)
# Resolve the global compute device once; reused by loaders and models below.
device = get_default_device()
device
###Output
_____no_output_____
###Markdown
Model Building Base Image Classification Class
###Code
# Can be used for any Image Classification task
class ImageClassificationBase(nn.Module):
    """Shared train/validation step and metric-aggregation logic for models."""

    def training_step(self, batch):
        """One forward pass on a training batch; loss keeps its graph for backward."""
        inputs, labels = batch
        preds = self(inputs)
        return {
            'loss': F.cross_entropy(preds, labels),
            'acc': accuracy(preds, labels).detach(),
        }

    def validation_step(self, batch):
        """Forward pass on a validation batch; both metrics are detached."""
        images, labels = batch
        out = self(images)
        return {
            'val_loss': F.cross_entropy(out, labels).detach(),
            'val_acc': accuracy(out, labels).detach(),
        }

    def get_metrics_epoch_end(self, outputs, validation=True):
        """Average the per-batch metric dicts into epoch-level Python floats."""
        loss_key = 'val_loss' if validation else 'loss'
        acc_key = 'val_acc' if validation else 'acc'
        mean_loss = torch.stack([entry[loss_key] for entry in outputs]).mean()
        mean_acc = torch.stack([entry[acc_key] for entry in outputs]).mean()
        return {loss_key: mean_loss.detach().item(), acc_key: mean_acc.detach().item()}

    def epoch_end(self, epoch, result, num_epochs):
        """Print a one-line progress summary for the finished epoch."""
        print(f"Epoch: {epoch+1}/{num_epochs} -> lr: {result['lrs'][-1]:.5f} loss: {result['loss']:.4f}, acc: {result['acc']:.4f}, val_loss: {result['val_loss']:.4f}, val_acc: {result['val_acc']:.4f}\n")
###Output
_____no_output_____
###Markdown
Metric
###Code
def accuracy(outputs, labels):
    """Fraction of samples whose argmax prediction matches the label (0-d tensor)."""
    predicted = outputs.argmax(dim=1)
    return torch.tensor((predicted == labels).sum().item() / len(predicted))
###Output
_____no_output_____
###Markdown
Model: ResNet-9
###Code
def conv_block(in_channels, out_channels, pool=False):
    """3x3 same-padding conv -> batch-norm -> ReLU, with an optional 2x2 max-pool."""
    modules = [
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    if pool:
        modules += [nn.MaxPool2d(kernel_size=2)]
    return nn.Sequential(*modules)
# updated channels for the use case
# added an additional layer in classifier
class ResNet9(ImageClassificationBase):
    """ResNet-9 style CNN for 1x48x48 grayscale faces.

    Two residual stages; trailing comments track the feature-map shape.
    """
    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.conv1 = conv_block(in_channels, 16, pool=False)    # 16 x 48 x 48
        self.conv2 = conv_block(16, 32, pool=True)              # 32 x 24 x 24
        self.res1 = nn.Sequential(                              # 32 x 24 x 24
            conv_block(32, 32, pool=False),
            conv_block(32, 32, pool=False)
        )
        self.conv3 = conv_block(32, 64, pool=True)              # 64 x 12 x 12
        self.conv4 = conv_block(64, 128, pool=True)             # 128 x 6 x 6
        self.res2 = nn.Sequential(                              # 128 x 6 x 6
            conv_block(128, 128),
            conv_block(128, 128)
        )
        self.classifier = nn.Sequential(
            nn.MaxPool2d(kernel_size=2),                        # 128 x 3 x 3
            nn.Flatten(),
            nn.Linear(128*3*3, 512),   # 512
            nn.Linear(512, num_classes)   # 6
        )
        # kept so __repr__/__str__ can show the full stack; forward() applies
        # the sub-modules explicitly so it can add the residual skips
        self.network = nn.Sequential(
            self.conv1,
            self.conv2,
            self.res1,
            self.conv3,
            self.conv4,
            self.res2,
            self.classifier,
        )
    def forward(self, xb):
        out = self.conv1(xb)
        out = self.conv2(out)
        out = self.res1(out) + out    # residual skip 1
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.res2(out) + out    # residual skip 2
        out = self.classifier(out)
        return out
    def __repr__(self):
        return f"{self.network}"
    def __str__(self):
        # BUG FIX: __str__ must return a str. The original only called
        # summary() (which prints and returns None), so print(model)/str(model)
        # raised "TypeError: __str__ returned non-string". Keep the summary
        # side effect, then return the module repr.
        summary(self.network, (1, 48, 48))
        return f"{self.network}"
###Output
_____no_output_____
###Markdown
Model: From scratch
###Code
class EmotionRecognition(ImageClassificationBase):
    """Plain (non-residual) CNN baseline for 1x48x48 grayscale faces.

    Three conv stages (32/64/128 channels), each ending in max-pool +
    batch-norm, followed by a small dense head.
    """
    def __init__(self, num_classes):
        super().__init__()
        self.num_classes = num_classes
        self.network = nn.Sequential(  # 1 x 48 x 48
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),        # output: 32 x 24 x 24
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),        # output: 64 x 12 x 12
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),        # output: 128 x 6 x 6
            nn.BatchNorm2d(128),
            nn.Flatten(),
            nn.Linear(128*6*6, 64),
            nn.ReLU(),
            nn.BatchNorm1d(64),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, num_classes))
    def forward(self, xb):
        return self.network(xb)
    def __repr__(self):
        return f"{self.network}"
    def __str__(self):
        # BUG FIX: __str__ must return a str. The original only called
        # summary() (which prints and returns None), so print(model)/str(model)
        # raised "TypeError: __str__ returned non-string". Keep the summary
        # side effect, then return the module repr.
        summary(self.network, (1, 48, 48))
        return f"{self.network}"
###Output
_____no_output_____
###Markdown
Setup Training Helper Functions
###Code
@torch.no_grad()
def evaluate(model: object, val_loader: object) -> dict:
    '''
    Evaluate model on the validation set (gradients disabled).
    Input:
        model: training model object
        val_loader: validation data loader object
    Output:
        dict of epoch-level validation metrics
    '''
    model.eval()
    batch_results = []
    for batch in val_loader:
        batch_results.append(model.validation_step(batch))
    return model.get_metrics_epoch_end(outputs=batch_results, validation=True)
def get_lr(optimizer: object) -> float:
    '''Return the current learning rate, taken from the first param group
    (None if the optimizer has no param groups).'''
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None
def fit_model(model_name: str,
              model: object,
              epochs: int,
              lr: float,
              train_loader: object,
              val_loader: object,
              opt_func: object=torch.optim.SGD):
    '''
    This function is responsible for training our model.
    The learning rate is adapted with a ReduceLROnPlateau scheduler driven by
    validation loss (a One Cycle policy is kept commented out below).
    The best model is saved during each epoch.
    Input:
        model_name: str -> used to name the checkpoint files
        model: object
        epochs: int -> Max epochs
        lr: float -> learning rate
        train_loader: training set data loader
        val_loader: validation set data loader
        opt_func: optimizer object
    Output:
        history: list of per-epoch metric dicts (loss/acc/val_loss/val_acc/lrs)
    '''
    torch.cuda.empty_cache()
    BEST_VAL_SCORE = 0.0  # for keeping track of best model score
    history = []
    optimizer = opt_func(model.parameters(), lr)
    # scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, max_lr=max_lr,
    #                                                 epochs=epochs,
    #                                                 steps_per_epoch=len(train_loader))
    # multiply lr by 0.01 once val_loss has stagnated for `patience`+1 epochs
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=1, factor=0.01)
    for epoch in range(epochs):
        train_history = []  # per-batch training metrics for this epoch
        lrs = []            # learning rate recorded after every optimizer step
        # Training Phase
        model.train()
        for batch in tqdm(train_loader, desc=f'Epoch: {epoch+1}/{epochs}'):
            info = model.training_step(batch)
            loss = info['loss']
            # contains batch loss and acc for training phase
            train_history.append(info)
            loss.backward()
            # Gradient clipping
            # if grad_clip:
            #     nn.utils.clip_grad_value_(model.parameters(), grad_clip)
            optimizer.step()
            optimizer.zero_grad()
            lrs.append(get_lr(optimizer))
            # scheduler.step()
        train_result = model.get_metrics_epoch_end(train_history, validation=False)
        val_result = evaluate(model, val_loader)
        result = {**train_result, **val_result}
        # call scheduler to check validation loss
        scheduler.step(result['val_loss'])
        result['lrs'] = lrs
        model.epoch_end(epoch, result, epochs)
        # Save the best model
        if result['val_acc'] > BEST_VAL_SCORE:
            BEST_VAL_SCORE = result['val_acc']
            save_name = f"{model_name}_epoch-{epoch+1}_score-{round(result['val_acc'], 4)}.pth"
            # IPython shell escape (notebook-only syntax): delete previous
            # checkpoints for this model before writing the new best one.
            !rm -f '{model_name}'_*
            torch.save(model.state_dict(), save_name)
        history.append(result)
    return history
# functions to fetch test dataset and generate submission file for best model
def load_best(model_name: str) -> object:
    '''
    Load the best saved checkpoint for `model_name` and return the model on GPU.
    Input:
        model_name: str -> key into the global `models` registry
    Output:
        the model with its best checkpoint weights loaded, moved to `device`
    Raises:
        FileNotFoundError: if no checkpoint file for `model_name` exists.
    '''
    # get model defintion
    best_model = models[model_name]
    # load trained weights
    path = r"/content/"
    file_path = ''
    for i in os.listdir(path):
        # keep the last matching checkpoint (same as the original behavior)
        if os.path.isfile(os.path.join(path, i)) and i.startswith(f'{model_name}'):
            file_path = os.path.join(path, i)
    # BUG FIX: fail loudly when no checkpoint is found instead of letting
    # torch.load('') raise a confusing error later.
    if not file_path:
        raise FileNotFoundError(f"No saved checkpoint found for model '{model_name}' in {path}")
    print(f"Loaded model: {file_path[9:]} weights.")  # [9:] strips the '/content/' prefix
    best_model.load_state_dict(torch.load(file_path))
    # move model to gpu
    best_model = to_device(best_model, device)
    return best_model
@torch.no_grad()
def generate_prediction(model_name: str) -> None:
    '''Evaluate the best saved checkpoint of `model_name` on the private test
    set and print its loss/accuracy.'''
    # load test dataset
    test_dl = get_test_dataloader(dataset)
    # load model
    model = load_best(model_name)
    # clear cuda cache
    torch.cuda.empty_cache()
    # reuse the validation-step machinery from the base class
    # (inner no_grad kept from the original; redundant with the decorator)
    with torch.no_grad():
        model.eval()
        outputs = [model.validation_step(batch) for batch in test_dl]
        metrics = model.get_metrics_epoch_end(outputs=outputs, validation=True)
    print(f"Test Scores:\n Loss: {round(metrics['val_loss'], 3)}, Accuracy: {round(metrics['val_acc'], 3)}")
def end_to_end(model_name: str, parameters: dict=None) -> dict:
    '''
    A simple function for end-to-end training and testing on the selected model.
    Inputs:
        model_name: str -> chosen model name (key into the global `models` dict)
        parameters: dict -> hyperparameters; expects keys
                    'batch_size', 'epochs', 'lr', 'opt_func'
    Outputs:
        history: dict -> dictionary containing model metrics(loss, score, lr)
    '''
    torch.cuda.empty_cache()
    # hyperparameters
    BATCH_SIZE = parameters['batch_size']
    epochs = parameters["epochs"]
    lr = parameters["lr"]
    opt_func = parameters["opt_func"]
    # get transformed dataset
    train_dl, valid_dl = get_train_dataloader(dataset, batch_size=BATCH_SIZE)
    # move dataset to use GPU
    train_dl = DeviceDataLoader(train_dl, device)
    valid_dl = DeviceDataLoader(valid_dl, device)
    # get model
    model = models[model_name]
    # move model to GPU
    model = to_device(model, device)
    # train model
    history = fit_model(
        model_name,
        model,
        epochs,
        lr,
        train_dl,
        valid_dl,
        opt_func
    )
    # cleaning
    torch.cuda.empty_cache()
    # generate predictions
    # BUG FIX: corrected the typo "Genearating" in the user-facing message
    print("Generating predictions on the Test set")
    generate_prediction(model_name)
    return history
# plotting metrics
def plot_accuracies(history):
    """Plot train vs. validation accuracy across epochs (1-based x ticks)."""
    train_acc = [epoch['acc'] for epoch in history]
    val_acc = [epoch['val_acc'] for epoch in history]
    plt.plot(train_acc, '-kx', label="train_acc")
    plt.plot(val_acc, '-rx', label="val_acc")
    plt.legend()
    epoch_ticks = range(len(train_acc))
    _ = plt.xticks(ticks=epoch_ticks,
                   labels=[str(e + 1) for e in epoch_ticks])
    plt.xlabel('epoch')
    plt.ylabel('Accuracy')
    plt.title('Accuracy vs. epochs')
def plot_losses(history):
    """Plot train vs. validation loss across epochs (1-based x ticks)."""
    train_losses = [epoch['loss'] for epoch in history]
    val_losses = [epoch['val_loss'] for epoch in history]
    plt.plot(train_losses, '-kx', label="train_loss")
    plt.plot(val_losses, '-rx', label="val_loss")
    plt.legend()
    epoch_ticks = range(len(train_losses))
    _ = plt.xticks(ticks=epoch_ticks,
                   labels=[str(e + 1) for e in epoch_ticks])
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.title('Loss vs. epochs')
def plot_lrs(history):
    """Plot the per-batch learning-rate schedule recorded during training."""
    lrs = np.concatenate([epoch.get('lrs', []) for epoch in history])
    plt.plot(lrs)
    plt.xlabel('Batch no.')
    plt.ylabel('Learning rate')
    plt.title('Learning Rate vs. Batch no.')
###Output
_____no_output_____
###Markdown
Models
###Code
# Registry of candidate architectures, keyed by the names used by
# end_to_end()/load_best(); `emotions` presumably maps class index -> label
# (only its length is used here) — confirm against its definition.
models = {
    'ResNet9': ResNet9(in_channels=1, num_classes=len(emotions.keys())),
    'EmotionRecognition': EmotionRecognition(len(emotions.keys())),
}
###Output
_____no_output_____
###Markdown
Train Model
###Code
# TRAINING CONSTANTS
training_parameters = {
    "epochs": 30,                    # maximum number of epochs
    "lr": 0.001,                     # initial learning rate for the optimizer
    "opt_func": torch.optim.Adam,    # optimizer class (constructed inside fit_model)
    "batch_size": 128,
}
# using lr_scheduler = ReduceLROnPlateau
model_name = "ResNet9"
# model_name = "EmotionRecognition"
history = end_to_end(model_name, training_parameters)
###Output
Epoch: 1/30: 100%|██████████| 253/253 [02:18<00:00, 1.82it/s]
###Markdown
Training plots
###Code
# plotting score and loss
# Three side-by-side panels: accuracy, loss, and the recorded lr schedule.
plt.figure(figsize=(18, 6))
plt.subplot(1, 3, 1)
plot_accuracies(history)
plt.subplot(1, 3, 2)
plot_losses(history)
plt.subplot(1, 3, 3)
plot_lrs(history)
###Output
_____no_output_____
###Markdown
###Code
#Downloading the dataset
# NOTE(review): IPython shell escape; this signed Kaggle URL has an Expires
# parameter and will need to be regenerated once it lapses.
!wget --no-check-certificate \
"https://storage.googleapis.com/kaggle-datasets/64681/127167/fer20131.csv.zip?GoogleAccessId=web-data@kaggle-161607.iam.gserviceaccount.com&Expires=1561353681&Signature=reLsF1yri6BKDnH1ull9DHGDo2hIzIeQKt6mGz8xoiZ5uPSVnIx9%2BcJAx5XPdfulo8LgadzEr7iuJGK7Xv4VXiGMX7j6YFRbe%2FCFxIafWrpcSa32%2FRgG22mT%2FnXIsR4vRkBxJ21jy5aMRiXm3tP30tIUZLG2EPG4ZZffIGVDSxFPD%2BVaxvcw2BBvad84IuEoqmaQL5YqdoNZzAYrd1rGN%2FsBUifdt7hDiaZdI64tCnAXhN5WRU6plUnFxhn%2FpPjyRNuMJ0%2B5YBUuAzibdxmSQkq8YFuiF6tbSsbJzK5itfCD3eR04M1rUqe039EhU5DXmZQR%2F7bEY3LqNAF%2FXsBIZQ%3D%3D"\
-O "/tmp/fer.zip"
# Unzipping the downloaded dataset
import os
import zipfile
local_zip='/tmp/fer.zip'
zip_ref=zipfile.ZipFile(local_zip,'r')
zip_ref.extractall('/tmp/fer')
zip_ref.close()
#The fer folder contains a .csv file containing the pixels and labels of the images.
import csv
fields = []   # header row of the CSV
rows = []     # every data row as a list of strings
import numpy as np
with open('/tmp/fer/fer20131.csv') as training_file:
    # creating a csv reader object
    csvreader = csv.reader(training_file)
    # extracting field names through first row
    fields = next(csvreader)
    # extracting each data row one by one
    for row in csvreader:
        rows.append(row)
#The first column corresponds to the label, the second column corresponds to the pixels of the images and the third column corresponds to the Training/Test label.
rows[0]
#shuffling the data (random.sample over the full length is a permutation)
import random
rows=random.sample(rows,len(rows))
#Separating the Training and Test samples from the dataset
train_images=[]
train_labels=[]
test_images=[]
test_labels=[]
# row layout: [0]=emotion label, [1]=space-separated pixels, [2]=Usage split
for i in range(len(rows)):
    if rows[i][2]=='Training':
        train_images.append(rows[i][1].split())
        train_labels.append(float(rows[i][0]))
    else:
        # both PublicTest and PrivateTest rows land in the test split here
        test_images.append(rows[i][1].split())
        test_labels.append(float(rows[i][0]))
#Converting the pixels of the images from string to float
for i in range(len(train_images)):
    train_images[i] = list(map(float, train_images[i]))
for i in range(len(test_images)):
    test_images[i] = list(map(float, test_images[i]))
# Converting the images and labels to numpy array
train_images=np.array(train_images)
test_images=np.array(test_images)
train_labels=np.array(train_labels)
test_labels=np.array(test_labels)
train_img=np.zeros((len(train_images),48,48))
test_img=np.zeros((len(test_images),48,48))
#Reshaping the Images from 2304 pixels to 48x48
for i in range(len(train_images)):
    train_img[i]=train_images[i].reshape((48,48))
for i in range(len(test_images)):
    test_img[i]=test_images[i].reshape((48,48))
#Extending the dimension of the Images (append a channel axis for Keras)
train_img=np.expand_dims(train_img,axis=3)
test_img=np.expand_dims(test_img,axis=3)
#Data Summary
print('No. of training images: ',len(train_img))
print('No. of test images: ',len(test_img))
print('No. of classes in training data: ', len(np.unique(train_labels)))
print('No. of classes in test data: ', len(np.unique(test_labels)))
print('Dimension of training images: ',train_img.shape)
print('Dimension of test images: ',test_img.shape)
print('Dimension of training labels: ',train_labels.shape)
print('Dimension of test labels: ',test_labels.shape)
#Displaying the images of facial expressions
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#We'll output the first 16 images in a 4x4 grid.
nrows=4
ncols=4
fig=plt.gcf()
fig.set_size_inches(ncols*4,nrows*4)
for i in range(len(train_img[:16])):
    sp=plt.subplot(nrows,ncols,i+1)
    sp.axis('Off')
    X = np.squeeze(train_img[i], axis=(2,)) # drop the channel axis -> 2D array for imshow
    plt.imshow(X, cmap="gray")
plt.show()
import tensorflow as tf
from tensorflow import keras
#Image Generator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Add data-augmentation parameters to ImageDataGenerator
# (augmentation is currently disabled; the triple-quoted block keeps the
# parameters around for reference)
"""train_datagen = ImageDataGenerator(rescale = 1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')"""
# rescale pixel values from [0, 255] to [0, 1]
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen=ImageDataGenerator(rescale=1./255)
# Simple 3-stage CNN (64/128/256 channels) with a 1024-unit dense head and a
# 7-way softmax (one output per FER emotion class).
model=tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64,(3,3),activation='relu',input_shape=(48,48,1)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(256,(3,3),activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(1024,activation='relu'),
    #tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(7,activation='softmax')
])
# sparse loss: labels are integer class ids, not one-hot vectors
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])
# NOTE(review): Model.fit_generator is deprecated/removed in newer TF2;
# Model.fit accepts generators directly — confirm the installed TF version.
history=model.fit_generator(
    train_datagen.flow(train_img,train_labels,batch_size=100),
    steps_per_epoch=len(train_img)/100,
    epochs=10,
    validation_data=test_datagen.flow(test_img,test_labels,batch_size=10),
    validation_steps=len(test_img)/10,
    verbose=1
)
#Evaluating Accuracy and Loss of the model
%matplotlib inline
# NOTE(review): with metrics=['accuracy'], TF2 stores history under
# 'accuracy'/'val_accuracy'; the 'acc'/'val_acc' keys below only exist on
# older Keras/TF1 — confirm against the installed version.
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs=range(len(acc)) #No. of epochs
#Plot training and validation accuracy per epoch
import matplotlib.pyplot as plt
plt.plot(epochs,acc,'r',label='Training Accuracy')
plt.plot(epochs,val_acc,'g',label='Testing Accuracy')
plt.legend()
plt.figure()
#Plot training and validation loss per epoch
plt.plot(epochs,loss,'r',label='Training Loss')
plt.plot(epochs,val_loss,'g',label='Testing Loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Importing Necessary Packages
###Code
import sys, os
import pandas as pd
import cv2
import numpy as np
import seaborn as sns
import glob
import PIL
from tensorflow.keras.models import Sequential,model_from_json
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization,AveragePooling2D
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.callbacks import History
from tensorflow.keras.optimizers import Adam,SGD
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing import image
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Preprocessing the Data and Reading the data
###Code
# dataset / training constants
num_features = 64        # NOTE(review): defined but unused in the visible code — confirm
num_labels = 7           # seven FER emotion classes
batch_size = 128
epochs = 150
width, height = 48, 48   # FER2013 images are 48x48 grayscale
df=pd.read_csv('C:/Users/Kunj/Downloads/fer2013/fer2013.csv')
#df=pd.read_csv('C:\\Users\\Kunj\\Downloads\\ck+')
# print(df.info())
# print(df["Usage"].value_counts())
# print(df.head())
# Split the pixel strings into float32 arrays by official Usage split.
X_train,train_y,X_test,test_y,X_Ptest,Ptest_y=[],[],[],[],[],[]
for index, row in df.iterrows():
    val=row['pixels'].split(" ")
    try:
        if 'Training' in row['Usage']:
            X_train.append(np.array(val,'float32'))
            train_y.append(row['emotion'])
        elif 'PublicTest' in row['Usage']:
            X_test.append(np.array(val,'float32'))
            test_y.append(row['emotion'])
        elif 'PrivateTest' in row['Usage']:
            X_Ptest.append(np.array(val,'float32'))
            Ptest_y.append(row['emotion'])
    except:
        # best-effort: skip malformed rows but report where they occurred
        print(f"error occured at index :{index} and row:{row}")
# Tally how many samples of each of the 7 emotions appear in every split.
# Emotion codes: 0=angry, 1=disgust, 2=fear, 3=happy, 4=sad, 5=surprise, 6=neutral
_split_counts = {'Training': [0] * 7, 'PublicTest': [0] * 7, 'PrivateTest': [0] * 7}
for index, row in df.iterrows():
    for split_name, bucket in _split_counts.items():
        # substring containment mirrors the original `'Training' in row['Usage']`
        if split_name in row['Usage']:
            for code in range(7):
                if row['emotion'] == code:
                    bucket[code] += 1
            break
# Unpack into the individual counters the rest of the notebook expects.
(num_angry_tr, num_disgust_tr, num_fear_tr, num_happy_tr,
 num_sad_tr, num_surprise_tr, num_neutral_tr) = _split_counts['Training']
(num_angry_te, num_disgust_te, num_fear_te, num_happy_te,
 num_sad_te, num_surprise_te, num_neutral_te) = _split_counts['PublicTest']
(num_angry_pte, num_disgust_pte, num_fear_pte, num_happy_pte,
 num_sad_pte, num_surprise_pte, num_neutral_pte) = _split_counts['PrivateTest']
# Per-class sample counts for the training split, plus a bar chart.
print("Number of Training Samples for Angry Expression = ",num_angry_tr)
print("Number of Training Samples for Disgust Expression = ",num_disgust_tr)
print("Number of Training Samples for Happy Expression = ",num_happy_tr)
print("Number of Training Samples for Fear Expression = ",num_fear_tr)
print("Number of Training Samples for Sad Expression = ",num_sad_tr)
print("Number of Training Samples for Surprise Expression = ",num_surprise_tr)
print("Number of Training Samples for Neutral Expression = ",num_neutral_tr)
# bar heights follow the emotion-code order 0..6
Y=[num_angry_tr, num_disgust_tr, num_fear_tr, num_happy_tr, num_sad_tr, num_surprise_tr, num_neutral_tr]
X=['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
%matplotlib inline
plt.style.use('ggplot')
plt.xlabel('Emotion')
plt.ylabel('Number of samples')
plt.title('Number of Training samples for each Emotion')
plt.bar(X,Y,color='green')
# Same breakdown for the validation (PublicTest) split.
print("Number of Validation Samples for Angry Expression = ",num_angry_te)
print("Number of Validation Samples for Disgust Expression = ",num_disgust_te)
print("Number of Validation Samples for Happy Expression = ",num_happy_te)
print("Number of Validation Samples for Fear Expression = ",num_fear_te)
print("Number of Validation Samples for Sad Expression = ",num_sad_te)
print("Number of Validation Samples for Surprise Expression = ",num_surprise_te)
print("Number of Validation Samples for Neutral Expression = ",num_neutral_te)
Y_val=[num_angry_te, num_disgust_te, num_fear_te, num_happy_te, num_sad_te, num_surprise_te, num_neutral_te]
X_val=['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
%matplotlib inline
plt.style.use('ggplot')
plt.xlabel('Emotion')
plt.ylabel('Number of samples')
plt.title('Number of Validation samples for each Emotion')
plt.bar(X_val,Y_val,color='red')
print("Number of Private Test Samples for Angry Expression = ",num_angry_pte)
print("Number of Private Test Samples for Disgust Expression = ",num_disgust_pte)
print("Number of Private Test Samples for Happy Expression = ",num_happy_pte)
print("Number of Private Test Samples for Fear Expression = ",num_fear_pte)
print("Number of Private Test Samples for Sad Expression = ",num_sad_pte)
print("Number of Private Test Samples for Surprise Expression = ",num_surprise_pte)
print("Number of Private Test Samples for Neutral Expression = ",num_neutral_pte)
Y_PTest=[num_angry_te, num_disgust_te, num_fear_te, num_happy_te, num_sad_te, num_surprise_te, num_neutral_te]
X_PTest=['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
%matplotlib inline
plt.style.use('ggplot')
plt.xlabel('Emotion')
plt.ylabel('Number of samples')
plt.title('Number of Validation samples for each Emotion')
plt.bar(X_PTest,Y_PTest,color='blue')
# Convert the Python lists to float32 numpy arrays and one-hot encode labels.
X_train = np.array(X_train,'float32')
train_y = np.array(train_y,'float32')
X_test = np.array(X_test,'float32')
test_y = np.array(test_y,'float32')
train_y=to_categorical(train_y, num_classes=num_labels)
test_y=to_categorical(test_y, num_classes=num_labels)
# Standardize pixels (zero mean, unit variance per pixel position).
# NOTE(review): the old comment said "between 0 and 1", but this is z-score
# standardization, and train/test are scaled with their own statistics;
# X_Ptest/Ptest_y are never converted or normalized here — confirm intent.
X_train -= np.mean(X_train, axis=0)
X_train /= np.std(X_train, axis=0)
X_test -= np.mean(X_test, axis=0)
X_test /= np.std(X_test, axis=0)
# Reshape flat 2304-pixel rows into 48x48 single-channel images.
X_train = X_train.reshape(X_train.shape[0], 48, 48, 1)
X_test = X_test.reshape(X_test.shape[0], 48, 48, 1)
# print(f"shape:{X_train.shape}")
##designing the cnn
model = Sequential()
#1st convolution stage: two 3x3 conv(64) layers, then 2x2 max-pool + dropout
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(X_train.shape[1:])))
model.add(Conv2D(64,kernel_size= (3, 3), activation='relu'))
# model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
model.add(Dropout(0.5))
#2nd convolution stage (128 channels)
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
# model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
model.add(Dropout(0.5))
#3rd convolution stage (256 channels)
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(Conv2D(256, (3, 3), activation='relu'))
# model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
model.add(Flatten())
#fully connected head: two 1024-unit dense layers with dropout
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.4))
# final 7-way softmax over the emotion classes
model.add(Dense(num_labels, activation='softmax'))
model.summary()
#Compiling the model
model.compile(loss=categorical_crossentropy,
              optimizer=SGD(learning_rate=0.075),
              metrics=['accuracy'])
#Training the model
seqModel=model.fit(X_train, train_y,
                   batch_size=batch_size,
                   epochs=epochs,
                   verbose=1,
                   validation_data=(X_test, test_y),
                   shuffle=True)
# Pull per-epoch curves from the History object
# (keys 'accuracy'/'val_accuracy' match metrics=['accuracy'] on TF2).
train_loss= seqModel.history['loss']
train_acc= seqModel.history['accuracy']
val_loss= seqModel.history['val_loss']
val_acc= seqModel.history['val_accuracy']
xc = range(epochs)
# Separate figures: train loss, train acc, val loss+acc overlay.
# NOTE(review): the last figure repeats the val_acc plot — possibly a leftover.
plt.figure(figsize=(15,10))
plt.plot(xc, train_loss)
plt.figure(figsize=(15,10))
plt.plot(xc, train_acc)
plt.figure(figsize=(15,10))
plt.plot(xc,val_loss)
plt.plot(xc,val_acc)
plt.figure(figsize=(15,10))
plt.plot(xc,val_acc)
#Saving the model architecture (JSON) and weights (HDF5) to use later on
fer_json = model.to_json()
with open("C:/Users/Kunj/Downloads/fer2013/fer3.json", "w") as json_file:
    json_file.write(fer_json)
model.save_weights("C:/Users/Kunj/Downloads/fer2013/fer3.h5")
#load model
# NOTE(review): this open() handle is never closed — consider a with-block.
model = model_from_json(open("C:\\Users\\Kunj\\Downloads\\Facial_Expression_Recognition\\fer3.json", "r").read())
#load weights
model.load_weights("C:\\Users\\Kunj\\Downloads\\Facial_Expression_Recognition\\fer3.h5")
# The seven emotion folders of the CK+ test set share identical loading logic;
# one helper replaces the seven copy-pasted blocks. This also removes the
# original NameError where the "sad" section evaluated len(human_neutral_label)
# before `human_neutral_label` existed.
def _collect_emotion_frames(folder: str, emotion: str, label: int):
    '''
    Glob every .png of one emotion folder and return:
        (paths, folderNames, imageNames, emotions, labels, DataFrame)
    mirroring the per-emotion variables the rest of the notebook expects.
    '''
    paths = glob.glob("C:\\Users\\Kunj\\Downloads\\Facial_Expression_Recognition\\DATA\\" + folder + "\\*.png")
    print("Number of images in " + emotion + " emotion = " + str(len(paths)))
    # first 7 path components form the folder, the 8th is the file name
    folder_names = [str("/".join(p.split("\\")[:7])) + "/" for p in paths]
    image_names = [str(p.split("\\")[7]) for p in paths]
    emotion_col = [emotion] * len(paths)
    label_col = [label] * len(paths)
    frame = pd.DataFrame()
    frame["folderName"] = folder_names
    frame["imageName"] = image_names
    frame["Emotion"] = emotion_col
    frame["Labels"] = label_col
    return paths, folder_names, image_names, emotion_col, label_col, frame

(human_angry, human_angry_folderName, human_angry_imageName,
 human_angry_emotion, human_angry_label, df_angry) = _collect_emotion_frames("anger", "Angry", 1)
df_angry.head()
(human_disgust, human_disgust_folderName, human_disgust_imageName,
 human_disgust_emotion, human_disgust_label, df_disgust) = _collect_emotion_frames("disgust", "Disgust", 2)
df_disgust.head()
(human_fear, human_fear_folderName, human_fear_imageName,
 human_fear_emotion, human_fear_label, df_fear) = _collect_emotion_frames("fear", "Fear", 3)
df_fear.head()
(human_happy, human_happy_folderName, human_happy_imageName,
 human_happy_emotion, human_happy_label, df_happy) = _collect_emotion_frames("happy", "Happy", 4)
df_happy.head()
(human_sad, human_sad_folderName, human_sad_imageName,
 human_sad_emotion, human_sad_label, df_sad) = _collect_emotion_frames("sadness", "Sad", 5)
df_sad.head()
(human_surprise, human_surprise_folderName, human_surprise_imageName,
 human_surprise_emotion, human_surprise_label, df_surprise) = _collect_emotion_frames("surprise", "Surprise", 6)
df_surprise.head()
(human_neutral, human_neutral_folderName, human_neutral_imageName,
 human_neutral_emotion, human_neutral_label, df_neutral) = _collect_emotion_frames("neutral", "Neutral", 7)
df_neutral.head()
print(df_neutral["folderName"][1])
###Output
C:/Users/Kunj/Downloads/Facial_Expression_Recognition/DATA/neutral/
###Markdown
Concatening All DataFrames
###Code
# Combine the per-emotion frames into one shuffled test DataFrame.
frames = [df_angry, df_disgust, df_fear, df_happy, df_neutral, df_sad, df_surprise]
Final_Test_human = pd.concat(frames)
Final_Test_human.shape
Final_Test_human.reset_index(inplace = True, drop = True)
Final_Test_human = Final_Test_human.sample(frac = 1.0) #shuffling the dataframe
Final_Test_human.reset_index(inplace = True, drop = True)
Final_Test_human.head()
# Class-balance overview of the combined test set (sorted by count, descending).
TestData_distribution = Final_Test_human["Emotion"].value_counts().sort_index()
TestData_distribution_sorted = sorted(TestData_distribution.items(), key = lambda d: d[1], reverse = True)
fig = plt.figure(figsize = (10, 6))
ax = fig.add_axes([0,0,1,1])
ax.set_title("Count of each Emotion in Test Data", fontsize = 20)
sns.countplot(x = "Emotion", data = Final_Test_human)
plt.grid()
for i in ax.patches:
    # annotate each bar with its count
    ax.text(x = i.get_x() + 0.27, y = i.get_height()+0.2, s = str(i.get_height()), fontsize = 20, color = "grey")
plt.xlabel("")
plt.ylabel("Count", fontsize = 15)
plt.tick_params(labelsize = 15)
plt.xticks(rotation = 40)
plt.show()
for i in TestData_distribution_sorted:
    print("Number of training data points in class "+str(i[0])+" = "+str(i[1])+ "("+str(np.round(((i[1]/Final_Test_human.shape[0])*100), 4))+"%)")
# OpenCV pretrained Haar cascade for frontal-face detection.
face_cascade = cv2.CascadeClassifier('C:\\Users\\Kunj\\Downloads\\Facial_Expression_Recognition\\haarcascade_frontalface_default.xml')
#download this xml file from link: https://github.com/opencv/opencv/tree/master/data/haarcascades.
def face_det_crop_resize(img_path):
    """Detect a face in the image at *img_path*, crop it and overwrite the
    file in place with the crop resized to 350x350.

    If several faces are detected each crop overwrites the previous write,
    so the last detected face wins.  If no face is detected, or the file
    cannot be read, the image is left untouched.
    """
    img = cv2.imread(img_path)
    if img is None:
        # BUGFIX: cv2.imread returns None for unreadable files; previously
        # that None was passed straight to detectMultiScale and crashed.
        return
    #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(img, 1.3, 5)
    for (x,y,w,h) in faces:
        face_clip = img[y:y+h, x:x+w] #cropping the face in image
        cv2.imwrite(img_path, cv2.resize(face_clip, (350, 350)))
# Crop-and-resize every test image in place (destructive: overwrites files).
for i, d in Final_Test_human.iterrows():
    img_path = os.path.join(d["folderName"], d["imageName"])
    face_det_crop_resize(img_path)
    print(img_path)
def _plot_heatmap(matrix, title, fmt, labels):
    """Render one annotated heatmap (confusion/precision/recall) with the
    shared styling used by print_confusionMatrix."""
    plt.figure(figsize=(16,7))
    sns.heatmap(matrix, cmap = "Blues", annot = True, fmt = fmt, xticklabels=labels, yticklabels=labels)
    plt.title(title, fontsize = 30)
    plt.xlabel('Predicted Class', fontsize = 20)
    plt.ylabel('Original Class', fontsize = 20)
    plt.tick_params(labelsize = 15)
    plt.xticks(rotation = 90)
    plt.show()

def print_confusionMatrix(Y_TestLabels, PredictedLabels):
    """Plot confusion, precision and recall matrices for the 7 emotion classes.

    precision[i, j] = C[i, j] / column-sum(j)  (each column sums to 1)
    recall[i, j]    = C[i, j] / row-sum(i)     (each row sums to 1)
    """
    confusionMatx = confusion_matrix(Y_TestLabels, PredictedLabels)
    # Guard against 0/0 when a class never appears in labels/predictions:
    # cells become NaN quietly instead of emitting RuntimeWarnings (the
    # original version triggered "invalid value encountered in true_divide").
    with np.errstate(divide = "ignore", invalid = "ignore"):
        precision = confusionMatx/confusionMatx.sum(axis = 0)
        recall = (confusionMatx.T/confusionMatx.sum(axis = 1)).T
    sns.set(font_scale=1.5)
    labels = ["ANGRY", "DISGUST", "FEAR", "HAPPY", "NEUTRAL", "SAD", "SURPRISE"]
    _plot_heatmap(confusionMatx, "Confusion Matrix", ".1f", labels)
    print("-"*125)
    _plot_heatmap(precision, "Precision Matrix", ".2f", labels)
    print("-"*125)
    _plot_heatmap(recall, "Recall Matrix", ".2f", labels)
# Run the trained model over every human test image and report accuracy
# plus confusion/precision/recall matrices.
predicted_labels, true_labels=[],[]
for i, d in Final_Test_human.iterrows():
    img_path = os.path.join(d["folderName"], d["imageName"])
    img_label=d["Labels"]
    #img_r = cv2.resize(img,(48,48))
    img=PIL.Image.open(img_path)
    img_pixels = image.img_to_array(img)
    img_pixels = np.expand_dims(img_pixels, axis = 0)  # add batch dimension
    img_pixels /= 255  # scale pixels to [0, 1] as during training
    preds=model.predict(img_pixels)
    predicted_labels.append(preds[0].argmax())  # class index with highest score
    true_labels.append(img_label)
accuracy=accuracy_score(true_labels,predicted_labels)
print("Accuracy on Human Test Data = {}%".format(np.round(float(accuracy*100), 2)))
print_confusionMatrix(true_labels, predicted_labels)
###Output
C:\Users\Kunj\anaconda3\envs\tf-gpu\lib\site-packages\ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in true_divide
after removing the cwd from sys.path.
C:\Users\Kunj\anaconda3\envs\tf-gpu\lib\site-packages\ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in true_divide
###Markdown
Import all the necessary library
###Code
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
from IPython.display import SVG, Image
import tensorflow as tf
print("Tensorflow version:", tf.__version__)
###Output
Tensorflow version: 2.4.1
###Markdown
Plot some images from the dataset
###Code
def plot_example_images(plt):
    """Show 5 sample images (resized to 48x48) from each training emotion
    folder in a 7x5 subplot grid, then return the pyplot module."""
    img_size = 48
    plt.figure(0, figsize=(12,20))
    ctr = 0
    for expression in os.listdir("/content/drive/MyDrive/facial expression/data/train/"):
        for i in range(1,6):  # 5 examples per class (file indices 1..5)
            ctr += 1
            plt.subplot(7,5,ctr)  # grid assumes exactly 7 emotion folders
            img = load_img("/content/drive/MyDrive/facial expression/data/train/" + expression + "/" +os.listdir("/content/drive/MyDrive/facial expression/data/train/" + expression)[i], target_size=(img_size, img_size))
            plt.imshow(img, cmap="gray")
    plt.tight_layout()
    return plt
plot_example_images(plt).show()
# Print the number of images available for each emotion class.
for expression in os.listdir("/content/drive/MyDrive/facial expression/data/train/"):
    print(str(len(os.listdir("/content/drive/MyDrive/facial expression/data/train/" + expression))) + " " + expression + " images")
###Output
3995 angry images
4097 fear images
4830 sad images
4965 neutral images
7215 happy images
3171 surprise images
436 disgust images
###Markdown
split dataset for training and validation
###Code
# Batched image generators: horizontal-flip augmentation only,
# 48x48 grayscale inputs, one-hot ("categorical") labels.
img_size = 48
batch_size = 64
datagen_train = ImageDataGenerator(horizontal_flip=True)
train_generator = datagen_train.flow_from_directory("/content/drive/MyDrive/facial expression/data/train/",
                                                    target_size=(img_size,img_size),
                                                    color_mode="grayscale",
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    shuffle=True)
datagen_validation = ImageDataGenerator(horizontal_flip=True)
# shuffle=False keeps validation order stable for later evaluation.
validation_generator = datagen_validation.flow_from_directory("/content/drive/MyDrive/facial expression/data/test/",
                                                              target_size=(img_size,img_size),
                                                              color_mode="grayscale",
                                                              batch_size=batch_size,
                                                              class_mode='categorical',
                                                              shuffle=False)
###Output
Found 28709 images belonging to 7 classes.
Found 7178 images belonging to 7 classes.
###Markdown
Model
###Code
# Four conv blocks (64/128/512/512 filters), each with batch-norm, ReLU,
# 2x2 max-pool and 25% dropout, then two dense blocks (256/512) and a
# 7-way softmax head for the emotion classes.
model = Sequential()
# 1 - Convolution
model.add(Conv2D(64,(3,3), padding='same', input_shape=(48, 48,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(128,(5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 3rd Convolution layer
model.add(Conv2D(512,(3,3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 4th Convolution layer
model.add(Conv2D(512,(3,3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer 1st layer
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
# Fully connected layer 2nd layer
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(7, activation='softmax'))
opt = Adam(lr=0.0005)  # NOTE(review): `lr` is the legacy argument name (valid on TF 2.4)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
import keras
keras.utils.plot_model(model,'/content/drive/MyDrive/facial expression/model.png', show_shapes=True)
import tensorflow as tf
###Output
_____no_output_____
###Markdown
training the model
###Code
# Train on GPU for 15 epochs with LR reduction on val-loss plateau and
# per-epoch weight checkpointing.
with tf.device('/GPU:0'):
    epochs = 15
    steps_per_epoch = train_generator.n//train_generator.batch_size
    validation_steps = validation_generator.n//validation_generator.batch_size
    # Divide LR by 10 after 2 stagnant epochs, floor at 1e-5.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                  patience=2, min_lr=0.00001, mode='auto')
    checkpoint = ModelCheckpoint("/content/drive/MyDrive/facial expression/face_model_weights.h5", monitor='val_accuracy',
                                 save_weights_only=True, mode='max', verbose=1)
    callbacks = [ checkpoint, reduce_lr]
    history = model.fit(
        x=train_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_data = validation_generator,
        validation_steps = validation_steps,
        callbacks=callbacks
    )
###Output
Epoch 1/15
448/448 [==============================] - 6280s 14s/step - loss: 1.9596 - accuracy: 0.2568 - val_loss: 1.5205 - val_accuracy: 0.4136
Epoch 00001: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 2/15
448/448 [==============================] - 43s 96ms/step - loss: 1.5125 - accuracy: 0.4210 - val_loss: 1.5629 - val_accuracy: 0.4295
Epoch 00002: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 3/15
448/448 [==============================] - 42s 93ms/step - loss: 1.3506 - accuracy: 0.4834 - val_loss: 1.3200 - val_accuracy: 0.5064
Epoch 00003: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 4/15
448/448 [==============================] - 41s 92ms/step - loss: 1.2552 - accuracy: 0.5188 - val_loss: 1.1997 - val_accuracy: 0.5371
Epoch 00004: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 5/15
448/448 [==============================] - 42s 93ms/step - loss: 1.1810 - accuracy: 0.5457 - val_loss: 1.2215 - val_accuracy: 0.5244
Epoch 00005: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 6/15
448/448 [==============================] - 41s 92ms/step - loss: 1.1551 - accuracy: 0.5602 - val_loss: 1.1624 - val_accuracy: 0.5479
Epoch 00006: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 7/15
448/448 [==============================] - 41s 92ms/step - loss: 1.1113 - accuracy: 0.5798 - val_loss: 1.1528 - val_accuracy: 0.5628
Epoch 00007: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 8/15
448/448 [==============================] - 41s 92ms/step - loss: 1.0747 - accuracy: 0.5910 - val_loss: 1.0864 - val_accuracy: 0.5936
Epoch 00008: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 9/15
448/448 [==============================] - 43s 95ms/step - loss: 1.0621 - accuracy: 0.5990 - val_loss: 1.1393 - val_accuracy: 0.5703
Epoch 00009: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 10/15
448/448 [==============================] - 42s 95ms/step - loss: 1.0297 - accuracy: 0.6099 - val_loss: 1.0840 - val_accuracy: 0.5889
Epoch 00010: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 11/15
448/448 [==============================] - 42s 93ms/step - loss: 1.0112 - accuracy: 0.6151 - val_loss: 1.0707 - val_accuracy: 0.5960
Epoch 00011: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 12/15
448/448 [==============================] - 42s 94ms/step - loss: 0.9912 - accuracy: 0.6264 - val_loss: 1.0822 - val_accuracy: 0.5933
Epoch 00012: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 13/15
448/448 [==============================] - 42s 93ms/step - loss: 0.9622 - accuracy: 0.6305 - val_loss: 1.1562 - val_accuracy: 0.5590
Epoch 00013: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 14/15
448/448 [==============================] - 42s 93ms/step - loss: 0.9168 - accuracy: 0.6566 - val_loss: 0.9639 - val_accuracy: 0.6410
Epoch 00014: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
Epoch 15/15
448/448 [==============================] - 42s 93ms/step - loss: 0.8838 - accuracy: 0.6686 - val_loss: 0.9648 - val_accuracy: 0.6445
Epoch 00015: saving model to /content/drive/MyDrive/facial expression/face_model_weights.h5
###Markdown
save the model
###Code
# Persist the model architecture as JSON (weights are checkpointed separately).
model_json = model.to_json()
with open("/content/drive/MyDrive/facial expression/model.json", "w") as json_file:
    json_file.write(model_json)
###Output
_____no_output_____
###Markdown
**IMPORTING LIBRARIES**
###Code
import os
import shutil
import random #for random distribution of data
from shutil import copyfile
from os import getcwd # getcwd returns current working directory
import pandas as pd # for data manipulation
import numpy as np # for operation
import tensorflow as tf
from tensorflow.keras.preprocessing import image # for image processing
import matplotlib.pyplot as plt
from keras.utils import np_utils
import cv2
from google.colab.patches import cv2_imshow
from scipy import stats
from PIL import Image
import math
%matplotlib inline
import matplotlib.image as mpimg
import numpy as np
import dlib
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from google.colab import drive # for importing dataset from google drive
drive.mount('/content/drive')
from zipfile import ZipFile # for importing dataset locally from the colab
file_name = '/content/datasets.zip'
with ZipFile(file_name,'r') as zip:  # extract the CK+48 dataset into the working dir
    zip.extractall()
SOURCE = '/content/datasets/CK+48/' #source directory for images
os.listdir(SOURCE) # subdirectories of source directory
###Output
_____no_output_____
###Markdown
**PREPROCESSING**
###Code
def HistEqualization(image, number_bins = 256):
    """Histogram-equalize a grayscale image.

    Builds the intensity histogram over [0, 256), min-max rescales its
    cumulative distribution to 0-255 while ignoring empty bins, and uses
    the result as a lookup table applied to the pixel values.
    """
    hist, _ = np.histogram(image.flatten(), number_bins, [0, 256])
    cumulative = hist.cumsum()
    # scale the CDF (zeros stay zero, so empty bins remain maskable)
    cumulative = hist.max() * cumulative / cumulative.max()
    masked = np.ma.masked_equal(cumulative, 0)
    # stretch the non-empty part of the CDF onto the full 0-255 range
    masked = (masked - masked.min()) * 255 / (masked.max() - masked.min())
    lut = np.ma.filled(masked, 0).astype('uint8')
    return lut[image.astype('uint8')]
!wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
!bunzip2 "shape_predictor_68_face_landmarks.dat.bz2"
# dlib face detector (HOG + linear SVM) and 68-point landmark predictor.
predictor_path = 'shape_predictor_68_face_landmarks.dat' # path of data
#initializes dlib’s pre-trained face detector based on a modification to the standard Histogram of Oriented Gradients + Linear SVM method for object detection.
detector = dlib.get_frontal_face_detector()
#loads the facial landmark predictor using the path
predictor = dlib.shape_predictor(predictor_path)
# take a bounding predicted by dlib and convert it
def rect_to_bb(rect):
    """Convert a dlib rectangle into an OpenCV-style (x, y, w, h) tuple."""
    left, top = rect.left(), rect.top()
    width = rect.right() - left
    height = rect.bottom() - top
    return (left, top, width, height)
# extract 68 coordinate from shape object
def shape_to_np(shape, dtype = int):
    """Extract the 68 dlib facial-landmark points into a (68, 2) array."""
    points = np.zeros((68, 2), dtype=dtype)
    for idx in range(68):
        part = shape.part(idx)
        points[idx] = (part.x, part.y)
    return points
# calculate forehead distance to use in cropping image
def forehead_dist(coords):
    """Vertical eye-landmark offset used when cropping: the difference of
    the summed right-eye (rows 42:47) and left-eye (rows 36:41) y-coords,
    divided by 6.

    NOTE(review): each slice covers 5 landmarks although the divisor is 6;
    points 42-47 / 36-41 inclusive (6 each) may have been intended -- confirm.
    """
    right_eye_y = np.sum(coords[42:47, 1])
    left_eye_y = np.sum(coords[36:41, 1])
    return (right_eye_y - left_eye_y) / 6
# calculate angle using eye landmark points i.e 42 to 47 is right eye and 36 to 41 is left eye
def required_angle(shape):
    """Head-roll angle in degrees: arctan of the slope between the summed
    right-eye (42:47) and left-eye (36:41) landmark coordinates."""
    dy = np.sum(shape[42:47, 1]) - np.sum(shape[36:41, 1])
    dx = np.sum(shape[42:47, 0]) - np.sum(shape[36:41, 0])
    return math.degrees(math.atan(dy / dx))
#finally rotate image obtained by required_angle function
def rotate_image(image, shape):
    """Rotate *image* about its centre by the eye-line angle computed from
    the landmarks, so that the eyes end up horizontal."""
    angle = required_angle(shape)
    center = tuple(np.array(image.shape[1::-1]) / 2)
    rotation = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(image, rotation, image.shape[1::-1], flags=cv2.INTER_LINEAR)
def face_alignment(image): # implementing face alignment
    """Rotate each detected face so the eyes are level.

    Returns the rotated image when exactly one face is detected, a list of
    rotated images when several are found, and the unchanged input when no
    face is detected.
    """
    image = np.array(image)
    image = image.astype(np.uint8)
    gray_image = image  # input is already single-channel here
    #gray_image = cv2.cvtColor(image ,cv2.COLOR_BGR2GRAY) # convert color image to grayscale image
    rects = detector(gray_image ,1) # detect faces in the grayscale image
    if len(rects) > 0:
        images = []
        for (i, rect) in enumerate(rects):
            shape = predictor(image, rect)
            shape = shape_to_np(shape)
            rotated_image = rotate_image(image , shape)
            images.append(rotated_image)
        if len(rects) == 1 :
            return rotated_image
        else:
            return images
    else:
        #print("Error : number of detected face is zero, so we just return original image")
        return image
def face_cropping_without_forehead(image): # implementing face cropping without forehead
    """Crop the face from landmarks only, excluding most of the forehead.

    The top bound sits 0.6 * eye-offset above the right-eye centreline;
    left/right/bottom bounds come from jaw landmarks 0, 16 and 8.  Falls
    back to partial crops when a bound collapses to an empty range, and to
    the original image when no face is detected.
    """
    gray_image = image
    #gray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # convert color image to grayscale image
    rects = detector(gray_image ,1) # detect faces in the grayscale image
    if len(rects) > 0:
        images = []
        for (i, rect) in enumerate(rects):
            shape = predictor(image, rect)
            shape = shape_to_np(shape)
            # convert dlib's rectangle to a OpenCV-style bounding box
            # [i.e., (x, y, w, h)], then draw the face bounding box
            (x1, y1, w1, h1) = rect_to_bb(rect)
            d = forehead_dist(shape)
            top_y = int(np.sum(shape[42 : 47, 1]) / 6 - 0.6 * d)
            left_x, left_y = shape[0]
            bottom_x, bottom_y = shape[8]
            right_x, right_y = shape[16]
            cropped_image = image[top_y : bottom_y, left_x : right_x]
            if cropped_image.shape[0] == 0:  # empty vertical range -> keep (almost) full height
                cropped_image = image[0:-1,left_x : right_x]
            if cropped_image.shape[1] == 0:  # empty horizontal range -> keep (almost) full width
                cropped_image = image[top_y : bottom_y, 0:-1]
            images.append(cropped_image)
        if len(rects) == 1 :
            return cropped_image
        else:
            return images
    else:
        #print("Error : number of detected face is zero, so we just return original image")
        return image
def face_cropping_without_background(image): # implementing face cropping without background
    """Crop the detected face including the forehead, dropping background.

    Bounds are the union of the landmark extremes (points 19/0/8/16) and
    the detector bounding box.  Returns the crop (or a list of crops when
    several faces are detected); falls back to column-/row-only crops when
    a bound collapses, and to the original image when no face is found.
    """
    gray_image=image
    #gray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # convert color image to grayscale image
    rects = detector(gray_image ,1) # detect faces in the grayscale image
    if len(rects) > 0:
        images = []
        for (i, rect) in enumerate(rects):
            shape = predictor(image, rect)
            shape = shape_to_np(shape)
            # convert dlib's rectangle to a OpenCV-style bounding box
            # [i.e., (x, y, w, h)], then draw the face bounding box
            (x1, y1, w1, h1) = rect_to_bb(rect)
            top_x, top_y = shape[19]
            left_x, left_y = shape[0]
            bottom_x, bottom_y = shape[8]
            right_x, right_y = shape[16]
            # BUGFIX: the vertical extent of the detector box is y1 + h1
            # (height); the original used y1 + w1 (the box *width*).
            cropped_image = image[ min(top_y, abs(y1)) : max(bottom_y, abs(y1) + h1), min(left_x, abs(x1)) : max(right_x, abs(x1) + w1)]
            if cropped_image.shape[0] == 0:  # empty vertical range -> keep full height
                cropped_image = image[:,min(left_x, abs(x1)) : max(right_x, abs(x1) + w1)]
            if cropped_image.shape[1] == 0:  # empty horizontal range -> keep full width
                cropped_image = image[min(top_y, abs(y1)) : max(bottom_y, abs(y1) + h1), :]
            images.append(cropped_image)
        if len(rects) == 1 :
            return cropped_image
        else:
            return images
    else:
        print("Error : number of detected face is zero, so we just return original image")
        return image
def preprocessing(images): # A function for overall preprocessing including alignment ,cropping and normalization
    """Full pipeline: align the face, crop away the background, histogram
    equalize, z-score normalize (rescaled by 255) and resize to 100x100.

    Returns a (100, 100, 1) array when possible, otherwise the un-reshaped
    result (e.g. when the cropping step returned a list of faces).
    """
    images = face_alignment(images)
    images = face_cropping_without_background(images)
    images = HistEqualization(images,256)
    # NOTE(review): scipy's zscore normalizes along its default axis -- confirm
    # a whole-image normalization was intended here.
    normalized_img = stats.zscore(images)
    images = normalized_img*255
    #images = cv2.cvtColor(images,cv2.COLOR_BGR2GRAY)
    images = cv2.resize(images,(100,100))
    images = np.array(images)
    try:
        images = images.reshape(100,100,1)  # add channel axis for the CNN
    except:
        return images  # non-(100,100) result (e.g. list of faces) passes through
    return images
# just a checkout code , NO need to run it
image = cv2.imread('/content/drive/My Drive/datasets/jaffe/angry/KA.AN1.39.png',0)
cv2_imshow(preprocessing(image))
print(preprocessing(image).shape)
###Output
_____no_output_____
###Markdown
**MAKING DIRECTORIES FOR TRAINING AND VALIDATION IMAGES**
###Code
# Build the train/test directory tree with one sub-folder per emotion,
# preserving the original creation order of each set.
os.mkdir('/content/images')
os.mkdir('/content/images/train')
os.mkdir('/content/images/test')
for _emotion in ['anger', 'sadness', 'happy', 'fear', 'surprise', 'disgust', 'contempt']:
    os.mkdir('/content/images/train/' + _emotion)
for _emotion in ['anger', 'sadness', 'happy', 'fear', 'surprise', 'contempt', 'disgust']:
    os.mkdir('/content/images/test/' + _emotion)
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE): # A function that splits the data present in source directory
    """Randomly split the images in SOURCE between TRAINING and TESTING.

    SPLIT_SIZE is the training fraction (e.g. 0.8).  Zero-byte files are
    skipped with a message.  Directory arguments must end with a path
    separator because destinations are built by string concatenation.
    """
    files = []
    for filename in os.listdir(SOURCE):  # collect non-empty files only
        file = SOURCE + filename
        if os.path.getsize(file) > 0:
            files.append(filename)
        else:
            print(filename + " is zero length, so ignoring.")
    training_length = int(len(files) * SPLIT_SIZE)
    shuffled_set = random.sample(files, len(files))
    training_set = shuffled_set[0:training_length]
    # BUGFIX: the test set must be the *remaining* files.  The original
    # took `shuffled_set[:testing_length]`, which re-used files already in
    # the training set, so train and test overlapped.
    testing_set = shuffled_set[training_length:]
    for filename in training_set:
        copyfile(SOURCE + filename, TRAINING + filename)
    for filename in testing_set:
        copyfile(SOURCE + filename, TESTING + filename)
# 80/20 train/test split; per-emotion destination and source directories,
# then one split_data call per emotion to populate the tree.
split_size = 0.8
anger_train_dir = '/content/images/train/anger/'
sadness_train_dir = '/content/images/train/sadness/'
disgust_train_dir = '/content/images/train/disgust/'
happy_train_dir = '/content/images/train/happy/'
fear_train_dir = '/content/images/train/fear/'
contempt_train_dir = '/content/images/train/contempt/'
surprise_train_dir = '/content/images/train/surprise/'
anger_test_dir = '/content/images/test/anger/'
sadness_test_dir = '/content/images/test/sadness/'
disgust_test_dir = '/content/images/test/disgust/'
happy_test_dir = '/content/images/test/happy/'
fear_test_dir = '/content/images/test/fear/'
contempt_test_dir = '/content/images/test/contempt/'
surprise_test_dir = '/content/images/test/surprise/'
anger_source_dir = '/content/datasets/CK+48/anger/'
sadness_source_dir = '/content/datasets/CK+48/sadness/'
disgust_source_dir = '/content/datasets/CK+48/disgust/'
happy_source_dir = '/content/datasets/CK+48/happy/'
fear_source_dir = '/content/datasets/CK+48/fear/'
contempt_source_dir = '/content/datasets/CK+48/contempt/'
surprise_source_dir = '/content/datasets/CK+48/surprise/'
len(os.listdir(anger_source_dir))
split_data(anger_source_dir,anger_train_dir,anger_test_dir,split_size)
split_data(sadness_source_dir,sadness_train_dir,sadness_test_dir,split_size)
split_data(disgust_source_dir,disgust_train_dir,disgust_test_dir,split_size)
split_data(happy_source_dir,happy_train_dir,happy_test_dir,split_size)
split_data(fear_source_dir,fear_train_dir,fear_test_dir,split_size)
split_data(contempt_source_dir,contempt_train_dir,contempt_test_dir,split_size)
split_data(surprise_source_dir,surprise_train_dir,surprise_test_dir,split_size)
###Output
_____no_output_____
###Markdown
**DATA AUGMENTATION**
###Code
# Augmented generators over the 80/20 split: rescale to [0,1], small
# rotation, horizontal flip, plus the full face-preprocessing function.
TRAINING_DIR = "/content/images/train"
train_datagen = ImageDataGenerator(rescale=1./255,
                                   horizontal_flip=True,
                                   rotation_range=2,
                                   preprocessing_function=preprocessing
                                   )
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=50,
                                                    class_mode='categorical',
                                                    target_size=(100,100),
                                                    shuffle=True,
                                                    color_mode='grayscale')
VALIDATION_DIR = "/content/images/test"
validation_datagen = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True,
    rotation_range=2,
    preprocessing_function=preprocessing
)
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              batch_size=50,
                                                              class_mode='categorical',
                                                              target_size=(100,100),
                                                              shuffle=True,
                                                              color_mode='grayscale')
# Freeze ordering afterwards so evaluation (confusion matrix) sees a
# stable sample order.
validation_generator.shuffle = False
validation_generator.index_array = None
###Output
Found 783 images belonging to 7 classes.
Found 198 images belonging to 7 classes.
###Markdown
**MAKING CNN MODEL**
###Code
def _build_cnn(num_of_classes, dense_units):
    """Shared builder for the four experiment models: two conv/pool stages,
    flatten, an optional fully connected layer of *dense_units* ReLU
    neurons (skipped when 0), 50% dropout and a softmax output layer."""
    layers = [
        tf.keras.layers.Conv2D(32, (5,5), activation = 'relu', name = 'conv2d_1',),
        tf.keras.layers.MaxPooling2D((2,2), name = 'max_pool_1'),
        tf.keras.layers.Conv2D(64,(5,5), activation = 'relu', name = 'conv2d_2'),
        tf.keras.layers.MaxPooling2D((2,2), name = 'max_pool_2'),
        tf.keras.layers.Flatten(name = 'flatten_1'),
    ]
    if dense_units:
        layers.append(tf.keras.layers.Dense(dense_units, activation = 'relu', name = "full_connected_1"))
    layers.append(tf.keras.layers.Dropout(0.5))
    layers.append(tf.keras.layers.Dense(num_of_classes,activation = 'softmax'))
    return tf.keras.models.Sequential(layers)

def CNN_model_with_0_neurons(num_of_classes):
    """CNN with no fully connected hidden layer."""
    return _build_cnn(num_of_classes, 0)

def CNN_model_with_256_neurons(num_of_classes):
    """CNN with a 256-neuron fully connected hidden layer."""
    return _build_cnn(num_of_classes, 256)

def CNN_model_with_512_neurons(num_of_classes):
    """CNN with a 512-neuron fully connected hidden layer."""
    return _build_cnn(num_of_classes, 512)

def CNN_model_with_1024_neurons(num_of_classes):
    """CNN with a 1024-neuron fully connected hidden layer."""
    return _build_cnn(num_of_classes, 1024)
# Baseline model (no dense hidden layer) for the 7 emotion classes.
model = CNN_model_with_0_neurons(7)
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy',metrics = ['accuracy'])
class myCallback(tf.keras.callbacks.Callback):
    """Stops training once the epoch's training accuracy exceeds 99%."""
    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` avoids the mutable-default-argument pitfall of the
        # previous `logs={}` signature.
        logs = logs or {}
        acc = logs.get('accuracy')
        # Guard: the metric can be absent (None), which previously raised
        # TypeError on the `> 0.99` comparison.
        if acc is not None and acc > 0.99:
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True
###Output
_____no_output_____
###Markdown
**TRAINING THE MODEL**
###Code
# Train for up to 10 epochs (the callback stops early at 99% accuracy),
# then plot training vs validation accuracy per epoch.
callbacks = myCallback()
history = model.fit(train_generator,epochs=10,callbacks=[callbacks],batch_size=50,shuffle=True,validation_data=validation_generator)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy with 256 neurons')
plt.legend(loc=0)
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
**EVALUATION VIA CONFUSION MATRIX**
###Code
# Class names taken from the training directory listing.
# NOTE(review): os.listdir order is arbitrary while the generator's label
# order is its own class ordering -- confirm these two actually agree.
emotion = os.listdir('/content/images/train')
import itertools
from sklearn.metrics import confusion_matrix,classification_report
def plot_confusion_matrix(cm):
    """Print the raw confusion matrix and plot it row-normalized, using the
    global `emotion` list for axis tick labels."""
    print(cm)
    # Row-normalize so each row shows per-class fractions.
    # NOTE(review): a class absent from the true labels gives a zero row
    # sum and produces NaNs here -- confirm all classes are represented.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap="BuPu")
    plt.title('Confusion Matrix on Validation Data')
    plt.colorbar()
    tick_marks = np.arange(len(emotion))
    plt.xticks(tick_marks, emotion,rotation=45)
    plt.yticks(tick_marks, emotion)
    fmt = '.2f'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # white text on dark cells, black on light ones
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True Emotions')
    plt.xlabel('Predicted Emotions')
    plt.show()
# Predict over the validation generator and plot the confusion matrix.
# NOTE(review): comparing against validation_generator.classes assumes the
# generator order matches prediction order (requires shuffle=False, which
# is set further above).
predictions = model.predict(validation_generator, 4)
y_pred = np.argmax(predictions, axis=1)
plot_confusion_matrix(confusion_matrix(validation_generator.classes, y_pred))
###Output
[[14 0 0 0 0 0 0]
[ 0 6 0 0 0 0 0]
[ 0 0 18 0 0 0 0]
[ 0 0 0 8 0 0 0]
[ 0 0 0 1 20 0 0]
[ 0 0 0 0 0 9 0]
[ 0 0 0 0 0 0 25]]
###Markdown
***Cross validation***
###Code
# Create the directory tree for 10-fold cross validation: one train/test
# pair of emotion sub-folders per fold.
os.mkdir('/content/folds')
for i in range(0,10):
    os.mkdir('/content/folds/fold'+str(i))
    os.mkdir('/content/folds/fold'+str(i)+'/train')
    os.mkdir('/content/folds/fold'+str(i)+'/test')
    for j in os.listdir('/content/datasets/CK+48'):
        os.mkdir('/content/folds/fold'+str(i)+'/train/'+j)
        os.mkdir('/content/folds/fold'+str(i)+'/test/'+j)
# Populate each fold: per emotion, slice i/10..(i+1)/10 of the files as
# the test set, everything else as the training set.
for i in range(0,10):
    fold_path = '/content/folds/fold' + str(i)
    test_fold = '/content/folds/fold' + str(i) + '/test'
    train_fold = '/content/folds/fold' + str(i) + '/train'
    for j in os.listdir(train_fold):
        emotion_train_fold = train_fold + '/' + j
        emotion_test_fold = test_fold + '/' + j
        emotion_source = '/content/datasets/CK+48/' + j
        length = len(os.listdir(emotion_source))
        initial_size = int(i*length/10)
        final_size = int((i+1)*length/10)
        files = []
        for k in os.listdir(emotion_source):
            path = emotion_source + '/' + k
            files.append(k)
        testing_set = files[initial_size:final_size]
        training_set = []
        for n in files:
            if n not in testing_set:
                training_set.append(n)
        for filename in training_set:
            src = emotion_source + '/' + filename
            des = emotion_train_fold + '/' + filename
            copyfile(src,des)
        for filename in testing_set:
            src = emotion_source + '/' + filename
            des = emotion_test_fold + '/' + filename
            copyfile(src,des)
# Train a fresh 256-neuron model per fold and average the 10 accuracies.
sum_acc=0
for i in range(0,10):
    fold_path = '/content/folds/fold' + str(i)
    TRAINING_DIR = train_fold = '/content/folds/fold' + str(i) + '/train'
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       horizontal_flip=True,
                                       rotation_range=2,
                                       preprocessing_function=preprocessing
                                       )
    train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                        batch_size=50,
                                                        class_mode='categorical',
                                                        target_size=(100,100),
                                                        shuffle=True,
                                                        color_mode='grayscale')
    VALIDATION_DIR = '/content/folds/fold' + str(i) + '/test'
    validation_datagen = ImageDataGenerator(
        rescale=1./255,
        horizontal_flip=True,
        rotation_range=2,
        preprocessing_function=preprocessing
    )
    validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                                  batch_size=50,
                                                                  class_mode='categorical',
                                                                  target_size=(100,100),
                                                                  shuffle=True,
                                                                  color_mode='grayscale')
    validation_generator.shuffle = False
    validation_generator.index_array = None
    model = CNN_model_with_256_neurons(7)
    model.compile(optimizer = 'adam', loss = 'categorical_crossentropy',metrics = ['accuracy'])
    history = model.fit(train_generator,epochs=10,batch_size=50,shuffle=True)
    test_loss,test_acc=model.evaluate(validation_generator)
    sum_acc=sum_acc+test_acc
    # NOTE(review): the running average is recomputed and printed every
    # fold; only the final iteration's value is the true 10-fold average.
    avg_acc=sum_acc/10
    print(avg_acc)
# Quick sanity check: classify a single downloaded image.
image = cv2.imread('/content/happily-surprised.jpg',0)  # read as grayscale
cv2_imshow(image)
image = preprocessing(image)
image = np.array(image)
cv2_imshow(image)
image = image.reshape(1,100,100,1)  # add batch dimension for predict()
print('The emotion in the given figure is ' + emotion[np.argmax(model.predict(image))])
###Output
_____no_output_____ |
notebooks/predict.ipynb | ###Markdown
Specify parameters
###Code
# Inference parameters and paths for the bbox-detection example.
input_dir = '../example_data/img'
bbox_fn = '../example_data/bboxes.csv'
model_dir = '../outputs/model/'
wn = 'weights_best.pth'
output_dir = '../outputs/outlines/'
batch_size = 2
overlap_threshold=0.1  # box-overlap cut-off passed to detect_bboxes
detection_threshold=0.1  # minimum detection confidence
figsize = 4
model_name = os.listdir(model_dir)[0]  # use the first model found in model_dir
model_name
###Output
_____no_output_____
###Markdown
Predict
###Code
# Run detection over all images in input_dir with the trained weights.
df = detect_bboxes(input_dir=input_dir,
                   model_fn=os.path.join(model_dir, model_name, wn),
                   batch_size=batch_size,
                   overlap_threshold=overlap_threshold,
                   detection_threshold=detection_threshold)
###Output
_____no_output_____
###Markdown
Overlay bboxes
###Code
# Draw the detected boxes onto the input images and save them to output_dir.
overlay_bboxes_batch(df=df, input_dir=input_dir,
                     output_dir=os.path.join(output_dir, model_name))
###Output
_____no_output_____
###Markdown
Display example data
###Code
# Display up to five of the overlaid result images.
files = walk_dir(os.path.join(output_dir, model_name))
for i in range(min(5, len(files))):
    plt.figure(figsize=(figsize, figsize))
    io.imshow(io.imread(files[i]))
###Output
_____no_output_____
###Markdown
Predict on new data using a trained CNN on XPS data on Google Colab In this notebook, we will use a trained convolutional network to predict on unseen XPS spectra. Setup Mount google drive, change working directory
###Code
# Mount drive
from google.colab import drive
import os
drive.mount('/content/drive')
# Change working path
os.chdir('/content/drive/My Drive/deepxps')
###Output
_____no_output_____
###Markdown
Install packages and import modules
###Code
%%capture
# Install packages
!pip install python-docx
# Import standard modules and magic commands
import datetime
import numpy as np
import pytz
import importlib
import matplotlib.pyplot as plt
# Magic commands
%matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Disable tf warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Set seeds and restart session to ensure reproducibility
###Code
def reset_seeds_and_session(seed=1):
    """Reset all relevant RNG seeds and install a single-threaded TF session.

    Seeds Python hashing, TensorFlow and NumPy, then registers a
    ``tf.compat.v1`` session limited to one intra-/inter-op thread so that
    repeated runs of this notebook produce identical results.
    """
    np.random.seed(seed)
    tf.random.set_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Single-threaded execution avoids nondeterministic op scheduling.
    thread_config = tf.compat.v1.ConfigProto(
        intra_op_parallelism_threads=1,
        inter_op_parallelism_threads=1,
    )
    session = tf.compat.v1.Session(
        graph=tf.compat.v1.get_default_graph(), config=thread_config
    )
    tf.compat.v1.keras.backend.set_session(session)

reset_seeds_and_session(seed=1)
###Output
_____no_output_____
###Markdown
Check TensorFlow version
###Code
f"TF version: {tf.__version__}."
###Output
_____no_output_____
###Markdown
Predict on new data set Load custom modules
###Code
# Load (or hot-reload) the project-specific modules.  On the first run the
# names `classifier`/`clfutils` are undefined, so reload() raises and the
# except branch performs the initial import.
# NOTE(review): the bare except also hides genuine import errors.
try:
    import importlib
    importlib.reload(classifier)
    importlib.reload(clfutils)
    print('\n Modules were reloaded.')
except:
    import xpsdeeplearning.network.classifier as classifier
    import xpsdeeplearning.network.utils as clfutils
    print('Modules were loaded.')
###Output
_____no_output_____
###Markdown
Set up the parameters & folder structure
###Code
np.random.seed(502)
# Timestamp (Berlin time) used to name this run's output folder.
# NOTE(review): `time` shadows the stdlib module name (imported later).
time = datetime.datetime.now().astimezone(pytz.timezone('Europe/Berlin')).strftime("%Y%m%d_%Hh%Mm")
exp_name = 'test'

# Regression classifier over spectral intensities only (no energy axis).
clf = classifier.Classifier(time = time,
                            exp_name = exp_name,
                            task = 'regression',
                            intensity_only = True)
###Output
_____no_output_____
###Markdown
Load and inspect the data
###Code
input_filepath = r'/content/drive/My Drive/deepxps/datasets/20210903_CoFe_combined_without_auger_peaks.h5'
# Nearly everything goes to the test split — this notebook only predicts.
train_test_split = 0.99
train_val_split = 0
no_of_examples = 100#000 #180

X_train, X_val, X_test, y_train, y_val, y_test,\
names_train, names_val, names_test =\
clf.load_data_preprocess(input_filepath = input_filepath,
                         no_of_examples = no_of_examples,
                         train_test_split = train_test_split,
                         train_val_split = train_val_split)

# Check how the examples are distributed across the classes.
class_distribution = clf.datahandler.check_class_distribution(clf.task)
clf.plot_class_distribution()
clf.plot_random(no_of_spectra = 10, dataset = 'test')
###Output
_____no_output_____
###Markdown
Continue with 10-point average of last values (cutoff: 5 eV on each side)
###Code
# Flatten the outer 100 channels of every spectrum: each edge is replaced by
# the 10-point average of the values just inside it (per the markdown above,
# this corresponds to a 5 eV cutoff on each side).  Arrays are modified
# in place.
for dataset in [clf.datahandler.X,
                clf.datahandler.X_train,
                clf.datahandler.X_val,
                clf.datahandler.X_test]:
    for arr in dataset:
        arr[:100,:] = np.average(arr[100:110,:],
                                 axis=0)
        arr[-100:,:] = np.average(arr[-110:-100,:],
                                  axis=0)

clf.plot_random(no_of_spectra = 10, dataset = 'test')
###Output
_____no_output_____
###Markdown
Load and compile the model
###Code
clf.load_model(model_path = '/content/drive/My Drive/deepxps/runs/20210914_19h11m_FeCo_combined_without_auger_7_classes_no_window/model')
###Output
_____no_output_____
###Markdown
Plot summary and save model plot.
###Code
clf.summary()
clf.save_and_print_model_image()
###Output
_____no_output_____
###Markdown
Evaluate on test data
###Code
clf.logging.hyperparams['batch_size'] = 32

# evaluate() returns (loss, accuracy) for classification but only the loss
# for regression, hence the two branches.
if clf.task == 'classification':
    score = clf.evaluate()
    test_loss, test_accuracy = score[0], score[1]
    print('Test loss: ' + str(np.round(test_loss, decimals=8)))
    print('Test accuracy: ' + str(np.round(test_accuracy, decimals=3)))
elif clf.task == 'regression':
    test_loss = clf.evaluate()
    print('Test loss: ' + str(np.round(test_loss, decimals=8)))
###Output
_____no_output_____
###Markdown
Predict on train & test data
###Code
pred_train, pred_test = clf.predict()
# Discrete class predictions only exist for classification tasks.
if clf.task == 'classification':
    pred_train_classes, pred_test_classes = clf.predict_classes()
###Output
_____no_output_____
###Markdown
Show some predictions on random test samples
###Code
clf.plot_random(no_of_spectra = 15, dataset = 'test', with_prediction = True)
clf.datahandler.plot_spectra(no_of_spectra=20, dataset="test", indices=list(range(20)), with_prediction=True)
###Output
_____no_output_____
###Markdown
Show the worst predictions on the test samples
###Code
clf.show_worst_predictions(no_of_spectra = 10)
###Output
_____no_output_____
###Markdown
Save data
###Code
#clf.save_hyperparams()
clf.pickle_results()
###Output
_____no_output_____
###Markdown
Check where and why the predictions fail Show worst predictions for single spectra
###Code
clf.show_worst_predictions(no_of_spectra = 10, kind = 'single')
###Output
_____no_output_____
###Markdown
Show worst predictions for different loss thresholds (single spectra)
###Code
# Sweep progressively stricter loss thresholds and display the worst
# predictions on single spectra at each level.
for threshold in (0.2, 0.1, 0.05, 0.02, 0.01):
    clf.show_worst_predictions(no_of_spectra=10,
                               kind='single',
                               threshold=threshold)
###Output
_____no_output_____
###Markdown
Show worst predictions for linearly combined spectra
###Code
clf.show_worst_predictions(no_of_spectra = 10,
kind = 'linear_comb')
###Output
_____no_output_____
###Markdown
Show worst predictions for different loss thresholds (linearly combined spectra)
###Code
# Sweep progressively stricter loss thresholds and display the worst
# predictions on linearly combined spectra at each level.
for threshold in (0.3, 0.2, 0.1, 0.05, 0.025, 0.01, 0.005):
    clf.show_worst_predictions(no_of_spectra=10,
                               kind='linear_comb',
                               threshold=threshold)
###Output
_____no_output_____
###Markdown
Show worst predictions for all
###Code
clf.show_worst_predictions(no_of_spectra = 10,
kind = 'all')
###Output
_____no_output_____
###Markdown
Show worst predictions for different loss thresholds (all spectra)
###Code
# Sweep progressively stricter loss thresholds and display the worst
# predictions over all spectra at each level.
for threshold in (0.3, 0.2, 0.1, 0.05, 0.025, 0.01, 0.005,
                  0.001, 0.0005, 0.00025, 0.0001, 0.00001):
    clf.show_worst_predictions(no_of_spectra=10,
                               kind='all',
                               threshold=threshold)
###Output
_____no_output_____
###Markdown
Remove empty model directory
###Code
import shutil
# Remove the empty model directory of this run (see the markdown cell above);
# nothing was trained here, so no checkpoint was written into it.
shutil.rmtree(clf.logging.model_dir)
del(clf.logging.model_dir)
###Output
_____no_output_____
###Markdown
Save output of notebook
###Code
from IPython.display import Javascript, display
from nbconvert import HTMLExporter
def save_notebook():
    """Ask the browser (via injected JavaScript) to save the current notebook."""
    save_js = Javascript("IPython.notebook.save_notebook()")
    display(save_js, include=['application/javascript'])
def output_HTML(read_file, output_file):
    """Convert a notebook file to a standalone HTML file.

    Parameters
    ----------
    read_file : path of the source notebook ('.ipynb').
    output_file : path the rendered HTML is written to ('.html').
    """
    import codecs
    import nbformat
    exporter = HTMLExporter()
    # read_file is '.ipynb', output_file is '.html'
    output_notebook = nbformat.read(read_file, as_version=4)
    output, resources = exporter.from_notebook_node(output_notebook)
    # Use a context manager so the handle is flushed and closed even on error
    # (the original relied on garbage collection to close the file).
    with codecs.open(output_file, 'w', encoding='utf-8') as fh:
        fh.write(output)
import time
import os

# Sleeps presumably give the asynchronous browser-side save and the Drive
# sync time to complete before the file is re-read — TODO confirm timings.
time.sleep(20)
save_notebook()
print('Notebook saved!')
time.sleep(30)
current_file = '/content/drive/My Drive/deepxps/xpsdeeplearning/notebooks/predict.ipynb'
output_file = os.path.join(clf.logging.log_dir,
                           'predict_out.html')
output_HTML(current_file, output_file)
print('HTML file saved!')
###Output
_____no_output_____
###Markdown
Load predictor
###Code
%matplotlib inline
import os
import matplotlib
import numpy as np
import matplotlib.pyplot as plt

matplotlib.use("Agg")
# NOTE(review): selecting the Agg backend after `%matplotlib inline` and after
# importing pyplot may have no effect — confirm which backend is active.
os.getcwd()
os.chdir('/home/del/research/span_ae')
# Imported for its side effects — presumably registers the 'span_ae' model
# with AllenNLP so load_archive/from_archive can resolve it.
import span_ae
from allennlp.models.archival import load_archive
from allennlp.service.predictors import Predictor

archive = load_archive("models/baseline/model.tar.gz")
predictor = Predictor.from_archive(archive, 'span_ae')
###Output
_____no_output_____
###Markdown
Func
###Code
def predict_plot(sentence):
    """Run the span_ae predictor on one sentence and plot its attention matrix.

    Parameters
    ----------
    sentence : dict with a 'src' key holding the raw source string.

    Side effects: prints the original and predicted token sequences and draws
    a source-span vs. target-token attention heatmap with matplotlib.
    Relies on the notebook-global `predictor`.
    """
    # predict
    result = predictor.predict_json(sentence)
    attention_matrix = result['attention_matrix']
    predicted_tokens = result['predicted_tokens']
    survived_span_ids = result['top_spans']
    input_sentence = ['BOS'] + sentence['src'].split() + ['EOS']
    predicted_tokens = predicted_tokens + ['EOS']
    # Render each surviving span (inclusive index bounds) as its token text.
    survived_spans = []
    for span_id in survived_span_ids:
        ind_from = span_id[0]
        ind_to = span_id[1] + 1
        survived_spans.append(" ".join(input_sentence[ind_from:ind_to]))
    # Keep only the attention rows for tokens that were actually produced.
    attention_matrix_local = attention_matrix[0:len(predicted_tokens)]
    att_matrix_np = np.array([np.array(xi) for xi in attention_matrix_local])
    # print
    print('ORIGINAL :', " ".join(input_sentence))
    #print('TOP SPANs:', " \n ".join(survived_spans))
    print('PREDICTED:', " ".join(predicted_tokens))
    #print('span scores:', result['top_spans_scores'])
    print('\nAttention matrix:')  # fixed typo: was "Attnetion"
    # plot
    plt.figure(figsize=(9, 9), dpi= 80, facecolor='w', edgecolor='k')
    plt.imshow(att_matrix_np.transpose(), interpolation="nearest", cmap="Greys")
    plt.xlabel("target")
    plt.ylabel("source")
    plt.gca().set_xticks([i for i in range(0, len(predicted_tokens))])
    plt.gca().set_yticks([i for i in range(0, len(survived_spans))])
    plt.gca().set_xticklabels(predicted_tokens, rotation='vertical')
    plt.gca().set_yticklabels(survived_spans)
    plt.tight_layout()
###Output
_____no_output_____
###Markdown
Inference
###Code
# change it
sentence = "to school"
# do not change it
predict_plot({'src': sentence})

# change it
sentence = "school"
# do not change it
predict_plot({'src': sentence})

# change it
sentence = "it is spring already , but there are a lot of snow out there"
# do not change it
predict_plot({'src': sentence})

# NOTE(review): a stray bare `b` on its own line was removed here — it would
# raise NameError when the cell runs.

# change it
sentence = "let us discard our entire human knowledge"
# do not change it
predict_plot({'src': sentence})
###Output
ORIGINAL : BOS let us discard our entire human knowledge EOS
PREDICTED: let us discard our entire development knowledge EOS
Attnetion matrix:
###Markdown
Load Data and Build Model
###Code
seed_everything(42, workers=True)
DEVICE = torch.device("cuda:1")
# Load the trained detector checkpoint; earlier run directories are kept
# around as commented alternatives.
config, module, model, light_model = load_model_from_path(
    # "/shared/gbiamby/geo/models/geoscreens_009-resnest50_fpn-with_augs/",
    # "/home/gbiamby/proj/geoscreens/tools/output/keep/gs_012_extra_augs_more_epochs--geoscreens_012-model_faster_rcnn-bb_resnest50_fpn-36e514692a/",
    "/home/gbiamby/proj/geoscreens/tools/output/gs_urls_02b_013--geoscreens_013-model_faster_rcnn-bb_resnest50_fpn-2e71bb2f4d/",
    device=DEVICE,
)
# Inference only: put both the raw model and the Lightning wrapper in eval mode.
model, light_model = model.eval(), light_model.eval()
geoscreens_data = GeoScreensDataModule(config, module)
###Output
_____no_output_____
###Markdown
Show Some Training Samples
###Code
train_ds = geoscreens_data.train_ds
# Show an element of the train_ds with augmentation transformations applied
# (same index drawn three times, so each draw shows a different random augmentation).
samples = [train_ds[10] for _ in range(3)]
show_samples(samples, ncols=3)
###Output
_____no_output_____
###Markdown
Show some validation set samples
###Code
module.show_batch(first(geoscreens_data.val_dataloader()), ncols=4)
###Output
_____no_output_____
###Markdown
Show some predictions
###Code
# Visualize model predictions on validation samples at 0.5 confidence.
num_samples = 10
size = 30  # figure width in inches
module.show_results(
    light_model,
    geoscreens_data.valid_ds,
    num_samples=num_samples,
    detection_threshold=0.5,
    device=DEVICE,
    figsize=(size, (size * num_samples) / 2),
)
###Output
_____no_output_____
###Markdown
--- Prediction Testing Dataloader and Batching
###Code
from icevision.core import ClassMap
from icevision.core.record import BaseRecord
from icevision.core.record_components import ClassMapRecordComponent, ImageRecordComponent
from icevision.tfms import Transform
from PIL import Image
class GeoscreensInferenceDataset(object):
    """
    Only usable for inference.

    Provides a dataset over a folder with video frames in form::

        <video_id_1>/
            frame_....jpg
        <video_id_2>/
            frame_....jpg

    If no video_id specified, the dataset will loop over all <video_id>
    subfolders and include all frames in each.
    """

    def __init__(
        self,
        frames_path: Union[str, Path],
        class_map: ClassMap,
        video_ids: Union[int, List[int]] = None,
        tfm: Optional[Transform] = None,
    ):
        self.frames_path = Path(frames_path).resolve()
        assert self.frames_path.exists(), f"Frames path not found: {self.frames_path}"
        assert self.frames_path.is_dir(), f"Frames path is not a directory: {self.frames_path}"
        # Accept a single id as well as a list.  NOTE(review): the runtime
        # check is isinstance(..., str) although the annotation says int —
        # video ids appear to actually be strings; confirm and fix the hint.
        if video_ids and isinstance(video_ids, str):
            video_ids = [video_ids]
        elif video_ids is None:
            video_ids = []
        self.tfm = tfm
        self.class_map = class_map
        self.frames = []
        record_id: int = 0
        print("video_ids")
        for video_id in video_ids:
            frames = sorted((self.frames_path / video_id).glob("*.jpg"))
            print("Num frames found: ", len(frames))
            for f in frames:
                record = BaseRecord((ImageRecordComponent(),))
                record.set_record_id(record_id)
                # TODO, HACK: adding class map because of `convert_raw_prediction`
                record.add_component(ClassMapRecordComponent(task=tasks.detection))
                if class_map is not None:
                    record.detection.set_class_map(class_map)
                # File names look like "frame_<idx>-<seconds>s.jpg".
                parts = f.stem.replace("frame_", "").replace("s", "").split("-")
                # Fixed: the original dict literal set "frame_idx" twice
                # (first to -1, then to the parsed value); the dead -1 entry
                # is removed.
                self.frames.append(
                    {
                        "video_id": video_id,
                        "file_path": f,
                        "frame_idx": int(parts[0]),
                        "seconds": round(float(parts[1]), 2),
                        "record": record,
                    }
                )
                record_id += 1

    def __len__(self):
        return len(self.frames)

    def __getitem__(self, i: int):
        # Load the image lazily, attach it to the pre-built record, and apply
        # the inference transform (if any).
        meta = self.frames[i]
        record = meta["record"]
        img = np.array(Image.open(str(meta["file_path"])))
        record.set_img(img)
        record.load()
        if self.tfm is not None:
            record = self.tfm(record)
        return record

    def __repr__(self):
        # Fixed: the original referenced self.records, which does not exist,
        # so repr() raised AttributeError.
        return f"<{self.__class__.__name__} with {len(self.frames)} items>"
# video_path = Path("/shared/gbiamby/geo/video_frames/pF9OA332DPk.mp4")
frames_path = "/shared/gbiamby/geo/video_frames"
infer_tfms = tfms.A.Adapter(
    [*tfms.A.resize_and_pad(config.dataset_config.img_size), tfms.A.Normalize()]
)
# Fixed: this cell referenced `GeoscreensFramesDataset`, which is not defined
# anywhere in this notebook — the dataset class defined above is
# `GeoscreensInferenceDataset` (same constructor signature).
infer_ds = GeoscreensInferenceDataset(
    frames_path, geoscreens_data.parser.class_map, "pF9OA332DPk", infer_tfms
)
infer_dl = module.infer_dl(infer_ds, batch_size=8, shuffle=False, num_workers=16)
print("len ds: ", len(infer_ds))
preds = module.predict_from_dl(model, infer_dl, detection_threshold=0.5)
preds
preds[0].pred

# Sanity check of the frame-filename parsing scheme used by the dataset.
f_name = "frame_00039798-001326.600s.jpg"
parts = f_name.replace("frame_", "").replace(".jpg", "").split("-")
frame_idx = int(parts[0])
seconds = round(float(parts[1].replace("s", "")), 2)
frame_idx, seconds
def get_detections_from_generator():
    # NOTE(review): notebook scratch — reads `frame`, `frame_counter` and
    # `detections` from the enclosing notebook scope; not callable standalone.
    # Wraps a single frame in an icevision Dataset, runs detection on it, and
    # stores a JSON-serializable version of the detections keyed by the
    # current frame counter.
    raw_frames = [np.array(frame)]
    infer_ds = Dataset.from_images(
        raw_frames, infer_tfms, class_map=geoscreens_data.parser.class_map
    )
    preds = module.predict(model, infer_ds, detection_threshold=0.5)
    if preds:
        assert len(preds) == 1, "Expected list of size 1."
        preds = preds[0]
        # Convert tensors/objects to plain ints/floats for serialization.
        detections[frame_counter] = {
            "label_ids": [int(l) for l in preds.detection.label_ids],
            "scores": preds.detection.scores.tolist(),
            "bboxes": [
                {
                    "xmin": float(box.xmin),
                    "ymin": float(box.ymin),
                    "xmax": float(box.xmax),
                    "ymax": float(box.ymax),
                }
                for box in preds.detection.bboxes
            ],
        }
@timeit_context("")
def get_frames_wrapper(fn, config, video_path):
    """Materialize every frame yielded by ``fn(config, video_path)`` into a
    list, timing the full pass via ``timeit_context``."""
    return list(fn(config, video_path))
def get_indices_to_sample(config, total_frames: int, fps: float) -> List[int]:
    """Return the frame indices to sample from a video.

    Picks evenly spaced positions in ``[0, total_frames)`` so that roughly
    ``config.frame_sample_rate_fps`` frames are taken per second of video
    running at ``fps``.
    """
    num_samples = int(total_frames * (config.frame_sample_rate_fps / fps))
    positions = np.linspace(
        start=0.0, stop=total_frames, num=num_samples, endpoint=False
    )
    return [int(p) for p in positions]
# def get_frames_generator_opencv(
# config: DictConfig,
# video_path: Path,
# ):
# print("Segmenting video: ", video_path)
# error_state = False
# cap = cv2.VideoCapture(str(video_path))
# if not cap.isOpened():
# print("Error opening input video: {}".format(video_path))
# return
# num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# fps = cap.get(cv2.CAP_PROP_FPS)
# sample_indices = get_indices_to_sample(config, num_frames, fps)
# print(f"total_frames: {num_frames:,}, num_to_sample: {len(sample_indices):,}, fps: {fps}")
# print("config.frame_sample_rate_fps: ", config.frame_sample_rate_fps)
# for frame_counter in tqdm(range(len(sample_indices)), total=len(sample_indices)):
# frame_idx = sample_indices[frame_counter]
# if config.fast_debug and frame_counter >= config.debug_max_frames:
# break
# seconds = frame_idx / fps
# cap.set(cv2.CAP_PROP_POS_MSEC, (seconds * 1000))
# ret, frame = cap.read()
# if not ret:
# raise Error(f"Error while processing video_id: {video_path} (ret:{ret}")
# break
# yield (seconds, frame_idx, frame)
# video_path = Path("/home/gbiamby/proj/geoscreens/data/videos/pF9OA332DPk.mp4")
# config = DictConfig(
# {
# "frame_sample_rate_fps": 4.0,
# "fast_debug": False,
# "debug_max_frames": 300,
# }
# )
# frames_cv = get_frames_wrapper(get_frames_generator_opencv, config, video_path)
# print("num_frames sampled: ", len(frames_cv))
from decord import VideoReader, cpu, gpu
def get_frames_generator_decord(config, video_path):
    # Lazily yield (frame_idx, seconds, frame) tuples for the frames selected
    # by get_indices_to_sample, decoding with decord on CPU.
    vr = VideoReader(str(video_path), ctx=cpu(0))
    sample_indices = get_indices_to_sample(config, len(vr), vr.get_avg_fps())
    print(
        f"num_frames: {len(vr):,}, num_to_sample: {len(sample_indices):,}, fps: {vr.get_avg_fps()}"
    )
    print("config.frame_sample_rate: ", config.frame_sample_rate_fps)
    for sample_idx in tqdm(range(len(sample_indices)), total=len(sample_indices)):
        frame_idx = sample_indices[sample_idx]
        # fast_debug caps the number of yielded frames for quick iteration.
        if config.fast_debug and sample_idx >= config.debug_max_frames:
            break
        frame = vr[frame_idx]  # decord array — consumers call .asnumpy() on it
        seconds = round(frame_idx / vr.get_avg_fps(), 2)
        yield (frame_idx, seconds, frame)
# video_path = Path("/home/gbiamby/proj/geoscreens/data/videos/pF9OA332DPk.mp4")
# config = DictConfig(
# {
# "frame_sample_rate_fps": 4.0,
# "fast_debug": True,
# "debug_max_frames": 30,
# "video_frames_path": "/home/gbiamby/proj/geoscreens/data/video_frames",
# }
# )
# frames_decord = get_frames_wrapper(get_frames_generator_decord, config, video_path)
# print("num_frames sampled: ", len(frames_decord))
# frames_decord[:10], frames_decord[:-10]
from typing import Callable
@timeit_context("extract_frames")
def extract_frames(config: DictConfig, video_path: Path, get_frames_fn: Callable):
    # Dump the sampled frames of one video as JPEGs under
    # <config.video_frames_path>/<video stem>/.
    frames_path = Path(config.video_frames_path) / video_path.stem
    frames_path.mkdir(exist_ok=True, parents=True)
    print("Saving frames to: ", frames_path)
    for frame_idx, seconds, frame in get_frames_fn(config, video_path):
        # File name encodes frame index and timestamp
        # (e.g. frame_00039798-001326.600s.jpg — parsed later by the dataset).
        frame_out_path = frames_path / f"frame_{frame_idx:08}-{seconds:010.3f}s.jpg"
        # `frame` comes from decord (has .asnumpy()) in RGB order;
        # cv2.imwrite expects BGR, hence the conversion.
        cv2.imwrite(str(frame_out_path), cv2.cvtColor(frame.asnumpy(), cv2.COLOR_RGB2BGR))
# Extract frames for one video at 4 fps (full run; debug capping disabled).
video_path = Path("/shared/g-luo/geoguessr/videos/pF9OA332DPk.mp4")
config = DictConfig(
    {
        "frame_sample_rate_fps": 4.0,
        "fast_debug": False,
        "debug_max_frames": 30,
        "video_frames_path": "/shared/gbiamby/geo/video_frames",
    }
)
extract_frames(config, video_path, get_frames_generator_decord)
from multiprocessing import Pool
def extract_frames_fake(config: DictConfig, video_path: Path, get_frames_fn: Callable):
    # Stub used to exercise the multiprocessing plumbing below: it only
    # creates the per-video output directory and extracts nothing.
    # `get_frames_fn` is accepted for signature parity but unused.
    frames_path = Path(config.video_frames_path) / video_path.stem
    frames_path.mkdir(exist_ok=True, parents=True)
    print("Saving frames to: ", frames_path)
def process_videos_muli_cpu(config: DictConfig):
    """Run ``extract_frames_fake`` for every video in parallel worker processes.

    Fixes two bugs in the original:

    * ``pool.map(extract_frames_fake, (config, files))`` called the worker
      once with ``config`` and once with ``files`` instead of once per video
      with its full argument tuple — use ``starmap`` over per-file tuples.
    * ``pool.map``/``starmap`` return a plain list, which has no ``.get``
      (that is an ``AsyncResult`` method), so the old ``result.get(timeout=1)``
      would raise AttributeError.
    """
    files = sorted(Path(config.videos_path).glob("*.mp4"))
    print(len(files))
    with Pool(processes=config.num_workers) as pool:
        # extract_frames_fake ignores its get_frames_fn argument, so None is safe.
        results = pool.starmap(extract_frames_fake, [(config, f, None) for f in files])
    print(results)
# Create the per-video frame directories for all videos using 4 workers.
config = DictConfig(
    {
        "frame_sample_rate_fps": 4.0,
        "fast_debug": False,
        "debug_max_frames": 30,
        "video_frames_path": "/shared/gbiamby/geo/video_frames",
        "videos_path": "/shared/g-luo/geoguessr/videos",
        "num_workers": 4,
    }
)
process_videos_muli_cpu(config)
# from geoscreens.utils import timeit_context
# # Using the decord batching is somehow slower than just using the VideoReader indexing, i.e,
# # get_frames_generator_decord().
# @timeit_context("get_frames_generator_decord_batched")
# def get_frames_generator_decord_batched(config, video_path):
# vr = VideoReader(str(video_path), ctx=cpu(0))
# indices = get_indices_to_sample(config, len(vr), vr.get_avg_fps())
# print(f"num_frames: {len(vr):,}, fps: {vr.get_avg_fps()}")
# print("config.frame_sample_rate: ", config.frame_sample_rate_fps)
# if config.fast_debug and len(indices) > config.debug_max_frames:
# indices = indices[: config.debug_max_frames]
# frames = vr.get_batch(indices).asnumpy()
# yield from frames
# video_path = Path("/shared/g-luo/geoguessr/videos/pF9OA332DPk.mp4")
# config = DictConfig(
# {
# "frame_sample_rate_fps": 4.0,
# "fast_debug": True,
# "debug_max_frames": 10000,
# }
# )
# frames = get_frames_wrapper(get_frames_generator_decord_batched, config, video_path)
# print("num_frames sampled: ", len(frames))
# To get multiple frames at once, use get_batch
# this is the efficient way to obtain a long list of frames
# NOTE(review): relies on a `vr` VideoReader left over from an earlier
# (now commented-out) cell — define one before running this cell.
frames = vr.get_batch([1, 3, 5, 7, 9])
print(frames.shape)
# (5, 240, 320, 3)
# duplicate frame indices will be accepted and handled internally to avoid duplicate decoding
frames2 = vr.get_batch([1, 2, 3, 2, 3, 4, 3, 4, 5]).asnumpy()
print(frames2.shape)
# (9, 240, 320, 3)
# 2. you can do cv2 style reading as well
# skip 100 frames
vr.skip_frames(100)
# seek to start
vr.seek(0)
batch = vr.next()
print("frame shape:", batch.shape)
print("numpy frames:", batch.asnumpy())
# from torchvision import transforms as t
# from torchvision.datasets.folder import make_dataset
# def get_samples(root, extensions=(".mp4", ".avi")):
# _, class_to_idx = _find_classes(root)
# return make_dataset(root, class_to_idx, extensions=extensions)
# class RandomDataset(torch.utils.data.IterableDataset):
# def __init__(
# self, root, epoch_size=None, frame_transform=None, video_transform=None, clip_len=16,
# video_id: str =
# ):
# super(RandomDataset).__init__()
# self.samples = []
# # Allow for temporal jittering
# if epoch_size is None:
# epoch_size = len(self.samples)
# self.epoch_size = epoch_size
# self.clip_len = clip_len
# self.frame_transform = frame_transform
# self.video_transform = video_transform
# def __iter__(self):
# for i in range(self.epoch_size):
# # Get random sample
# path, target = random.choice(self.samples)
# # Get video object
# vid = torchvision.io.VideoReader(path, "video")
# metadata = vid.get_metadata()
# video_frames = [] # video frame buffer
# # Seek and return frames
# max_seek = metadata["video"]["duration"][0] - (
# self.clip_len / metadata["video"]["fps"][0]
# )
# start = random.uniform(0.0, max_seek)
# for frame in itertools.islice(vid.seek(start), self.clip_len):
# video_frames.append(self.frame_transform(frame["data"]))
# current_pts = frame["pts"]
# # Stack it into a tensor
# video = torch.stack(video_frames, 0)
# if self.video_transform:
# video = self.video_transform(video)
# output = {
# "path": path,
# "video": video,
# "target": target,
# "start": start,
# "end": current_pts,
# }
# yield output
###Output
_____no_output_____
###Markdown
--- Naive Detection of Bad Ground Truth Lables
###Code
# Flag annotation tasks whose first annotation uses the same rectangle label
# more than once — a cheap heuristic for bad ground truth.
# NOTE(review): the file handle passed to json.load is never closed.
tasks = json.load(
    open("/shared/gbiamby/geo/exports/geoscreens_009-from_proj_id_58.json", "r", encoding="utf-8")
)
mistakes = []
for i, t in enumerate(tqdm(tasks, total=len(tasks))):
    # if i >= 10:
    #     break
    # print("")
    anns_results = [ann["result"] for ann in t["annotations"]]
    # print(anns_results)
    # print([ann for ann in anns_results])
    labels = [ann["value"]["rectanglelabels"][0] for ann in anns_results[0]]
    # Duplicate labels within one annotation => suspect task.
    if len(labels) != len(set(labels)):
        mistakes.append(t)

len(mistakes)
[m["data"] for m in mistakes]
[m["data"] for m in mistakes]
# Inspect the annotations of one specific frame, matched by path substring.
for i, t in enumerate(tqdm(tasks, total=len(tasks))):
    # if i >= 10:
    #     break
    if "aob8sh6l-6M/frame_00000221" in t["data"]["image"]:
        print("")
        print(t["id"], t["data"]["image"])
        anns_results = [ann["result"] for ann in t["annotations"]]
        print("anns_results: ", anns_results, len(anns_results))
        labels = [ann["value"]["rectanglelabels"][0] for ann in anns_results[0]]
        print("labels: ", labels)
###Output
_____no_output_____
###Markdown
--- Scratch / Junk Find/FIlter Duplicates
###Code
# Group tasks by image path and find paths annotated more than once.
path_to_task = defaultdict(list)
for t in tasks:
    path_to_task[t["data"]["full_path"]].append(t)
print(len(tasks), len(path_to_task))
c = Counter([t["data"]["full_path"] for t in tasks])
dupes = [k for k, v in c.items() if v > 1]
print("total dupes: ", len(dupes))

# For each duplicated path, keep only the task with the highest id and mark
# the rest for removal.
to_remove = []
for path in dupes:
    print("")
    print("=" * 100)
    # NOTE(review): task_blobs is only used by the commented-out debug print.
    task_blobs = [json.dumps(t, sort_keys=True) for t in path_to_task[path]]
    ann_ids = [t["id"] for t in path_to_task[path]]
    max_id = max(ann_ids)
    # print("ann_ids: ", path_to_task[path])
    print("ann_ids: ", ann_ids)
    # for t in task_blobs:
    #     print("")
    #     print(t)
    print("Removing: ")
    for t in path_to_task[path]:
        if t["id"] != max_id:
            print("Removing task_id: ", t["id"])
            to_remove.append((t["id"], path))

to_remove
# Rebuild the task list without the flagged duplicates.
tasks_filtered = []
for t in tasks:
    if (t["id"], t["data"]["full_path"]) in to_remove:
        continue
    tasks_filtered.append(t)
print(len(tasks), len(tasks_filtered))
###Output
_____no_output_____
###Markdown
Save
###Code
# Persist the de-duplicated task list.
# NOTE(review): the file handle passed to json.dump is never closed.
json.dump(
    tasks_filtered,
    open(Path("/shared/gbiamby/geo/geoscreens_004_tasks_with_preds.json"), "w"),
    indent=4,
    sort_keys=True,
)
###Output
_____no_output_____
###Markdown
--- ---
###Code
213 % 10, 213 // 10
###Output
_____no_output_____
###Markdown
**Deviation scores using normative models based on deep autoencoders**Here in this notebook, we implemented an easy way to you try our normative models trained on the [UK Biobank](https://www.ukbiobank.ac.uk/) dataset. **Disclaimer**: this script can not be used for clinical purposes.Let's start!--- Set this notebook's hardware acceleratorFirst, you'll need to enable the use of Google's [GPUs](https://cloud.google.com/gpu) (graphics processing unit) for this notebook:- Navigate to Edit→Notebook Settings- Select GPU from the Hardware Accelerator drop-downThese GPUs allow us to perform the deep learning model's calculation in a faster way! --- Download trained modelsNext, we will load the trained normative models based on adversarial autoencoders into this colab environment. During our study, we trained normative models on the UK Biobank using the resampling method called bootstrap method. By using this resampling method, we trained 1,000 different models, each one using a different bootstraped datasets as training set (containing 11,032 brain scans) (check Section 2.4. Normative model training of our paper for more information).Structure of the normative model based on adversarial autoencoders. In this configuration, the subject data is inputted into the encoder and then mapped to the latent code. This latent code is fed to the decoder with the demographic data, and then the decoder generates a reconstruction of the original data. During the training of the model, the discriminator is used to shape the distribution of the latent code. Since the model is trained on healthy controls data, it can reconstruct similar data relatively well, yielding to a small reconstruction error. However, the model would generate a high error when processing data affected by unseen underlying mechanisms, e.g. pathological mechanisms. For each normative model, we had others auxiliary components, like data scalers and demographic data preprocessors. 
During training, all these components were stored and are available at https://www.dropbox.com/s/bs89t2davs1p2dm/models_for_normative_paper_2019.zip?dl=0 . This link contains a compressed file that have all files created using the [bootstrap_train_aae_supervised.py](https://github.com/Warvito/Normative-modelling-using-deep-autoencoders/blob/master/bootstrap_train_aae_supervised.py) script. The models files are organized in subdirectories where each one correspond to a bootstrap iteration.Besides the models, the zipped file contains two templates files (used later in this notebook).In the following cell, we download the compressed file.
###Code
!wget -O models.zip --no-check-certificate https://www.dropbox.com/s/bs89t2davs1p2dm/models_for_normative_paper_2019.zip?dl=0
###Output
--2019-12-04 17:19:52-- https://www.dropbox.com/s/bs89t2davs1p2dm/models_for_normative_paper_2019.zip?dl=0
Resolving www.dropbox.com (www.dropbox.com)... 162.125.65.1, 2620:100:6021:1::a27d:4101
Connecting to www.dropbox.com (www.dropbox.com)|162.125.65.1|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: /s/raw/bs89t2davs1p2dm/models_for_normative_paper_2019.zip [following]
--2019-12-04 17:19:52-- https://www.dropbox.com/s/raw/bs89t2davs1p2dm/models_for_normative_paper_2019.zip
Reusing existing connection to www.dropbox.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com/cd/0/inline/AtlJvpsEA_66AIcgy486EG8_s3tU9jKWLCgXv9vqSDZKlAIxB-FfvxA04RxpLTjQrfo52zhyJPaBJG58utLO55kGsqouqyLbI6OFFTBpCiVOXMW5uztWIlQ1W3sIzuDjPvI/file# [following]
--2019-12-04 17:19:52-- https://uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com/cd/0/inline/AtlJvpsEA_66AIcgy486EG8_s3tU9jKWLCgXv9vqSDZKlAIxB-FfvxA04RxpLTjQrfo52zhyJPaBJG58utLO55kGsqouqyLbI6OFFTBpCiVOXMW5uztWIlQ1W3sIzuDjPvI/file
Resolving uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com (uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com)... 162.125.65.6, 2620:100:6021:6::a27d:4106
Connecting to uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com (uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com)|162.125.65.6|:443... connected.
HTTP request sent, awaiting response... 302 FOUND
Location: /cd/0/inline2/AtlLi--wvJvlr-pBGsL0IhziyaKce4DsjQ-t0ytExD2PUUH3RQxgFwPI6YIvOjU_hWhbHcb4oH8N9Ih3Riy0FuZIahE7uZmFlpiQpSGY7j9MK-n-8dcwa7eZJ5T6Q9w1QQuM8FPgcie4YV5DXJc_9TRCjUUOb6Mjx6SZVAYo5cP5sJfjIP15KQLAvrCBf2GpGiLSq_m7xbOhb2uMqug8ITsB478CvZ01O6sYoOm857HWwaur4TxB2H79hbxwajkKxB4XRJDp6YY06ZNRRbfMbJXM3_frdt78oi2gnQgxTlLvipPODfp759-jJKzrS-iDX7zTZeZssqc7pfgOpN9bqRmD4evevSGvcoivwznCRXaPVA/file [following]
--2019-12-04 17:19:53-- https://uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com/cd/0/inline2/AtlLi--wvJvlr-pBGsL0IhziyaKce4DsjQ-t0ytExD2PUUH3RQxgFwPI6YIvOjU_hWhbHcb4oH8N9Ih3Riy0FuZIahE7uZmFlpiQpSGY7j9MK-n-8dcwa7eZJ5T6Q9w1QQuM8FPgcie4YV5DXJc_9TRCjUUOb6Mjx6SZVAYo5cP5sJfjIP15KQLAvrCBf2GpGiLSq_m7xbOhb2uMqug8ITsB478CvZ01O6sYoOm857HWwaur4TxB2H79hbxwajkKxB4XRJDp6YY06ZNRRbfMbJXM3_frdt78oi2gnQgxTlLvipPODfp759-jJKzrS-iDX7zTZeZssqc7pfgOpN9bqRmD4evevSGvcoivwznCRXaPVA/file
Reusing existing connection to uc009c4a272cfa1056b31be39219.dl.dropboxusercontent.com:443.
HTTP request sent, awaiting response... 200 OK
Length: 185725753 (177M) [application/zip]
Saving to: ‘models.zip’
models.zip 100%[===================>] 177.12M 47.7MB/s in 7.0s
2019-12-04 17:20:00 (25.5 MB/s) - ‘models.zip’ saved [185725753/185725753]
###Markdown
--- Unzip models filesAfter downloading the compressed file, we need to unzip it in our Colab environment.
###Code
!unzip models.zip
###Output
_____no_output_____
###Markdown
To see the unzipped models, go to “Files” in the Google colab environment. If the Google colab environment is not shown, click in the arrow mark which looks like “>” at the left-hand side of the cells. When you click that you will find a tab with three options, just select “Files” to explore the loaded unzipped models. --- Import Python librariesNow, we will start to use the necessary Python code to make our predictions. But first let's import all the necessary Python modules for our processing.
###Code
%tensorflow_version 2.x
from pathlib import Path
import warnings
import joblib
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from google.colab import files
from tqdm import tqdm
###Output
TensorFlow 2.x selected.
###Markdown
--- Download freesurferData.csv and participants.tsv templatesIn order to make predictions on your data, it is necessary to provide it in a format that can be correctly read by this script. To facilitate this process, we supply the template files to be filled with your data. As shown below, these template files contain the names of the necessary columns to run the script.
###Code
# Preview the expected input formats: FreeSurfer volumes (CSV) and
# participant demographics (tab-separated values).
pd.read_csv('templates/freesurferData.csv')
pd.read_csv('templates/participants.tsv', sep='\t')
###Output
_____no_output_____
###Markdown
* Note: The column with gender is codified as 0 = "Female" and 1 = "Male".The next cells will start the download of the templates.---
###Code
# Trigger browser downloads of the two template files so the user can
# fill them in with their own data.
files.download('templates/freesurferData.csv')
files.download('templates/participants.tsv')
###Output
_____no_output_____
###Markdown
After filling the templates, upload the files to the Google colab environment.**Note: You can create the freesurferData.csv file using our colab script on this** [link](https://colab.research.google.com/github/Warvito/Normative-modelling-using-deep-autoencoders/blob/master/notebooks/freesurfer_organizer.ipynb).Note 2: Your data will only be loaded in this runtime of the Google colab. This code is being executed on the Google Cloud Platform by default, and you are not making your data available to our team. If you are concerned about uploading your data to the Google Cloud Platform, please consider executing this notebook in a local runtime on your computer (https://research.google.com/colaboratory/local-runtimes.html). First, start uploading the freesurferData.csv.
###Code
# Remove freesurferData.csv if it exists
!rm freesurferData.csv

# Prompt the user for a file upload; `uploaded` maps filename -> raw bytes.
uploaded = files.upload()

for fn in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format(name=fn, length=len(uploaded[fn])))

# NOTE(review): `fn` is the last filename from the loop above; a single-file
# upload is assumed here.
freesurfer_data_df = pd.read_csv(fn)
freesurfer_data_df
###Output
_____no_output_____
###Markdown
Then, upload the participants.tsv file.
###Code
# Remove participants.tsv if it exists
!rm participants.tsv

# Prompt the user for the demographics file upload.
uploaded = files.upload()

for fn2 in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format(name=fn2, length=len(uploaded[fn2])))

participants_df = pd.read_csv(fn2, sep='\t')
participants_df

# Join imaging data with demographics on the shared participant identifier.
dataset_df = pd.merge(freesurfer_data_df, participants_df, on='Participant_ID')
dataset_df
###Output
_____no_output_____
###Markdown
--- Predict the deviation scoresAfter loading the data, we predict the deviations of the new data based on our trained normative models.We begin the processing by setting the random seeds.
###Code
# Set random seed
# Fix both TensorFlow's and NumPy's RNGs so the predictions are reproducible.
random_seed = 42
tf.random.set_seed(random_seed)
np.random.seed(random_seed)
###Output
_____no_output_____
###Markdown
Next, we define the name of the brain regions in the variable COLUMNS_NAME.
###Code
#@title
# FreeSurfer regional volume columns used as model inputs: 33 subcortical /
# global measures followed by the 34 Desikan-Killiany cortical volumes for
# each hemisphere (left, then right) -- 101 regions in total.
_SUBCORTICAL_REGIONS = [
    'Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent',
    'Left-Cerebellum-White-Matter', 'Left-Cerebellum-Cortex',
    'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen', 'Left-Pallidum',
    '3rd-Ventricle', '4th-Ventricle', 'Brain-Stem',
    'Left-Hippocampus', 'Left-Amygdala', 'CSF',
    'Left-Accumbens-area', 'Left-VentralDC',
    'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent',
    'Right-Cerebellum-White-Matter', 'Right-Cerebellum-Cortex',
    'Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen',
    'Right-Pallidum', 'Right-Hippocampus', 'Right-Amygdala',
    'Right-Accumbens-area', 'Right-VentralDC',
    'CC_Posterior', 'CC_Mid_Posterior', 'CC_Central',
    'CC_Mid_Anterior', 'CC_Anterior',
]
# Cortical parcellation names, identical for both hemispheres.
_CORTICAL_REGIONS = [
    'bankssts', 'caudalanteriorcingulate', 'caudalmiddlefrontal', 'cuneus',
    'entorhinal', 'fusiform', 'inferiorparietal', 'inferiortemporal',
    'isthmuscingulate', 'lateraloccipital', 'lateralorbitofrontal', 'lingual',
    'medialorbitofrontal', 'middletemporal', 'parahippocampal', 'paracentral',
    'parsopercularis', 'parsorbitalis', 'parstriangularis', 'pericalcarine',
    'postcentral', 'posteriorcingulate', 'precentral', 'precuneus',
    'rostralanteriorcingulate', 'rostralmiddlefrontal', 'superiorfrontal',
    'superiorparietal', 'superiortemporal', 'supramarginal', 'frontalpole',
    'temporalpole', 'transversetemporal', 'insula',
]
COLUMNS_NAME = (
    _SUBCORTICAL_REGIONS
    + ['lh_{}_volume'.format(region) for region in _CORTICAL_REGIONS]
    + ['rh_{}_volume'.format(region) for region in _CORTICAL_REGIONS]
)
###Output
_____no_output_____
###Markdown
Then, we calculate the relative brain region volumes (original volume divided by the total intracranial volume).
###Code
# Get the relative brain region volumes
x_dataset = dataset_df[COLUMNS_NAME].values
tiv = dataset_df['EstimatedTotalIntraCranialVol'].values
# Add a trailing axis so the division broadcasts across the region columns.
tiv = tiv[:, np.newaxis]
x_dataset = (np.true_divide(x_dataset, tiv)).astype('float32')
###Output
_____no_output_____
###Markdown
Next, we iterate over all models performing the calculation of the deviations. In our paper, we define the **deviation score as the mean squared error** between the autoencoder's reconstruction and the inputted data (more details in the Section 2.5 Analysis of the observed deviation).**Note**: if the age of someone is lower than 47 or higher than 73, the age value will be clipped to be inside the range (47, 73). For example, if someone has age = 40, it will be rounded to 47. We performed this clipping because the age is an important variable for conditioning the predictions of our model.
###Code
warnings.filterwarnings('ignore')

model_dir = Path('models')
# Number of bootstrap-resampled models in the downloaded ensemble.
N_BOOTSTRAP = 1000

# Create dataframe to store outputs
reconstruction_error_df = pd.DataFrame(columns=['Participant_ID'])
reconstruction_error_df['Participant_ID'] = dataset_df['Participant_ID']

# ----------------------------------------------------------------------------
for i_bootstrap in tqdm(range(N_BOOTSTRAP)):
    bootstrap_model_dir = model_dir / '{:03d}'.format(i_bootstrap)

    # ----------------------------------------------------------------------------
    # Load this bootstrap's autoencoder halves and the preprocessing objects
    # fitted on its training split (hence reloaded every iteration).
    encoder = keras.models.load_model(bootstrap_model_dir / 'encoder.h5', compile=False)
    decoder = keras.models.load_model(bootstrap_model_dir / 'decoder.h5', compile=False)
    scaler = joblib.load(bootstrap_model_dir / 'scaler.joblib')
    enc_age = joblib.load(bootstrap_model_dir / 'age_encoder.joblib')
    enc_gender = joblib.load(bootstrap_model_dir / 'gender_encoder.joblib')

    # ----------------------------------------------------------------------------
    x_normalized = scaler.transform(x_dataset)

    # ----------------------------------------------------------------------------
    # Age is clipped to [47, 73] because the model is conditioned on age and
    # the one-hot encoder only covers that range (see the notebook text above).
    age = dataset_df['Age'].values
    age = np.clip(age, 47, 73)
    age = age[:, np.newaxis].astype('float32')
    one_hot_age = enc_age.transform(age)

    gender = dataset_df['Gender'].values[:, np.newaxis].astype('float32')
    one_hot_gender = enc_gender.transform(gender)

    # Conditioning vector fed to the decoder alongside the latent code.
    y_data = np.concatenate((one_hot_age, one_hot_gender), axis=1).astype('float32')

    # ----------------------------------------------------------------------------
    encoded = encoder(x_normalized, training=False)
    reconstruction = decoder(tf.concat([encoded, y_data], axis=1), training=False)

    # ----------------------------------------------------------------------------
    # Deviation score: per-participant mean squared reconstruction error.
    reconstruction_error = np.mean((x_normalized - reconstruction) ** 2, axis=1)
    reconstruction_error_df[('Reconstruction error {:03d}'.format(i_bootstrap))] = reconstruction_error
###Output
100%|██████████| 1000/1000 [01:37<00:00, 9.42it/s]
###Markdown
Finally, we compute the mean deviation score and save the file with all scores.
###Code
# Average the per-bootstrap reconstruction errors (all columns after
# 'Participant_ID') into one mean deviation score per participant, then save.
reconstruction_error_df['Mean reconstruction error'] = reconstruction_error_df[reconstruction_error_df.columns[1:]].mean(axis=1)
reconstruction_error_df
reconstruction_error_df.to_csv('reconstruction_error.csv', index=False)
###Output
_____no_output_____
###Markdown
Download predictionsFinally, you can download the result in the "Files" tab or executing the cell below.
###Code
# Download the per-participant deviation scores computed above.
files.download('reconstruction_error.csv')
###Output
_____no_output_____
###Markdown
Use the model deployed for prediction
###Code
import requests
import pandas as pd

# Load the evaluation data; 'label' is the target column, the rest are features.
df = pd.read_csv('../data/iris2.csv', index_col=False, header=0)
test_data = df
X = df.drop('label', axis=1)
Y = df.label

# Serialize the feature matrix as JSON in 'split' orientation
# (columns/index/data), the payload format the serving endpoint expects.
http_data = X.to_json(orient='split')
http_data

# POST the payload to the locally deployed model's /invocations endpoint
# and inspect the raw response body.
host = '127.0.0.1'
port = '1234'
url = f'http://{host}:{port}/invocations'
headers = {'Content-Type':'application/json'}
r = requests.post(url=url, headers= headers, data=http_data)
r.text
X.shape
###Output
_____no_output_____
###Markdown
Company Name matching ProblèmeLorsqu'un prospect rempli un formulaire, il saisit le nom de son entreprise. Afin de récolter d'avantage d'informations sur le prospect, il est nécessaire d'identifier l'entreprise, c'est à dire de connaitre son numéro SIREN. Cependant, un matching naif entre la saisie utilisateur et une base de référence Nom-Siren n'est pas satisfaisant.En effet la saisie utilisateur est imparfaite (ne correspond à aucun nom de la base de référence):* Erreurs de saisie* Noms non légaux : * Abbreviations * Département, région * Surnom * InversionQuelques exemples :
###Code
#! TODO : Ecrire exemple
###Output
_____no_output_____
###Markdown
Solution L'idée est donc de construire un **moteur de recherche**, qui à partir d'une requête (ici saisie du champ companyName) de l'utilisateur retrouve le document (ici le nom *standard* dans la base de référence), ce qui nous permettra de faire le lien entre la saisie du nom de l'entreprise et son numéro de SIREN.Pour ce faire, plusieurs pistes ont été explorées:* standardisation du nom puis matching naif* utilisation d'un moteur de recherche déjà entrainéEffectuer une requête sur google (ou autre) de la forme `companyName site:"societe.com"` et récupérer le premier résultat renvoyé. Cependant, le nombre de requêtes google est limité (10-20/heure sans astuce, 200/heure avec), ce qui ne permet pas de tester efficacement sur la base de données historique. Egalement, societe.com n'autorise pas l'utilisation de scraping à des fins commerciales. Pour ce qui est de la première limitation, il pourrait être envisageable d'utiliser cette méthode en production, car le flux de requêtes à effectuer est relativement faible. Pour la deuxième, utiliser l'adresse url du site suffit car elle est sous la forme `https://www.societe.com/societe/companyName-SIREN.html`. * Deux étapes : 1. Tokenisation et identification des stop words correspondants à des mots communs qui n'ajoutent pas d'information sur les entreprises 2. Identification du nom standard (dans la base de référence) C'est cette deuxième solution que nous avons trouvée la plus pertinente et la plus précise.
###Code
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
from collections import Counter
###Output
_____no_output_____
###Markdown
Chargement des bases de donnéesDeux bases de données différentes sont chargées : une base test, qui simule l'entrée utilisateur, et la base de référence, qui permet de faire le lien entre un nom standard de référence pour l'entreprise et son numéro SIREN. Ces données sous fichiers textes sont chargées dans des dataframes pandas. * **test_inputs** : On récupère les 500 plus grandes entreprises de France, collectée par un copier coller dans un fichier texte * **siren_df** : données SIREN-noms issues de l'api data gouv
###Code
# Test set simulating user-typed company names: the 500 largest French
# companies, pasted into a tab-separated file.
df = pd.read_csv("../data/top500verifCom.tsv",
                 names=["companyName", "postCode", "city", "CA"], sep="\t")
test_inputs = df[["companyName"]]
print(f"Taille du test set : {len(test_inputs)}")
test_inputs.head()

# Reference table mapping each company name to its SIREN number.
# Siren is read as string ('object') to preserve leading zeros.
siren_table_path = "../data/siren_table.csv"
siren_df = pd.read_csv(siren_table_path, dtype={"Siren": "object"})
print(f"Taille de la base de données de référence : {len(siren_df)}")
siren_df.head()
###Output
Taille de la base de données de référence : 1213321
###Markdown
CleaningEn examinant la base de données, les caractères et les motifs *polluants* ont été supprimés.
###Code
def clean_spaces(text):
    """Collapse runs of spaces, strip the ends, and return a sentinel token
    for names that end up empty.

    BUG FIX: the original single ``str.replace`` pass could not collapse
    longer runs of spaces (each pass only shrinks a run by half at best,
    e.g. 'a   b' -> 'a  b'); a regex collapses any run of consecutive
    spaces to one space in a single call.
    """
    text = re.sub(r' +', ' ', text)
    text = text.strip()
    if not text:
        # Sentinel for empty/blank names so downstream TF-IDF vectorization
        # never receives an empty document.
        return "unmeaningfulofatext"
    return text
# Pre-compiled patterns: `prep` is mapped over every row of large dataframes,
# so compiling once at module level avoids rebuilding the regexes per call.
_CHARS_TO_REMOVE = [")", "(", ".", "|", "[", "]", "{", "}", "'", ",", ";"]
_PUNCT_RE = re.compile('[' + re.escape(''.join(_CHARS_TO_REMOVE)) + ']')
_DIGITS_RE = re.compile(r'[0-9]+')

def prep(company):
    """
    Clean a company name as typed by a user.

    Arguments:
        company {string} -- company name, i.e. the companyName field of the Lead table

    Returns:
        string -- the company name with polluting characters removed:
        non-ASCII characters dropped, whitespace cleaned via clean_spaces,
        lower-cased, punctuation and digits deleted.
    """
    # Drop non-ASCII characters (accented letters are removed, not transliterated).
    company = company.encode("ascii", errors="ignore").decode()
    company = clean_spaces(company)
    company = company.lower()
    company = _PUNCT_RE.sub('', company)
    company = _DIGITS_RE.sub('', company)
    return company
prep("BNP PPAri,bas")
###Output
_____no_output_____
###Markdown
Application du preprocessing aux colonnes des deux tables et matching "un à un", parfait
###Code
# Normalise both tables with `prep`, then attempt an exact ("naive") left
# join on the cleaned name.
siren_df["companyNameClean"] = siren_df["companyName"].map(prep)
test_inputs["companyNameClean"] = test_inputs["companyName"].map(prep)
naif_result = test_inputs.merge(siren_df, on="companyNameClean", how="left")

# Percentage of test names with no exact match in the reference table.
missing_joins = naif_result["Siren"].isnull().sum()/len(test_inputs)*100
# a company name with no SIREN number in naif_result appears only once
print(f'{missing_joins:.2f} % de joins manquants sur les {len(test_inputs)} champs du test set. La jointure donne {len(naif_result)} champs')
naif_result[naif_result["Siren"].isnull()]
###Output
_____no_output_____
###Markdown
A étudier plus tard : noms non unique dans chiffre-cle-2020--> faire à la main pour les 500 plus grosses entreprises--> attention au merge Lors de la jointure, des noms d'entreprises correspondent à différents SIREN à droite. En effet, après preprocessing, les noms d'entreprise perdent leur unicité dans siren_table
###Code
# Count how often each cleaned name appears in the join result; counts > 1
# mean the cleaned name is ambiguous (maps to several SIREN rows).
g = naif_result.groupby("companyNameClean").size()
g.where(g>1).dropna()
###Output
_____no_output_____
###Markdown
Des entreprises apparaissent de nombreuses fois : après cleaning, leur nom n'est plus unique.
###Code
# Sample of test names whose cleaned form matches several reference rows.
naif_result[naif_result.duplicated(subset=["companyNameClean"], keep=False)].sample(10)

# Re-join on the raw (uncleaned) name and keep the rows where both joins
# agree on the SIREN number.
result_df_2 = naif_result.merge(siren_df, left_on="companyName_x", right_on="companyName", suffixes=("_a", "_b"))
result_df_2[result_df_2["Siren_a"]==result_df_2["Siren_b"]]
###Output
_____no_output_____
###Markdown
NLPOn utilise la méthode de fuzzy string matching [[Ukkonnen](https://www.sciencedirect.com/science/article/pii/S0019995885800462)], que l'on applique avec les stop words judicieusement définis pour les noms d'entreprise. TODO : Stop words Calcul du score de similaritésDans un premier temps, il s'agit de vectoriser chaque nom d'entreprise, à partir des tokens définis par la méthode ngram
###Code
def ngrams(string, n=3):
    """Return all contiguous character n-grams of ``string`` as a list.

    E.g. ngrams("abcd", 2) == ["ab", "bc", "cd"]; a string shorter than
    ``n`` yields an empty list.
    """
    # zip over the n shifted views of the string stops at the shortest one,
    # producing exactly max(0, len(string) - n + 1) grams.  The local is
    # renamed so it no longer shadows the function's own name.
    grams = zip(*[string[i:] for i in range(n)])
    return [''.join(gram) for gram in grams]
ngrams("benjamin")
def knn_reference(standard_names=[], k_matches=5, ngram_length=3):
    """Fit a k-nearest-neighbours model on the n-grams of ``standard_names``.

    Returns the fitted ``NearestNeighbors`` model together with the
    ``TfidfVectorizer`` used to embed the names, so that later queries can
    be projected into the same TF-IDF space.
    """
    # Embed every reference name as a TF-IDF vector over its character n-grams.
    tfidf = TfidfVectorizer(min_df=1, analyzer=lambda name: ngrams(name, ngram_length))
    reference_matrix = tfidf.fit_transform(standard_names)

    # Fit the k-NN index on that matrix, using cosine distance between vectors.
    knn_model = NearestNeighbors(n_neighbors=k_matches, n_jobs=-1, metric="cosine")
    knn_model.fit(reference_matrix)
    return knn_model, tfidf
neighbors, vectorizer = knn_reference(standard_names=siren_df['companyNameClean'].values)
###Output
_____no_output_____
###Markdown
La liste standard contient les noms standards de référence. * Chaque nom est découpé en ngrams.* On transforme chaque nom en une sparse matrix [tf-idf](https://medium.com/@cmukesh8688/tf-idf-vectorizer-scikit-learn-dbc0244a911a) grâce aux n-grams* A partir de cette matrice, on entraine un k-nn sur la base des noms standards* On transforme la liste input dans une matrice tf-idf* Calcule les distances et les voisins les plus proches* Calcule un match score* Synthétise ces résultats dans un dataframe
###Code
def matcher(input_names=[], standard_names=[], k_matches=5, ngram_length=3):
    """Return, for every name in ``input_names``, its ``k_matches`` closest
    candidates from ``standard_names`` with a similarity score.

    NOTE(review): relies on the module-level ``vectorizer`` and ``neighbors``
    objects built by ``knn_reference``; ``k_matches`` must equal the value the
    k-NN model was fitted with, and ``ngram_length`` is unused here.

    Arguments:
        input_names {string list} -- company names as typed by users, to be
            matched against a standard (clean) name linking to the SIREN number
        standard_names {string list} -- standard company names, already cleaned
        k_matches {int} -- number of matches to return
        ngram_length -- length of the n-grams

    Returns:
        DataFrame -- the original list plus, for each of the ``k_matches``
        nearest neighbours, its standard name, similarity score and index
    """
    # Nearest neighbours of the whole input set, computed in one batch.
    tf_idf_test_names = vectorizer.transform(input_names)
    distances, neighbors_indices = neighbors.kneighbors(tf_idf_test_names)

    # Gather the neighbour information into a dataframe.
    def get_matches(input):
        index = input.name
        # For each input row, collect (name, index, distance/confidence) of
        # every neighbour.
        neighbors_indexes = neighbors_indices[index]
        matches_infos = {}
        # BUG FIX: number neighbours starting at 1 so the Series keys line up
        # with the "Match #1".."Match #k" columns created below; the previous
        # 0-based numbering produced keys "Match #0".."Match #k-1", and the
        # label-aligned `.loc` assignment left every match column unfilled.
        for neighbor_number, neighbor_index in enumerate(neighbors_indexes, start=1):
            correspondance = standard_names[neighbor_index]
            distance = distances[index][neighbor_number - 1]
            confiance = 1 - round(distance, 2)
            matches_infos[f"Match #{neighbor_number}"] = correspondance
            matches_infos[f"Match #{neighbor_number} similarite"] = confiance
            matches_infos[f"Match #{neighbor_number} index"] = neighbor_index
        return pd.Series(matches_infos)

    column_names = []
    for neighbor_number in range(1, k_matches+1):
        column_names += [f"Match #{neighbor_number}", f"Match #{neighbor_number} similarite", f"Match #{neighbor_number} index"]

    result_df = pd.DataFrame(input_names, columns=["input"])
    result_df.loc[:, column_names] = result_df.apply(get_matches, axis=1)
    return result_df
matcher(input_names=test_inputs["companyNameClean"].values, standard_names=siren_df['companyNameClean'].values)
###Output
_____no_output_____
###Markdown
Predict Library Loading Concentration
###Code
cd ../
import pickle
from utility import load_data,predict_loading_concentration
import matplotlib
%matplotlib inline

# Bioanalyzer run file prefix identifying the library to evaluate.
file_id = 'data/run/nexteraJD_201907_High Sensitivity DNA Assay_DE24802700_2019-02-07_13-09-36_'
state_df = load_data(file_id,plot=True)

# Keep only the spectral feature columns (drop the two target columns) and
# take the first row's spectrum.
columns = [column for column in state_df.columns if not column in ['Cluster Density','Library Loading Concentration'] ]
spectra = state_df[columns].values[0]

#Load Model
# NOTE(review): pickle.load is only safe on trusted model files.
with open('model/model.pkl','rb') as fp:
    model = pickle.load(fp)

#Predict Loading Concentration
predict_loading_concentration(spectra,model)
###Output
_____no_output_____
###Markdown
This notebook uses a pre-trained Tensorflow model to make livepredictions on the audio samples recorded from a microphone.
###Code
import os
from micmon.audio import AudioDevice, AudioPlayer
from micmon.model import Model
# Directory holding the trained micmon sound-classification model.
model_dir = os.path.expanduser(os.path.join('~', 'models', 'baby-monitor'))
# ALSA capture backend and device (presumably card 3, device 0 — check `arecord -l`).
audio_system = 'alsa'
audio_device = 'plughw:3,0'
# Class names (presumably index-aligned with the model's outputs — confirm
# against how the model was trained).
label_names = ['negative', 'positive']
###Output
DEBUG:matplotlib:(private) matplotlib data path: /usr/lib/python3.8/site-packages/matplotlib/mpl-data
DEBUG:matplotlib:matplotlib data path: /usr/lib/python3.8/site-packages/matplotlib/mpl-data
DEBUG:matplotlib:CONFIGDIR=/home/blacklight/.config/matplotlib
DEBUG:matplotlib:matplotlib version 3.3.2
DEBUG:matplotlib:interactive is False
DEBUG:matplotlib:platform is linux
DEBUG:matplotlib:loaded modules: ['sys', 'builtins', '_frozen_importlib', '_imp', '_warnings', '_frozen_importlib_external', '_io', 'marshal', 'posix', '_thread', '_weakref', 'time', 'zipimport', '_codecs', 'codecs', 'encodings.aliases', 'encodings', 'encodings.utf_8', '_signal', '__main__', 'encodings.latin_1', '_abc', 'abc', 'io', '_stat', 'stat', '_collections_abc', 'genericpath', 'posixpath', 'os.path', 'os', '_sitebuiltins', '_locale', '_bootlocale', 'types', 'importlib._bootstrap', 'importlib._bootstrap_external', 'warnings', 'importlib', 'importlib.machinery', 'importlib.abc', '_operator', 'operator', 'keyword', '_heapq', 'heapq', 'itertools', 'reprlib', '_collections', 'collections', '_functools', 'functools', 'contextlib', 'importlib.util', 'google', 'google.assistant', 'google.assistant.embedded', 'googlesamples', 'googlesamples.assistant', 'google.cloud', 'google.logging', 'google.iam', 'mpl_toolkits', 'ruamel', 'sphinxcontrib', 'zope', 'site', '_weakrefset', 'weakref', 'pkgutil', 'runpy', 'ipykernel._version', '__future__', 'enum', '_sre', 'sre_constants', 'sre_parse', 'sre_compile', 'copyreg', 're', '_json', 'json.scanner', 'json.decoder', 'json.encoder', 'json', 'errno', 'signal', 'threading', '_posixsubprocess', 'select', 'collections.abc', 'math', 'selectors', 'subprocess', 'IPython.core', 'IPython.core.getipython', 'IPython.core.release', 'atexit', 'copy', 'fnmatch', 'glob', 'token', 'tokenize', 'linecache', 'traceback', '_string', 'string', 'logging', 'zlib', '_compression', '_bz2', 'bz2', '_lzma', 'lzma', 'pwd', 'grp', 'shutil', '_opcode', 'opcode', 'dis', 'inspect', '_struct', 'struct', 'six', 'traitlets.utils', 'traitlets.utils.getargspec', 'ipython_genutils._version', 'ipython_genutils', 'locale', 'ipython_genutils.encoding', 'ipython_genutils.py3compat', 'traitlets.utils.importstring', 'traitlets.utils.sentinel', 'traitlets.utils.bunch', 'traitlets.traitlets', 'traitlets._version', 'traitlets', 'decorator', 'gettext', 'argparse', '_ast', 
'ast', '_bisect', 'bisect', '_sha512', '_random', 'random', 'ipython_genutils.path', 'traitlets.config.loader', 'textwrap', 'ipython_genutils.text', 'traitlets.config.configurable', 'ipython_genutils.importstring', 'traitlets.config.application', 'traitlets.config', 'pprint', 'platform', 'sysconfig', 'urllib', 'urllib.parse', '_sysconfigdata__linux_x86_64-linux-gnu', 'pydoc', 'bdb', 'IPython.utils', 'IPython.utils.ipstruct', 'IPython.utils.coloransi', 'pygments', 'IPython.utils.colorable', 'IPython.utils.PyColorize', 'IPython.utils.encoding', 'IPython.utils.py3compat', 'IPython.core.excolors', 'IPython.testing', 'IPython.testing.skipdoctest', 'cmd', 'codeop', 'code', 'pdb', 'IPython.core.debugger', 'IPython.core.display_trap', 'pexpect.exceptions', 'pexpect.utils', 'pexpect.expect', 'termios', 'tty', 'pty', 'fcntl', 'resource', 'ptyprocess.util', 'ptyprocess.ptyprocess', 'ptyprocess', 'pexpect.spawnbase', 'pexpect.pty_spawn', 'pexpect.run', 'pexpect', 'shlex', 'IPython.utils._process_common', 'IPython.utils._process_posix', 'IPython.utils.process', 'IPython.utils.decorators', 'IPython.utils.path', 'IPython.utils.data', 'IPython.utils.terminal', 'IPython.core.ultratb', 'IPython.utils._sysinfo', 'IPython.utils.sysinfo', 'IPython.core.crashhandler', 'tempfile', 'IPython.utils.importstring', 'IPython.paths', 'IPython.core.profiledir', 'IPython.core.application', 'IPython.terminal', '_hashlib', '_blake2', '_sha3', 'hashlib', 'IPython.core.compilerop', 'IPython.core.error', 'ntpath', 'pathlib', 'IPython.utils.text', 'IPython.core.magic_arguments', 'getopt', 'typing.io', 'typing.re', 'typing', 'binascii', 'mimetypes', 'IPython.core.display', 'IPython.core.page', 'getpass', 'IPython.lib.security', 'IPython.lib', '_datetime', 'datetime', 'IPython.lib.pretty', 'IPython.utils.openpy', 'IPython.utils.dir2', 'IPython.utils.wildcard', 'pygments.lexers._mapping', 'pygments.modeline', 'pygments.plugin', 'pygments.util', 'pygments.lexers', 'pygments.filter', 'pygments.token', 
'pygments.filters', 'pygments.regexopt', 'pygments.lexer', 'pygments.unistring', 'pygments.lexers.python', 'pygments.formatters._mapping', 'pygments.formatters', 'pygments.styles', 'pygments.formatter', 'pygments.formatters.html', 'IPython.core.oinspect', 'IPython.core.inputtransformer2', 'IPython.core.magic', '_compat_pickle', '_pickle', 'pickle', 'pickleshare', 'IPython.core.autocall', 'IPython.core.macro', 'IPython.core.splitinput', 'IPython.core.prefilter', 'IPython.core.alias', 'IPython.core.builtin_trap', 'backcall.backcall', 'backcall', 'IPython.core.events', 'IPython.core.displayhook', 'IPython.core.displaypub', 'IPython.core.extensions', 'IPython.utils.sentinel', 'IPython.core.formatters', '_sqlite3', 'sqlite3.dbapi2', 'sqlite3', 'IPython.core.history', 'IPython.core.logger', 'IPython.core.payload', 'IPython.core.usage', 'html.entities', 'html', 'IPython.lib.display', 'IPython.display', 'IPython.utils.capture', 'IPython.utils.io', 'IPython.core.hooks', 'IPython.utils.strdispatch', 'IPython.utils.syspathcontext', 'IPython.utils.tempdir', 'IPython.utils.contexts', 'IPython.core.async_helpers', 'IPython.core.interactiveshell', 'concurrent', 'concurrent.futures._base', 'concurrent.futures', '_socket', 'socket', '_ssl', 'base64', 'ssl', 'asyncio.constants', 'asyncio.format_helpers', 'asyncio.base_futures', 'asyncio.log', 'asyncio.coroutines', '_contextvars', 'contextvars', 'asyncio.exceptions', 'asyncio.base_tasks', '_asyncio', 'asyncio.events', 'asyncio.futures', 'asyncio.protocols', 'asyncio.transports', 'asyncio.sslproto', 'asyncio.locks', 'asyncio.tasks', 'asyncio.staggered', 'asyncio.trsock', 'asyncio.base_events', 'asyncio.runners', 'asyncio.queues', 'asyncio.streams', 'asyncio.subprocess', 'asyncio.base_subprocess', 'asyncio.selector_events', 'asyncio.unix_events', 'asyncio', 'prompt_toolkit.application.current', 'prompt_toolkit.eventloop.utils', 'prompt_toolkit.eventloop.async_generator', 'wcwidth.table_wide', 'wcwidth.table_zero', 
'wcwidth.unicode_versions', 'wcwidth.wcwidth', 'wcwidth', 'prompt_toolkit.utils', 'prompt_toolkit.eventloop.inputhook', 'prompt_toolkit.eventloop', 'prompt_toolkit.application.run_in_terminal', 'prompt_toolkit.selection', 'prompt_toolkit.clipboard.base', 'prompt_toolkit.clipboard.in_memory', 'prompt_toolkit.clipboard', 'prompt_toolkit.cache', 'prompt_toolkit.enums', 'prompt_toolkit.filters.base', 'prompt_toolkit.filters.app', 'prompt_toolkit.filters.cli', 'prompt_toolkit.filters.utils', 'prompt_toolkit.filters', 'prompt_toolkit.document', 'prompt_toolkit.auto_suggest', 'prompt_toolkit.data_structures', 'prompt_toolkit.styles.base', 'prompt_toolkit.styles.named_colors', 'prompt_toolkit.styles.style', 'prompt_toolkit.styles.defaults', 'prompt_toolkit.styles.pygments', 'colorsys', 'prompt_toolkit.styles.style_transformation', 'prompt_toolkit.styles', 'prompt_toolkit.output.color_depth', 'prompt_toolkit.output.base', 'prompt_toolkit.patch_stdout', 'prompt_toolkit.output.defaults', 'prompt_toolkit.output', 'array', 'prompt_toolkit.output.vt100', 'prompt_toolkit.mouse_events', 'prompt_toolkit.formatted_text.base', 'prompt_toolkit.formatted_text.ansi', 'xml', 'xml.dom.domreg', 'xml.dom', 'xml.dom.minicompat', 'xml.dom.NodeFilter', 'xml.dom.xmlbuilder', 'xml.dom.minidom', 'prompt_toolkit.formatted_text.html', 'prompt_toolkit.formatted_text.pygments', 'prompt_toolkit.formatted_text.utils', 'prompt_toolkit.formatted_text', 'prompt_toolkit.completion.base', 'prompt_toolkit.completion.filesystem', 'prompt_toolkit.completion.word_completer', 'prompt_toolkit.completion.fuzzy_completer', 'prompt_toolkit.completion.nested', 'prompt_toolkit.completion', 'prompt_toolkit.history', 'prompt_toolkit.keys', 'prompt_toolkit.key_binding.key_bindings', 'prompt_toolkit.key_binding.key_processor', 'prompt_toolkit.key_binding', 'prompt_toolkit.key_binding.vi_state', 'prompt_toolkit.search', 'prompt_toolkit.validation', 'prompt_toolkit.buffer', 'prompt_toolkit.input.base', 
'prompt_toolkit.input.defaults', 'prompt_toolkit.input', 'prompt_toolkit.input.typeahead', 'prompt_toolkit.key_binding.bindings', 'prompt_toolkit.key_binding.bindings.scroll', 'prompt_toolkit.key_binding.bindings.page_navigation', 'prompt_toolkit.lexers.base', 'prompt_toolkit.lexers.pygments', 'prompt_toolkit.lexers', 'prompt_toolkit.layout.utils', 'prompt_toolkit.layout.processors', 'prompt_toolkit.layout.controls', 'prompt_toolkit.layout.dimension', 'prompt_toolkit.layout.margins', 'prompt_toolkit.layout.mouse_handlers', 'prompt_toolkit.layout.screen', 'prompt_toolkit.layout.containers', 'prompt_toolkit.layout.layout', 'prompt_toolkit.layout.menus', 'prompt_toolkit.layout', 'prompt_toolkit.key_binding.bindings.completion', 'prompt_toolkit.key_binding.bindings.named_commands', 'prompt_toolkit.key_binding.bindings.basic', 'prompt_toolkit.key_binding.bindings.cpr', 'prompt_toolkit.key_binding.bindings.emacs', 'prompt_toolkit.key_binding.bindings.mouse', 'prompt_toolkit.input.ansi_escape_sequences', 'prompt_toolkit.input.vt100_parser', 'prompt_toolkit.key_binding.digraphs', 'prompt_toolkit.key_binding.bindings.vi', 'prompt_toolkit.key_binding.defaults', 'prompt_toolkit.key_binding.emacs_state', 'prompt_toolkit.layout.dummy', 'prompt_toolkit.renderer', 'prompt_toolkit.application.application', 'prompt_toolkit.application.dummy', 'prompt_toolkit.application', 'prompt_toolkit.key_binding.bindings.focus', 'prompt_toolkit.widgets.toolbars', 'prompt_toolkit.widgets.base', 'prompt_toolkit.widgets.dialogs', 'prompt_toolkit.widgets.menus', 'prompt_toolkit.widgets', 'prompt_toolkit.shortcuts.dialogs', 'prompt_toolkit.shortcuts.progress_bar.formatters', 'prompt_toolkit.shortcuts.progress_bar.base', 'prompt_toolkit.shortcuts.progress_bar', 'prompt_toolkit.key_binding.bindings.auto_suggest', 'prompt_toolkit.key_binding.bindings.open_in_editor', 'prompt_toolkit.shortcuts.prompt', 'prompt_toolkit.shortcuts.utils', 'prompt_toolkit.shortcuts', 'prompt_toolkit', 'pygments.style', 
'unicodedata', 'IPython.core.latex_symbols', 'IPython.utils.generics', 'parso._compatibility', 'parso.utils', 'parso.tree', 'parso.python', 'parso.python.token', 'parso.python.tokenize', 'parso.pgen2.grammar_parser', 'parso.pgen2.generator', 'parso.pgen2', 'parso.parser', 'difflib', 'parso.python.prefix', 'parso.python.tree', 'parso.python.parser', 'parso.python.diff', 'gc', 'parso.file_io', 'parso.cache', 'parso.normalizer', 'parso.python.errors', 'parso.python.pep8', 'parso.grammar', 'parso', 'jedi.file_io', '_queue', 'queue', 'jedi._compatibility', 'jedi.parser_utils', 'colorama.ansi', '_ctypes', 'ctypes._endian', 'ctypes', 'colorama.win32', 'colorama.winterm', 'colorama.ansitowin32', 'colorama.initialise', 'colorama', 'jedi.debug', 'jedi.settings', 'jedi.cache', 'jedi.inference.cache', 'jedi.inference.helpers', 'jedi.inference.utils', 'jedi.inference.base_value', 'jedi.common', 'jedi.inference.sys_path', 'jedi.inference.recursion', 'jedi.inference.flow_analysis', 'jedi.inference.lazy_value', 'jedi.inference.docstrings', 'jedi.plugins', 'jedi.inference.names', 'jedi.inference.filters', 'jedi.inference.compiled.getattr_static', 'jedi.inference.compiled.access', 'jedi.inference.signature', 'jedi.inference.context', 'jedi.inference.compiled.value', 'jedi.inference.compiled', 'jedi.inference.analysis', 'jedi.inference.gradual', 'jedi.inference.value.module', 'jedi.inference.value.dynamic_arrays', 'jedi.inference.value.iterable', 'jedi.inference.arguments', 'jedi.inference.parser_cache', 'jedi.inference.gradual.generics', 'jedi.inference.value.function', 'jedi.inference.value.klass', 'jedi.inference.value.instance', 'jedi.inference.value', 'jedi.inference.gradual.base', 'jedi.inference.gradual.type_var', 'jedi.inference.gradual.typing', 'jedi.inference.gradual.stub_value', 'jedi.inference.gradual.typeshed', 'jedi.inference.imports', 'jedi.inference.param', 'jedi.inference.gradual.annotation', 'jedi.inference.value.decorator', 'jedi.inference.syntax_tree', 
'jedi.inference', 'jedi.inference.gradual.conversion', 'jedi.inference.compiled.mixed', 'pydoc_data', 'pydoc_data.topics', 'jedi.api.keywords', 'jedi.api.completion_cache', 'jedi.api.helpers', 'jedi.api.classes', 'jedi.api.interpreter', 'jedi.api.strings', 'jedi.api.file_name', 'jedi.api.completion', 'filecmp', 'jedi.inference.compiled.subprocess.functions', 'jedi.api.exceptions', 'jedi.inference.compiled.subprocess', 'jedi.api.environment', 'jedi.inference.references', 'jedi.api.project', 'jedi.api.errors', 'jedi.api.refactoring', 'jedi.api.refactoring.extract', 'jedi.inference.gradual.utils', 'jedi.api', 'jedi.plugins.stdlib', 'jedi.plugins.flask', 'jedi.plugins.pytest', 'jedi.plugins.django', 'jedi.plugins.registry', 'jedi', 'IPython.core.completer', 'IPython.terminal.ptutils', 'IPython.terminal.shortcuts', 'IPython.terminal.debugger', 'IPython.lib.clipboard', 'IPython.terminal.magics', 'IPython.terminal.pt_inputhooks', 'IPython.terminal.prompts', 'IPython.terminal.interactiveshell', 'IPython.core.magics.auto', 'IPython.core.magics.basic', 'email', 'http', 'email.errors', 'email.quoprimime', 'email.base64mime', 'quopri', 'email.encoders', 'email.charset', 'email.header', 'calendar', 'email._parseaddr', 'email.utils', 'email._policybase', 'email.feedparser', 'email.parser', 'uu', 'email._encoded_words', 'email.iterators', 'email.message', 'http.client', 'urllib.response', 'urllib.error', 'urllib.request', 'IPython.core.magics.code', 'IPython.core.magics.config', 'IPython.core.magics.display', 'timeit', '_lsprof', 'profile', 'cProfile', 'pstats', 'IPython.utils.module_paths', 'IPython.utils.timing', 'IPython.core.magics.execution', 'IPython.core.magics.extension', 'IPython.core.magics.history', 'IPython.core.magics.logging', 'IPython.core.magics.namespace', 'IPython.core.magics.osm', 'IPython.core.magics.packaging', 'IPython.core.pylabtools', 'IPython.core.magics.pylab', 'IPython.lib.backgroundjobs', 'IPython.core.magics.script', 'IPython.core.magics', 
'IPython.core.shellapp', 'IPython.extensions', 'IPython.extensions.storemagic', 'IPython.terminal.ipapp', 'IPython.terminal.embed', 'IPython.utils.frame', 'IPython', 'jupyter_client._version', 'zmq.backend.select', 'cython_runtime', 'zmq.backend.cython.constants', '_cython_0_29_17', 'zmq.backend.cython.error', 'zmq.error', 'zmq.backend.cython.message', 'zmq.backend.cython.context', 'zmq.backend.cython.socket', 'zmq.backend.cython.utils', 'zmq.backend.cython._poll', 'zmq.backend.cython._version', 'zmq.backend.cython._device', 'zmq.backend.cython._proxy_steerable', 'zmq.backend.cython', 'zmq.backend', 'zmq.utils', 'zmq.utils.constant_names', 'zmq.sugar.constants', 'zmq.sugar.attrsettr', 'zmq.sugar.poll', 'zmq.utils.strtypes', 'numbers', '_pydecimal', 'decimal', 'simplejson.errors', 'simplejson.raw_json', 'simplejson.compat', 'simplejson._speedups', 'simplejson.scanner', 'simplejson.decoder', 'simplejson.encoder', 'simplejson', 'zmq.utils.jsonapi', 'zmq.sugar.socket', 'zmq.sugar.context', 'zmq.sugar.frame', 'zmq.sugar.tracker', 'zmq.sugar.version', 'zmq.sugar.stopwatch', 'zmq.sugar', 'zmq', 'jupyter_client.localinterfaces', 'jupyter_core.version', 'jupyter_core', 'distutils', 'distutils.errors', 'distutils.dep_util', 'distutils.debug', 'distutils.log', 'distutils.spawn', 'distutils.util', 'jupyter_core.paths', 'jupyter_client.connect', 'traitlets.log', 'jupyter_client.launcher', 'jupyter_client.channelsabc', 'jupyter_client.channels', 'jupyter_client.clientabc', 'jupyter_client.client', 'jupyter_client.kernelspec', 'jupyter_client.managerabc', 'jupyter_client.manager', 'jupyter_client.blocking.channels', 'jupyter_client.blocking.client', 'jupyter_client.blocking', 'zmq._future', 'zmq.asyncio', 'jupyter_client.asynchronous.channels', 'jupyter_client.asynchronous.client', 'jupyter_client.asynchronous', '_uuid', 'uuid', 'jupyter_client.multikernelmanager', 'jupyter_client', 'ipykernel.connect', 'ipykernel', 'tornado', 'logging.handlers', 'tornado.speedups', 
'tornado.util', 'tornado.escape', '_curses', 'curses', 'tornado.log', 'tornado.concurrent', 'tornado.ioloop', 'tornado.platform', 'tornado.gen', 'tornado.platform.asyncio', 'zmq.eventloop.ioloop', 'zmq.eventloop', 'tornado.stack_context', 'zmq.eventloop.zmqstream', 'imp', 'hmac', 'dateutil._version', 'dateutil', 'dateutil._common', 'dateutil.relativedelta', 'six.moves', 'dateutil.tz._common', 'dateutil.tz._factories', 'dateutil.tz.tz', 'dateutil.tz', 'dateutil.parser._parser', 'dateutil.parser.isoparser', 'dateutil.parser', '_strptime', 'jupyter_client.jsonutil', 'jupyter_client.adapter', 'jupyter_client.session', 'ipykernel.iostream', 'ipykernel.heartbeat', 'IPython.utils.tokenutil', 'tornado.locks', 'tornado.queues', 'ipykernel.jsonutil', 'ipykernel.kernelbase', 'ipykernel.comm.comm', 'ipykernel.comm.manager', 'ipykernel.comm', 'IPython.core.payloadpage', 'ipykernel.displayhook', 'ipykernel.zmqshell', 'distutils.version', 'ipykernel.eventloops', 'ipykernel.ipkernel', 'ipykernel.parentpoller', 'ipykernel.kernelapp', 'netifaces', 'faulthandler', 'ipykernel.codeutil', 'ipykernel.pickleutil', 'ipykernel.serialize', 'ipykernel.datapub', 'IPython.core.completerlib', 'storemagic', 'micmon', 'micmon.audio', 'micmon.audio.directory', 'micmon.audio.segment', 'numpy._globals', 'numpy.__config__', 'numpy.version', 'numpy._distributor_init', 'numpy.core._multiarray_umath', 'numpy.compat._inspect', 'numpy.compat.py3k', 'numpy.compat', 'numpy.core.overrides', 'numpy.core.multiarray', 'numpy.core.umath', 'numpy.core._string_helpers', 'numpy.core._dtype', 'numpy.core._type_aliases', 'numpy.core.numerictypes', 'numpy.core._asarray', 'numpy.core._exceptions', 'numpy.core._methods', 'numpy.core.fromnumeric', 'numpy.core.shape_base', 'numpy.core._ufunc_config', 'numpy.core.arrayprint', 'numpy.core.numeric', 'numpy.core.defchararray', 'numpy.core.records', 'numpy.core.memmap', 'numpy.core.function_base', 'numpy.core.machar', 'numpy.core.getlimits', 'numpy.core.einsumfunc', 
'numpy.core._multiarray_tests', 'numpy.core._add_newdocs', 'numpy.core._dtype_ctypes', 'numpy.core._internal', 'numpy._pytesttester', 'numpy.core', 'numpy.lib.mixins', 'numpy.lib.ufunclike', 'numpy.lib.type_check', 'numpy.lib.scimath', 'numpy.lib.twodim_base', 'numpy.linalg.lapack_lite', 'numpy.linalg._umath_linalg', 'numpy.linalg.linalg', 'numpy.linalg', 'numpy.matrixlib.defmatrix', 'numpy.matrixlib', 'numpy.lib.histograms', 'numpy.lib.function_base', 'numpy.lib.stride_tricks', 'numpy.lib.index_tricks', 'numpy.lib.nanfunctions', 'numpy.lib.shape_base', 'numpy.lib.polynomial', 'numpy.lib.utils', 'numpy.lib.arraysetops', 'numpy.lib.format', 'numpy.lib._datasource', 'numpy.lib._iotools', 'numpy.lib.npyio', 'numpy.lib.financial', 'numpy.lib.arrayterator', 'numpy.lib.arraypad', 'numpy.lib._version', 'numpy.lib', 'numpy.fft._pocketfft_internal', 'numpy.fft._pocketfft', 'numpy.fft.helper', 'numpy.fft', 'numpy.polynomial.polyutils', 'numpy.polynomial._polybase', 'numpy.polynomial.polynomial', 'numpy.polynomial.chebyshev', 'numpy.polynomial.legendre', 'numpy.polynomial.hermite', 'numpy.polynomial.hermite_e', 'numpy.polynomial.laguerre', 'numpy.polynomial', '_cython_0_29_21', 'numpy.random._common', 'secrets', 'numpy.random.bit_generator', 'numpy.random._bounded_integers', 'numpy.random._mt19937', 'numpy.random.mtrand', 'numpy.random._philox', 'numpy.random._pcg64', 'numpy.random._sfc64', 'numpy.random._generator', 'numpy.random._pickle', 'numpy.random', 'numpy.ctypeslib', 'numpy.ma.core', 'numpy.ma.extras', 'numpy.ma', 'numpy', 'matplotlib', 'gzip', 'matplotlib.cbook.deprecation', 'matplotlib.cbook', 'matplotlib._animation_data', 'matplotlib.animation', 'pyparsing', 'matplotlib.fontconfig_pattern', 'matplotlib.docstring', 'matplotlib._color_data', 'matplotlib.colors', 'cycler', 'matplotlib.rcsetup', 'matplotlib._version', 'matplotlib.ft2font', 'kiwisolver']
DEBUG:matplotlib:CACHEDIR=/home/blacklight/.cache/matplotlib
DEBUG:matplotlib.font_manager:Using fontManager instance from /home/blacklight/.cache/matplotlib/fontlist-v330.json
DEBUG:matplotlib.pyplot:Loaded backend module://ipykernel.pylab.backend_inline version unknown.
DEBUG:matplotlib.pyplot:Loaded backend module://ipykernel.pylab.backend_inline version unknown.
DEBUG:tensorflow:Falling back to TensorFlow client; we recommended you install the Cloud TPU client directly with pip install cloud-tpu-client.
WARNING:root:Limited tf.compat.v2.summary API due to missing TensorBoard installation.
WARNING:root:Limited tf.compat.v2.summary API due to missing TensorBoard installation.
WARNING:root:Limited tf.compat.v2.summary API due to missing TensorBoard installation.
WARNING:root:Limited tf.summary API due to missing TensorBoard installation.
###Markdown
Load the model
###Code
# Load the pre-trained micmon model from disk.
model = Model.load(model_dir)
###Output
_____no_output_____
###Markdown
Play some audio from the input device
###Code
# Monitor loop: stream audio captured from the input device straight to the
# playback device, one sample at a time.
with AudioDevice(audio_system, device=audio_device) as mic, AudioPlayer() as speaker:
    for segment in mic:
        speaker.play(segment)
###Output
_____no_output_____
###Markdown
Record frames from an audio source and make predictions using the model
###Code
# Classify the incoming audio stream segment by segment. Capture is paused
# while the model runs and resumed afterwards.
with AudioDevice(audio_system, device=audio_device) as mic:
    for segment in mic:
        mic.pause()
        print(model.predict(segment))
        mic.resume()
###Output
negative
negative
negative
negative
negative
negative
negative
negative
negative
negative
|
Pandas- Visualização.ipynb | ###Markdown
Gráficos do Pandas
###Code
import numpy as np
import pandas as pd
%matplotlib inline
###Output
_____no_output_____
###Markdown
Dados
###Code
# Load the two sample datasets used throughout the notebook and peek at them.
df1 = pd.read_csv("df1.csv")
df2 = pd.read_csv("df2.csv")
df1.head()
df2.head()
###Output
_____no_output_____
###Markdown
Estilos
###Code
# Histogram of column A with the default pandas/matplotlib look.
df1['A'].hist()

import matplotlib.pyplot as plt

# Try a few built-in matplotlib style sheets; each style.use() call
# restyles all subsequent plots.
plt.style.use('ggplot')
df1['A'].hist()
plt.style.use('bmh')
df1['A'].hist()
plt.style.use('dark_background')
df1['A'].hist()
plt.style.use('fivethirtyeight')
df1['A'].hist()
# Settle on ggplot for the rest of the notebook.
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
Área
###Code
# Stacked area chart of df2's columns (alpha adds transparency).
df2.plot.area(alpha=0.4)
###Output
_____no_output_____
###Markdown
Barras
###Code
df2.head()
# Grouped bar chart, then the same data as stacked bars.
df2.plot.bar()
df2.plot.bar(stacked=True)
###Output
_____no_output_____
###Markdown
Histograma
###Code
# Histogram of column A with a finer, 50-bin resolution.
df1['A'].plot.hist(bins=50)
###Output
_____no_output_____
###Markdown
Dispersão
###Code
# Scatter plot of column A against column B.
df1.plot.scatter(x='A',y='B')
###Output
_____no_output_____
###Markdown
Veja: http://matplotlib.org/users/colormaps.html
###Code
# Same scatter, colouring each point by column C via the 'coolwarm' colormap.
df1.plot.scatter(x='A',y='B',c='C',cmap='coolwarm')
###Output
_____no_output_____
###Markdown
Or use `s` to indicate size based on another column. The `s` parameter needs to be an array of sizes, not just the name of a column:
###Code
# Size each point by column C; s must be an array of sizes, not a column name.
# NOTE(review): negative values in C yield NaN sizes — that is what triggers
# the "invalid value encountered in sqrt" warning in the output below.
df1.plot.scatter(x='A',y='B',s=df1['C']*200)
###Output
/data/user/0/ru.iiec.pydroid3/files/aarch64-linux-android/lib/python3.7/site-packages/matplotlib/collections.py:874: RuntimeWarning: invalid value encountered in sqrt
scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor
###Markdown
BoxPlots
###Code
# Box-and-whisker plot for every column of df2.
df2.plot.box()
###Output
_____no_output_____
###Markdown
Hexagonal
###Code
# Hexagonal binning of 1000 random 2-D points (useful for dense scatters).
df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
df.plot.hexbin(x='a',y='b',gridsize=25,cmap='Oranges')
###Output
_____no_output_____
###Markdown
____ Densidade de distribuição (KDE)
###Code
# Kernel density estimate of one column, then of every column
# (plot.kde and plot.density are aliases).
df2['a'].plot.kde()
df2.plot.density()
###Output
_____no_output_____ |
markdown_generator/.ipynb_checkpoints/PubsFromBib_my-checkpoint.ipynb | ###Markdown
Publications markdown generator for academicpagesTakes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `pubsFromBibs.py`. Run either from the `markdown_generator` folder after replacing updating the publist dictionary with:* bib file names* specific venue keys based on your bib file preferences* any specific pre-text for specific files* Collection Name (future feature)TODO: Make this work with other databases of citations, TODO: Merge this with the existing TSV parsing solution
###Code
from pybtex.database.input import bibtex
import pybtex.database.input.bibtex
from time import strptime
import string
import html
import os
import re
import sys
sys.executable  # show which Python interpreter this kernel is running
#todo: incorporate different collection types rather than a catch all publications, requires other changes to template
# Configuration: one entry per bibtex source file. "venuekey" names the bib
# field that holds the venue, "venue-pretext" is prepended to that venue
# string, and "collection" controls the generated pages' name/permalink.
publist = {
    # "proceeding": {
    #     "file" : "proceedings.bib",
    #     "venuekey": "booktitle",
    #     "venue-pretext": "In the proceedings of ",
    #     "collection" : {"name":"publications",
    #                     "permalink":"/publication/"}
    # },
    "journal":{
        "file": "mypubs.bib",
        "venuekey" : "journal",
        "venue-pretext" : "",
        "collection" : {"name":"publications",
                        "permalink":"/publication/"}
    }
}
# Characters that must be replaced by entities in HTML output.
# (The original table was garbled by HTML-entity decoding of the notebook:
# each character mapped to itself, turning the escape into a no-op and
# leaving a syntactically broken dict literal.)
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
}

def html_escape(text):
    """Return *text* with &, double and single quotes replaced by HTML entities."""
    return "".join(html_escape_table.get(c, c) for c in text)
# Build an HTML citation string for every entry of every configured bib file.
cts = []
for pubsource in publist:
    parser = bibtex.Parser()
    bibdata = parser.parse_file(publist[pubsource]["file"])
    # Newest publications first (year is compared as a string).
    entries_sorted = sorted(bibdata.entries,
                            key=lambda x: bibdata.entries[x].fields["year"],
                            reverse=True)

    # Loop through the individual references in a given bibtex file.
    for ind, bib_id in enumerate(entries_sorted):
        entry = bibdata.entries[bib_id]
        b = entry.fields

        pub_year = f'{b["year"]}'
        print(pub_year)
        print(ind)

        # todo: this hack for month and day needs some cleanup
        if "month" in b.keys():
            if len(b["month"]) < 3:
                pub_month = "0" + b["month"]
                pub_month = pub_month[-2:]
            elif b["month"] not in range(12):
                tmnth = strptime(b["month"][:3], '%b').tm_mon
                pub_month = "{:02d}".format(tmnth)
            else:
                pub_month = str(b["month"])
        if "day" in b.keys():
            pub_day = str(b["day"])

        # Author list: "A", "A and B", or "A, B, ... and Z".
        # BUG FIX: the original had a special two-author branch AND an
        # unconditional final append, so the last author was emitted twice
        # for one- and two-author entries ("A and B and B"). A single
        # guarded final append handles every count correctly. Consistently
        # uses first_names/last_names (the original mixed in first()[0]).
        persons = entry.persons["author"]
        num_of_authors = len(persons)
        author = persons[0]
        citation = author.first_names[0] + " " + author.last_names[0]
        # Middle authors (only present when there are three or more).
        for i in range(1, num_of_authors - 1):
            author = persons[i]
            citation = citation + ", " + author.first_names[0] + " " + author.last_names[0]
            print(citation)
        if num_of_authors > 1:
            author = persons[-1]
            citation = citation + " and " + author.first_names[0] + " " + author.last_names[0]
        citation = citation + ". "

        # field may not exist for a reference
        url = b["url"]
        clean_title = b["title"].replace("{", "").replace("}", "").replace("\\", "")
        print(clean_title)
        citation = citation + "<a href=\"" + url + "\">" + clean_title + ".</a>"

        journal = entry.fields["journal"]
        citation = citation + "<em> " + journal + "</em> "
        if "arXiv" in journal:
            # arXiv preprints have no volume/pages.
            citation = citation + "," + pub_year
        else:
            vol = b["volume"]
            pages = b["pages"]
            if "number" in b.keys():
                num = b["number"]
                citation = citation + vol + "(" + num + ")" + ":" + pages + "," + pub_year
            else:
                citation = citation + vol + ":" + pages + "," + pub_year
        cts.append(citation)

# Render all citations as a single HTML ordered list.
all_text = "<ol>"
for c in cts:
    all_text += "<li>" + c + "</li>"
all_text += "</ol>"
all_text
# Re-parse the journal bib file and inspect one entry's journal field.
bibdata = parser.parse_file(publist["journal"]["file"])
bibdata.entries['Gazit2009'].fields["journal"]
citation=""
entry=bibdata.entries['Gazit2018']
persons=entry.persons["author"]
num_of_authors=len(persons)
author=persons[0]
citation = author.first+" "+author.last
if num_of_authors==2:
citation = citation+" and "+author.first+" "+author.last
for i in range(1,num_of_authors-1):
author=persons[i]
citation = citation+", "+author.first+" "+author.last
author=persons[-1]
citation = citation+" and "+author.first+" "+author.last+"."
b=entry.fields
url = b["url"]
clean_title = b["title"].replace("{", "").replace("}","").replace("\\","")
citation = citation+"<a href=\">"+url+"\""+clean_title+".</a>"
journal = entry.fields["journal"]
citation = citation+"<em> "+journal+"</em>"
vol=b["volume"]
num=b["number"]
pages=b["pages"]
pub_year = f'{b["year"]}'
#todo: this hack for month and day needs some cleanup
if "month" in b.keys():
if(len(b["month"])<3):
pub_month = "0"+b["month"]
pub_month = pub_month[-2:]
elif(b["month"] not in range(12)):
tmnth = strptime(b["month"][:3],'%b').tm_mon
pub_month = "{:02d}".format(tmnth)
else:
pub_month = str(b["month"])
if "day" in b.keys():
pub_day = str(b["day"])
citation = citation+vol+"("+num+")"+":"+pages+pub_year
citation
vol
bibtex.Person("Gazit, Snir")
?bibtex.Person
###Output
_____no_output_____ |
Lab6_Convolutional_cats_and_dogs.ipynb | ###Markdown
Training with a Larger Dataset - Cats and DogsIn the previous lab you trained a classifier with a horses-v-humans dataset. You saw that despite getting great training results, when you tried to do classification with real images, there were many errors, due primarily to overfitting -- where the network does very well with data that it has previously seen, but poorly with data it hasn't!In this lab you'll look at a real, and very large dataset, and see the impact this has to avoid overfitting.
###Code
import os
import zipfile
import random
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from shutil import copyfile
# If the URL doesn't work, visit https://www.microsoft.com/en-us/download/confirmation.aspx?id=54765
# And right click on the 'Download Manually' link to get a new URL to the dataset

# Note: This is a very large dataset and will take time to download
!wget --no-check-certificate \
    "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip" \
    -O "/tmp/cats-and-dogs.zip"

# Unpack the archive into /tmp (yields /tmp/PetImages/{Cat,Dog}, listed below).
local_zip = '/tmp/cats-and-dogs.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
# Sanity-check the number of extracted images per class.
print(len(os.listdir('/tmp/PetImages/Cat/')))
print(len(os.listdir('/tmp/PetImages/Dog/')))

# Expected Output:
# 12501
# 12501
# Build the directory layout the image generators expect:
# /tmp/cats-v-dogs/{training,testing}/{cats,dogs}
# os.makedirs creates intermediate directories and, with exist_ok=True, is
# idempotent — unlike the original try/os.mkdir/except OSError: pass, which
# silently skipped ALL remaining mkdirs as soon as one raised.
for split_name in ('training', 'testing'):
    for class_name in ('cats', 'dogs'):
        os.makedirs(os.path.join('/tmp/cats-v-dogs', split_name, class_name), exist_ok=True)
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):
    """Randomly split the files in SOURCE between TRAINING and TESTING.

    SOURCE, TRAINING, TESTING: directory paths ending with a slash.
    SPLIT_SIZE: fraction (0..1) of the files copied to TRAINING; the rest
    go to TESTING. Zero-byte files are skipped with a message.
    """
    files = []
    for filename in os.listdir(SOURCE):
        file = SOURCE + filename
        if os.path.getsize(file) > 0:
            files.append(filename)
        else:
            print(filename + " is zero length, so ignoring.")

    training_length = int(len(files) * SPLIT_SIZE)
    shuffled_set = random.sample(files, len(files))
    training_set = shuffled_set[:training_length]
    # BUG FIX: the original sliced shuffled_set[-testing_length:], which for
    # SPLIT_SIZE == 1.0 (testing_length == 0) is [-0:] == the WHOLE list,
    # duplicating every file into TESTING. Slicing from training_length
    # partitions the list correctly for every split size.
    testing_set = shuffled_set[training_length:]

    for filename in training_set:
        copyfile(SOURCE + filename, TRAINING + filename)
    for filename in testing_set:
        copyfile(SOURCE + filename, TESTING + filename)
# Source and destination directories for the train/test split.
CAT_SOURCE_DIR = "/tmp/PetImages/Cat/"
TRAINING_CATS_DIR = "/tmp/cats-v-dogs/training/cats/"
TESTING_CATS_DIR = "/tmp/cats-v-dogs/testing/cats/"
DOG_SOURCE_DIR = "/tmp/PetImages/Dog/"
TRAINING_DOGS_DIR = "/tmp/cats-v-dogs/training/dogs/"
TESTING_DOGS_DIR = "/tmp/cats-v-dogs/testing/dogs/"

# 90% of each class goes to training, 10% to testing.
split_size = .9
split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size)
split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size)

# Expected output
# 666.jpg is zero length, so ignoring
# 11702.jpg is zero length, so ignoring

# Verify the split sizes per class and per split.
print(len(os.listdir('/tmp/cats-v-dogs/training/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/training/dogs/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/cats/')))
print(len(os.listdir('/tmp/cats-v-dogs/testing/dogs/')))

# Expected output:
# 11250
# 11250
# 1250
# 1250
# Three conv/pool blocks followed by a dense head; binary classifier for
# 150x150 RGB images.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    # Single sigmoid unit: output > 0.5 is treated as class 1 ("dog") below.
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# NOTE(review): `lr` is the legacy RMSprop argument name; newer Keras expects
# `learning_rate` — confirm against the installed TF version.
model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['acc'])
TRAINING_DIR = "/tmp/cats-v-dogs/training/"
# Rescale pixel values from [0, 255] to [0, 1].
train_datagen = ImageDataGenerator(rescale=1.0/255.)
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=250,
                                                    class_mode='binary',
                                                    target_size=(150, 150))

VALIDATION_DIR = "/tmp/cats-v-dogs/testing/"
validation_datagen = ImageDataGenerator(rescale=1.0/255.)
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              batch_size=250,
                                                              class_mode='binary',
                                                              target_size=(150, 150))

# Expected Output:
# Found 22498 images belonging to 2 classes.
# Found 2500 images belonging to 2 classes.

# Note that this may take some time.
# 90 steps x 250 images/batch ~= one pass over the ~22.5k training images per epoch.
history = model.fit(train_generator, epochs=15, steps_per_epoch=90,
                    validation_data=validation_generator, validation_steps=6)
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs=range(len(acc)) # Get number of epochs
#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
plt.plot(epochs, acc, 'r', "Training Accuracy")
plt.plot(epochs, val_acc, 'b', "Validation Accuracy")
plt.title('Training and validation accuracy')
plt.figure()
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r', "Training Loss")
plt.plot(epochs, val_loss, 'b', "Validation Loss")
plt.figure()
# Desired output. Charts with training and validation metrics. No crash :)
# Here's a codeblock just for fun. You should be able to upload an image here
# and have it classified without crashing
import numpy as np
from google.colab import files
from keras.preprocessing import image
# Let the user upload one or more images and classify each with the trained model.
uploaded = files.upload()
for fn in uploaded.keys():
  # predicting images
  path = '/content/' + fn
  # Resize to the network's 150x150 input size.
  img = image.load_img(path, target_size=(150, 150))
  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)  # add a batch dimension: (1, 150, 150, 3)
  images = np.vstack([x])
  classes = model.predict(images, batch_size=10)
  print(classes[0])
  # Sigmoid output > 0.5 -> class 1 ("dog"), otherwise "cat".
  if classes[0]>0.5:
    print(fn + " is a dog")
  else:
    print(fn + " is a cat")
###Output
_____no_output_____ |
Code/Viet_AI_LP_TF_Falselabel_half_data.ipynb | ###Markdown
Visualize Label Spreading
###Code
import matplotlib.pyplot as plt
# Fraction of the labelled samples whose labels will be randomly dropped.
drop=0.5
n=np.shape(X)[0]
Cn=np.copy(trans.embedding_)  # copy of the external embedding; not used below
Ynd=np.copy(Y)
#Generate 28 false labels
for ni in range(28):
    r_false=randint(0,np.shape(Y)[0]-1)
    Ynd[r_false]=false_label_return(Y[r_false]) #One label flipped randomly
# Randomly drop `ns` labels.  Indices drawn this way may repeat, so the
# effective number of dropped samples can be slightly smaller than ns.
ns=np.round(drop*n).astype(int)
#print('num of samples dropped=',ns)
dropI=np.floor(np.random.rand(ns)*n).astype(int)
no_drop=np.delete(nums,dropI)  # indices that keep their labels
# NOTE(review): dropped labels are set to 0 here but to -1 in the loops
# below; LabelSpreading treats -1 as "unlabelled" -- confirm which is intended.
Ynd[dropI]=0
#Now combine (Xn, UX1, NUX1)
Xn=((X1))
Ynd=((Ynd))
#dropI=np.concatenate((dropI,range(np.shape(X)[0],np.shape(Xn)[0])))
#Thus (Xn,Ynd) is the new data that will be subjected to label propagation
################################### so random label removal works!
#Step 4.2: Apply Label propagation to [X,UX,NUX]
#lp_model = LabelSpreading(kernel='rbf',gamma=20,alpha=0.1) #high alpha means labels can change till stable
# Train the model
#np.savetxt('test.out', Ynd, delimiter=',')
#lp_model.fit(Xn, Ynd)
#Step 4.3: Predict Model label for the dropped samples
#pred = lp_model.transduction_[dropI]
#pred=pred.astype(int)
#print(np.shape(pred))
#print('Predictions=',pred)#pred[np.shape(X)[0]:np.shape(X)[0]+np.shape(UX)[0]])
#print('Groundtruth=',UY)
############Predictions to be measured#######################
#predictions=pred[ns:(ns+np.shape(UY)[0])]
#print(np.shape(predictions)[0])
#pred_labels=np.zeros((np.shape(UY)[0],7))
print(np.shape(Xn),np.shape(Ynd), no_drop)
# Collect no drops
print(np.shape(no_drop))
no_drop
fig = plt.figure()
#plt.scatter(trans.embedding_[no_drop,0], trans.embedding_[no_drop,1], s=30, c=Ynd[no_drop], cmap='hsv')
#ax.scatter(Xn[dropI,0], Xn[dropI,1], Xn[dropI,2], c=np.zeros(np.shape(dropI)[0]))
# Scatter of the still-labelled points, coloured by their label.
plt.scatter(X1[no_drop,1], X1[no_drop,2], s=30, c=Ynd[no_drop], cmap='hsv')
fig = plt.figure()
#ax = fig.add_subplot(projection='3d')
#plt.scatter(trans.embedding_[:,0], trans.embedding_[:,1], c=Ynd, s=30, cmap='hsv')
#plt.scatter(trans.embedding_[no_drop,0], trans.embedding_[no_drop,1], s=30, c=Ynd[no_drop], cmap='hsv')
#plt.scatter(trans.embedding_[dropI,0], trans.embedding_[dropI,1], s=5, c=np.zeros(np.shape(dropI)[0]))
# Same labelled points, with the dropped (unlabelled) points overplotted
# as small single-colour markers.
plt.scatter(X1[no_drop,1], X1[no_drop,2], s=30, c=Ynd[no_drop], cmap='hsv')
plt.scatter(X1[dropI,1], X1[dropI,2], s=5, c=np.zeros(np.shape(dropI)[0]))
np.shape(dropI), np.shape(Xn), np.shape(Ynd)
#dropI=np.concatenate((dropI,range(np.shape(X)[0],np.shape(Xn)[0])))
#Thus (Xn,Ynd) is the new data that will be subjected to label propagation
################################### so random label removal works!
#Step 4.2: Apply Label propagation to [X,UX,NUX]
lp_model = LabelSpreading(kernel='rbf',gamma=0.2,alpha=0.1) #high alpha means labels can change till stable
# Train the model
#np.savetxt('test.out', Ynd, delimiter=',')
lp_model.fit(Xn, Ynd)
#Step 4.3: Predict Model label for the dropped samples
pred = lp_model.transduction_[dropI]
pred=pred.astype(int)
# Write the propagated labels back into the label vector.
Ynd[dropI]=pred
#print(np.shape(pred))
#print('Predictions=',pred)#pred[np.shape(X)[0]:np.shape(X)[0]+np.shape(UX)[0]])
#print('Groundtruth=',UY)
############Predictions to be measured#######################
#predictions=pred[ns:(ns+np.shape(UY)[0])]
#print(np.shape(predictions)[0])
#pred_labels=np.zeros((np.shape(UY)[0],7))
print(np.shape(Xn),np.shape(Ynd))
# Any remaining negative labels would indicate samples left unlabelled.
idx=np.where(Ynd<0)
idx
fig = plt.figure()
# Points coloured by their (now fully propagated) labels; the formerly
# dropped points are overplotted as small single-colour markers.
plt.scatter(X1[no_drop,1], X1[no_drop,2], s=30, c=Ynd[no_drop], cmap='hsv')
plt.scatter(X1[dropI,1], X1[dropI,2], s=5, c=np.zeros(np.shape(dropI)[0]),cmap='hsv')
#ax = fig.add_subplot(projection='3d')
#plt.scatter(trans.embedding_[:,0], trans.embedding_[:,1], c=Ynd, s=30, cmap='hsv')
#Step 4.1: LP averaged across all 7 classes (equally likely)
dropP=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]#Fraction of population to be randomly dropped
# Metric accumulators, shape (len(dropP), n_repeats) = (8, 20).
prec=np.zeros((8,20))
rec=np.zeros((8,20))
fb=np.zeros((8,20))
n=np.shape(X)[0]
########################################################################
#In each loop, randomly drop (dropP) number of samples from X, label propagate on [X, UX, NUX], test on [UX]
for cn in range(20):
    print(cn)
    Xn=np.copy(X1)
    for cp,drop in enumerate(dropP):
        Ynd=np.copy(Y)
        #Generate 28 false labels
        for ni in range(28):
            r_false=randint(0,np.shape(Y)[0]-1)
            Ynd[r_false]=false_label_return(Y[r_false]) #One label flipped randomly
        ns=np.round(drop*n).astype(int)
        #print('num of samples dropped=',ns)
        dropI=np.floor(np.random.rand(ns)*n).astype(int)
        # -1 marks a sample as unlabelled for LabelSpreading.
        Ynd[dropI]=-1
        #Now combine (Xn, UX1, NUX1)
        Xn=np.concatenate((X1,UX1,NUX1))
        Ynd=np.concatenate((Ynd,YO))
        dropI=np.concatenate((dropI,range(np.shape(X)[0],np.shape(Xn)[0])))
        #Thus (Xn,Ynd) is the new data that will be subjected to label propagation
        ################################### so random label removal works!
        #Step 4.2: Apply Label propagation to [X,UX,NUX]
        lp_model = LabelSpreading(kernel='rbf',gamma=20,alpha=0.1) #high alpha means labels can change till stable
        # Train the model
        #np.savetxt('test.out', Ynd, delimiter=',')
        lp_model.fit(Xn, Ynd)
        #Step 4.3: Predict Model label for the dropped samples
        pred = lp_model.transduction_[dropI]
        pred=pred.astype(int)
        #print(np.shape(pred))
        #print('Predictions=',pred)#pred[np.shape(X)[0]:np.shape(X)[0]+np.shape(UX)[0]])
        #print('Groundtruth=',UY)
        ############Predictions to be measured#######################
        # Keep only the predictions corresponding to the UX block.
        predictions=pred[ns:(ns+np.shape(UY)[0])]
        #print(np.shape(predictions)[0])
        pred_labels=np.zeros((np.shape(UY)[0],7))
        for i in range(np.shape(predictions)[0]):
            pred_labels[i]=decimal_to_bin(predictions[i])
        #Step 4.4: Compute accuracy, for predictions
        # Modify more multilabel metrics here
        prec[cp][cn], rec[cp][cn], fb[cp][cn]=return_metrics(true_labels,pred_labels)
        dropI=[]
fb
# Average each metric over the 20 random repeats.
pp=np.mean(prec,axis=1)
rr=np.mean(rec,axis=1)
ff=np.mean(fb,axis=1)
print(np.shape(pp))
#Finally generate the plots per cluster
plt.figure(figsize=(8,6))
plt.plot(dropP,pp)
plt.xlabel('Percentage of Dropped Labels')
plt.ylabel('Precision of Labels')
plt.ylim(0,1)
plt.show()
#Finally generate the plots per cluster
plt.figure(figsize=(8,6))
plt.plot(dropP,rr)
plt.xlabel('Percentage of Dropped Labels')
plt.ylabel('Recall of Labels')
plt.ylim(0,1)
plt.show()
#Finally generate the plots per cluster
plt.figure(figsize=(8,6))
plt.plot(dropP,ff)
plt.xlabel('Percentage of Dropped Labels')
plt.ylabel('Fbeta-score Labels')
plt.ylim(0,1)
plt.show()
def return_per_class_metrics(y_true, y_pred):
    """Compute per-class precision, recall and F-beta score.

    Parameters
    ----------
    y_true, y_pred : array-like of shape (n_samples, n_classes)
        Binary (0/1) multilabel indicator arrays.

    Returns
    -------
    (p, r, Fbeta) : three np.ndarray of shape (n_classes,)
        Precision, recall and F-beta per class, with beta^2 = 4 so recall
        is weighted higher than precision.

    Notes
    -----
    Generalized from the original hard-coded 7 classes to any number of
    columns (backward-compatible for 7-class input).  A small epsilon
    guards every division against zero denominators.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    n_classes = y_true.shape[1]
    p = np.zeros(n_classes)
    r = np.zeros(n_classes)
    Fbeta = np.zeros(n_classes)
    eps = 0.000001  # avoids division by zero when a class has no positives
    beta = 2 * 2    # beta^2 with beta = 2
    for i in range(n_classes):
        y_t = y_true[:, i]
        y_p = y_pred[:, i]
        tp = np.sum(y_t * y_p)          # true positives
        fp = np.sum((y_p - y_t) > 0)    # predicted 1, truth 0
        fn = np.sum((y_t - y_p) > 0)    # predicted 0, truth 1
        p[i] = tp / (tp + fp + eps)
        r[i] = tp / (tp + fn + eps)
        Fbeta[i] = (1 + beta) * (p[i] * r[i]) / (beta * p[i] + r[i] + eps)
    return p, r, Fbeta
#Step 4.1: LP averaged across each class separately
dropP=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8]#Fraction of population to be randomly dropped
# Metric accumulators, shape (len(dropP), n_repeats, n_classes) = (8, 20, 7).
prec=np.zeros((8,20,7))
rec=np.zeros((8,20,7))
fb=np.zeros((8,20,7))
n=np.shape(X)[0]
########################################################################
#In each loop, randomly drop (dropP) number of samples from X, label propagate on [X, UX, NUX], test on [UX]
for cn in range(20):
    print(cn)
    Xn=np.copy(X1)
    for cp,drop in enumerate(dropP):
        Ynd=np.copy(Y)
        #Generate 1% false labels and check
        for ni in range(28):
            r_false=randint(0,np.shape(Y)[0]-1)
            Ynd[r_false]=false_label_return(Y[r_false]) #One label flipped randomly
        ns=np.round(drop*n).astype(int)
        #print('num of samples dropped=',ns)
        dropI=np.floor(np.random.rand(ns)*n).astype(int)
        # -1 marks a sample as unlabelled for LabelSpreading.
        Ynd[dropI]=-1
        #Now combine (Xn, UX1, NUX1)
        Xn=np.concatenate((X1,UX1,NUX1))
        Ynd=np.concatenate((Ynd,YO))
        dropI=np.concatenate((dropI,range(np.shape(X)[0],np.shape(Xn)[0])))
        #Thus (Xn,Ynd) is the new data that will be subjected to label propagation
        ################################### so random label removal works!
        #Step 4.2: Apply Label propagation to [X,UX,NUX]
        lp_model = LabelSpreading(kernel='rbf',gamma=20,alpha=0.1) #high alpha means labels can change till stable
        # Train the model
        #np.savetxt('test.out', Ynd, delimiter=',')
        lp_model.fit(Xn, Ynd)
        #Step 4.3: Predict Model label for the dropped samples
        pred = lp_model.transduction_[dropI]
        pred=pred.astype(int)
        #print(np.shape(pred))
        #print('Predictions=',pred)#pred[np.shape(X)[0]:np.shape(X)[0]+np.shape(UX)[0]])
        #print('Groundtruth=',UY)
        ############Predictions to be measured#######################
        # Keep only the predictions corresponding to the UX block.
        predictions=pred[ns:(ns+np.shape(UY)[0])]
        #print(np.shape(predictions)[0])
        pred_labels=np.zeros((np.shape(UY)[0],7))
        for i in range(np.shape(predictions)[0]):
            pred_labels[i]=decimal_to_bin(predictions[i])
        #Step 4.4: Compute accuracy, for predictions
        # Modify more multilabel metrics here
        prec[cp][cn], rec[cp][cn], fb[cp][cn]=return_per_class_metrics(true_labels,pred_labels)
        dropI=[]
# Average each per-class metric over the 20 repeats -> shape (8, 7).
p=np.mean(prec,axis=1)
r=np.mean(rec,axis=1)
f=np.mean(fb,axis=1)
print(np.round(p,2))
#Finally generate the plots per cluster
plt.figure(figsize=(8,6))
plt.plot(dropP,p[:,0],label='Opacity')
plt.plot(dropP,p[:,1],label='DR')
plt.plot(dropP,p[:,2],label='Glaucoma')
plt.plot(dropP,p[:,3],label='ME')
plt.plot(dropP,p[:,4],label='MD')
plt.plot(dropP,p[:,5],label='RVO')
plt.plot(dropP,p[:,6],label='Normal')
plt.plot(dropP,pp,label='Overall')
plt.xlabel('Percentage of Dropped Labels')
plt.ylabel('Precision of Labels')
plt.ylim(0,1)
plt.legend()
plt.show()
#Finally generate the plots per cluster
plt.figure(figsize=(8,6))
plt.plot(dropP,r[:,0],label='Opacity')
plt.plot(dropP,r[:,1],label='DR')
plt.plot(dropP,r[:,2],label='Glaucoma')
plt.plot(dropP,r[:,3],label='ME')
plt.plot(dropP,r[:,4],label='MD')
plt.plot(dropP,r[:,5],label='RVO')
plt.plot(dropP,r[:,6],label='Normal')
plt.plot(dropP,rr,label='Overall')
plt.xlabel('Percentage of Dropped Labels')
plt.ylabel('Recall of Labels')
plt.ylim(0,1)
# FIX: the recall figure was missing its legend and show() call, unlike the
# precision and Fbeta figures below/above.
plt.legend()
plt.show()
#Finally generate the plots per cluster
plt.figure(figsize=(8,6))
plt.plot(dropP,f[:,0],label='Opacity')
plt.plot(dropP,f[:,1],label='DR')
plt.plot(dropP,f[:,2],label='Glaucoma')
plt.plot(dropP,f[:,3],label='ME')
plt.plot(dropP,f[:,4],label='MD')
plt.plot(dropP,f[:,5],label='RVO')
plt.plot(dropP,f[:,6],label='Normal')
plt.plot(dropP,ff,label='Overall')
plt.xlabel('Percentage of Dropped Labels')
plt.ylabel('Fbeta of Labels')
plt.ylim(0,1)
plt.legend()
plt.show()
###Output
_____no_output_____ |
Face-Mask-Detector/Training.ipynb | ###Markdown
Install the packages using the following commands : pip3 install tensorflow pip3 install opencv-python pip3 install sklearn scikit-learn auto-sklearn pip3 install pygame imutils pip3 install numpy matplotlib pip3 install keras Import all necessary packages to train the Face Mask Detector Model
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
###Output
_____no_output_____
###Markdown
Initialize the initial learning rate, the number of epochs to train for, and the batch size. Also set the directory of the folder containing the datasets. Grab the list of images in our dataset directory, then initialize the list of data (i.e., images) and class labels.
###Code
# Training hyperparameters: initial learning rate, epochs, and batch size.
INIT_LR = 1e-4
EPOCHS = 75
BS = 32
# Dataset root with one sub-folder per class.
DIRECTORY = r"dataset"
CATEGORIES = ["with_mask", "without_mask"]
print("[INFO] loading images...")
data = []
labels = []
for category in CATEGORIES:
    path = os.path.join(DIRECTORY, category)
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        # Load at the MobileNetV2 input size and apply its preprocessing.
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        labels.append(category)
# One-hot encode the two class labels.
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
data = np.array(data, dtype="float32")
labels = np.array(labels)
# Stratified 80/20 train/test split with a fixed seed for reproducibility.
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.20, stratify=labels, random_state=42)
###Output
[INFO] loading images...
###Markdown
construct the training image generator for data augmentation
###Code
# Training-time data augmentation: random rotation, zoom, shifts, shear and
# horizontal flips to make the detector robust to pose/position variation.
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
###Output
_____no_output_____
###Markdown
load the MobileNetV2 network, ensuring the head FC layer sets are left off
###Code
# Load MobileNetV2 pre-trained on ImageNet, without its classification head.
baseModel = MobileNetV2(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))
#construct the head of the model that will be placed on top of the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)  # regularization against overfitting
headModel = Dense(2, activation="softmax")(headModel)  # [with_mask, without_mask] probabilities
model = Model(inputs=baseModel.input, outputs=headModel)
#loop over all layers in the base model and freeze them so they will *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
###Output
WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.
###Markdown
compile our model
###Code
print("[INFO] compiling model...")
# FIX: `lr` is a deprecated alias for `learning_rate` in tf.keras optimizers.
# The decay term linearly reduces the learning rate over the training run.
opt = Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
###Output
[INFO] compiling model...
###Markdown
train the head of the network
###Code
print("[INFO] training head...")
# Train only the new head; the MobileNetV2 base was frozen above.
# NOTE(review): the training log shows a "ran out of data" warning after
# epoch 1; validation_steps combined with in-memory (testX, testY) may
# exhaust the validation data -- confirm whether validation_steps is needed.
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))
###Output
[INFO] training head...
Epoch 1/75
184/184 [==============================] - ETA: 0s - loss: 0.2362 - accuracy: 0.9195WARNING:tensorflow:Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches (in this case, 46 batches). You may need to use the repeat() function when building your dataset.
184/184 [==============================] - 156s 842ms/step - loss: 0.2362 - accuracy: 0.9195 - val_loss: 0.0860 - val_accuracy: 0.9736
Epoch 2/75
184/184 [==============================] - 148s 804ms/step - loss: 0.0829 - accuracy: 0.9770
Epoch 3/75
184/184 [==============================] - 122s 660ms/step - loss: 0.0690 - accuracy: 0.9809
Epoch 4/75
184/184 [==============================] - 119s 646ms/step - loss: 0.0620 - accuracy: 0.9813
Epoch 5/75
184/184 [==============================] - 125s 677ms/step - loss: 0.0575 - accuracy: 0.9842
Epoch 6/75
184/184 [==============================] - 125s 678ms/step - loss: 0.0540 - accuracy: 0.9860
Epoch 7/75
184/184 [==============================] - 126s 685ms/step - loss: 0.0458 - accuracy: 0.9867
Epoch 8/75
184/184 [==============================] - 124s 673ms/step - loss: 0.0473 - accuracy: 0.9860
Epoch 9/75
184/184 [==============================] - 121s 656ms/step - loss: 0.0439 - accuracy: 0.9871
Epoch 10/75
184/184 [==============================] - 127s 691ms/step - loss: 0.0441 - accuracy: 0.9871
Epoch 11/75
184/184 [==============================] - 124s 672ms/step - loss: 0.0431 - accuracy: 0.9871
Epoch 12/75
184/184 [==============================] - 123s 667ms/step - loss: 0.0424 - accuracy: 0.9862
Epoch 13/75
184/184 [==============================] - 122s 664ms/step - loss: 0.0441 - accuracy: 0.9864
Epoch 14/75
184/184 [==============================] - 115s 622ms/step - loss: 0.0401 - accuracy: 0.9891
Epoch 15/75
184/184 [==============================] - 120s 650ms/step - loss: 0.0382 - accuracy: 0.9867
Epoch 16/75
184/184 [==============================] - 113s 612ms/step - loss: 0.0375 - accuracy: 0.9872
Epoch 17/75
184/184 [==============================] - 110s 595ms/step - loss: 0.0354 - accuracy: 0.9891
Epoch 18/75
184/184 [==============================] - 113s 615ms/step - loss: 0.0372 - accuracy: 0.9881
Epoch 19/75
184/184 [==============================] - 118s 640ms/step - loss: 0.0305 - accuracy: 0.9910
Epoch 20/75
184/184 [==============================] - 121s 659ms/step - loss: 0.0337 - accuracy: 0.9898
Epoch 21/75
184/184 [==============================] - 119s 648ms/step - loss: 0.0328 - accuracy: 0.9896
Epoch 22/75
184/184 [==============================] - 113s 614ms/step - loss: 0.0340 - accuracy: 0.9896
Epoch 23/75
184/184 [==============================] - 117s 634ms/step - loss: 0.0299 - accuracy: 0.9881
Epoch 24/75
184/184 [==============================] - 120s 652ms/step - loss: 0.0319 - accuracy: 0.9903
Epoch 25/75
184/184 [==============================] - 112s 606ms/step - loss: 0.0312 - accuracy: 0.9891
Epoch 26/75
184/184 [==============================] - 114s 617ms/step - loss: 0.0318 - accuracy: 0.9903
Epoch 27/75
184/184 [==============================] - 116s 631ms/step - loss: 0.0261 - accuracy: 0.9912
Epoch 28/75
184/184 [==============================] - 120s 652ms/step - loss: 0.0256 - accuracy: 0.9913
Epoch 29/75
184/184 [==============================] - 110s 597ms/step - loss: 0.0276 - accuracy: 0.9908
Epoch 30/75
184/184 [==============================] - 114s 617ms/step - loss: 0.0277 - accuracy: 0.9906
Epoch 31/75
184/184 [==============================] - 116s 632ms/step - loss: 0.0257 - accuracy: 0.9898
Epoch 32/75
184/184 [==============================] - 121s 658ms/step - loss: 0.0260 - accuracy: 0.9908
Epoch 33/75
184/184 [==============================] - 114s 618ms/step - loss: 0.0287 - accuracy: 0.9901
Epoch 34/75
184/184 [==============================] - 114s 619ms/step - loss: 0.0231 - accuracy: 0.9918
Epoch 35/75
184/184 [==============================] - 118s 640ms/step - loss: 0.0250 - accuracy: 0.9918
Epoch 36/75
184/184 [==============================] - 118s 639ms/step - loss: 0.0268 - accuracy: 0.9903
Epoch 37/75
184/184 [==============================] - 111s 602ms/step - loss: 0.0250 - accuracy: 0.9906
Epoch 38/75
184/184 [==============================] - 114s 619ms/step - loss: 0.0264 - accuracy: 0.9918
Epoch 39/75
184/184 [==============================] - 117s 637ms/step - loss: 0.0223 - accuracy: 0.9925
Epoch 40/75
184/184 [==============================] - 117s 636ms/step - loss: 0.0233 - accuracy: 0.9908
Epoch 41/75
184/184 [==============================] - 112s 609ms/step - loss: 0.0212 - accuracy: 0.9923
Epoch 42/75
184/184 [==============================] - 115s 625ms/step - loss: 0.0207 - accuracy: 0.9918
Epoch 43/75
184/184 [==============================] - 119s 644ms/step - loss: 0.0250 - accuracy: 0.9910
Epoch 44/75
184/184 [==============================] - 122s 662ms/step - loss: 0.0243 - accuracy: 0.9910
Epoch 45/75
184/184 [==============================] - 111s 605ms/step - loss: 0.0210 - accuracy: 0.9929
Epoch 46/75
184/184 [==============================] - 115s 627ms/step - loss: 0.0220 - accuracy: 0.9908
Epoch 47/75
184/184 [==============================] - 118s 639ms/step - loss: 0.0209 - accuracy: 0.9918
Epoch 48/75
184/184 [==============================] - 111s 605ms/step - loss: 0.0184 - accuracy: 0.9937
Epoch 49/75
184/184 [==============================] - 112s 608ms/step - loss: 0.0228 - accuracy: 0.9915
Epoch 50/75
184/184 [==============================] - 115s 625ms/step - loss: 0.0216 - accuracy: 0.9927
Epoch 51/75
184/184 [==============================] - 119s 645ms/step - loss: 0.0178 - accuracy: 0.9922
Epoch 52/75
184/184 [==============================] - 109s 593ms/step - loss: 0.0200 - accuracy: 0.9930
Epoch 53/75
184/184 [==============================] - 111s 605ms/step - loss: 0.0229 - accuracy: 0.9915
Epoch 54/75
184/184 [==============================] - 115s 623ms/step - loss: 0.0162 - accuracy: 0.9940
Epoch 55/75
184/184 [==============================] - 106s 577ms/step - loss: 0.0193 - accuracy: 0.9932
Epoch 56/75
184/184 [==============================] - 110s 599ms/step - loss: 0.0189 - accuracy: 0.9944
Epoch 57/75
184/184 [==============================] - 112s 608ms/step - loss: 0.0178 - accuracy: 0.9925
Epoch 58/75
184/184 [==============================] - 116s 629ms/step - loss: 0.0189 - accuracy: 0.9934
Epoch 59/75
184/184 [==============================] - 112s 608ms/step - loss: 0.0177 - accuracy: 0.9927
Epoch 60/75
184/184 [==============================] - 109s 593ms/step - loss: 0.0178 - accuracy: 0.9935
Epoch 61/75
184/184 [==============================] - 112s 607ms/step - loss: 0.0172 - accuracy: 0.9942
Epoch 62/75
184/184 [==============================] - 116s 627ms/step - loss: 0.0162 - accuracy: 0.9942
Epoch 63/75
184/184 [==============================] - 112s 609ms/step - loss: 0.0164 - accuracy: 0.9940
Epoch 64/75
184/184 [==============================] - 109s 593ms/step - loss: 0.0201 - accuracy: 0.9923
Epoch 65/75
184/184 [==============================] - 119s 644ms/step - loss: 0.0180 - accuracy: 0.9930
Epoch 66/75
184/184 [==============================] - 133s 724ms/step - loss: 0.0172 - accuracy: 0.9934
Epoch 67/75
184/184 [==============================] - 125s 681ms/step - loss: 0.0150 - accuracy: 0.9946
Epoch 68/75
184/184 [==============================] - 121s 658ms/step - loss: 0.0159 - accuracy: 0.9942
Epoch 69/75
184/184 [==============================] - 121s 658ms/step - loss: 0.0144 - accuracy: 0.9949
Epoch 70/75
184/184 [==============================] - 121s 656ms/step - loss: 0.0137 - accuracy: 0.9954
Epoch 71/75
184/184 [==============================] - 118s 639ms/step - loss: 0.0168 - accuracy: 0.9946
Epoch 72/75
184/184 [==============================] - 103s 558ms/step - loss: 0.0149 - accuracy: 0.9942
Epoch 73/75
184/184 [==============================] - 104s 564ms/step - loss: 0.0143 - accuracy: 0.9951
Epoch 74/75
184/184 [==============================] - 106s 575ms/step - loss: 0.0155 - accuracy: 0.9946
Epoch 75/75
###Markdown
serialize the model to disk
###Code
print("[INFO] saving mask detector model...")
# Serialize the trained model to disk in HDF5 format.
model.save("mask_detector.model", save_format="h5")
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
# NOTE(review): if validation only ran on some epochs (see the training log),
# H.history["val_loss"]/["val_accuracy"] may be shorter than N and the two
# val plots will fail with a length mismatch -- confirm against the run.
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png")
###Output
[INFO] saving mask detector model...
|
notebooks/omero_upload.ipynb | ###Markdown
Notebook for uploading a data set with description into an OMERO server
###Code
import os
import sys
import getpass
from pyomero_upload.pyomero_upload import PyOmeroUploader
# configure connection settings
server = 'demo.openmicroscopy.org'
user = 'USER'
# Prompt interactively so the password is never stored in the notebook.
password = getpass.getpass()
uploader = PyOmeroUploader(user, password, server)
# defining data to upload
data_path = '/home/jovyan/work/test_data'
dataset_name = 'Upload from Jupyter'
# data upload with hypercubes
# The 'True' argument instructs the uploader to process and deposit image files as hypercubes
dId = uploader.launch_upload(dataset_name, data_path, True)
# The returned value appears to be the id of the created dataset (used in
# the query cell below).
print(dId)
# searching for the upload
# should be retrievable by the dataset id (dId) returned above
searcher = uploader.searcher()
import omero_data_transfer.omero_data_broker as data_broker
from omero import sys as om_sys
from omero import rtypes
#query = "select d from Dataset d where d.name = :dname"
query = "select d from Dataset d where d.id = :did"
params = om_sys.Parameters()
# NOTE(review): the first map assignment is immediately overwritten by the
# second; only the :did parameter is actually used by the active query.
params.map = {'dname' : rtypes.rstring(dataset_name)}
params.map = {'did' : rtypes.rlong(dId)}
datasets = searcher.find_objects_by_query(query, params)
dataset = datasets[0]
#dataset = datasets
print(dataset.name.val)
###Output
_____no_output_____ |
emulator_examples/emulator_RF_sklearn.ipynb | ###Markdown
Emulator: Random Forest A [Random Forest (RF)](https://builtin.com/data-science/random-forest-algorithm) regressor is an example of an ensemble learning method. During training, multiple decision trees are generated. Each tree is trained on a different subset of the data. Each tree overfits on the different subsamples of data and features. However, by averaging over all the different trees, the overall variance of the forest is lower. Index1. [Import packages](imports)2. [Load data](loadData) 1. [Load train data](loadTrainData) 2. [Load test data](loadTestData)3. [Emulator method](emulator) 1. [Scale data](scaleData) 2. [Train emulator](trainEmu) 3. [Predict on test data](predEmu) 4. [Plot results](plotEmu) 1. Import packages
###Code
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import pylab
%config InlineBackend.figure_format = 'retina'
matplotlib.rcParams['figure.dpi'] = 80
# Use large fonts throughout the notebook's figures.
textsize = 'x-large'
params = {'legend.fontsize': 'large',
          'figure.figsize': (5, 4),
          'axes.labelsize': textsize,
          'axes.titlesize': textsize,
          'xtick.labelsize': textsize,
          'ytick.labelsize': textsize}
pylab.rcParams.update(params)
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
###Output
_____no_output_____
###Markdown
2. Load data 2.1. Load train data The training set here is the correlation function of galaxy clustering, for different cosmological model. Here we show a 3-free-parameter model; you can also play with a simpler 1-free-parameter model by commenting in the '1d' data (be sure to do this with the test set too).
###Code
# Pick the training set; the commented paths are smaller alternatives
# (including the 1-free-parameter '1d' variant mentioned in the text).
path_train = '../data/cosmology_train_big.pickle'
#path_train = '../data/cosmology_train.pickle'
#path_train = '../data/cosmology_train_1d.pickle'
with open(path_train, 'rb') as input_file:
    data_train = pickle.load(input_file)
input_train = data_train['input_data']
number_train = input_train.shape[0]
print("Number of datapoints:", number_train)
output_train = data_train['output_data']
# Subtract 1 to exclude the `object_id` column from the counts.
n_params = input_train.shape[1]-1
n_values = output_train.shape[1]-1
print("Number of input parameters:", n_params) # remove the `object_id` column
print("Number of output values:", n_values) # remove the `object_id` column
xs_train = np.array(input_train.drop(columns=['object_id']))
ys_train = np.array(output_train.drop(columns=['object_id']))
extra_train = data_train['extra_input']
# presumably the separation values r of the correlation function -- confirm
r_vals = extra_train['r_vals']
###Output
_____no_output_____
###Markdown
2.2. Load test data
###Code
# Pick the test set; the commented path is the 1-free-parameter variant
# (use it together with the matching train file).
path_test = '../data/cosmology_test.pickle'
#path_test = '../data/cosmology_test_1d.pickle'
# FIX: the file handle was named `input`, shadowing the builtin; renamed to
# `input_file` for safety and for consistency with the train-loading cell.
with open(path_test, 'rb') as input_file:
    data_test = pickle.load(input_file)
input_test = data_test['input_data']
number_test = input_test.shape[0]
print("Number of datapoints:", number_test)
output_test = data_test['output_data']
print("Number of input parameters:", input_test.shape[1]-1) # remove the `object_id` column
print("Number of output values:", output_test.shape[1]-1) # remove the `object_id` column
xs_test = np.array(input_test.drop(columns=['object_id']))
ys_test = np.array(output_test.drop(columns=['object_id']))
###Output
_____no_output_____
###Markdown
3. Emulator method 3.1. Scale data Let's first scale our input parameters, to make training easier:
###Code
scaler = StandardScaler()
scaler.fit(xs_train)
xs_train = scaler.transform(xs_train)
xs_test = scaler.transform(xs_test)
###Output
_____no_output_____
###Markdown
Let's also normalize the output data by the mean of the training data, so it's easier to emulate (don't forget to undo the normalization after!):
###Code
# Normalize each output bin by its training-set mean so every bin is O(1).
y_mean = np.mean(ys_train, axis=0)
# NOTE(review): bins where y_mean is near zero (xi(r) changes sign) make this
# normalization blow up -- likely the hard-to-emulate r-bin noted later; confirm.
ys_train = ys_train/y_mean
ys_test = ys_test/y_mean
###Output
_____no_output_____
###Markdown
3.2. Train emulator We will use `scikit-learn`'s `RandomForestRegressor` to build our emulator. We train a separate regressor for each output value. The main hyperparameter to tune is `n_estimators`, the number of trees in the forest; more trees reduce the variance of the predictions at the cost of training time.
###Code
# One independent random forest per output r-bin, kept in an object array.
regrs = np.empty(n_values, dtype=object)
for j in range(n_values):
    ys_train_r = ys_train[:,j]
    ys_test_r = ys_test[:,j]
    # n_jobs=-1 parallelizes tree fitting over all available cores.
    # NOTE(review): no random_state is set, so these scores vary run to run --
    # confirm whether reproducibility matters here.
    regr = RandomForestRegressor(n_estimators=1000, n_jobs=-1).fit(xs_train, ys_train_r)
    # R^2 coefficient of determination on the held-out test set (1.0 is perfect).
    score = regr.score(xs_test, ys_test_r)
    print(f"Value {j} score:", score)
    regrs[j] = regr
###Output
_____no_output_____
###Markdown
These values should be as close to 1 as possible. Try tuning the hyperparameter `n_estimators` to get better results on your test set. 3.3. Predict on test data Now we can predict on our test dataset:
###Code
ys_predict = np.zeros((number_test, n_values))
for j in range(n_values):
ys_predict_r = regrs[j].predict(xs_test)
ys_predict[:,j] = ys_predict_r
###Output
_____no_output_____
###Markdown
Undo all the normalizations:
###Code
ys_train = ys_train*y_mean
ys_test = ys_test*y_mean
ys_predict = ys_predict*y_mean
###Output
_____no_output_____
###Markdown
3.4. Plot resultsWe compare our predictions to the truth (choosing a subset for visual clarity):
###Code
# Plot a random 20% subset of the test set for visual clarity.
n_plot = int(0.2*number_test)
# replace=False so the same curve is never drawn twice (np.random.choice
# samples WITH replacement by default).
idxs = np.random.choice(number_test, n_plot, replace=False)
color_idx = np.linspace(0, 1, n_plot)
colors = np.array([plt.cm.rainbow(c) for c in color_idx])
plt.figure(figsize=(8,6))
for i, idx in enumerate(idxs):
    ys_test_plot = ys_test[idx]
    ys_predict_plot = ys_predict[idx]
    # Only label the first pair so the legend shows one entry per series.
    label_test = 'truth' if i == 0 else None
    label_predict = 'emu_prediction' if i == 0 else None
    plt.plot(r_vals[:n_values], ys_test_plot, alpha=0.8, label=label_test,
             marker='o', markerfacecolor='None', ls='None', color=colors[i])
    plt.plot(r_vals[:n_values], ys_predict_plot, alpha=0.8, label=label_predict,
             color=colors[i])
plt.xlabel('$r$')
plt.ylabel(r'$\xi(r)$')
plt.legend()
###Output
_____no_output_____
###Markdown
We plot the fractional error of all test set statistics:
###Code
# Fractional error of every test-set prediction, one curve per test model.
color_idx = np.linspace(0, 1, number_test)
colors = np.array([plt.cm.rainbow(c) for c in color_idx])
plt.figure(figsize=(8,6))
# Kept as an array for the spread-summary plot in the next cell.
frac_errs = np.empty((number_test, n_values))
for i in range(number_test):
    frac_err = (ys_predict[i]-ys_test[i])/ys_test[i]
    frac_errs[i] = frac_err
    plt.plot(r_vals, frac_err, alpha=0.8, color=colors[i])
plt.axhline(0.0, color='k')
plt.xlabel('$r$')
plt.ylabel(r'fractional error')
###Output
_____no_output_____
###Markdown
The emulator is sort of working but it's not great! In particular it struggles significantly with one of the r-bins. This r-bin is likely more difficult to emulate because it contains a large range of values, both positive and negative.
###Code
color_idx = np.linspace(0, 1, number_test)
colors = np.array([plt.cm.rainbow(c) for c in color_idx])
plt.figure(figsize=(8,6))
frac_errs_stdev = np.std(frac_errs, axis=0)
plt.plot(r_vals, frac_errs_stdev, alpha=0.8, color='blue', label='standard deviation')
frac_errs_p16 = np.percentile(frac_errs, 16, axis=0)
frac_errs_p84 = np.percentile(frac_errs, 84, axis=0)
frac_errs_percentile = np.mean([np.abs(frac_errs_p16), np.abs(frac_errs_p84)], axis=0)
plt.plot(r_vals, frac_errs_percentile, alpha=0.8, color='green', label="mean of 16/84 percentile")
plt.xlabel('$r$')
plt.ylabel(r'spread of fractional errors')
plt.legend()
###Output
_____no_output_____ |
miscellaneous_notebooks/Distributions_of_Sums/Chernoff_Bound.ipynb | ###Markdown
Chernoff Bound If the form of a distribution is intractable in that it is difficult to find exact probabilities by integration, then good estimates and bounds become important. Bounds on the tails of the distribution of a random variable help us quantify roughly how close to the mean the random variable is likely to be. We already know two such bounds. Let $X$ be a random variable with expectation $\mu$ and SD $\sigma$. Markov's Bound on the Right Hand Tail If $X$ is non-negative, $$P(X \ge c) ~ \le ~ \frac{\mu}{c}$$ Chebychev's Bound on Two Tails $$P(\lvert X - \mu\rvert \ge c) ~ \le ~ \frac{\sigma^2}{c^2}$$Moment generating functions can help us improve upon these bounds in many cases. In what follows, we will assume that the moment generating function of $X$ is finite over the whole real line. If it is finite only over a smaller interval around 0, the calculations of the mgf below should be confined to that interval. Chernoff Bound on the Right Tail Observe that if $g$ is an increasing function, then the event $\{ X \ge c \}$ is the same as the event $\{ g(X) \ge g(c)\}$. For any fixed $t > 0$, the function defined by $g(x) = e^{tx}$ is increasing as well as non-negative. So for each $t > 0$,\begin{align*}P(X \ge c) ~ &= P(e^{tX} \ge e^{tc}) \\&\le ~ \frac{E(e^{tX})}{e^{tc}} ~~~~ \text{(Markov's bound)} \\&= ~ \frac{M_X(t)}{e^{tc}}\end{align*}This is the first step in developing a [Chernoff bound](https://en.wikipedia.org/wiki/Chernoff_bound) on the right hand tail. For the next step, notice that you can choose $t$ to be any positive number. Some choices of $t$ will give sharper bounds than others. Because these are upper bounds, the sharpest among all of the bounds will correspond to the value of $t$ that minimizes the right hand side. 
So the Chernoff bound has an *optimized* form:$$P(X \ge c) ~ \le ~ \min_{t > 0} \frac{M_X(t)}{e^{tc}}$$ Application to the Normal Distribution Suppose $X$ has the normal $(\mu, \sigma^2)$ distribution and we want to get a sense of how far $X$ can be above the mean. Fix $c > 0$. The exact chance that the value of $X$ is at least $c$ above the mean is$$P(X - \mu \ge c) ~ = ~ 1 - \Phi(c/\sigma)$$because the distribution of $X - \mu$ is normal $(0, \sigma^2)$. This exact answer looks neat and tidy, but the standard normal cdf $\Phi$ is not easy to work with analytically. Sometimes we can gain more insight from a good bound.The optimized Chernoff bound is\begin{align*}P(X- \mu \ge c) ~ &\le ~ \min_{t > 0} \frac{M_{X-\mu}(t)}{e^{tc}} \\ \\&= ~ \min_{t > 0} \frac{e^{\sigma^2t^2/2}}{e^{tc}} \\ \\&= ~ \min_{t > 0} e^{-ct + \sigma^2t^2/2}\end{align*}The curve below is the graph of $\exp(-ct + \sigma^2t^2/2)$ as a function of $t$, in the case $\sigma = 2$ and $c = 5$. The flat line is the exact probability of $P(X - \mu \ge c)$. The curve is always above the flat line: no matter what $t$ is, the bound is an upper bound. The sharpest bound corresponds to the minimizing value $t^*$ which is somewhere in the 1.2 to 1.3 range.
###Code
# HIDDEN
c = 5
sigma = 2
t_min = 0.5
t_max = 2
t = np.arange(t_min, t_max, 0.01)
bound = np.exp(-1*c*t + 0.5*((sigma*t)**2))
exact = 1 - stats.norm.cdf(2.5)
plt.plot([t_min, t_max], [exact, exact], lw=2, label = 'Exact Chance')
plt.plot(t, bound, lw=2, label = 'Bound')
plt.legend(bbox_to_anchor=(1.4, 1))
plt.xlabel('$t$');
###Output
_____no_output_____
###Markdown
To find the minimizing value of $t$ analytically, we will use the standard calculus method of minimization. But first we will simplify our calculations by a method we used when we were finding maximum likelihood estimates.**Finding the point at which a positive function is minimized is the same as finding the point at which the log of the function is minimized.** This is because $\log$ is an increasing function.So the problem reduces to finding the value of $t$ that minimizes the function $h(t) = -ct + \sigma^2t^2/2$. By differentiation, the minimizing value of $t$ solves$$c ~ = ~ \sigma^2 t^*$$and hence$$t^* ~ = ~ \frac{c}{\sigma^2}$$So the Chernoff bound is $$P(X - \mu \ge c) ~ \le ~ e^{-ct^* + \sigma^2{t^*}^2/2} ~ = ~ e^{-\frac{c^2}{2\sigma^2}}$$Compare this with the bounds we already have. Markov's bound can't be applied directly as $X - \mu$ can have negative values. Because the distribution of $X - \mu$ is symmetric about 0, Chebychev's bound becomes$$P(X - \mu \ge c ) ~ \le ~ \frac{\sigma^2}{2c^2}$$When $c$ is large, the optimized Chernoff bound is quite a bit sharper than Chebychev's. In the case $\sigma = 2$, the graph below shows the exact value of $P(X - \mu \ge c)$ as a function of $c$, along with the Chernoff and Chebychev bounds.
###Code
# HIDDEN
sigma = 2
c_min = 4
c_max = 7
c = np.arange(c_min, c_max + .01, 0.01)
chernoff = np.exp(-0.5*((c/sigma)**2))
chebychev = 0.5 * ((sigma/c)**2)
plt.plot(c, 1 - stats.norm.cdf(c/sigma), label='Exact Chance', lw=2)
plt.plot(c, chernoff, lw=2, label='Chernoff')
plt.plot(c, chebychev, lw=2, label='Chebychev')
plt.xlim(c_min, c_max)
plt.xlabel('$c$')
plt.legend()
plt.title('Bounds on $P(X - \mu \geq c)$ where $X$ is normal $(\mu, 2^2)$');
###Output
_____no_output_____ |
examples/pandas_multi-index_Tutorial.ipynb | ###Markdown
Tutorial on using pandas multi-index dataframes Use processed TTU dataset for demonstration purposes. The dataset can be obtained by running the notebook "process_TTU_tower.ipynb" which can be found in the [a2e-mmc/assessment repository](https://github.com/a2e-mmc/assessment) (currently only in the dev branch)
###Code
datadir = './'
TTUdata = 'TTU_tilt_corrected_20131108-09.csv'
###Output
_____no_output_____
###Markdown
1. Loading .csv file into multi-index dataframe .csv files can be read directly into a multi-index dataframe by using the `index_col` argument of `read_csv()`
###Code
df = pd.read_csv(os.path.join(datadir,TTUdata),parse_dates=True,index_col=['datetime','height'])
df.head()
###Output
_____no_output_____
###Markdown
2. Extracting index values To extract index values, it is advisable to use `index.get_level_values()` (this function also works for single index dataframes). You could also use `index.levels[]`, but this might cause some issues when working with subsets or copies of dataframes.
###Code
df.index.get_level_values(0) # specify level by index position
df.index.get_level_values('height') # specify level by index label
df.index.levels[0]
###Output
_____no_output_____
###Markdown
Note that `index.get_level_values()` returns the full index, whereas `index.levels[]` returns the unique values
###Code
df.index.get_level_values(0).size, df.index.levels[0].size
###Output
_____no_output_____
###Markdown
3. Conversion to single index and back Use `set_index()` and `reset_index()` to go back and forth between single index and multi-index dataframes From multi-index to single index ...
###Code
# Reset all indices
df_noindex = df.reset_index()
df_noindex.head()
# Reset a specific index
df_single_index = df.reset_index(level=1) # level can be specified by position or label ('height')
df_single_index.head()
###Output
_____no_output_____
###Markdown
... and back
###Code
df = df_noindex.set_index(['datetime','height'])
df.head()
# append 'height' to existing index, otherwise 'datetime' index will be removed
df = df_single_index.set_index('height',append=True)
df.head()
###Output
_____no_output_____
###Markdown
4. Accessing slices and cross-sections Use `xs()` and `loc[]` to access slices or cross-sections of a multi-index. Note that `xs()` is used to access a single value of a certain index and that it removes that particular index level, whereas `loc[]` is more general but does not remove indices with a single entry.
###Code
df_xs = df.xs(0.9,level='height')
df_xs.head()
df_loc = df.loc[('2013-11-08 00:00:00',[0.9,2.4,10.1]),['u','v','w']]
df_loc.head()
# To access an entire index using loc, use "slice(None)" instead of ":",
# Columns can be accessed all together using ":"
df_loc = df.loc[(slice(None),0.9),:]
df_loc.head()
###Output
_____no_output_____
###Markdown
Accessing columns works the same as with single index dataframes
###Code
df[['u','v','w']].head()
###Output
_____no_output_____
###Markdown
5. Stacking and unstacking `stack()` and `unstack()` allows to turn an index level into a new column level and vice versa.**These tools are particularly useful to compute time statistics (mean, variance, covariance) or interpolate over height or times (see advanced examples below).**
###Code
# Extract a subset to make examples more clear
times = pd.date_range('2013-11-08 00:00:00','2013-11-08 00:00:03',freq='1s')
heights = [0.9,2.4,4.0]
df_subset = df.loc[(times,heights),['u','v']]
df_subset
###Output
_____no_output_____
###Markdown
Unstacking an index level to column level ...
###Code
# Without any argument, unstack() converts the last index into a column level
unstacked_height = df_subset.unstack()
unstacked_height
# The level that needs to be unstacked can be specified
unstacked_time = df_subset.unstack(level='datetime')
unstacked_time
###Output
_____no_output_____
###Markdown
... and back
###Code
unstacked_height.stack()
###Output
_____no_output_____
###Markdown
Note that `stack()` takes the last column level and appends it to the index. In the case of `unstacked_time`, stacking 'datetime' back as an index will reverse the original order of indices. This can be set back to the original form by using `reorder_levels()` and then `sort_index()`.
###Code
unstacked_time.stack()
unstacked_time.stack().reorder_levels(order=['datetime','height']).sort_index()
###Output
_____no_output_____
###Markdown
Advanced examples using `unstack()` and `stack()` 1. Calculate hourly means
###Code
df.head()
###Output
_____no_output_____
###Markdown
Unstack 'height' so that datetime is the only index level
###Code
unstacked = df.unstack(level=1)
###Output
_____no_output_____
###Markdown
calculate hourly averages using `resample().mean()`
###Code
df_1h = unstacked.resample('1h').mean()
df_1h.head()
###Output
_____no_output_____
###Markdown
run `stack()` so that height is again an index level
###Code
df_1h = df_1h.stack()
df_1h.head()
###Output
_____no_output_____
###Markdown
Or as a single line:
###Code
df_1h = df.unstack(level=1).resample('1h').mean().stack()
df_1h.head()
###Output
_____no_output_____
###Markdown
Note that it would be much harder to do this with a single index dataframe with 'height' as a column as you don't want to take an average over the heights. Instead, you would need to `pivot()` the table about the 'height' column, compute the hourly mean values, use `stack()` to undo the pivoting, and then use `reset_index()` and `set_index()` to arrive at the original single index form 2. Interpolate to a specified height
###Code
df.head()
###Output
_____no_output_____
###Markdown
Unstack 'datetime' so that height is the only index level
###Code
unstacked = df.unstack(level=0)
###Output
_____no_output_____
###Markdown
Interpolate to a specified height (gets appended at the end)
###Code
from scipy.interpolate import interp1d
unstacked.loc[2.0] = interp1d(unstacked.index,unstacked,axis=0)(2.0)
unstacked.tail()
###Output
_____no_output_____
###Markdown
run `stack()` so that datetime is again an index level, and reverse order of indices
###Code
df_2m = unstacked.stack().reorder_levels(order=['datetime','height']).sort_index()
df_2m.head()
###Output
_____no_output_____ |
Data Science Ass/Intro to Python Ass(Week 4 Ass).ipynb | ###Markdown
2- Write out the datatypes in python with comment Declaring Integer
###Code
#Declaring age as an integer
age = 23
print(age)
###Output
23
###Markdown
Declaring Float
###Code
# Declaring weight of a fish as a float
fish_weight = 43.6
print(fish_weight)
###Output
43.6
###Markdown
Declaring String
###Code
# Declaring "Adegbite Ayoade Abel" as name (a str example)
name = "Adegbite Ayoade Abel"
print(name)
###Output
Adegbite Ayoade Abel
###Markdown
Declaring List
###Code
# Declaring countries as a list
countries = ["Nigeria","Cameroon","Senegal","India","Paris"]
print(countries)
###Output
['Nigeria', 'Cameroon', 'Senegal', 'India', 'Paris']
###Markdown
Declaring Tuples
###Code
# Declaring list of bootcamps in Lagos
bootcamp_tuples = ("CodeLagos","Data Science Nigeria","AISaturday Lagos","WAAW Foundation")
print(bootcamp_tuples)
###Output
('CodeLagos', 'Data Science Nigeria', 'AISaturday Lagos', 'WAAW Foundation')
|
TryingToWorkWithTwoNetworks(Working).ipynb | ###Markdown
Copyright 2018 The TensorFlow Datasets Authors, Licensed under the Apache License, Version 2.0
###Code
!pip install -q tensorflow-datasets tensorflow
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
# tfds works in both Eager and Graph modes
tf.enable_v2_behavior()
# Construct a tf.data.Dataset
ds = tfds.load('mnist', split='train', shuffle_files=True)
# Build your input pipeline
ds = ds.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
k=0
for example in ds.take(10):
k=k+1
print(k)
image, label = example['image'], example['label']
from keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
print(test_labels.shape)
print(test_images.shape)
plt.imshow(test_images[4,:, :].astype(np.float32), cmap=plt.get_cmap("gray"))
for example in ds.take(1):
image, label = example['image'], example['label']
label2 = tf.one_hot(label,10)
im_np = image.numpy()
im_np2 = im_np[0,:,:,0]
print(im_np2.shape)
plt.imshow(image.numpy()[0,:, :, 0].astype(np.float32), cmap=plt.get_cmap("gray"))
print("Label: %d" % label[0].numpy())
print(label2[0].numpy())
print(image.shape)
print(label.shape)
def make_a():
    """Build the feature-extractor half: 28x28x1 image -> 128-dim vector."""
    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=[28, 28, 1]),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dropout(0.2),
        layers.Dense(128, activation='relu'),
    ])
def make_b():
    """Build the classifier head: 128-dim vector -> 10-way softmax."""
    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=[128,]),
        layers.Dense(10, activation='softmax'),
    ])
model_a = make_a()
model_b = make_b()
# Loss and optimizer shared by train_step below.
# NOTE(review): model_b already ends in a softmax, yet from_logits=True tells
# Keras to apply its own activation -- and BinaryCrossentropy on a 10-class
# one-hot target is unusual; CategoricalCrossentropy(from_logits=False) looks
# intended. Confirm before changing, as it alters training dynamics.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(1e-4)
tf.keras.utils.plot_model(model_a, show_shapes=True, dpi=64)
tf.keras.utils.plot_model(model_b, show_shapes=True, dpi=64)
def train_step(image, label):
    """Run one optimizer step on the stacked model (model_a -> model_b).

    Args:
        image: batch of input images, shape (batch, 28, 28, 1).
        label: one-hot encoded targets, shape (batch, 10).
    """
    with tf.GradientTape() as gen_tape:
        model_a_output = model_a(image, training=True)
        model_b_output = model_b(model_a_output, training=True)
        # Keras losses are called as loss(y_true, y_pred); the original call
        # passed (predictions, labels), silently swapping the two.
        loss = cross_entropy(label, model_b_output)
    # A single (non-persistent) tape pass can differentiate w.r.t. a nested
    # structure of variables, so both sub-models are handled at once and the
    # per-step debug prints of full gradient tensors are dropped.
    gradients_of_model_a, gradients_of_model_b = gen_tape.gradient(
        loss, [model_a.trainable_variables, model_b.trainable_variables])
    optimizer.apply_gradients(zip(gradients_of_model_a, model_a.trainable_variables))
    optimizer.apply_gradients(zip(gradients_of_model_b, model_b.trainable_variables))
epochs=10
def train(ds, epochs):
    """Train the stacked model.

    Args:
        ds: tf.data.Dataset yielding dicts with 'image' and 'label' entries.
        epochs: number of passes; each pass draws 100 batches from ds.
    """
    # The unused 'start = time.time()' timer and commented-out debug prints
    # from the original loop are removed.
    for epoch in range(epochs):
        for example in ds.take(100):
            image, label = example['image'], example['label']
            # Integer class ids -> one-hot vectors expected by the loss.
            label2 = tf.one_hot(label, depth=10)
            train_step(image, label2)
train(ds, epochs)
result = model_b(model_a(test_images))
print(result[4])
###Output
tf.Tensor([0. 0. 0. 0. 0. 0. 0. 0. 0. 1.], shape=(10,), dtype=float32)
|
targets.ipynb | ###Markdown
Protein-protein targets
###Code
targets = pd.DataFrame(targets.values())
targets.to_csv("targets.csv")
targets
###Output
_____no_output_____
###Markdown
Protein-protein target chains
###Code
target_chains = pd.DataFrame.from_dict(target_chains)
target_chains.to_csv("target_chains.csv")
target_chains
###Output
_____no_output_____
###Markdown
Templates for protein-protein target chains
###Code
target_templates = pd.DataFrame(target_templates)
target_templates.to_csv("target_templates.csv")
target_templates
import qgrid
target_templates2 = qgrid.show_grid(target_templates)
target_templates2
###Output
_____no_output_____
###Markdown
Protein-peptide targets
###Code
prot_peptide_targets = pd.DataFrame(prot_peptide_targets.values())
prot_peptide_targets.to_csv("prot_peptide_targets.csv")
prot_peptide_targets
prot_peptide_chains = pd.DataFrame.from_dict(prot_peptide_chains)
prot_peptide_chains.to_csv("prot_peptide_chains.csv")
prot_peptide_chains
###Output
_____no_output_____ |
doc/examples/visualization_gallery.ipynb | ###Markdown
Visualization GalleryThis notebook shows common visualization issues encountered in Xarray.
###Code
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load example dataset:
###Code
ds = xr.tutorial.load_dataset('air_temperature')
###Output
_____no_output_____
###Markdown
Multiple plots and map projectionsControl the map projection parameters on multiple axesThis example illustrates how to plot multiple maps and control their extentand aspect ratio.For more details see [this discussion](https://github.com/pydata/xarray/issues/1397issuecomment-299190567) on github.
###Code
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(transform=ccrs.PlateCarree(), # the data's projection
col='time', col_wrap=1, # multiplot settings
aspect=ds.dims['lon'] / ds.dims['lat'], # for a sensible figsize
subplot_kws={'projection': map_proj}) # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
ax.coastlines()
ax.set_extent([-160, -30, 5, 75])
###Output
_____no_output_____
###Markdown
Centered colormapsXarray's automatic colormaps choice
###Code
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={'label': 'K'})
ax1.set_title('Kelvins: default')
ax2.set_xlabel('')
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={'label': '°C'})
ax2.set_title('Celsius: default')
ax2.set_xlabel('')
ax2.set_ylabel('')
# The center doesn't have to be 0
air.plot(ax=ax3, center=273.15, cbar_kwargs={'label': 'K'})
ax3.set_title('Kelvins: center=273.15')
# Or it can be ignored
airc.plot(ax=ax4, center=False, cbar_kwargs={'label': '°C'})
ax4.set_title('Celsius: center=False')
ax4.set_ylabel('')
# Make it nice
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Control the plot's colorbarUse ``cbar_kwargs`` keyword to specify the number of ticks.The ``spacing`` kwarg can be used to draw proportional ticks.
###Code
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={'ticks': levels})
air2d.plot(ax=ax3, levels=levels, cbar_kwargs={'ticks': levels,
'spacing': 'proportional'})
# Show plots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Multiple lines from a 2d DataArrayUse ``xarray.plot.line`` on a 2d DataArray to plot selections asmultiple lines.See ``plotting.multiplelines`` for more details.
###Code
air = ds.air - 273.15 # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue='lat')
ax1.set_ylabel('°C')
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x='time', add_legend=False)
ax2.set_ylabel('')
# Show
plt.tight_layout()
###Output
_____no_output_____
###Markdown
`imshow()` and rasterio map projectionsUsing rasterio's projection information for more accurate plots.This example extends `recipes.rasterio` and plots the image in theoriginal map projection instead of relying on pcolormesh and a maptransformation.
###Code
da = xr.tutorial.open_rasterio("RGB.byte")
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
crs = ccrs.UTM('18N')
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb='band', transform=crs)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Parsing rasterio geocoordinatesConverting a projection's cartesian coordinates into 2D longitudes andlatitudes.These new coordinates might be handy for plotting and indexing, but it shouldbe kept in mind that a grid which is regular in projection coordinates willlikely be irregular in lon/lat. It is often recommended to work in the data'soriginal map projection (see `recipes.rasterio_rgb`).
###Code
from rasterio.warp import transform
import numpy as np
da = xr.tutorial.open_rasterio("RGB.byte")
# Compute the lon/lat coordinates with rasterio.warp.transform
ny, nx = len(da['y']), len(da['x'])
x, y = np.meshgrid(da['x'], da['y'])
# Rasterio works with 1D arrays
lon, lat = transform(da.crs, {'init': 'EPSG:4326'},
x.flatten(), y.flatten())
lon = np.asarray(lon).reshape((ny, nx))
lat = np.asarray(lat).reshape((ny, nx))
da.coords['lon'] = (('y', 'x'), lon)
da.coords['lat'] = (('y', 'x'), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim='band')
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(ax=ax, x='lon', y='lat', transform=ccrs.PlateCarree(),
cmap='Greys_r', add_colorbar=False)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Visualization GalleryThis notebook shows common visualization issues encountered in Xarray.
###Code
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load example dataset:
###Code
ds = xr.tutorial.load_dataset('air_temperature')
###Output
_____no_output_____
###Markdown
Multiple plots and map projectionsControl the map projection parameters on multiple axesThis example illustrates how to plot multiple maps and control their extentand aspect ratio.For more details see [this discussion](https://github.com/pydata/xarray/issues/1397issuecomment-299190567) on github.
###Code
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(transform=ccrs.PlateCarree(), # the data's projection
col='time', col_wrap=1, # multiplot settings
aspect=ds.dims['lon'] / ds.dims['lat'], # for a sensible figsize
subplot_kws={'projection': map_proj}) # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
ax.coastlines()
ax.set_extent([-160, -30, 5, 75])
###Output
_____no_output_____
###Markdown
Centered colormapsXarray's automatic colormaps choice
###Code
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={'label': 'K'})
ax1.set_title('Kelvins: default')
ax2.set_xlabel('')
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={'label': '°C'})
ax2.set_title('Celsius: default')
ax2.set_xlabel('')
ax2.set_ylabel('')
# The center doesn't have to be 0
air.plot(ax=ax3, center=273.15, cbar_kwargs={'label': 'K'})
ax3.set_title('Kelvins: center=273.15')
# Or it can be ignored
airc.plot(ax=ax4, center=False, cbar_kwargs={'label': '°C'})
ax4.set_title('Celsius: center=False')
ax4.set_ylabel('')
# Make it nice
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Control the plot's colorbarUse ``cbar_kwargs`` keyword to specify the number of ticks.The ``spacing`` kwarg can be used to draw proportional ticks.
###Code
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={'ticks': levels})
air2d.plot(ax=ax3, levels=levels, cbar_kwargs={'ticks': levels,
'spacing': 'proportional'})
# Show plots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Multiple lines from a 2d DataArrayUse ``xarray.plot.line`` on a 2d DataArray to plot selections asmultiple lines.See ``plotting.multiplelines`` for more details.
###Code
air = ds.air - 273.15 # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue='lat')
ax1.set_ylabel('°C')
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x='time', add_legend=False)
ax2.set_ylabel('')
# Show
plt.tight_layout()
###Output
_____no_output_____
###Markdown
`imshow()` and rasterio map projectionsUsing rasterio's projection information for more accurate plots.This example extends `recipes.rasterio` and plots the image in theoriginal map projection instead of relying on pcolormesh and a maptransformation.
###Code
da = xr.tutorial.open_dataset("RGB.byte").data
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
crs = ccrs.UTM('18N')
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb='band', transform=crs)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Parsing rasterio geocoordinatesConverting a projection's cartesian coordinates into 2D longitudes andlatitudes.These new coordinates might be handy for plotting and indexing, but it shouldbe kept in mind that a grid which is regular in projection coordinates willlikely be irregular in lon/lat. It is often recommended to work in the data'soriginal map projection (see `recipes.rasterio_rgb`).
###Code
from rasterio.warp import transform
import numpy as np

# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
# BUG FIX: ``open_dataset(...).data`` yielded a bare numpy array; use
# ``open_rasterio`` as the other copies of this cell do.
da = xr.tutorial.open_rasterio("RGB.byte")
# Compute the lon/lat coordinates with rasterio.warp.transform
ny, nx = len(da['y']), len(da['x'])
x, y = np.meshgrid(da['x'], da['y'])
# Rasterio works with 1D arrays.  The destination CRS is given as a plain
# authority string; the old ``{'init': 'EPSG:4326'}`` dict is deprecated.
lon, lat = transform(da.crs, 'EPSG:4326',
                     x.flatten(), y.flatten())
lon = np.asarray(lon).reshape((ny, nx))
lat = np.asarray(lat).reshape((ny, nx))
# Attach the 2D lon/lat fields as auxiliary (non-dimension) coordinates.
da.coords['lon'] = (('y', 'x'), lon)
da.coords['lat'] = (('y', 'x'), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim='band')
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(ax=ax, x='lon', y='lat', transform=ccrs.PlateCarree(),
               cmap='Greys_r', add_colorbar=False)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Visualization GalleryThis notebook shows common visualization issues encountered in xarray.
###Code
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load example dataset:
###Code
# Download (and cache) xarray's NCEP "air_temperature" tutorial dataset.
ds = xr.tutorial.load_dataset('air_temperature')
###Output
_____no_output_____
###Markdown
Multiple plots and map projections — control the map projection parameters on multiple axes. This example illustrates how to plot multiple maps and control their extent and aspect ratio. For more details see [this discussion](https://github.com/pydata/xarray/issues/1397#issuecomment-299190567) on GitHub.
###Code
# Two selected time slices, converted from Kelvin to Celsius.
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
# Faceted plot: one map per time slice, stacked in a single column.
p = air.plot(transform=ccrs.PlateCarree(), # the data's projection
col='time', col_wrap=1, # multiplot settings
aspect=ds.dims['lon'] / ds.dims['lat'], # for a sensible figsize
subplot_kws={'projection': map_proj}) # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
    ax.coastlines()
    # Extent is [lon_min, lon_max, lat_min, lat_max] in geographic coords.
    ax.set_extent([-160, -30, 5, 75])
###Output
_____no_output_____
###Markdown
Centered colormapsXarray's automatic colormaps choice
###Code
# Demonstrate xarray's automatic colormap choice and the ``center`` kwarg.
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={'label': 'K'})
ax1.set_title('Kelvins: default')
ax2.set_xlabel('')
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={'label': '°C'})
ax2.set_title('Celsius: default')
ax2.set_xlabel('')
ax2.set_ylabel('')
# The center doesn't have to be 0: here the colormap is centered on 0 °C.
air.plot(ax=ax3, center=273.15, cbar_kwargs={'label': 'K'})
ax3.set_title('Kelvins: center=273.15')
# Or centering can be disabled entirely with ``center=False``.
airc.plot(ax=ax4, center=False, cbar_kwargs={'label': '°C'})
ax4.set_title('Celsius: center=False')
ax4.set_ylabel('')
# Make it nice
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Control the plot's colorbarUse ``cbar_kwargs`` keyword to specify the number of ticks.The ``spacing`` kwarg can be used to draw proportional ticks.
###Code
# Demonstrate colorbar control via ``cbar_kwargs``.
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data: default ticks, explicit ticks, and proportional tick spacing.
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={'ticks': levels})
air2d.plot(ax=ax3, levels=levels, cbar_kwargs={'ticks': levels,
'spacing': 'proportional'})
# Show plots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Multiple lines from a 2d DataArrayUse ``xarray.plot.line`` on a 2d DataArray to plot selections asmultiple lines.See ``plotting.multiplelines`` for more details.
###Code
# Plot the same 2d DataArray as several 1d lines (one per latitude).
air = ds.air - 273.15 # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue='lat')
ax1.set_ylabel('°C')
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x='time', add_legend=False)
ax2.set_ylabel('')
# Show
plt.tight_layout()
###Output
_____no_output_____
###Markdown
`imshow()` and rasterio map projectionsUsing rasterio's projection information for more accurate plots.This example extends `recipes.rasterio` and plots the image in theoriginal map projection instead of relying on pcolormesh and a maptransformation.
###Code
# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
da = xr.tutorial.open_rasterio("RGB.byte")
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
# NOTE(review): cartopy's UTM expects the zone number; '18' is formatted
# into the proj string - confirm an int zone is not required here.
crs = ccrs.UTM('18')
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb='band', transform=crs)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Parsing rasterio geocoordinatesConverting a projection's cartesian coordinates into 2D longitudes andlatitudes.These new coordinates might be handy for plotting and indexing, but it shouldbe kept in mind that a grid which is regular in projection coordinates willlikely be irregular in lon/lat. It is often recommended to work in the data'soriginal map projection (see `recipes.rasterio_rgb`).
###Code
from pyproj import Transformer
import numpy as np
# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
da = xr.tutorial.open_rasterio("RGB.byte")
# 2D grids of projected x/y coordinates for every pixel.
x, y = np.meshgrid(da['x'], da['y'])
# ``always_xy=True`` forces (x, y) -> (lon, lat) axis order regardless of
# the CRS's declared axis order.
transformer = Transformer.from_crs(da.crs, "EPSG:4326", always_xy=True)
lon, lat = transformer.transform(x, y)
# Attach the 2D lon/lat fields as auxiliary (non-dimension) coordinates.
da.coords['lon'] = (('y', 'x'), lon)
da.coords['lat'] = (('y', 'x'), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim='band')
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(ax=ax, x='lon', y='lat', transform=ccrs.PlateCarree(),
               cmap='Greys_r', shading="auto",add_colorbar=False)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Visualization GalleryThis notebook shows common visualization issues encountered in xarray.
###Code
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load example dataset:
###Code
ds = xr.tutorial.load_dataset("air_temperature")
###Output
_____no_output_____
###Markdown
Multiple plots and map projectionsControl the map projection parameters on multiple axesThis example illustrates how to plot multiple maps and control their extentand aspect ratio.For more details see [this discussion](https://github.com/pydata/xarray/issues/1397issuecomment-299190567) on github.
###Code
# Faceted maps: one Lambert Conformal panel per selected time slice.
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(
    transform=ccrs.PlateCarree(),  # the data's projection
    col="time",
    col_wrap=1,  # multiplot settings
    aspect=ds.dims["lon"] / ds.dims["lat"],  # for a sensible figsize
    subplot_kws={"projection": map_proj},
)  # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
    ax.coastlines()
    ax.set_extent([-160, -30, 5, 75])
###Output
_____no_output_____
###Markdown
Centered colormapsXarray's automatic colormaps choice
###Code
# Demonstrate xarray's automatic colormap choice and the ``center`` kwarg.
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={"label": "K"})
ax1.set_title("Kelvins: default")
ax2.set_xlabel("")
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={"label": "°C"})
ax2.set_title("Celsius: default")
ax2.set_xlabel("")
ax2.set_ylabel("")
# The center doesn't have to be 0: here the colormap is centered on 0 °C.
air.plot(ax=ax3, center=273.15, cbar_kwargs={"label": "K"})
ax3.set_title("Kelvins: center=273.15")
# Or centering can be disabled entirely with ``center=False``.
airc.plot(ax=ax4, center=False, cbar_kwargs={"label": "°C"})
ax4.set_title("Celsius: center=False")
ax4.set_ylabel("")
# Make it nice
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Control the plot's colorbarUse ``cbar_kwargs`` keyword to specify the number of ticks.The ``spacing`` kwarg can be used to draw proportional ticks.
###Code
# Demonstrate colorbar tick control and proportional spacing.
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={"ticks": levels})
air2d.plot(
    ax=ax3, levels=levels, cbar_kwargs={"ticks": levels, "spacing": "proportional"}
)
# Show plots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Multiple lines from a 2d DataArrayUse ``xarray.plot.line`` on a 2d DataArray to plot selections asmultiple lines.See ``plotting.multiplelines`` for more details.
###Code
# Plot the same 2d DataArray as several 1d lines (one per latitude).
air = ds.air - 273.15  # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue="lat")
ax1.set_ylabel("°C")
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x="time", add_legend=False)
ax2.set_ylabel("")
# Show
plt.tight_layout()
###Output
_____no_output_____
###Markdown
`imshow()` and rasterio map projectionsUsing rasterio's projection information for more accurate plots.This example extends `recipes.rasterio` and plots the image in theoriginal map projection instead of relying on pcolormesh and a maptransformation.
###Code
# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
da = xr.tutorial.open_rasterio("RGB.byte")
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
# NOTE(review): cartopy's UTM expects the zone number; confirm the
# string "18" is accepted by the installed cartopy version.
crs = ccrs.UTM("18")
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb="band", transform=crs)
ax.coastlines("10m", color="r")
###Output
_____no_output_____
###Markdown
Parsing rasterio geocoordinatesConverting a projection's cartesian coordinates into 2D longitudes andlatitudes.These new coordinates might be handy for plotting and indexing, but it shouldbe kept in mind that a grid which is regular in projection coordinates willlikely be irregular in lon/lat. It is often recommended to work in the data'soriginal map projection (see `recipes.rasterio_rgb`).
###Code
from pyproj import Transformer
import numpy as np
# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
da = xr.tutorial.open_rasterio("RGB.byte")
# 2D grids of projected x/y coordinates for every pixel.
x, y = np.meshgrid(da["x"], da["y"])
# ``always_xy=True`` forces (x, y) -> (lon, lat) axis order.
transformer = Transformer.from_crs(da.crs, "EPSG:4326", always_xy=True)
lon, lat = transformer.transform(x, y)
# Attach the 2D lon/lat fields as auxiliary (non-dimension) coordinates.
da.coords["lon"] = (("y", "x"), lon)
da.coords["lat"] = (("y", "x"), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim="band")
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(
    ax=ax,
    x="lon",
    y="lat",
    transform=ccrs.PlateCarree(),
    cmap="Greys_r",
    shading="auto",
    add_colorbar=False,
)
ax.coastlines("10m", color="r")
###Output
_____no_output_____
###Markdown
Visualization GalleryThis notebook shows common visualization issues encountered in Xarray.
###Code
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load example dataset:
###Code
ds = xr.tutorial.load_dataset('air_temperature')
###Output
_____no_output_____
###Markdown
Multiple plots and map projections — control the map projection parameters on multiple axes. This example illustrates how to plot multiple maps and control their extent and aspect ratio. For more details see [this discussion](https://github.com/pydata/xarray/issues/1397#issuecomment-299190567) on GitHub.
###Code
# Faceted maps: one Lambert Conformal panel per selected time slice.
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(transform=ccrs.PlateCarree(), # the data's projection
col='time', col_wrap=1, # multiplot settings
aspect=ds.dims['lon'] / ds.dims['lat'], # for a sensible figsize
subplot_kws={'projection': map_proj}) # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
    ax.coastlines()
    ax.set_extent([-160, -30, 5, 75])
###Output
_____no_output_____
###Markdown
Centered colormapsXarray's automatic colormaps choice
###Code
# Demonstrate xarray's automatic colormap choice and the ``center`` kwarg.
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={'label': 'K'})
ax1.set_title('Kelvins: default')
ax2.set_xlabel('')
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={'label': '°C'})
ax2.set_title('Celsius: default')
ax2.set_xlabel('')
ax2.set_ylabel('')
# The center doesn't have to be 0: here the colormap is centered on 0 °C.
air.plot(ax=ax3, center=273.15, cbar_kwargs={'label': 'K'})
ax3.set_title('Kelvins: center=273.15')
# Or centering can be disabled entirely with ``center=False``.
airc.plot(ax=ax4, center=False, cbar_kwargs={'label': '°C'})
ax4.set_title('Celsius: center=False')
ax4.set_ylabel('')
# Make it nice
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Control the plot's colorbarUse ``cbar_kwargs`` keyword to specify the number of ticks.The ``spacing`` kwarg can be used to draw proportional ticks.
###Code
# Demonstrate colorbar tick control and proportional spacing.
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={'ticks': levels})
air2d.plot(ax=ax3, levels=levels, cbar_kwargs={'ticks': levels,
'spacing': 'proportional'})
# Show plots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Multiple lines from a 2d DataArrayUse ``xarray.plot.line`` on a 2d DataArray to plot selections asmultiple lines.See ``plotting.multiplelines`` for more details.
###Code
# Plot the same 2d DataArray as several 1d lines (one per latitude).
air = ds.air - 273.15 # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue='lat')
ax1.set_ylabel('°C')
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x='time', add_legend=False)
ax2.set_ylabel('')
# Show
plt.tight_layout()
###Output
_____no_output_____
###Markdown
`imshow()` and rasterio map projectionsUsing rasterio's projection information for more accurate plots.This example extends `recipes.rasterio` and plots the image in theoriginal map projection instead of relying on pcolormesh and a maptransformation.
###Code
# Fetch the rasterio test GeoTIFF straight from GitHub.
url = 'https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif'
# NOTE: ``xr.open_rasterio`` is deprecated in newer xarray in favor of
# ``rioxarray.open_rasterio``; kept here to match the rest of this file.
da = xr.open_rasterio(url)
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
# BUG FIX: cartopy's UTM takes the zone *number*; the hemisphere is a
# separate flag, so '18N' was rejected.
crs = ccrs.UTM(18)
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb='band', transform=crs)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Parsing rasterio geocoordinatesConverting a projection's cartesian coordinates into 2D longitudes andlatitudes.These new coordinates might be handy for plotting and indexing, but it shouldbe kept in mind that a grid which is regular in projection coordinates willlikely be irregular in lon/lat. It is often recommended to work in the data'soriginal map projection (see `recipes.rasterio_rgb`).
###Code
from rasterio.warp import transform
import numpy as np

# Fetch the rasterio test GeoTIFF straight from GitHub.
url = 'https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif'
da = xr.open_rasterio(url)
# Compute the lon/lat coordinates with rasterio.warp.transform
ny, nx = len(da['y']), len(da['x'])
x, y = np.meshgrid(da['x'], da['y'])
# Rasterio works with 1D arrays.  The destination CRS is given as a plain
# authority string; the old ``{'init': 'EPSG:4326'}`` dict is deprecated
# (the pyproj-based copies of this cell already use the string form).
lon, lat = transform(da.crs, 'EPSG:4326',
                     x.flatten(), y.flatten())
lon = np.asarray(lon).reshape((ny, nx))
lat = np.asarray(lat).reshape((ny, nx))
# Attach the 2D lon/lat fields as auxiliary (non-dimension) coordinates.
da.coords['lon'] = (('y', 'x'), lon)
da.coords['lat'] = (('y', 'x'), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim='band')
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(ax=ax, x='lon', y='lat', transform=ccrs.PlateCarree(),
               cmap='Greys_r', add_colorbar=False)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Visualization GalleryThis notebook shows common visualization issues encountered in xarray.
###Code
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import xarray as xr
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load example dataset:
###Code
ds = xr.tutorial.load_dataset('air_temperature')
###Output
_____no_output_____
###Markdown
Multiple plots and map projectionsControl the map projection parameters on multiple axesThis example illustrates how to plot multiple maps and control their extentand aspect ratio.For more details see [this discussion](https://github.com/pydata/xarray/issues/1397issuecomment-299190567) on github.
###Code
# Faceted maps: one Lambert Conformal panel per selected time slice.
air = ds.air.isel(time=[0, 724]) - 273.15
# This is the map projection we want to plot *onto*
map_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)
p = air.plot(transform=ccrs.PlateCarree(), # the data's projection
col='time', col_wrap=1, # multiplot settings
aspect=ds.dims['lon'] / ds.dims['lat'], # for a sensible figsize
subplot_kws={'projection': map_proj}) # the plot's projection
# We have to set the map's options on all axes
for ax in p.axes.flat:
    ax.coastlines()
    ax.set_extent([-160, -30, 5, 75])
###Output
_____no_output_____
###Markdown
Centered colormapsXarray's automatic colormaps choice
###Code
# Demonstrate xarray's automatic colormap choice and the ``center`` kwarg.
air = ds.air.isel(time=0)
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))
# The first plot (in kelvins) chooses "viridis" and uses the data's min/max
air.plot(ax=ax1, cbar_kwargs={'label': 'K'})
ax1.set_title('Kelvins: default')
ax2.set_xlabel('')
# The second plot (in celsius) now chooses "BuRd" and centers min/max around 0
airc = air - 273.15
airc.plot(ax=ax2, cbar_kwargs={'label': '°C'})
ax2.set_title('Celsius: default')
ax2.set_xlabel('')
ax2.set_ylabel('')
# The center doesn't have to be 0: here the colormap is centered on 0 °C.
air.plot(ax=ax3, center=273.15, cbar_kwargs={'label': 'K'})
ax3.set_title('Kelvins: center=273.15')
# Or centering can be disabled entirely with ``center=False``.
airc.plot(ax=ax4, center=False, cbar_kwargs={'label': '°C'})
ax4.set_title('Celsius: center=False')
ax4.set_ylabel('')
# Make it nice
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Control the plot's colorbarUse ``cbar_kwargs`` keyword to specify the number of ticks.The ``spacing`` kwarg can be used to draw proportional ticks.
###Code
# Demonstrate colorbar tick control and proportional spacing.
air2d = ds.air.isel(time=500)
# Prepare the figure
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))
# Irregular levels to illustrate the use of a proportional colorbar
levels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]
# Plot data
air2d.plot(ax=ax1, levels=levels)
air2d.plot(ax=ax2, levels=levels, cbar_kwargs={'ticks': levels})
air2d.plot(ax=ax3, levels=levels, cbar_kwargs={'ticks': levels,
'spacing': 'proportional'})
# Show plots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Multiple lines from a 2d DataArrayUse ``xarray.plot.line`` on a 2d DataArray to plot selections asmultiple lines.See ``plotting.multiplelines`` for more details.
###Code
# Plot the same 2d DataArray as several 1d lines (one per latitude).
air = ds.air - 273.15 # to celsius
# Prepare the figure
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)
# Selected latitude indices
isel_lats = [10, 15, 20]
# Temperature vs longitude plot - illustrates the "hue" kwarg
air.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue='lat')
ax1.set_ylabel('°C')
# Temperature vs time plot - illustrates the "x" and "add_legend" kwargs
air.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x='time', add_legend=False)
ax2.set_ylabel('')
# Show
plt.tight_layout()
###Output
_____no_output_____
###Markdown
`imshow()` and rasterio map projectionsUsing rasterio's projection information for more accurate plots.This example extends `recipes.rasterio` and plots the image in theoriginal map projection instead of relying on pcolormesh and a maptransformation.
###Code
# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
da = xr.tutorial.open_rasterio("RGB.byte")
# The data is in UTM projection. We have to set it manually until
# https://github.com/SciTools/cartopy/issues/813 is implemented
# BUG FIX: cartopy's UTM takes the zone *number*; the hemisphere is a
# separate flag, so '18N' was rejected.
crs = ccrs.UTM(18)
# Plot on a map
ax = plt.subplot(projection=crs)
da.plot.imshow(ax=ax, rgb='band', transform=crs)
ax.coastlines('10m', color='r')
###Output
_____no_output_____
###Markdown
Parsing rasterio geocoordinatesConverting a projection's cartesian coordinates into 2D longitudes andlatitudes.These new coordinates might be handy for plotting and indexing, but it shouldbe kept in mind that a grid which is regular in projection coordinates willlikely be irregular in lon/lat. It is often recommended to work in the data'soriginal map projection (see `recipes.rasterio_rgb`).
###Code
from rasterio.warp import transform
import numpy as np

# Open the tutorial RGB GeoTIFF as a DataArray with dims (band, y, x).
da = xr.tutorial.open_rasterio("RGB.byte")
# Compute the lon/lat coordinates with rasterio.warp.transform
ny, nx = len(da['y']), len(da['x'])
x, y = np.meshgrid(da['x'], da['y'])
# Rasterio works with 1D arrays.  The destination CRS is given as a plain
# authority string; the old ``{'init': 'EPSG:4326'}`` dict is deprecated
# (the pyproj-based copies of this cell already use the string form).
lon, lat = transform(da.crs, 'EPSG:4326',
                     x.flatten(), y.flatten())
lon = np.asarray(lon).reshape((ny, nx))
lat = np.asarray(lat).reshape((ny, nx))
# Attach the 2D lon/lat fields as auxiliary (non-dimension) coordinates.
da.coords['lon'] = (('y', 'x'), lon)
da.coords['lat'] = (('y', 'x'), lat)
# Compute a greyscale out of the rgb image
greyscale = da.mean(dim='band')
# Plot on a map
ax = plt.subplot(projection=ccrs.PlateCarree())
greyscale.plot(ax=ax, x='lon', y='lat', transform=ccrs.PlateCarree(),
               cmap='Greys_r', add_colorbar=False)
ax.coastlines('10m', color='r')
###Output
_____no_output_____ |
Assignments/hw3/HW3_Generalized_Linear_Model_finished/.ipynb_checkpoints/plot_iris_logistic-checkpoint.ipynb | ###Markdown
Logistic Regression 3-class Classifier. Shown below are a logistic-regression classifier's decision boundaries on the `iris` dataset. The data points are colored according to their labels.
###Code
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
import pandas as pd

# Load the iris data from file.
mydata = pd.read_csv("iris.csv")
dt = mydata.values
# Features: the first two columns (sepal length / sepal width), as floats.
# BUG FIX: the original took a single 1-D column and cast it to int, which
# made the later 2-D indexing (X[:, 0], X[:, 1]) raise an IndexError.
X = dt[:, 0:2].astype(float)
# Target: the class label in the last column, encoded as integer codes
# (pd.factorize handles string species names as well as numeric labels).
Y = pd.factorize(mydata.iloc[:, -1])[0]
# import some data to play with
#iris = datasets.load_iris()
#X = iris.data[:, :2] # we only take the first two features.
#Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Fit the classifier.
# BUG FIX: fit() takes the feature matrix and the target vector as two
# separate arguments, not a single nested list.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
###Output
Automatically created module for IPython interactive environment
|
notebooks/hillslope_diffusion_class_notebook.ipynb | ###Markdown
Linear diffusion exercise with LandlabThis notebook is adapted from *Landscape Evolution Modeling with CHILD* by Gregory Tucker and Stephen Lancaster. This notebook was created by Nicole Gasparini at Tulane University. For tutorials on learning Landlab, click here: https://github.com/landlab/landlab/wiki/Tutorials **What is this notebook?**This notebook illustrates the evolution of landforms dominated by processes that result in linear diffusion of sediment. In other words, the downhill flow of soil is proportional to the (downhill) gradient of the land surface multiplied by a transport coefficient.The notebook first illustrates a simple example of a diffusing hillslope. We then provide a number of exercises for students to do on their own. This set of exercises is recomended for students in a quantitative geomorphology class, who have been introduced to the linear diffusion equation in class. **Application of linear diffusion transport law:**For relatively gentle, soil-mantled slopes, there is reasonably strong support for a transport law of the form:\begin{equation}q_s = -D \nabla z\end{equation}where ${q}_s$ is the transport rate with dimensions of L$^2$T$^{-1}$; $D$ is a transport coefficient with dimensions of L$^2$T$^{-1}$; and $z$ is elevation. $\nabla z$ is the gradient in elevation. If distance is increasing downslope, $\nabla z$ is negative downslope, hence the negative in front of $D$. Changes in elevation, or erosion, are calculated from conservation of mass:\begin{equation}\frac{dz}{dt} = U-\nabla q_s\end{equation}where $U$ is the rock uplift rate, with dimensions LT$^{-1}$.**How will we explore this with Landlab?**We will use the Landlab component *LinearDiffuser*, which implements the equations above, to explore how hillslopes evolve when linear diffusion describes hillslope sediment transport. 
We will explore both steady state, here defined as erosion rate equal to rock uplift rate, and also how a landscape gets to steady state.The first example illustrates how to set-up the model and evolve a hillslope to steady state, along with how to plot some variables of interest. We assume that you have knowledge of how to derive the steady-state form of a uniformly uplifting, steady-state, diffusive hillslope. For more information on hillslope sediment transport laws, this paper is a great overview:Roering, Joshua J. (2008) "How well can hillslope evolution models “explain” topography? Simulating soil transport and production with high-resolution topographic data." Geological Society of America Bulletin.Based on the first example, you are asked to first think about what will happen as you change a parameter, and then you explore this numerically by changing the code.Start at the top by reading each block of text and sequentially running each code block (shift - enter OR got to the _Cell_ pulldown menu at the top and choose _Run Cells_). Remember that you can always go to the _Kernel_ pulldown menu at the top and choose _Restart & Clear Output_ or _Restart & Run All_ if you change things and want to start afresh. If you just change one code block and rerun only that code block, only the parts of the code in that code block will be updated. (E.g. if you change parameters but don't reset the code blocks that initialize run time or topography, then these values will not be reset.) **Now on to the code example**Import statements. You should not need to edit this.
###Code
# Code Block 1
from landlab import RasterModelGrid
from landlab.components import FlowAccumulator, LinearDiffuser
from landlab.plot.imshow import imshow_grid
from matplotlib.pyplot import (
figure, show, plot, xlabel, ylabel, title, legend, ylim
)
import numpy as np
###Output
_____no_output_____
###Markdown
We will create a grid with 41 rows and 5 columns, and dx is 5 m (a long, narrow, hillslope). The initial elevation is 0 at all nodes.We set-up boundary conditions so that material can leave the hillslope at the two short ends.
###Code
# Code Block 2
# Set up a long, narrow hillslope: 41 rows x 5 columns, 5 m node spacing.
mg = RasterModelGrid((41, 5), 5.)
# Flat initial topography: elevation = 0 at every node.
z_vals = mg.add_zeros('topographic__elevation', at='node')
# initialize some values for plotting: y coordinates of the center column
ycoord_rast = mg.node_vector_to_raster(mg.node_y)
ys_grid = ycoord_rast[:, 2]
# Close the two long edges so sediment can only leave through the two
# short ends of the hillslope.
# NOTE(review): flag order is (right, top, left, bottom) per Landlab's
# set_closed_boundaries_at_grid_edges - confirm for this Landlab version.
mg.set_closed_boundaries_at_grid_edges(True, False, True, False)
###Output
_____no_output_____
###Markdown
Now we initialize the *LinearDiffuser* component.
###Code
# Code Block 3
# Hillslope transport ("diffusion") coefficient D, in m^2/yr.
D = 0.01 # initial value of 0.01 m^2/yr
# Component that implements q_s = -D * grad(z) and mass conservation.
lin_diffuse = LinearDiffuser(mg, linear_diffusivity=D)
###Output
_____no_output_____
###Markdown
We now initialize a few more parameters.
###Code
# Code Block 4
# Uniform rate of rock uplift
uplift_rate = 0.0001 # meters/year, originally set to 0.0001
# Total time in years that the model will run for.
runtime = 1000000 # years, originally set to 1,000,000
# Stability criteria for timestep dt. Coefficient can be changed
# depending on our tolerance for stability vs tolerance for run time.
# (0.5 * dx^2 / D is the classic explicit-diffusion stability limit.)
dt = 0.5 * mg.dx * mg.dx / D
# nt is number of time steps (integer division of runtime by dt)
nt = int(runtime // dt)
# Below is to keep track of time for labeling plots
time_counter = 0
# length of uplift over a single time step, meters
uplift_per_step = uplift_rate * dt
###Output
_____no_output_____
###Markdown
Now we figure out the analytical solution for the elevation of the steady-state profile.
###Code
# Code Block 5
# Horizontal positions along the hillslope at which to evaluate the
# analytical solution.
ys = np.arange(mg.number_of_node_rows*mg.dx-mg.dx)
# location of divide or ridge crest -> middle of grid
# based on boundary conds.
divide_loc = (mg.number_of_node_rows*mg.dx-mg.dx)/2
# half-width of the ridge
half_width = (mg.number_of_node_rows*mg.dx-mg.dx)/2
# analytical solution for elevation under linear diffusion at steady state:
# a parabola, z = (U / 2D) * (L^2 - (y - y_divide)^2)
zs = (uplift_rate/(2*D)) * \
    (np.power(half_width, 2) - np.power(ys - divide_loc, 2))
###Output
_____no_output_____
###Markdown
Before we evolve the landscape, let's look at the initial topography. (This is just verifying that it is flat with zero elevation.)
###Code
# Code Block 6
# Plan-view map of the (still flat) initial topography.
figure(1)
imshow_grid(mg, 'topographic__elevation')
title('initial topography')
# Cross-section along the center column, compared to the analytical
# steady-state profile.
figure(2)
elev_rast = mg.node_vector_to_raster(
    mg.at_node['topographic__elevation'])
plot(ys_grid, elev_rast[:, 2], 'r-', label='model')
plot(ys, zs, 'k--', label='analytical solution')
ylim((-5,50)) #may want to change upper limit if D changes
xlabel('horizontal distance (m)')
ylabel('vertical distance (m)')
legend(loc='lower center')
_ = title('initial topographic cross section')
###Output
_____no_output_____
###Markdown
Now we are ready to evolve the landscape and compare it to the steady state solution.Below is the time loop that does all the calculations.
###Code
# Code Block 7
# Main time loop: uplift the core nodes, then diffuse, each time step.
for i in range(nt):
    # Add one time step's worth of rock uplift to the interior nodes.
    mg['node']['topographic__elevation'][mg.core_nodes] += uplift_per_step
    # Run the linear diffuser for one time step of length dt.
    lin_diffuse.run_one_step(dt)
    time_counter += dt
    # All landscape evolution is the first two lines of loop.
    # Below is simply for plotting the topography halfway through the run
    if i == int(nt // 2):
        figure(1)
        imshow_grid(mg, 'topographic__elevation')
        title('topography at time %s, with D = %s'%(time_counter,D))
        figure(2)
        elev_rast = mg.node_vector_to_raster(
            mg.at_node['topographic__elevation']
        )
        # Compare the mid-run profile against the analytical steady-state
        # curve and fractions of it.
        plot(ys_grid, elev_rast[:, 2], 'k-', label='model')
        plot(ys, zs, 'g--', label='analytical solution - SS')
        plot(ys, zs*0.75, 'b--', label='75% of analytical solution')
        plot(ys, zs*0.5, 'r--', label='50% of analytical solution')
        xlabel('horizontal distance (m)')
        ylabel('vertical distance (m)')
        legend(loc='lower center')
        title(
            'topographic__elevation at time %s, with D = %s'
            %(time_counter,D)
        )
###Output
_____no_output_____
###Markdown
Now we plot the final cross-section.
###Code
# Code Block 8
# Final cross-section along the center column, compared to the analytical
# steady-state profile and fractions of it.
elev_rast = mg.node_vector_to_raster(mg.at_node['topographic__elevation'])
plot(ys_grid, elev_rast[:, 2], 'k-', label='model')
plot(ys, zs, 'g--', label='analytical solution - SS')
plot(ys, zs * 0.75, 'b--', label='75% of analytical solution')
plot(ys, zs * 0.5, 'r--', label='50% of analytical solution')
xlabel('horizontal distance (m)')
ylabel('vertical distance (m)')
legend(loc='lower center')
_ = title('topographic cross section at time %s, with D = %s'%(time_counter,D))
###Output
_____no_output_____
###Markdown
Now we plot the steepest slope in the downward direction across the landscape.(To calculate the steepest slope at a location, we need to route flow across the landscape.)
###Code
# Code Block 9
# Flow routing is needed to compute the steepest downhill slope at each node.
fr = FlowAccumulator(mg, flow_director='FlowDirectorD8') # intializing flow routing
fr.run_one_step()
# Plot slope vs. distance for the interior (core) nodes only.
plot(
    mg.node_y[mg.core_nodes],
    mg.at_node['topographic__steepest_slope'][mg.core_nodes],
    'k-'
)
xlabel('horizontal distance (m)')
ylabel('topographic slope (m/m)')
_ = title('slope of the hillslope at time %s, with D = %s'%(time_counter,D))
###Output
_____no_output_____ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.