**Dataset schema**

| Column | Type | Range / values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 6 – 14.9M |
| ext | stringclasses | 1 value |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 6–260 |
| max_stars_repo_name | stringlengths | 6–119 |
| max_stars_repo_head_hexsha | stringlengths | 40–41 |
| max_stars_repo_licenses | list | n/a |
| max_stars_count | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 6–260 |
| max_issues_repo_name | stringlengths | 6–119 |
| max_issues_repo_head_hexsha | stringlengths | 40–41 |
| max_issues_repo_licenses | list | n/a |
| max_issues_count | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 6–260 |
| max_forks_repo_name | stringlengths | 6–119 |
| max_forks_repo_head_hexsha | stringlengths | 40–41 |
| max_forks_repo_licenses | list | n/a |
| max_forks_count | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 |
| avg_line_length | float64 | 2 – 1.04M |
| max_line_length | int64 | 2 – 11.2M |
| alphanum_fraction | float64 | 0 – 1 |
| cells | list | n/a |
| cell_types | list | n/a |
| cell_type_groups | list | n/a |
**Record 1**

| Field | Value |
|---|---|
| hexsha | ec591229311980769f11a1d941540031fbcb5dfa |
| size | 883,692 |
| ext | ipynb |
| lang | Jupyter Notebook |
| repo path (stars / issues / forks) | analysis_notebooks/cext_wave_dist_prot_sensor_d1_q_tole3.ipynb |
| repo name (stars / issues / forks) | barbagroup/pygbe_lspr |
| repo head hexsha (stars / issues / forks) | 8653771cac5d650dc85b6ecb4a8d9bbe52402b05 |
| repo licenses (stars / issues / forks) | [ "CC-BY-3.0" ] |
| max_stars_count | 3 |
| stars event min / max datetime | 2017-04-28T20:35:57.000Z / 2021-12-01T03:24:31.000Z |
| max_issues_count | null |
| issues event min / max datetime | null / null |
| max_forks_count | 1 |
| forks event min / max datetime | 2021-12-01T03:24:35.000Z / 2021-12-01T03:24:35.000Z |
| avg_line_length | 704.137052 |
| max_line_length | 68,076 |
| alphanum_fraction | 0.935742 |
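The last three fields are per-file text statistics. Their exact definitions are not given in this dump, so the sketch below is one plausible reading (line lengths measured over `splitlines()`, alphanumeric fraction over all characters), not the dataset's authoritative recipe:

```python
def text_metrics(text):
    """Plausible reconstruction of avg_line_length, max_line_length,
    and alphanum_fraction for a file's raw text (assumed definitions)."""
    lines = text.splitlines()
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / len(text)
    return avg_line_length, max_line_length, alphanum_fraction

sample = "import numpy\nx = numpy.linspace(0, 1, 5)\nprint(x)\n"
print(text_metrics(sample))
```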
[ [ [ "## LSPR response calculation\n\nWe study the relation between the LSPR response and the sensor-analyte distance. We vary the distance between the silver sphere (sensor) and the analytes (BSA protein) and we calculate the extinction cross section as a function of wavelength for the different distances:\n\n* $d = \\infty$ i.e. we only have the sensor (silver sphere r=25 nm)\n* $d=4 \\,nm$\n* $d=2 \\,nm$\n* $d=1 \\,nm$", "_____no_output_____" ] ], [ [ "import numpy\nfrom matplotlib import pyplot, rcParams\n#from scripts.data_analysis_helper import plot_cext_wave_distance\n#%matplotlib notebook\n%matplotlib inline", "_____no_output_____" ], [ "w , Cext = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/d1_tole-3/BSA_sensorR125_d=infty_total.txt', \n unpack = True)\nw_d1 , Cext_d1 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/d1_tole-3/BSA_sensorR125_d=1_dens1_total.txt', \n unpack = True)\n#w_d2 , Cext_d2 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/p=5/BSA_sensor_d=2_total.txt', \n# unpack = True)\n#w_d4 , Cext_d4 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/p=5/BSA_sensor_d=4_total.txt', \n# unpack = True)", "_____no_output_____" ], [ "#wavelength = [w/10., w_d4/10, w_d2/10., w_d1/10.]\n#cext = [Cext, Cext_d4, Cext_d2, Cext_d1]\n#linestyles = ['-', '--', '-.', ':']\n#colors = ['k', '0.2', '0.4', '0.6']\n#labels = ['$d = \\infty$','$d=4 \\,nm$', '$d=2 \\,nm$', '$d=1 \\,nm$']\n\nwavelength = [w/10., w_d1/10.]\ncext = [Cext, Cext_d1]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "def plot_cext_wave_distance(wavelength, cext, linestyles, colors, labels, title=None):\n '''Plots the cross extinction section as a function of wavelength for\n different values of distance at which the proteins are located.\n\n \tArguments:\n ----------\n wavelength: list of wavelength arrays for each distance case.\n cext : list of cross extinction section arrays for each distance case.\n linestyles: list of linstyles we desire to use for each distance case.\n colors : list of colors we desire to use for each distance case.\n labels : list of labels we desire to use for each distance case.\n\t'''\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = 16\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 2\n\n fig=pyplot.figure(figsize=(9,6))\n ax = fig.add_subplot(1,1,1)\n \n major_xticks = numpy.linspace(min(wavelength[0]), max(wavelength[0]), 11)\n minor_xticks = numpy.linspace(min(wavelength[0]), max(wavelength[0]), 21)\n #major_yticks = numpy.linspace(0, 8000, 9)\n #minor_yticks = numpy.linspace(0, 8000, 33)\n\n ax.set_xticks(major_xticks) \n ax.set_xticks(minor_xticks, minor=True)\n #ax.set_yticks(major_yticks) \n #ax.set_yticks(minor_yticks, minor=True)\n\n pyplot.xticks(rotation=25)\n pyplot.tick_params(axis='both', length=5, width=1, which='major', direction='in')\n pyplot.tick_params(axis='both', length=2.5, width=1, which='minor', direction='in')\n\n pyplot.xlabel('Wavelength [nm]')\n pyplot.ylabel('Cross extinction section [$nm^2$]')\n pyplot.xlim(382,387)\n #pyplot.ylim(0,8000)\n pyplot.grid(ls=':', which='minor', alpha=0.6)\n pyplot.grid(ls=':', which='major', alpha=0.8)\n #pyplot.title('Silver sphere with BSA Proteins')\n \n for i in range(len(wavelength)):\n pyplot.plot(wavelength[i], cext[i], linestyle=linestyles[i], \n color=colors[i], linewidth=2, label=labels[i])\n \n pyplot.legend(loc='best')\n\n if 
title:\n fig.tight_layout()\n pyplot.savefig('figures/'+title+'.pdf', dpi=80, format='pdf')\n\n", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength, cext, linestyles, colors, labels, title='1p_0rot_0tilt_ef-1')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w[idx][0]))", "Cext max at d=infty is 15296.97948969 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15157.01242337 and it occurs at a wavelngth of 3840.0\n" ] ], [ [ "# Case d=1nm at z and 0, 0 in x,y . Prot rot 60 deg, tilt 30 deg, EF -1", "_____no_output_____" ] ], [ [ "w_6030 , Cext_6030 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030/BSA_sensorR125_d=infty_6030.txt', \n unpack = True)\nw_d1_6030 , Cext_d1_6030 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030/BSA_sensorR125_d=1_6030.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_6030 = [w_6030/10., w_d1_6030/10.]\ncext_6030 = [Cext_6030, Cext_d1_6030]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_6030, cext_6030, linestyles, colors, labels, title='1p_60rot_30tilt_ef-1')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_6030, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_6030[idx][0]))", "Cext max at d=infty is 15296.97948969 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15217.52713154 and it occurs at a wavelngth of 3840.0\n" ] ], [ [ "# Case d=1nm at z and 0, 0 in x,y . Prot rot 60 deg, tilt 30 deg, EF -0.0037", "_____no_output_____" ] ], [ [ "w_ef , Cext_ef = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037/BSA_sensorR125_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_ef , Cext_d1_ef = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037/BSA_sensorR125_d=1_6030_ef0.0037_total.txt', \n unpack = True)\n\n", "_____no_output_____" ], [ "wavelength_ef = [w_ef/10., w_d1_ef/10.]\ncext_ef = [Cext_ef, Cext_d1_ef]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_ef, cext_ef, linestyles, colors, labels, title='1p_60rot_30tilt_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_ef, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_ef[idx][0]))", "Cext max at d=infty is 15296.97948625 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15374.58631287 and it occurs at a wavelngth of 3840.0\n" ] ], [ [ "# Case d=1nm at z and 0, 0 in x,y and +/- 45 deg rot along y . 
Prot rot 60 deg, tilt 30 deg, EF -0.0037, 3 proteins", "_____no_output_____" ] ], [ [ "w_3p , Cext_3p = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_3p/BSA_sensorR125_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_3p , Cext_d1_3p = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_3p/BSA_sensorR125_3prot_d=1_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_3p = [w_3p/10., w_d1_3p/10.]\ncext_3p = [Cext_3p, Cext_d1_3p]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_3p, cext_3p, linestyles, colors, labels, title='3p_60rot_30tilt_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_3p, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_3p[idx][0]))", "Cext max at d=infty is 15296.97948625 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15484.98418821 and it occurs at a wavelngth of 3842.5\n" ] ], [ [ "# Case d=1nm at z and 0, 0 in EF -0.0037, 2 proteins +/- z orient 6030", "_____no_output_____" ] ], [ [ "w_2p , Cext_2p = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_2pz/BSA_sensorR125_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_2p , Cext_d1_2p = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_2pz/BSA_sensorR125_2pz_d=1_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_2p = [w_2p/10., w_d1_2p/10.]\ncext_2p = [Cext_2p, Cext_d1_2p]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_2p, cext_2p, linestyles, colors, labels, title='2pz_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_2p, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_2p[idx][0]))", "Cext max at d=infty is 15296.97948625 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15121.67133896 and it occurs at a wavelngth of 3842.5\n" ] ], [ [ "# Case d=1nm at z and 0, 0 in EF -0.0037, 2 proteins +/- z orient 00", "_____no_output_____" ] ], [ [ "w_2p_00 , Cext_2p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_2pz_00/BSA_sensorR125_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_2p_00 , Cext_d1_2p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_2pz_00/BSA_sensorR125_2pz_d=1_00_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_2p_00 = [w_2p_00/10., w_d1_2p_00/10.]\ncext_2p_00 = [Cext_2p_00, Cext_d1_2p_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_2p_00, cext_2p_00, linestyles, colors, labels, title='2pz00_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_2p_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_2p_00[idx][0]))", "Cext max 
at d=infty is 15296.97948625 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15035.01800421 and it occurs at a wavelngth of 3842.5\n" ] ], [ [ "# Case d=1nm at z, +/- 45 in EF -0.0037, 3 proteins", "_____no_output_____" ] ], [ [ "w_3p_00 , Cext_3p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_3prot_d=1_00_ef0.0037/BSA_sensorR125_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_3p_00 , Cext_d1_3p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_3prot_d=1_00_ef0.0037/BSA_sensorR125_3prot_d=1_00_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_3p_00 = [w_3p_00/10., w_d1_3p_00/10.]\ncext_3p_00 = [Cext_3p_00, Cext_d1_3p_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_3p_00, cext_3p_00, linestyles, colors, labels, title='3p00_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_3p_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_3p_00[idx][0]))", "Cext max at d=infty is 15296.97948625 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 15438.13143417 and it occurs at a wavelngth of 3842.5\n" ] ], [ [ "# Case d=1nm at +/-z and +/- y in EF -0.0037, 4 proteins", "_____no_output_____" ] ], [ [ "w_4p_00 , Cext_4p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_4p/BSA_sensorR125_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_4p_00 , Cext_d1_4p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR125_d=1_6030_ef0.0037_4p/BSA_sensorR125_4prot_d=1_00_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_4p_00 = [w_4p_00/10., w_d1_4p_00/10.]\ncext_4p_00 = [Cext_4p_00, Cext_d1_4p_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_4p_00, cext_4p_00, linestyles, colors, labels, title='4p00_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_4p_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_4p_00[idx][0]))", "Cext max at d=infty is 15296.97948625 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 14970.45420177 and it occurs at a wavelngth of 3842.5\n" ] ], [ [ "# Case d=1nm at +/-z in EF -0.0037, 2 proteins R10 nm", "_____no_output_____" ] ], [ [ "w_2p10_00 , Cext_2p10_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR100_2p_ef0.0037/BSA_sensorR100_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_2p10_00 , Cext_d1_2p10_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR100_2p_ef0.0037/BSA_sensorR100_2prot_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_2p10_00 = [w_2p10_00/10., w_d1_2p10_00/10.]\ncext_2p10_00 = [Cext_2p10_00, Cext_d1_2p10_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_2p10_00, cext_2p10_00, linestyles, colors, labels, title='2pr10_00_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = 
list(zip(cext_2p10_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_2p10_00[idx][0]))", "Cext max at d=infty is 7832.21649637 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 7651.83547235 and it occurs at a wavelngth of 3842.5\n" ] ], [ [ "# Case d=1nm at +/-z and 1 at 30 deg on y (rotx) in EF -0.0037, 2 proteins R10 nm", "_____no_output_____" ] ], [ [ "w_3px_00 , Cext_3px_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR100_3p_ef0.0037/BSA_sensorR100_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_3px_00 , Cext_d1_3px_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR100_3p_ef0.0037/BSA_sensorR100_3prot_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_3px_00 = [w_3px_00/10., w_d1_3px_00/10.]\ncext_3px_00 = [Cext_3px_00, Cext_d1_3px_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_3px_00, cext_3px_00, linestyles, colors, labels, title='3px_00_ef-0.0037')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_3px_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_3px_00[idx][0]))", "Cext max at d=infty is 7832.21649637 and it occurs at a wavelngth of 3840.0\nCext max at d=1 nm is 7692.92766466 and it occurs at a wavelngth of 3845.0\n" ] ], [ [ "# Case d=1 nm at +/-z 2 proteins EF -0.0037, R8 nm", "_____no_output_____" ] ], [ [ "w_d1_00 , Cext_d1_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR80_d=1_2pz/BSA_sensorR80_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d1_2p_00 , Cext_d1_2p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR80_d=1_2pz/BSA_sensorR80_2pz_d=1_00_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_d1_2p_00 = [w_d1_00/10., w_d1_2p_00/10.]\ncext_d1_00 = [Cext_d1_00, Cext_d1_2p_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=1 \\,nm$']", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_d1_2p_00, cext_d1_00, linestyles, colors, labels, title='2pz_00_ef-0.0037_R8nm')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=1 nm']\nlst = list(zip(cext_d1_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelength of {}'.format(l,max(c), w_d1_00[idx][0]))", "Cext max at d=infty is 4010.09400027 and it occurs at a wavelength of 3840.0\nCext max at d=1 nm is 3901.06202839 and it occurs at a wavelength of 3845.0\n" ] ], [ [ "# Case d=0.5 nm at +/-z 2 proteins EF -0.0037, R8 nm", "_____no_output_____" ] ], [ [ "w_d05_00 , Cext_d05_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR80_d=0.5_2pz/BSA_sensorR80_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d05_2p_00 , Cext_d05_2p_00 = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR80_d=0.5_2pz/BSA_sensorR80_2pz_d=0.5_00_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_d05_2p_00 = [w_d05_00/10., w_d05_2p_00/10.]\ncext_d05_00 = [Cext_d05_00, Cext_d05_2p_00]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=0.5 \\,nm$']", 
"_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_d05_2p_00, cext_d05_00, linestyles, colors, labels, title='2pz_d05_00_ef-0.0037_R8nm')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=0.5 nm']\nlst = list(zip(cext_d05_00, lab))\nfor i in range(len(lst)):\n c, l = lst[i]\n idx = numpy.where(c==max(c))\n print('Cext max at {} is {} and it occurs at a wavelngth of {}'.format(l,max(c), w_d05_00[idx][0]))", "Cext max at d=infty is 4010.09400027 and it occurs at a wavelngth of 3840.0\nCext max at d=0.5 nm is 3863.4502775 and it occurs at a wavelngth of 3847.5\n" ] ], [ [ "# GOLD ", "_____no_output_____" ], [ "# Case d=0.5 nm at +/-z 2 proteins EF -0.0037, R8 nm", "_____no_output_____" ] ], [ [ "w_d05_00_g , Cext_d05_00_g = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR80_d=0.5_2pz_GOLD/GOLD_BSA_sensorR80_d=infty_ef0.0037_total.txt', \n unpack = True)\nw_d05_2p_00_g , Cext_d05_2p_00_g = numpy.loadtxt('../data/wave_cext_d_prot_sensor/test_join_sort/BSA_sensorR80_d=0.5_2pz_GOLD/GOLD_BSA_sensorR80_2pz_d=0.5_00_ef0.0037_total.txt', \n unpack = True)", "_____no_output_____" ], [ "wavelength_d05_2p_00_g = [w_d05_00_g/10., w_d05_2p_00_g/10.]\ncext_d05_00_g = [Cext_d05_00_g, Cext_d05_2p_00_g]\nlinestyles = ['-', ':']\ncolors = ['k', '0.6']\nlabels = ['$d = \\infty$', '$d=0.5 \\,nm$']", "_____no_output_____" ], [ "wavelength = wavelength_d05_2p_00_g\nnumpy.linspace(min(wavelength[0]), max(wavelength[0]), 11)", "_____no_output_____" ], [ "def plot_cext_wave_distance(wavelength, cext, linestyles, colors, labels, title=None):\n '''Plots the cross extinction section as a function of wavelength for\n different values of distance at which the proteins are located.\n\n \tArguments:\n ----------\n wavelength: list of wavelength arrays for each distance case.\n cext : list of cross extinction section arrays for each distance case.\n linestyles: list of linstyles we desire to use for each distance case.\n colors : list of colors we desire to use for each distance case.\n labels : list of labels we desire to use for each distance case.\n\t'''\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = 16\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 2\n\n fig=pyplot.figure(figsize=(9,6))\n ax = fig.add_subplot(1,1,1)\n \n major_xticks = numpy.linspace(min(wavelength[0]), max(wavelength[0]), 11)\n #minor_xticks = numpy.linspace(min(wavelength[0]), max(wavelength[0]), 21)\n #major_yticks = numpy.linspace(0, 8000, 9)\n #minor_yticks = numpy.linspace(0, 8000, 33)\n\n ax.set_xticks(major_xticks) \n #ax.set_xticks(minor_xticks, minor=True)\n #ax.set_yticks(major_yticks) \n #ax.set_yticks(minor_yticks, minor=True)\n\n pyplot.xticks(rotation=25)\n pyplot.tick_params(axis='both', length=5, width=1, which='major', direction='in')\n pyplot.tick_params(axis='both', length=2.5, width=1, which='minor', direction='in')\n\n pyplot.xlabel('Wavelength [nm]')\n pyplot.ylabel('Cross extinction section [$nm^2$]')\n pyplot.xlim(517,527)\n #pyplot.ylim(0,8000)\n pyplot.grid(ls=':', which='minor', alpha=0.6)\n pyplot.grid(ls=':', which='major', alpha=0.8)\n #pyplot.title('Silver sphere with BSA Proteins')\n \n for i in range(len(wavelength)):\n pyplot.plot(wavelength[i], cext[i], linestyle=linestyles[i], \n color=colors[i], linewidth=2, label=labels[i])\n \n pyplot.legend(loc='best')\n\n if title:\n pyplot.savefig('figures/'+title+'.pdf', dpi=80, format='pdf')\n\n", "_____no_output_____" ], [ "plot_cext_wave_distance(wavelength_d05_2p_00_g, 
cext_d05_00_g, linestyles, colors, labels, title='GOLD_2pz_d05_00_ef-0.0037_R8nm')", "_____no_output_____" ], [ "lab = ['d=infty', 'd=0.5 nm']\nlst = list(zip(cext_d05_00_g, lab))\nfor i in range(len(lst)):\n    c, l = lst[i]\n    idx = numpy.where(c==max(c))\n    print('Cext max at {} is {} and it occurs at a wavelength of {}'.format(l,max(c), w_d05_00_g[idx][0]))", "Cext max at d=infty is 209.57050904 and it occurs at a wavelength of 5210.0\nCext max at d=0.5 nm is 213.51990511 and it occurs at a wavelength of 5210.0\n" ] ] ]
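Throughout the notebook above, each spectrum's peak is located with `idx = numpy.where(c == max(c))`. `numpy.argmax` yields the same index more directly and avoids the exact floating-point equality test. A small sketch with synthetic stand-in arrays (the real data live in the `../data/...` files loaded above):

```python
import numpy

# Synthetic stand-ins for a (wavelength, Cext) spectrum like those
# loaded with numpy.loadtxt in the notebook above.
w = numpy.linspace(3800.0, 3900.0, 41)                 # wavelength, Angstrom
cext = 15000.0 * numpy.exp(-((w - 3840.0) / 10.0)**2)  # fake resonance peak

idx = numpy.argmax(cext)                               # index of the maximum
print('Cext max is {} and it occurs at a wavelength of {}'.format(cext[idx], w[idx]))
```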
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
**Record 2**

| Field | Value |
|---|---|
| hexsha | ec59145354cf6f92cb21f6b290c8fc80569cd7f6 |
| size | 88,531 |
| ext | ipynb |
| lang | Jupyter Notebook |
| repo path (stars / issues / forks) | Lectures/Lecture2-Jupyter_and_python/Lecture-2-Introduction-to-Python-Programming.ipynb |
| repo name (stars / issues / forks) | xli13/big-data-python-class |
| repo head hexsha (stars / issues / forks) | f9bd318d65e2f4d46dfb49f6f5e89380b33c9381 |
| repo licenses (stars / issues / forks) | [ "MIT" ] |
| max_stars_count | 1 |
| stars event min / max datetime | 2018-07-04T08:44:52.000Z / 2018-07-04T08:44:52.000Z |
| max_issues_count | 2 |
| issues event min / max datetime | 2017-10-02T03:38:35.000Z / 2018-09-06T02:34:31.000Z |
| max_forks_count | 42 |
| forks event min / max datetime | 2015-08-26T00:06:39.000Z / 2018-12-19T23:56:13.000Z |
| avg_line_length | 21.619292 |
| max_line_length | 657 |
| alphanum_fraction | 0.495928 |
[ [ [ "# Introduction to Python programming", "_____no_output_____" ], [ "This crash course on python is take from two souces:\n\n[http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures).\nand\nChapter 2 of the Datascience from scratch: First principles with python\nCode from https://github.com/joelgrus/data-science-from-scratch\nAn official tutorial on ipyhthon: http://ipython.org/ipython-doc/2/interactive/tutorial.html", "_____no_output_____" ], [ "## Python program files", "_____no_output_____" ], [ "* Python code is usually stored in text files with the file ending \"`.py`\":\n\n myprogram.py\n\n* Every line in a Python program file is assumed to be a Python statement, or part thereof. \n\n * The only exception is comment lines, which start with the character `#` (optionally preceded by an arbitrary number of white-space characters, i.e., tabs or spaces). Comment lines are usually ignored by the Python interpreter.\n\n\n* To run our Python program from the command line we use:\n\n $ python myprogram.py\n\n* On UNIX systems it is common to define the path to the interpreter on the first line of the program (note that this is a comment line as far as the Python interpreter is concerned):\n\n #!/usr/bin/env python\n\n If we do, and if we additionally set the file script to be executable, we can run the program like this:\n\n $ myprogram.py", "_____no_output_____" ], [ "### Example:\na. use some command line functions:\n\nin Windows:\n\nls ..\\Scripts\\hello-world.py \n\nMac or linux:\n\nls ../Scripts/hello-world.py", "_____no_output_____" ] ], [ [ "ls ..\\..\\Scripts\\hello-world*.py", " Volume in drive C is OS\n Volume Serial Number is 7417-6934\n\n Directory of C:\\Users\\PS\\git\\big-data-python-class-2018\\Scripts\n\n09/05/2018 10:46 PM 19 hello-world.py\n09/05/2018 10:46 PM 72 hello-world-in-swedish.py\n 2 File(s) 91 bytes\n 0 Dir(s) 321,133,596,672 bytes free\n" ] ], [ [ "### built in magic commands start with #\n\nA good list of the commands are found in:\nhttps://ipython.org/ipython-doc/3/interactive/magics.html", "_____no_output_____" ] ], [ [ "%%sh \ncat ../../Scripts/hello-world.py", "print \"hello world\"" ], [ "!python ..\\..\\Scripts\\hello-world.py", "hello world\n" ] ], [ [ "### Character encoding", "_____no_output_____" ], [ "The standard character encoding is ASCII, but we can use any other encoding, for example UTF-8. To specify that UTF-8 is used we include the special line\n\n # -*- coding: UTF-8 -*-\n\nat the top of the file.", "_____no_output_____" ] ], [ [ "%%sh\ncat ../../Scripts/hello-world-in-swedish.py", "#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n\r\nprint(\"Hej världen!\")" ], [ "!python ../../Scripts/hello-world-in-swedish.py", "Hej världen!\n" ] ], [ [ "Other than these two *optional* lines in the beginning of a Python code file, no additional code is required for initializing a program. ", "_____no_output_____" ], [ "## Jupyter notebooks ( or old ipython notebooks)", "_____no_output_____" ], [ "This file - an IPython notebook - does not follow the standard pattern with Python code in a text file. Instead, an IPython notebook is stored as a file in the [JSON](http://en.wikipedia.org/wiki/JSON) format. The advantage is that we can mix formatted text, Python code and code output. It requires the IPython notebook server to run it though, and therefore isn't a stand-alone Python program as described above. 
Other than that, there is no difference between the Python code that goes into a program file or an IPython notebook.", "_____no_output_____" ], [ "## Modules", "_____no_output_____" ], [ "Most of the functionality in Python is provided by *modules*. The Python Standard Library is a large collection of modules that provides *cross-platform* implementations of common facilities such as access to the operating system, file I/O, string management, network communication, and much more.", "_____no_output_____" ], [ "### References", "_____no_output_____" ], [ " * The Python Language Reference: http://docs.python.org/2/reference/index.html\n * The Python Standard Library: http://docs.python.org/2/library/\n\nTo use a module in a Python program it first has to be imported. A module can be imported using the `import` statement. For example, to import the module `math`, which contains many standard mathematical functions, we can do:", "_____no_output_____" ] ], [ [ "import math", "_____no_output_____" ] ], [ [ "This includes the whole module and makes it available for use later in the program. For example, we can do:", "_____no_output_____" ] ], [ [ "import math\n\nx = math.cos(2 * math.pi)\n\nprint(x)", "1.0\n" ] ], [ [ "Alternatively, we can chose to import all symbols (functions and variables) in a module to the current namespace (so that we don't need to use the prefix \"`math.`\" every time we use something from the `math` module:", "_____no_output_____" ] ], [ [ "from math import *\n\nx = cos(2 * pi)\n\nprint(x)", "1.0\n" ] ], [ [ "This pattern can be very convenient, but in large programs that include many modules it is often a good idea to keep the symbols from each module in their own namespaces, by using the `import math` pattern. This would elminate potentially confusing problems with name space collisions.\n\nAs a third alternative, we can chose to import only a few selected symbols from a module by explicitly listing which ones we want to import instead of using the wildcard character `*`:", "_____no_output_____" ] ], [ [ "from math import cos, pi\n\nx = cos(2 * pi)\n\nprint(x)", "1.0\n" ] ], [ [ "### Looking at what a module contains, and its documentation", "_____no_output_____" ], [ "Once a module is imported, we can list the symbols it provides using the `dir` function:", "_____no_output_____" ] ], [ [ "import math\n\nprint(dir(math))", "['__doc__', '__name__', '__package__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'hypot', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc']\n" ] ], [ [ "And using the function `help` we can get a description of each function (almost .. not all functions have docstrings, as they are technically called, but the vast majority of functions are documented this way). 
", "_____no_output_____" ] ], [ [ "help(math.log)", "Help on built-in function log in module math:\n\nlog(...)\n log(x[, base])\n \n Return the logarithm of x to the given base.\n If the base not specified, returns the natural logarithm (base e) of x.\n\n" ], [ "log(10)", "_____no_output_____" ], [ "log(10, 2)", "_____no_output_____" ] ], [ [ "We can also use the `help` function directly on modules: Try\n\n help(math) \n\nSome very useful modules form the Python standard library are `os`, `sys`, `math`, `shutil`, `re`, `subprocess`, `multiprocessing`, `threading`. \n\nA complete lists of standard modules for Python 2 and Python 3 are available at http://docs.python.org/2/library/ and http://docs.python.org/3/library/, respectively.", "_____no_output_____" ], [ "# Whitespacing formatting\nPython uses indents and not bracing to delimit blocks of code\nMakes code readable but it means you must be careful about your formatting", "_____no_output_____" ] ], [ [ "for i in [1,2,3,4]:\n print i\nprint \"done looping\"", "1\n2\n3\n4\ndone looping\n" ] ], [ [ "White spacing is ignored inside parenteses and brackets:", "_____no_output_____" ] ], [ [ "long_winded_computation = (1+2+ 3 + 4 + 5+ 6\n + 7 + 8 + 9 + 10 + 11+\n \n 13 + 14 + 15)\nlong_winded_computation\n", "_____no_output_____" ] ], [ [ "So use that to make you code easier to read", "_____no_output_____" ] ], [ [ "list_of_lists = [[1,2,3],[4,5,6],[7,8,9]]\neasier_to_read_list_of_lists = [[1,2,3],\n [4,5,6],\n [7,8,9]]", "_____no_output_____" ] ], [ [ "This may make it hard to cut and paste code since the indentation may have to adjusted to the block", "_____no_output_____" ], [ "## Variables and types", "_____no_output_____" ], [ "### Symbol names ", "_____no_output_____" ], [ "Variable names in Python can contain alphanumerical characters `a-z`, `A-Z`, `0-9` and some special characters such as `_`. Normal variable names must start with a letter. \n\nBy convention, variable names start with a lower-case letter, and Class names start with a capital letter. \n\nIn addition, there are a number of Python keywords that cannot be used as variable names. These keywords are:\n\n and, as, assert, break, class, continue, def, del, elif, else, except, \n exec, finally, for, from, global, if, import, in, is, lambda, not, or,\n pass, print, raise, return, try, while, with, yield\n\nNote: Be aware of the keyword `lambda`, which could easily be a natural variable name in a scientific program. But being a keyword, it cannot be used as a variable name.", "_____no_output_____" ], [ "### Functions\nA function is a rule for taking zero or more inputes and return a corresponding output\nFunctions are first-class which means we can assign them to variables and pass them into functions just like any aguement", "_____no_output_____" ] ], [ [ "def double(x):\n \"\"\"this is where you put in the docstring that explains what the fuciton does:\n this function multiplies its input by \"\"\"\n return x * 2\n\ndef apply_to_one(f):\n \"\"\"calls the fuction f with 1 as its argument\"\"\"\n return f(1)\n\napply_to_one(double)\n", "_____no_output_____" ] ], [ [ "You can create short anonymous fuctions or lambdas or even assign lambdas to variables but better to use a def", "_____no_output_____" ] ], [ [ "apply_to_one(lambda x: x + 4 )", "_____no_output_____" ], [ "another_double = lambda x: 2 * x\ndef yet_another_double(x): return 2 * x", "_____no_output_____" ] ], [ [ "### Assignment", "_____no_output_____" ], [ "\n\nThe assignment operator in Python is `=`. 
Python is a dynamically typed language, so we do not need to specify the type of a variable when we create one.\n\nAssigning a value to a new variable creates the variable:", "_____no_output_____" ] ], [ [ "# variable assignments\nx = 1.0\nmy_variable = 12.2", "_____no_output_____" ] ], [ [ "Although not explicitly specified, a variable does have a type associated with it. The type is derived from the value that was assigned to it.", "_____no_output_____" ] ], [ [ "type(x)", "_____no_output_____" ] ], [ [ "If we assign a new value to a variable, its type can change.", "_____no_output_____" ] ], [ [ "x = 1", "_____no_output_____" ], [ "type(x)", "_____no_output_____" ] ], [ [ "If we try to use a variable that has not yet been defined we get an `NameError`:", "_____no_output_____" ] ], [ [ "print(y)", "_____no_output_____" ] ], [ [ "### Fundamental types", "_____no_output_____" ] ], [ [ "# integers\nx = 1\ntype(x)", "_____no_output_____" ], [ "# float\nx = 1.0\ntype(x)", "_____no_output_____" ], [ "# boolean\nb1 = True\nb2 = False\n\ntype(b1)", "_____no_output_____" ], [ "# complex numbers: note the use of `j` to specify the imaginary part\nx = 1.0 - 1.0j\ntype(x)", "_____no_output_____" ], [ "print(x)", "(1-1j)\n" ], [ "print(x.real, x.imag)", "(1.0, -1.0)\n" ] ], [ [ "### Type utility functions", "_____no_output_____" ], [ "\nThe module `types` contains a number of type name definitions that can be used to test if variables are of certain types:", "_____no_output_____" ] ], [ [ "import types\n\n# print all types defined in the `types` module\nprint(dir(types))", "['BooleanType', 'BufferType', 'BuiltinFunctionType', 'BuiltinMethodType', 'ClassType', 'CodeType', 'ComplexType', 'DictProxyType', 'DictType', 'DictionaryType', 'EllipsisType', 'FileType', 'FloatType', 'FrameType', 'FunctionType', 'GeneratorType', 'GetSetDescriptorType', 'InstanceType', 'IntType', 'LambdaType', 'ListType', 'LongType', 'MemberDescriptorType', 'MethodType', 'ModuleType', 'NoneType', 'NotImplementedType', 'ObjectType', 'SliceType', 'StringType', 'StringTypes', 'TracebackType', 'TupleType', 'TypeType', 'UnboundMethodType', 'UnicodeType', 'XRangeType', '__all__', '__builtins__', '__doc__', '__file__', '__name__', '__package__']\n" ], [ "x = 1.0\n\n# check if the variable x is a float\ntype(x) is float", "_____no_output_____" ], [ "# check if the variable x is an int\ntype(x) is int", "_____no_output_____" ] ], [ [ "We can also use the `isinstance` method for testing types of variables:", "_____no_output_____" ] ], [ [ "isinstance(x, float)", "_____no_output_____" ] ], [ [ "### Type casting", "_____no_output_____" ] ], [ [ "x = 1.5\n\nprint(x, type(x))", "(1.5, <type 'float'>)\n" ], [ "x = int(x)\n\nprint(x, type(x))", "(1, <type 'int'>)\n" ], [ "z = complex(x)\n\nprint(z, type(z))", "((1+0j), <type 'complex'>)\n" ], [ "x = float(z)", "_____no_output_____" ] ], [ [ "Complex variables cannot be cast to floats or integers. 
We need to use `z.real` or `z.imag` to extract the part of the complex number we want:", "_____no_output_____" ] ], [ [ "y = bool(z.real)\n\nprint(z.real, \" -> \", y, type(y))\n\ny = bool(z.imag)\n\nprint(z.imag, \" -> \", y, type(y))", "(1.0, ' -> ', True, <type 'bool'>)\n(0.0, ' -> ', False, <type 'bool'>)\n" ] ], [ [ "## Operators and comparisons", "_____no_output_____" ], [ "Most operators and comparisons in Python work as one would expect:\n\n* Arithmetic operators `+`, `-`, `*`, `/`, `//` (integer division), '**' power\n", "_____no_output_____" ] ], [ [ "1 + 2, 1 - 2, 1 * 2, 1 / 2", "_____no_output_____" ], [ "1.0 + 2.0, 1.0 - 2.0, 1.0 * 2.0, 1.0 / 2.0", "_____no_output_____" ], [ "# Integer division of float numbers\n3.0 // 2.0", "_____no_output_____" ], [ "# Note! The power operators in python isn't ^, but **\n2 ** 2", "_____no_output_____" ] ], [ [ "Note: The `/` operator always performs a floating point division in Python 3.x.\nThis is not true in Python 2.x, where the result of `/` is always an integer if the operands are integers.\nto be more specific, `1/2 = 0.5` (`float`) in Python 3.x, and `1/2 = 0` (`int`) in Python 2.x (but `1.0/2 = 0.5` in Python 2.x).", "_____no_output_____" ], [ "* The boolean operators are spelled out as the words `and`, `not`, `or`. ", "_____no_output_____" ] ], [ [ "True and False", "_____no_output_____" ], [ "not False", "_____no_output_____" ], [ "True or False", "_____no_output_____" ] ], [ [ "* Comparison operators `>`, `<`, `>=` (greater or equal), `<=` (less or equal), `==` equality, `is` identical.", "_____no_output_____" ] ], [ [ "2 > 1, 2 < 1", "_____no_output_____" ], [ "2 > 2, 2 < 2", "_____no_output_____" ], [ "2 >= 2, 2 <= 2", "_____no_output_____" ], [ "# equality\n[1,2] == [1,2]", "_____no_output_____" ], [ "# objects identical?\nl1 = l2 = [1,2]\n\nl1 is l2", "_____no_output_____" ] ], [ [ "## Compound types: Strings, List and dictionaries", "_____no_output_____" ], [ "### Strings", "_____no_output_____" ], [ "Strings are the variable type that is used for storing text messages. ", "_____no_output_____" ] ], [ [ "s = \"Hello world\"\ntype(s)", "_____no_output_____" ], [ "# length of the string: the number of characters\nlen(s)", "_____no_output_____" ], [ "# replace a substring in a string with somethign else\ns2 = s.replace(\"world\", \"test\")\nprint(s2)", "Hello test\n" ] ], [ [ "We can index a character in a string using `[]`:", "_____no_output_____" ] ], [ [ "s[0]", "_____no_output_____" ] ], [ [ "**Heads up MATLAB users:** Indexing start at 0!\n\nWe can extract a part of a string using the syntax `[start:stop]`, which extracts characters between index `start` and `stop` -1 (the character at index `stop` is not included):", "_____no_output_____" ] ], [ [ "s[0:5]", "_____no_output_____" ], [ "s[4:5]", "_____no_output_____" ] ], [ [ "If we omit either (or both) of `start` or `stop` from `[start:stop]`, the default is the beginning and the end of the string, respectively:", "_____no_output_____" ] ], [ [ "s[:5]", "_____no_output_____" ], [ "s[6:]", "_____no_output_____" ], [ "s[:]", "_____no_output_____" ] ], [ [ "We can also define the step size using the syntax `[start:end:step]` (the default value for `step` is 1, as we saw above):", "_____no_output_____" ] ], [ [ "s[::1]", "_____no_output_____" ], [ "s[::2]", "_____no_output_____" ] ], [ [ "This technique is called *slicing*. 
Read more about the syntax here: http://docs.python.org/release/2.7.3/library/functions.html?highlight=slice#slice", "_____no_output_____" ], [ "Python has a very rich set of functions for text processing. See for example http://docs.python.org/2/library/string.html for more information.", "_____no_output_____" ], [ "#### String formatting examples", "_____no_output_____" ] ], [ [ "print(\"str1\", \"str2\", \"str3\") # The print statement concatenates strings with a space", "('str1', 'str2', 'str3')\n" ], [ "print(\"str1\", 1.0, False, -1j) # The print statements converts all arguments to strings", "('str1', 1.0, False, -1j)\n" ], [ "print(\"str1\" + \"str2\" + \"str3\") # strings added with + are concatenated without space", "str1str2str3\n" ], [ "print(\"value = %f\" % 1.0) # we can use C-style string formatting", "value = 1.000000\n" ], [ "# this formatting creates a string\ns2 = \"value1 = %.2f. value2 = %d\" % (3.1415, 1.5)\n\nprint(s2)", "value1 = 3.14. value2 = 1\n" ], [ "# alternative, more intuitive way of formatting a string \ns3 = 'value1 = {0}, value2 = {1}'.format(3.1415, 1.5)\n\nprint(s3)", "value1 = 3.1415, value2 = 1.5\n" ] ], [ [ "### List", "_____no_output_____" ], [ "Lists are very similar to strings, except that each element can be of any type.\nOther languages call these arrays, simply an ordered collection of things.\n\nThe syntax for creating lists in Python is `[...]`:", "_____no_output_____" ] ], [ [ "l = [1,2,3,4]\n\nprint(type(l))\nprint(l)", "<type 'list'>\n[1, 2, 3, 4]\n" ] ], [ [ "We can use the same slicing techniques to manipulate lists as we could use on strings:", "_____no_output_____" ] ], [ [ "print(l)\n\nprint(l[1:3])\n\nprint(l[::2])", "[1, 2, 3, 4]\n[2, 3]\n[1, 3]\n" ] ], [ [ "**Heads up MATLAB users:** Indexing starts at 0!", "_____no_output_____" ] ], [ [ "l[0]", "_____no_output_____" ] ], [ [ "Elements in a list do not all have to be of the same type:", "_____no_output_____" ] ], [ [ "l = [1, 'a', 1.0, 1-1j]\n\nprint(l)", "[1, 'a', 1.0, (1-1j)]\n" ] ], [ [ "Python lists can be inhomogeneous and arbitrarily nested:", "_____no_output_____" ] ], [ [ "nested_list = [1, [2, [3, [4, [5]]]]]\n\nnested_list", "_____no_output_____" ] ], [ [ "Lists play a very important role in Python. For example they are used in loops and other flow control structures (discussed below). There are a number of convenient functions for generating lists of various types, for example the `range` function:", "_____no_output_____" ] ], [ [ "start = 10\nstop = 30\nstep = 2\n\nrange(start, stop, step)", "_____no_output_____" ], [ "# in python 3 range generates an interator, which can be converted to a list using 'list(...)'.\n# It has no effect in python 2\nlist(range(start, stop, step))", "_____no_output_____" ], [ "list(range(-10, 10))", "_____no_output_____" ], [ "s", "_____no_output_____" ], [ "# convert a string to a list by type casting:\ns2 = list(s)\n\ns2", "_____no_output_____" ], [ "# sorting lists\ns2.sort()\n\nprint(s2)", "[' ', 'H', 'd', 'e', 'l', 'l', 'l', 'o', 'o', 'r', 'w']\n" ] ], [ [ "#### Adding, inserting, modifying, and removing elements from lists", "_____no_output_____" ] ], [ [ "# create a new empty list\nl = []\n\n# add an elements using `append`\nl.append(\"A\")\nl.append(\"d\")\nl.append(\"d\")\n\nprint(l)", "['A', 'd', 'd']\n" ] ], [ [ "We can modify lists by assigning new values to elements in the list. 
In technical jargon, lists are *mutable*.", "_____no_output_____" ] ], [ [ "l[1] = \"p\"\nl[2] = \"p\"\n\nprint(l)", "['A', 'p', 'p']\n" ], [ "l[1:3] = [\"d\", \"d\"]\n\nprint(l)", "['A', 'd', 'd']\n" ] ], [ [ "Insert an element at an specific index using `insert`", "_____no_output_____" ] ], [ [ "l.insert(0, \"i\")\nl.insert(1, \"n\")\nl.insert(2, \"s\")\nl.insert(3, \"e\")\nl.insert(4, \"r\")\nl.insert(5, \"t\")\n\nprint(l)", "['i', 'n', 's', 'e', 'r', 't', 'A', 'd', 'd']\n" ] ], [ [ "Remove first element with specific value using 'remove'", "_____no_output_____" ] ], [ [ "l.remove(\"A\")\n\nprint(l)", "['i', 'n', 's', 'e', 'r', 't', 'd', 'd']\n" ] ], [ [ "Remove an element at a specific location using `del`:", "_____no_output_____" ] ], [ [ "del l[7]\ndel l[6]\n\nprint(l)", "['i', 'n', 's', 'e', 'r', 't']\n" ] ], [ [ "See `help(list)` for more details, or read the online documentation ", "_____no_output_____" ], [ "### Tuples", "_____no_output_____" ], [ "Tuples are like lists, except that they cannot be modified once created, that is they are *immutable*. \n\nIn Python, tuples are created using the syntax `(..., ..., ...)`, or even `..., ...`:", "_____no_output_____" ] ], [ [ "point = (10, 20)\n\nprint(point, type(point))", "((10, 20), <type 'tuple'>)\n" ], [ "point = 10, 20\n\nprint(point, type(point))", "((10, 20), <type 'tuple'>)\n" ] ], [ [ "We can unpack a tuple by assigning it to a comma-separated list of variables:", "_____no_output_____" ] ], [ [ "x, y = point\n\nprint(\"x =\", x)\nprint(\"y =\", y)", "('x =', 10)\n('y =', 20)\n" ] ], [ [ "If we try to assign a new value to an element in a tuple we get an error:", "_____no_output_____" ] ], [ [ "point[0] = 20", "_____no_output_____" ] ], [ [ "### Dictionaries", "_____no_output_____" ], [ "Dictionaries are also like lists, except that each element is a key-value pair. The syntax for dictionaries is `{key1 : value1, ...}`:", "_____no_output_____" ] ], [ [ "params = {\"parameter1\" : 1.0,\n \"parameter2\" : 2.0,\n \"parameter3\" : 3.0,}\n\nprint(type(params))\nprint(params)", "<type 'dict'>\n{'parameter1': 1.0, 'parameter3': 3.0, 'parameter2': 2.0}\n" ], [ "print(\"parameter1 = \" + str(params[\"parameter1\"]))\nprint(\"parameter2 = \" + str(params[\"parameter2\"]))\nprint(\"parameter3 = \" + str(params[\"parameter3\"]))", "parameter1 = 1.0\nparameter2 = 2.0\nparameter3 = 3.0\n" ], [ "params[\"parameter1\"] = \"A\"\nparams[\"parameter2\"] = \"B\"\n\n# add a new entry\nparams[\"parameter4\"] = \"D\"\n\nprint(\"parameter1 = \" + str(params[\"parameter1\"]))\nprint(\"parameter2 = \" + str(params[\"parameter2\"]))\nprint(\"parameter3 = \" + str(params[\"parameter3\"]))\nprint(\"parameter4 = \" + str(params[\"parameter4\"]))", "parameter1 = A\nparameter2 = B\nparameter3 = 3.0\nparameter4 = D\n" ] ], [ [ "## Control Flow", "_____no_output_____" ], [ "### Conditional statements: if, elif, else", "_____no_output_____" ], [ "The Python syntax for conditional execution of code uses the keywords `if`, `elif` (else if), `else`:", "_____no_output_____" ] ], [ [ "statement1 = False\nstatement2 = False\n\nif statement1:\n print(\"statement1 is True\")\n \nelif statement2:\n print(\"statement2 is True\")\n \nelse:\n print(\"statement1 and statement2 are False\")", "statement1 and statement2 are False\n" ] ], [ [ "For the first time, here we encounted a peculiar and unusual aspect of the Python programming language: Program blocks are defined by their indentation level. 
\n\nCompare to the equivalent C code:\n\n if (statement1)\n {\n printf(\"statement1 is True\\n\");\n }\n else if (statement2)\n {\n printf(\"statement2 is True\\n\");\n }\n else\n {\n printf(\"statement1 and statement2 are False\\n\");\n }\n\nIn C blocks are defined by the enclosing curly brakets `{` and `}`. And the level of indentation (white space before the code statements) does not matter (completely optional). \n\nBut in Python, the extent of a code block is defined by the indentation level (usually a tab or say four white spaces). This means that we have to be careful to indent our code correctly, or else we will get syntax errors. ", "_____no_output_____" ], [ "#### Examples:", "_____no_output_____" ] ], [ [ "statement1 = statement2 = True\n\nif statement1:\n if statement2:\n print(\"both statement1 and statement2 are True\")", "both statement1 and statement2 are True\n" ], [ "# Bad indentation!\nif statement1:\n if statement2:\n print(\"both statement1 and statement2 are True\") # this line is not properly indented", "_____no_output_____" ], [ "statement1 = False \n\nif statement1:\n print(\"printed if statement1 is True\")\n \n print(\"still inside the if block\")", "_____no_output_____" ], [ "if statement1:\n print(\"printed if statement1 is True\")\n \nprint(\"now outside the if block\")", "now outside the if block\n" ] ], [ [ "## Loops", "_____no_output_____" ], [ "In Python, loops can be programmed in a number of different ways. The most common is the `for` loop, which is used together with iterable objects, such as lists. The basic syntax is:", "_____no_output_____" ], [ "### **`for` loops**:", "_____no_output_____" ] ], [ [ "for x in [1,2,3]:\n print(x)", "1\n2\n3\n" ] ], [ [ "The `for` loop iterates over the elements of the supplied list, and executes the containing block once for each element. Any kind of list can be used in the `for` loop. For example:", "_____no_output_____" ] ], [ [ "for x in range(4): # by default range start at 0\n print(x)", "0\n1\n2\n3\n" ] ], [ [ "Note: `range(4)` does not include 4 !", "_____no_output_____" ] ], [ [ "for x in range(-3,3):\n print(x)", "-3\n-2\n-1\n0\n1\n2\n" ], [ "for word in [\"scientific\", \"computing\", \"with\", \"python\"]:\n print(word)", "scientific\ncomputing\nwith\npython\n" ] ], [ [ "To iterate over key-value pairs of a dictionary:", "_____no_output_____" ] ], [ [ "for key, value in params.items():\n print(key + \" = \" + str(value))", "parameter4 = D\nparameter1 = A\nparameter3 = 3.0\nparameter2 = B\n" ] ], [ [ "Sometimes it is useful to have access to the indices of the values when iterating over a list. 
We can use the `enumerate` function for this:", "_____no_output_____" ] ], [ [ "for idx, x in enumerate(range(-3,3)):\n print(idx, x)", "(0, -3)\n(1, -2)\n(2, -1)\n(3, 0)\n(4, 1)\n(5, 2)\n" ] ], [ [ "### List comprehensions: Creating lists using `for` loops:", "_____no_output_____" ], [ "A convenient and compact way to initialize lists:", "_____no_output_____" ] ], [ [ "l1 = [x**2 for x in range(0,5)]\n\nprint(l1)", "[0, 1, 4, 9, 16]\n" ] ], [ [ "### `while` loops:", "_____no_output_____" ] ], [ [ "i = 0\n\nwhile i < 5:\n print(i)\n \n i = i + 1\n \nprint(\"done\")", "0\n1\n2\n3\n4\ndone\n" ] ], [ [ "Note that the `print(\"done\")` statement is not part of the `while` loop body because of the difference in indentation.", "_____no_output_____" ], [ "## Functions", "_____no_output_____" ], [ "A function in Python is defined using the keyword `def`, followed by a function name, a signature within parentheses `()`, and a colon `:`. The following code, with one additional level of indentation, is the function body.", "_____no_output_____" ] ], [ [ "def func0(): \n print(\"test\")", "_____no_output_____" ], [ "func0()", "test\n" ] ], [ [ "Optionally, but highly recommended, we can define a so called \"docstring\", which is a description of the functions purpose and behaivor. The docstring should follow directly after the function definition, before the code in the function body.", "_____no_output_____" ] ], [ [ "def func1(s):\n \"\"\"\n Print a string 's' and tell how many characters it has \n \"\"\"\n \n print(s + \" has \" + str(len(s)) + \" characters\")", "_____no_output_____" ], [ "help(func1)", "Help on function func1 in module __main__:\n\nfunc1(s)\n Print a string 's' and tell how many characters it has\n\n" ], [ "func1(\"test\")", "test has 4 characters\n" ] ], [ [ "Functions that returns a value use the `return` keyword:", "_____no_output_____" ] ], [ [ "def square(x):\n \"\"\"\n Return the square of x.\n \"\"\"\n return x ** 2", "_____no_output_____" ], [ "square(4)", "_____no_output_____" ] ], [ [ "We can return multiple values from a function using tuples (see above):", "_____no_output_____" ] ], [ [ "def powers(x):\n \"\"\"\n Return a few powers of x.\n \"\"\"\n return x ** 2, x ** 3, x ** 4", "_____no_output_____" ], [ "powers(3)", "_____no_output_____" ], [ "x2, x3, x4 = powers(3)\n\nprint(x3)", "27\n" ] ], [ [ "### Default argument and keyword arguments", "_____no_output_____" ], [ "In a definition of a function, we can give default values to the arguments the function takes:", "_____no_output_____" ] ], [ [ "def myfunc(x, p=2, debug=False):\n if debug:\n print(\"evaluating myfunc for x = \" + str(x) + \" using exponent p = \" + str(p))\n return x**p", "_____no_output_____" ] ], [ [ "If we don't provide a value of the `debug` argument when calling the the function `myfunc` it defaults to the value provided in the function definition:", "_____no_output_____" ] ], [ [ "myfunc(5)", "_____no_output_____" ], [ "myfunc(5, debug=True)", "evaluating myfunc for x = 5 using exponent p = 2\n" ] ], [ [ "If we explicitly list the name of the arguments in the function calls, they do not need to come in the same order as in the function definition. 
This is called *keyword* arguments, and is often very useful in functions that takes a lot of optional arguments.", "_____no_output_____" ] ], [ [ "myfunc(p=3, debug=True, x=7)", "evaluating myfunc for x = 7 using exponent p = 3\n" ] ], [ [ "### Unnamed functions (lambda function)", "_____no_output_____" ], [ "In Python we can also create unnamed functions, using the `lambda` keyword:", "_____no_output_____" ] ], [ [ "f1 = lambda x: x**2\n \n# is equivalent to \n\ndef f2(x):\n return x**2", "_____no_output_____" ], [ "f1(2), f2(2)", "_____no_output_____" ] ], [ [ "This technique is useful for example when we want to pass a simple function as an argument to another function, like this:", "_____no_output_____" ] ], [ [ "# map is a built-in python function\nmap(lambda x: x**2, range(-3,4))", "_____no_output_____" ], [ "# in python 3 we can use `list(...)` to convert the iterator to an explicit list\nlist(map(lambda x: x**2, range(-3,4)))", "_____no_output_____" ] ], [ [ "## Classes", "_____no_output_____" ], [ "Classes are the key features of object-oriented programming. A class is a structure for representing an object and the operations that can be performed on the object. \n\nIn Python a class can contain *attributes* (variables) and *methods* (functions).\n\nA class is defined almost like a function, but using the `class` keyword, and the class definition usually contains a number of class method definitions (a function in a class).\n\n* Each class method should have an argument `self` as its first argument. This object is a self-reference.\n\n* Some class method names have special meaning, for example:\n\n * `__init__`: The name of the method that is invoked when the object is first created.\n * `__str__` : A method that is invoked when a simple string representation of the class is needed, as for example when printed.\n * There are many more, see http://docs.python.org/2/reference/datamodel.html#special-method-names", "_____no_output_____" ] ], [ [ "class Point:\n \"\"\"\n Simple class for representing a point in a Cartesian coordinate system.\n \"\"\"\n \n def __init__(self, x, y):\n \"\"\"\n Create a new Point at x, y.\n \"\"\"\n self.x = x\n self.y = y\n \n def translate(self, dx, dy):\n \"\"\"\n Translate the point by dx and dy in the x and y direction.\n \"\"\"\n self.x += dx\n self.y += dy\n \n def __str__(self):\n return(\"Point at [%f, %f]\" % (self.x, self.y))", "_____no_output_____" ] ], [ [ "To create a new instance of a class:", "_____no_output_____" ] ], [ [ "p1 = Point(0, 0) # this will invoke the __init__ method in the Point class\n\nprint(p1) # this will invoke the __str__ method", "Point at [0.000000, 0.000000]\n" ] ], [ [ "To invoke a class method in the class instance `p`:", "_____no_output_____" ] ], [ [ "p2 = Point(1, 1)\n\np1.translate(0.25, 1.5)\n\nprint(p1)\nprint(p2)", "Point at [0.250000, 1.500000]\nPoint at [1.000000, 1.000000]\n" ] ], [ [ "Note that calling class methods can modifiy the state of that particular class instance, but does not effect other class instances or any global variables.\n\nThat is one of the nice things about object-oriented design: code such as functions and related variables are grouped in separate and independent entities. 
", "_____no_output_____" ], [ "## Modules", "_____no_output_____" ], [ "One of the most important concepts in good programming is to reuse code and avoid repetitions.\n\nThe idea is to write functions and classes with a well-defined purpose and scope, and reuse these instead of repeating similar code in different part of a program (modular programming). The result is usually that readability and maintainability of a program is greatly improved. What this means in practice is that our programs have fewer bugs, are easier to extend and debug/troubleshoot. \n\nPython supports modular programming at different levels. Functions and classes are examples of tools for low-level modular programming. Python modules are a higher-level modular programming construct, where we can collect related variables, functions and classes in a module. A python module is defined in a python file (with file-ending `.py`), and it can be made accessible to other Python modules and programs using the `import` statement. \n\nConsider the following example: the file `mymodule.py` contains simple example implementations of a variable, function and a class:", "_____no_output_____" ] ], [ [ "%%file mymodule.py\n\"\"\"\nExample of a python module. Contains a variable called my_variable,\na function called my_function, and a class called MyClass.\n\"\"\"\n\nmy_variable = 0\n\ndef my_function():\n \"\"\"\n Example function\n \"\"\"\n return my_variable\n \nclass MyClass:\n \"\"\"\n Example class.\n \"\"\"\n\n def __init__(self):\n self.variable = my_variable\n \n def set_variable(self, new_value):\n \"\"\"\n Set self.variable to a new value\n \"\"\"\n self.variable = new_value\n \n def get_variable(self):\n return self.variable", "Writing mymodule.py\n" ] ], [ [ "We can import the module `mymodule` into our Python program using `import`:", "_____no_output_____" ] ], [ [ "import mymodule", "_____no_output_____" ] ], [ [ "Use `help(module)` to get a summary of what the module provides:", "_____no_output_____" ] ], [ [ "help(mymodule)", "Help on module mymodule:\n\nNAME\n mymodule\n\nFILE\n c:\\users\\ps\\git\\big-data-python-class\\lectures\\lecture2-jupyter_and_python\\mymodule.py\n\nDESCRIPTION\n Example of a python module. Contains a variable called my_variable,\n a function called my_function, and a class called MyClass.\n\nCLASSES\n MyClass\n \n class MyClass\n | Example class.\n | \n | Methods defined here:\n | \n | __init__(self)\n | \n | get_variable(self)\n | \n | set_variable(self, new_value)\n | Set self.variable to a new value\n\nFUNCTIONS\n my_function()\n Example function\n\nDATA\n my_variable = 0\n\n\n" ], [ "mymodule.my_variable", "_____no_output_____" ], [ "mymodule.my_function() ", "_____no_output_____" ], [ "my_class = mymodule.MyClass() \nmy_class.set_variable(10)\nmy_class.get_variable()", "_____no_output_____" ] ], [ [ "If we make changes to the code in `mymodule.py`, we need to reload it using `reload`:", "_____no_output_____" ] ], [ [ "reload(mymodule) # works only in python 2", "_____no_output_____" ] ], [ [ "## Exceptions", "_____no_output_____" ], [ "In Python errors are managed with a special language construct called \"Exceptions\". When errors occur exceptions can be raised, which interrupts the normal program flow and fallback to somewhere else in the code where the closest try-except statement is defined.", "_____no_output_____" ], [ "To generate an exception we can use the `raise` statement, which takes an argument that must be an instance of the class `BaseException` or a class derived from it. 
", "_____no_output_____" ] ], [ [ "raise Exception(\"description of the error\")", "_____no_output_____" ] ], [ [ "A typical use of exceptions is to abort functions when some error condition occurs, for example:\n\n def my_function(arguments):\n \n if not verify(arguments):\n raise Exception(\"Invalid arguments\")\n \n # rest of the code goes here", "_____no_output_____" ], [ "To gracefully catch errors that are generated by functions and class methods, or by the Python interpreter itself, use the `try` and `except` statements:\n\n try:\n # normal code goes here\n except:\n # code for error handling goes here\n # this code is not executed unless the code\n # above generated an error\n\nFor example:", "_____no_output_____" ] ], [ [ "try:\n print(\"test\")\n # generate an error: the variable test is not defined\n print(test)\nexcept:\n print(\"Caught an exception\")", "test\nCaught an exception\n" ] ], [ [ "To get information about the error, we can access the `Exception` class instance that describes the exception by using for example:\n\n except Exception as e:", "_____no_output_____" ] ], [ [ "try:\n print(\"test\")\n # generate an error: the variable test is not defined\n print(test)\nexcept Exception as e:\n print(\"Caught an exception:\" + str(e))", "test\nCaught an exception:name 'test' is not defined\n" ] ], [ [ "### Counter\n\nTurns a sequenc of values into a defaultdict(int)-like object mappings keys to counts ( good for histograms )", "_____no_output_____" ] ], [ [ "import collections as coll\nc = coll.Counter([0,1,2,0])\nprint c", "Counter({0: 2, 1: 1, 2: 1})\n" ], [ "document = \"This is a test document with a lot of different words but at least one duplicate\".split(\" \")\nword_counts = coll.Counter(document)\nprint word_counts", "Counter({'a': 2, 'different': 1, 'least': 1, 'words': 1, 'This': 1, 'of': 1, 'is': 1, 'but': 1, 'one': 1, 'duplicate': 1, 'at': 1, 'lot': 1, 'test': 1, 'document': 1, 'with': 1})\n" ] ], [ [ "### Sorting", "_____no_output_____" ] ], [ [ "x = [4,1,2,3]\ny=sorted(x)\nx.sort()\nprint \"X=\"+str(x)\nprint \"y=\"+str(y)", "X=[1, 2, 3, 4]\ny=[1, 2, 3, 4]\n" ] ], [ [ "### List Comprehensions", "_____no_output_____" ] ], [ [ "even_numbers = [x for x in range(5) if x %2 == 0]\nsquares = [x * x for x in range(5)]\neven_squared = [x*x for x in even_numbers]\n\nprint even_squared", "[0, 4, 16]\n" ], [ "square_dict = { x: x*x for x in range(5) }\nprint square_dict", "{0: 0, 1: 1, 2: 4, 3: 9, 4: 16}\n" ] ], [ [ "### Generators and Iterators", "_____no_output_____" ] ], [ [ "range(10) # works greate but sometimes we need a single number or set when we need them", "_____no_output_____" ], [ "def lazy_range(n): \n \"\"\"a lazy version of ther range function to only create the value when evaluating it import when the range gets really big\"\"\"\n i = 0\n while i < n :\n yield i\n i += 1\n \nfor i in lazy_range(10):\n print str(double(i))", "0\n2\n4\n6\n8\n10\n12\n14\n16\n18\n" ] ], [ [ "You need to recreate the the lazy generator to use it a second time or use a list", "_____no_output_____" ] ], [ [ "### randomness", "_____no_output_____" ], [ "import random as rand\n[rand.random() for _ in range(4)]", "_____no_output_____" ] ], [ [ "* [Lecuture 2 continued](./Lecture%202%20continued.ipynb)", "_____no_output_____" ], [ "## Further reading", "_____no_output_____" ], [ "* http://www.python.org - The official web page of the Python programming language.\n* http://www.python.org/dev/peps/pep-0008 - Style guide for Python programming. Highly recommended. 
\n* http://www.greenteapress.com/thinkpython/ - A free book on Python programming.\n* [Python Essential Reference](http://www.amazon.com/Python-Essential-Reference-4th-Edition/dp/0672329786) - A good reference book on Python programming.\n* [ZEN of Python](https://legacy.python.org/dev/peps/pep-0020)", "_____no_output_____" ] ], [ [ "import this", "The Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n" ] ] ]
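The `Counter`, sorting, and comprehension cells in the record above compose nicely: `Counter.most_common` plus `sorted` with a `key` function give a quick word-frequency report. A minimal Python sketch to illustrate (the sample sentence is invented, not taken from the notebook):

```python
from collections import Counter

document = "the quick brown fox jumps over the lazy dog the end".split(" ")
word_counts = Counter(document)

# most_common returns (word, count) pairs ordered by descending count
print(word_counts.most_common(3))

# the same report via sorted() with a key function; ties are broken alphabetically
report = sorted(word_counts.items(), key=lambda kv: (-kv[1], kv[0]))
print(report[:3])
```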
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
ec5917464cc1b046d43db1fb00c507e8fe6332af
10,097
ipynb
Jupyter Notebook
model.ipynb
leovigna/blackjack-strategy
ffc39fcc27a9f2dcd67d12834547e08064aa8891
[ "MIT" ]
null
null
null
model.ipynb
leovigna/blackjack-strategy
ffc39fcc27a9f2dcd67d12834547e08064aa8891
[ "MIT" ]
null
null
null
model.ipynb
leovigna/blackjack-strategy
ffc39fcc27a9f2dcd67d12834547e08064aa8891
[ "MIT" ]
null
null
null
24.271635
102
0.470932
[ [ [ "import random", "_____no_output_____" ], [ "# Black Jack Simulator\n\n'''\n- Deck\n- - Shuffle\n- - Card Value, Win/Bust\n- As (Soft/Hard)\n- - Hit, Stand, Double, Split, (Insurance)\n\n- Game state\n- Decision tree\n'''\ncards =\n\ndeck = [ \n 11, \n 2,\n ]", "_____no_output_____" ], [ "## Make non-mutable", "_____no_output_____" ], [ "def generate_suite():\n yield from range(2,12)\n yield 10\n yield 10\n yield 10\n \ndef generate_deck(n):\n suite = [i for i in generate_suite() for s in range(4 * n)]\n random.shuffle(suite)\n \n return suite\n\ndef draw_card(deck):\n #return deck.copy().pop(0)\n return deck.pop(0)", "_____no_output_____" ], [ "def soft_total(cards):\n cards_soft = [x if x != 11 else 1 for x in cards] \n return sum(cards_soft)", "_____no_output_____" ], [ "def hard_total(cards):\n return sum(cards)", "_____no_output_____" ], [ "# Edge case multiple Aces\ndef best_total(cards):\n soft = soft_total(cards)\n hard = hard_total(cards)\n \n if (hard <= 21): return hard\n \n return soft", "_____no_output_____" ], [ "def is_bust(cards):\n return best_total(cards) > 21", "_____no_output_____" ], [ "def is_blackjack(cards):\n return (best_total(cards) == 21) && len(cards) == 2", "_____no_output_____" ], [ "'''\nGame\n- Dealer gets 2 cards, card at index 0 is visible\n- Each player 2 cards\n\ngame_state = {\n dealer\n player0: []\n plauer1\n}\n'''\n\ndef initial_state(player_count):\n game_state = {\n 'dealer': []\n }\n for i in range(player_count):\n game_state['player' + str(i)] = []\n \n return game_state", "_____no_output_____" ], [ "def deal_cards(game_state, deck):\n # Dealer\n game_state['dealer'] = [draw_card(deck), draw_card(deck)]\n for k in game_state.keys():\n if k != 'dealer':\n game_state[k] = [draw_card(deck), draw_card(deck)]\n \n return game_state, deck", "_____no_output_____" ], [ "def dealer_move(game_state, deck):\n '''\n Dealer stands on 17\n '''\n dealer_state = game_state.copy()\n dealer_deck = deck.copy()\n \n if best_total(dealer_state['dealer']) >= 17:\n return dealer_state, dealer_deck\n \n while best_total(dealer_state['dealer']) < 17:\n dealer_state['dealer'].append(draw_card(dealer_deck))\n \n return dealer_state, dealer_deck", "_____no_output_____" ], [ "def player_move(game_state, deck, player, move):\n '''\n move = stand | hit | double | split\n '''\n if move == 'stand':\n stand_state = game_state.copy()\n stand_deck = deck.copy()\n return stand_state, stand_deck\n elif move == 'hit':\n hit_state = game_state.copy()\n hit_deck = deck.copy()\n hit_state[player].append(draw_card(hit_deck))\n return hit_state, hit_deck\n elif move == 'double':\n # Do Nothing\n return game_state.copy(), deck.copy()\n elif move == 'split':\n split_state = game_state.copy()\n split_deck = deck.copy()\n split_state[player].append(draw_card(split_deck))\n split_state[player].append(draw_card(split_deck))\n return split_state, split_deck", "_____no_output_____" ], [ "# Player moves\n# Dealer moves\n# End Game, Payout\n# Split win?\ndef end_game(game_state, player):\n dealer_count = best_total(game_state['dealer'])\n player_count = best_total(game_state[player])\n \n player_bj = (player_count == 21) & (len(game_state[player]) == 2)\n dealer_bj = (dealer_count == 21) & (len(game_state['dealer']) == 2)\n \n # blackjack, blackjack-split\n if player_bj & dealer_bj:\n return 'split_blackjack'\n if player_bj:\n return 'win_blackjack'\n if dealer_bj:\n return 'lose_blackjack'\n \n # busts\n if player_count > 21:\n return 'lose_bust'\n if dealer_count > 21:\n return 'win_bust'\n 
\n # points\n if dealer_count < player_count:\n return 'win_point'\n elif dealer_count > player_count:\n return 'lose_point'\n else:\n return 'split_point'", "_____no_output_____" ], [ "# Generate possible variants\ndef game_gen():\n # Initial state\n deck = generate_deck(6)\n print(deck[0:10])\n state = initial_state(1)\n deal_cards(state, deck) \n end = None\n \n while end != 'lose_bust':\n print (state)\n # Player move\n state_player, deck_player = player_move(state, deck, 'player0', 'stand')\n # Dealer move\n state_dealer, deck_dealer = dealer_move(state_player, deck_player)\n # End game\n end = end_game(state_dealer, 'player0')\n yield state_dealer, deck_dealer, end\n \n # Player move\n state, deck = player_move(state, deck, 'player0', 'hit')\n \nfor state, deck, end in game_gen():\n print(state, end, deck[0:10])", "[4, 10, 5, 10, 8, 10, 10, 10, 3, 2]\n{'dealer': [4, 10], 'player0': [5, 10]}\n{'dealer': [4, 10, 8], 'player0': [5, 10]} win_bust [10, 10, 10, 3, 2, 10, 2, 9, 10, 10]\n{'dealer': [4, 10, 8], 'player0': [5, 10, 8]}\n{'dealer': [4, 10, 8], 'player0': [5, 10, 8]} lose_bust [10, 10, 10, 3, 2, 10, 2, 9, 10, 10]\n" ], [ "deck = generate_deck(2)\ngame = initial_state(1)\nprint(game)\n\ndeal_cards(game, deck)\n\nprint(game)\nprint(soft_total(game['dealer']))\nprint(hard_total(game['dealer']))\ndealer_move(game, deck)\nprint(game)", "_____no_output_____" ], [ "state = {\n 'dealer': [11, 9],\n 'player1': [11, 10]\n}\nplayer = 'player1'\nprint((best_total(state[player]) == 21) & (len(state[player]) == 2))\nprint(best_total(state[player]))\n\nend_game(state, player)", "True\n21\n" ], [ "cards = [11, 10]\n\nsoft = soft_total(cards)\nhard = hard_total(cards)\n\nprint (soft)\nprint (hard)", "11\n21\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec591970cc8fd998efcb7ade71aabe30718818ba
85,079
ipynb
Jupyter Notebook
src/experiments.ipynb
say4n/deepcosegmentation.pytorch
73098eed7c097fa0add3f0a6907df36a4e9a5d85
[ "MIT" ]
5
2019-10-28T08:35:04.000Z
2020-09-24T08:13:40.000Z
src/experiments.ipynb
zswzifir/deepcosegmentation.pytorch
73098eed7c097fa0add3f0a6907df36a4e9a5d85
[ "MIT" ]
2
2020-03-07T01:19:46.000Z
2020-03-17T18:48:37.000Z
src/experiments.ipynb
zswzifir/deepcosegmentation.pytorch
73098eed7c097fa0add3f0a6907df36a4e9a5d85
[ "MIT" ]
3
2020-03-07T02:18:47.000Z
2020-08-27T05:53:51.000Z
40.494526
146
0.470645
[ [ [ "# Deep Co-segmentation Experiments", "_____no_output_____" ], [ "## Imports", "_____no_output_____" ] ], [ [ "from dataset import iCosegDataset, PASCALVOCCosegDataset, MSRCDataset, InternetDataset\nfrom model import SiameseSegNet\nimport numpy as np\nimport os\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torchvision\nfrom tqdm import tqdm_notebook", "_____no_output_____" ] ], [ [ "## Constants", "_____no_output_____" ] ], [ [ "## Debug\n\nDEBUG = False\n\n\n## Dataset\nBATCH_SIZE = 2 * 1 # two images at a time for Siamese net\nINPUT_CHANNELS = 3 # RGB\nOUTPUT_CHANNELS = 2 # BG + FG channel\n\n## Inference\nCUDA = \"0\"\n\n## Output Dir\nOUTPUT_DIR = \"./experiments\"\n\nos.system(f\"rm -r {OUTPUT_DIR}\")\nos.makedirs(OUTPUT_DIR, exist_ok=True)", "_____no_output_____" ] ], [ [ "## Metrics", "_____no_output_____" ] ], [ [ "def metrics(pmapA, pmapB, masksA, masksB):\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = 0, 0, 0, 0, 0, 0\n \n for idx in range(BATCH_SIZE//2):\n pred_maskA = torch.argmax(pmapA[idx], dim=0).cpu().numpy()\n pred_maskB = torch.argmax(pmapB[idx], dim=0).cpu().numpy()\n\n masksA_cpu = masksA[idx].cpu().numpy()\n masksB_cpu = masksB[idx].cpu().numpy()\n\n intersection_a += np.sum(pred_maskA & masksA_cpu)\n intersection_b += np.sum(pred_maskB & masksB_cpu)\n\n union_a += np.sum(pred_maskA | masksA_cpu)\n union_b += np.sum(pred_maskB | masksB_cpu)\n\n precision_a += np.sum(pred_maskA == masksA_cpu)\n precision_b += np.sum(pred_maskB == masksB_cpu)\n\n return intersection_a, intersection_b, union_a, union_b, precision_a, precision_b", "_____no_output_____" ] ], [ [ "# Experiments", "_____no_output_____" ], [ "## Load Deep Object Co-segmentation model trained on Pascal VOC ", "_____no_output_____" ] ], [ [ "LOAD_CHECKPOINT = \"/home/SharedData/intern_sayan/PASCAL_coseg/\"\n\nmodel = SiameseSegNet(input_channels=INPUT_CHANNELS,\n output_channels=OUTPUT_CHANNELS,\n gpu=CUDA)\nif DEBUG:\n print(model)\n\nFloatTensor = torch.FloatTensor\nLongTensor = torch.LongTensor\n\nif CUDA is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = CUDA\n\n model = model.cuda()\n\n FloatTensor = torch.cuda.FloatTensor\n LongTensor = torch.cuda.LongTensor\n\nif LOAD_CHECKPOINT:\n model.load_state_dict(torch.load(os.path.join(LOAD_CHECKPOINT, \"coseg_model_best.pth\")))", "_____no_output_____" ] ], [ [ "## iCoseg", "_____no_output_____" ] ], [ [ "root_dir = \"/home/SharedData/intern_sayan/iCoseg/\"\n\nimage_dir = os.path.join(root_dir, \"images\")\nmask_dir = os.path.join(root_dir, \"ground_truth\")\n\ndataset = iCosegDataset(image_dir=image_dir,\n mask_dir=mask_dir)\ndataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, drop_last=True)", "_____no_output_____" ] ], [ [ "## VOC + iCoseg [Car] \n\niCoseg class indices = {5}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([5,5]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in 
range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"car_iCoseg_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"car_iCoseg_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"car_iCoseg_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + iCoseg [People]\niCoseg class indices = {1,4,26,27,28}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([1,1]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([4,4]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([26,26]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([26,27]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([27,27]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([27,28]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([28,28]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in 
range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"people_iCoseg_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"people_iCoseg_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"people_iCoseg_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + iCoseg [Goose]\niCoseg class indices = {10}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([10,10]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = 
zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"goose_iCoseg_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"goose_iCoseg_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"goose_iCoseg_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + iCoseg [Airplane] \n\niCoseg class indices = {12,13,14}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([12,12]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([12,13]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([13,13]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([13,14]))) or \\\n torch.equal(batch[\"label\"], torch.from_numpy(np.array([14,14]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n 
labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"airplane_iCoseg_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"airplane_iCoseg_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"airplane_iCoseg_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "### Clean-up", "_____no_output_____" ] ], [ [ "del dataset\ndel dataloader", "_____no_output_____" ] ], [ [ "## MSRC Dataloader", "_____no_output_____" ] ], [ [ "root_dir = \"/home/SharedData/intern_sayan/MSRC_processed/\"\n\nimage_dir = os.path.join(root_dir, \"images\")\nmask_dir = os.path.join(root_dir, \"GT\")\n\ndataset = MSRCDataset(image_dir=image_dir,\n mask_dir=mask_dir)\ndataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, drop_last=True)", "_____no_output_____" ] ], [ [ "## VOC + MSRC [Car] \n\nMSRC class indices = {2}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([2,2]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n 
pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"car_MSRC_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"car_MSRC_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"car_MSRC_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + MSRC [Airplane] \n\nMSRC class indices = {10}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([10,10]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = 
zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"airplane_MSRC_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"airplane_MSRC_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"airplane_MSRC_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + MSRC [Bird] \n\nMSRC class indices = {1}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([1,1]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx 
in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"bird_MSRC_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"bird_MSRC_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"bird_MSRC_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + MSRC [Cat] \n\nMSRC class indices = {3}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([3,3]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * 
eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"cat_MSRC_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"cat_MSRC_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"cat_MSRC_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + MSRC [Cow] \n\nMSRC class indices = {5}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([5,5]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n 
res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"cow_MSRC_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"cow_MSRC_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"cow_MSRC_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "### Clean-up", "_____no_output_____" ] ], [ [ "del dataset\ndel dataloader", "_____no_output_____" ] ], [ [ "## Internet Dataloader", "_____no_output_____" ] ], [ [ "root_dir = \"/home/SharedData/intern_sayan/internet_processed/\"\n\nimage_dir = os.path.join(root_dir, \"images\", \"Data\")\nmask_dir = os.path.join(root_dir, \"GT\", \"Data\")\n\ndataset = InternetDataset(image_dir=image_dir,\n mask_dir=mask_dir)\ndataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, drop_last=True)", "_____no_output_____" ] ], [ [ "## VOC + Internet [Airplane] \n\nInternet class indices = {0}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([0,0]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = 
torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"airplane_Internet_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"airplane_Internet_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"airplane_Internet_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "## VOC + Internet [Car] \n\nInternet class indices = {1}", "_____no_output_____" ] ], [ [ "def infer():\n model.eval()\n\n intersection, union, precision = 0, 0, 0\n t_start = time.time()\n\n for batch_idx, batch in tqdm_notebook(enumerate(dataloader)):\n images = batch[\"image\"].type(FloatTensor)\n labels = batch[\"label\"].type(LongTensor)\n masks = batch[\"mask\"].type(FloatTensor)\n \n if torch.equal(batch[\"label\"], torch.from_numpy(np.array([1,1]))):\n\n # pdb.set_trace()\n\n pairwise_images = [(images[2*idx], images[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_labels = [(labels[2*idx], labels[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n pairwise_masks = [(masks[2*idx], masks[2*idx+1]) for idx in range(BATCH_SIZE//2)]\n\n # pdb.set_trace()\n\n imagesA, imagesB = zip(*pairwise_images)\n labelsA, labelsB = zip(*pairwise_labels)\n masksA, masksB = zip(*pairwise_masks)\n\n # pdb.set_trace()\n\n imagesA, imagesB = torch.stack(imagesA), torch.stack(imagesB)\n labelsA, labelsB = torch.stack(labelsA), torch.stack(labelsB)\n masksA, masksB = torch.stack(masksA).long(), torch.stack(masksB).long()\n\n # pdb.set_trace()\n\n eq_labels = []\n\n for idx in range(BATCH_SIZE//2):\n if torch.equal(labelsA[idx], labelsB[idx]):\n eq_labels.append(torch.ones(1).type(LongTensor))\n else:\n eq_labels.append(torch.zeros(1).type(LongTensor))\n\n eq_labels = torch.stack(eq_labels)\n\n # pdb.set_trace()\n\n masksA = masksA * eq_labels.unsqueeze(1)\n masksB = masksB * eq_labels.unsqueeze(1)\n\n imagesA_v = torch.autograd.Variable(FloatTensor(imagesA))\n imagesB_v = torch.autograd.Variable(FloatTensor(imagesB))\n\n pmapA, pmapB, similarity = model(imagesA_v, imagesB_v)\n\n # pdb.set_trace()\n\n res_images, res_masks, gt_masks = [], [], []\n\n for idx in range(BATCH_SIZE//2):\n 
res_images.append(imagesA[idx])\n res_images.append(imagesB[idx])\n\n res_masks.append(torch.argmax((pmapA * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n res_masks.append(torch.argmax((pmapB * similarity.unsqueeze(2).unsqueeze(2))[idx],\n dim=0).reshape(1, 512, 512))\n\n gt_masks.append(masksA[idx].reshape(1, 512, 512))\n gt_masks.append(masksB[idx].reshape(1, 512, 512))\n\n # pdb.set_trace()\n\n images_T = torch.stack(res_images)\n masks_T = torch.stack(res_masks)\n gt_masks_T = torch.stack(gt_masks)\n\n\n # metrics - IoU & precision\n intersection_a, intersection_b, union_a, union_b, precision_a, precision_b = metrics(pmapA,\n pmapB,\n masksA,\n masksB)\n\n intersection += intersection_a + intersection_b\n union += union_a + union_b\n\n precision += (precision_a / (512 * 512)) + (precision_b / (512 * 512))\n\n\n # pdb.set_trace()\n\n torchvision.utils.save_image(images_T,\n os.path.join(OUTPUT_DIR,f\"car_Internet_{batch_idx}_images.png\"),\n nrow=2)\n torchvision.utils.save_image(masks_T,\n os.path.join(OUTPUT_DIR, f\"car_Internet_{batch_idx}_masks.png\"),\n nrow=2)\n torchvision.utils.save_image(gt_masks_T,\n os.path.join(OUTPUT_DIR, f\"car_Internet_{batch_idx}_gt_masks.png\"),\n nrow=2)\n\n delta = time.time() - t_start\n\n print(f\"\\nTime elapsed: [{delta} secs]\\nPrecision : [{precision/(len(dataloader) * BATCH_SIZE)}]\\nIoU : [{intersection/union}]\")\n \n \ninfer()", "_____no_output_____" ] ], [ [ "### Clean-up", "_____no_output_____" ] ], [ [ "del dataset\ndel dataloader", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec591dcc3d3aeb1e2e4c9b72ab5778730fdfa330
288,310
ipynb
Jupyter Notebook
notebooks/Basic Smoothing.ipynb
aakashparsi/tsmoothie
40c289b11163d23b62744231153608c10f388574
[ "MIT" ]
340
2020-09-02T08:10:19.000Z
2022-03-26T06:52:56.000Z
notebooks/Basic Smoothing.ipynb
aakashparsi/tsmoothie
40c289b11163d23b62744231153608c10f388574
[ "MIT" ]
15
2020-11-27T20:02:35.000Z
2021-11-28T10:36:42.000Z
notebooks/Basic Smoothing.ipynb
aakashparsi/tsmoothie
40c289b11163d23b62744231153608c10f388574
[ "MIT" ]
58
2020-09-19T04:35:01.000Z
2022-03-25T22:05:50.000Z
563.105469
38,268
0.943346
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tsmoothie.utils_func import sim_randomwalk\nfrom tsmoothie.smoother import *", "_____no_output_____" ], [ "# generate randomwalks\nnp.random.seed(33)\n\ndata = sim_randomwalk(n_series=10, timesteps=200, \n process_noise=10, measure_noise=30)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = ExponentialSmoother(window_len=20, alpha=0.3)\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('sigma_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = ConvolutionSmoother(window_len=20, window_type='ones')\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('sigma_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = SpectralSmoother(smooth_fraction=0.2, pad_len=20)\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('sigma_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = PolynomialSmoother(degree=6)\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('prediction_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = SplineSmoother(n_knots=6, spline_type='natural_cubic_spline')\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('prediction_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = GaussianSmoother(n_knots=6, sigma=0.1)\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('prediction_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = BinnerSmoother(n_knots=6)\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('prediction_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, 
color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = LowessSmoother(smooth_fraction=0.2, iterations=1)\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('prediction_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ], [ "# operate smoothing\nsmoother = KalmanSmoother(component='level_trend', \n component_noise={'level':0.1, 'trend':0.1})\nsmoother.smooth(data)\n\n# generate intervals\nlow, up = smoother.get_intervals('kalman_interval')", "_____no_output_____" ], [ "# plot the first smoothed timeseries with intervals\nplt.plot(smoother.smooth_data[0], linewidth=3, color='blue')\nplt.plot(smoother.data[0], '.k')\nplt.xlabel('time')\n\nplt.fill_between(range(len(smoother.data[0])), low[0], up[0], alpha=0.3)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5924debe8d59a344972d1a131a5728eb87252e
147,707
ipynb
Jupyter Notebook
text2text_demo.ipynb
artitw/text2text
ac7b32216363ac13c2b4e69dde94e6fbc155f300
[ "MIT" ]
191
2020-03-01T23:07:49.000Z
2022-03-29T03:31:44.000Z
text2text_demo.ipynb
artitw/text2text
ac7b32216363ac13c2b4e69dde94e6fbc155f300
[ "MIT" ]
32
2020-03-18T09:59:20.000Z
2022-03-26T23:05:25.000Z
text2text_demo.ipynb
artitw/text2text
ac7b32216363ac13c2b4e69dde94e6fbc155f300
[ "MIT" ]
26
2020-04-18T19:36:48.000Z
2022-02-27T14:11:10.000Z
35.034867
1,443
0.473444
[ [ [ "# [Text2Text](https://github.com/artitw/text2text): Multilingual tokenization, embedding, translation, summarization, question generation, question answering, data augmentation, distance measurement\n\n## How Cross-Lingual NLP Models Work (click to watch)\n[![Cross-Lingual Models](http://img.youtube.com/vi/caZLVcJqsqo/0.jpg)](https://youtu.be/caZLVcJqsqo \"Cross-Lingual Models\")\n", "_____no_output_____" ] ], [ [ "pip install -q -U text2text", "\u001b[K |████████████████████████████████| 71kB 5.2MB/s \n\u001b[K |████████████████████████████████| 2.3MB 24.6MB/s \n\u001b[K |████████████████████████████████| 1.2MB 58.3MB/s \n\u001b[K |████████████████████████████████| 3.3MB 65.3MB/s \n\u001b[K |████████████████████████████████| 901kB 43.5MB/s \n\u001b[?25h" ], [ "### Text Handler API quick start\nimport text2text as t2t\nt2t.Transformer.PRETRAINED_TRANSLATOR = \"facebook/m2m100_418M\" #Remove this line for the larger model\nh = t2t.Handler([\"Hello, World!\"], src_lang=\"en\") #Initialize with some text\nh.tokenize() #[['▁Hello', ',', '▁World', '!']]", "Better speed can be achieved with apex installed from https://www.github.com/artitw/apex.\n" ], [ "h.vectorize() #array([[0.18745188, 0.05658336, 0.15895301, ..., 0.46946704, 0.6332584 , 0.43805206]], dtype=float32)", "_____no_output_____" ], [ "h.tfidf() #[{'!': 0.5, ',': 0.5, '▁Hello': 0.5, '▁World': 0.5}]", "_____no_output_____" ], [ "h.search(queries=[\"Hello\"]).toarray() #array([[0.5]])", "_____no_output_____" ], [ "h.translate(tgt_lang=\"zh\") #['你好,世界!']", "_____no_output_____" ], [ "h.summarize() #[\"World ' s largest world\"]", "100%|██████████| 213450/213450 [00:00<00:00, 784787.15B/s]\n" ], [ "h.question() #[('What is the name of the world you are in?', 'The world')]", "***** Recover model: qg_model.bin *****\n" ], [ "h.variate() #['Hello the world!', 'Welcome to the world.', 'Hello to the world!',...", "_____no_output_____" ], [ "t2t.Handler([\"Hello, World! [SEP] Hello, what?\"]).answer() #['World']", "_____no_output_____" ], [ "t2t.Handler([\"Hello, World! [SEP] Hello, what?\"]).measure() #[2]", "_____no_output_____" ], [ "### Languages Available\nt2t.Transformer.LANGUAGES", "_____no_output_____" ], [ "# Sample texts\narticle_en = 'The Secretary-General of the United Nations says there is no military solution in Syria.'\n \nnotre_dame_str = \"As at most other universities, Notre Dame's students run a number of news media outlets. The nine student - run outlets include three newspapers, both a radio and television station, and several magazines and journals. Begun as a one - page journal in September 1876, the Scholastic magazine is issued twice monthly and claims to be the oldest continuous collegiate publication in the United States. The other magazine, The Juggler, is released twice a year and focuses on student literature and artwork. The Dome yearbook is published annually. The newspapers have varying publication interests, with The Observer published daily and mainly reporting university and other news, and staffed by students from both Notre Dame and Saint Mary's College. Unlike Scholastic and The Dome, The Observer is an independent publication and does not have a faculty advisor or any editorial oversight from the University. In 1987, when some students believed that The Observer began to show a conservative bias, a liberal newspaper, Common Sense was published. Likewise, in 2003, when other students believed that the paper showed a liberal bias, the conservative paper Irish Rover went into production. 
Neither paper is published as often as The Observer; however, all three are distributed to all students. Finally, in Spring 2008 an undergraduate journal for political science research, Beyond Politics, made its debut.\"\n \nbacteria_str = \"Bacteria are a type of biological cell. They constitute a large domain of prokaryotic microorganisms. Typically a few micrometres in length, bacteria have a number of shapes, ranging from spheres to rods and spirals. Bacteria were among the first life forms to appear on Earth, and are present in most of its habitats.\"\n \nbio_str = \"Biology is the science that studies life. What exactly is life? This may sound like a silly question with an obvious answer, but it is not easy to define life. For example, a branch of biology called virology studies viruses, which exhibit some of the characteristics of living entities but lack others. It turns out that although viruses can attack living organisms, cause diseases, and even reproduce, they do not meet the criteria that biologists use to define life.\"\n ", "_____no_output_____" ], [ "### Tokenization\nt2t.Handler([\n \"Let's go hiking tomorrow\", \n \"안녕하세요.\", \n \"돼지꿈을 꾸세요~~\"\n ]).tokenize()", "_____no_output_____" ], [ "# Embeddings\nt2t.Handler([\n \"Let's go hiking tomorrow\", \n \"안녕하세요.\", \n \"돼지꿈을 꾸세요~~\"\n ]).vectorize()", "_____no_output_____" ], [ "### TF-IDF\nt2t.Handler([\n \"Let's go hiking tomorrow\", \n \"안녕하세요.\", \n \"돼지꿈을 꾸세요~~\"\n ]).tfidf()", "_____no_output_____" ], [ "### Search\nt2t.Handler([\n \"Let's go hiking tomorrow, let's go!\", \n \"안녕하세요.\", \n \"돼지꿈을 꾸세요~~\",\n ]).search(queries=[\"go\", \"안녕\"]).toarray()", "_____no_output_____" ], [ "#### Multiple queries on a single index\ntfidf_index = t2t.Handler([\n article_en, \n notre_dame_str, \n bacteria_str, \n bio_str\n ]).tfidf(output=\"matrix\")\n\nsearch_results_tf1 = t2t.Handler().search(\n queries=[\"wonderful life\", \"university students\"], \n index=tfidf_index)\n\nsearch_results_tf2 = t2t.Handler().search(\n queries=[\"Earth creatures are cool\", \"United Nations\"], \n index=tfidf_index)", "_____no_output_____" ], [ "#### Using neural embeddings index\nembedding_index = t2t.Handler([\n article_en, \n notre_dame_str, \n bacteria_str, \n bio_str\n ]).vectorize()\n\nsearch_results_em1 = t2t.Handler().search(\n queries=[\"wonderful life\", \"university students\"],\n vector_class=t2t.Vectorizer,\n index=embedding_index)\n\nsearch_results_em2 = t2t.Handler().search(\n queries=[\"Earth creatures are cool\", \"United Nations\"],\n vector_class=t2t.Vectorizer,\n index=embedding_index)", "_____no_output_____" ], [ "#### Blending neural embeddings and tf-idf\nimport numpy as np\nnp.mean( \n np.array([\n search_results_tf1, \n search_results_em1,\n ]), axis=0)", "_____no_output_____" ], [ "### Levenshtein Sub-word Edit Distance\nt2t.Handler([\n \"Hello, World! [SEP] Hello, what?\", \n \"안녕하세요. 
[SEP] 돼지꿈을 꾸세요~~\"\n ]).measure(metric=\"levenshtein_distance\")", "_____no_output_____" ], [ "### Translation\n# Default translator model\nt2t.Handler([article_en, notre_dame_str, bacteria_str, bio_str], src_lang='en').translate(tgt_lang='zh')", "_____no_output_____" ], [ "# Smaller model to save time and memory for development\nt2t.Transformer.PRETRAINED_TRANSLATOR = \"facebook/m2m100_418M\"\nt2t.Handler([\"I would like to go hiking tomorrow.\"], \n src_lang=\"en\"\n ).translate(tgt_lang='zh')\n", "_____no_output_____" ], [ "# Smaller model to save time and memory for development\n# Note language code difference\nt2t.Transformer.PRETRAINED_TRANSLATOR = \"facebook/mbart-large-50-many-to-many-mmt\"\nt2t.Transformer.LANGUAGES = {\n 'af_ZA': 'Afrikaans',\n 'ar_AR': 'Arabic',\n 'az_AZ': 'Azerbaijani',\n 'bn_IN': 'Bengali',\n 'cs_CZ': 'Czech',\n 'de_DE': 'German',\n 'en_XX': 'English',\n 'es_XX': 'Spanish',\n 'et_EE': 'Estonian',\n 'fa_IR': 'Persian',\n 'fi_FI': 'Finnish',\n 'fr_XX': 'French',\n 'gl_ES': 'Galician',\n 'gu_IN': 'Gujarati',\n 'he_IL': 'Hebrew',\n 'hi_IN': 'Hindi',\n 'hr_HR': 'Croatian',\n 'id_ID': 'Indonesian',\n 'it_IT': 'Italian',\n 'ja_XX': 'Japanese',\n 'ka_GE': 'Georgian',\n 'kk_KZ': 'Kazakh',\n 'km_KH': 'Khmer',\n 'ko_KR': 'Korean',\n 'lt_LT': 'Lithuanian',\n 'lv_LV': 'Latvian',\n 'mk_MK': 'Macedonian',\n 'ml_IN': 'Malayalam',\n 'mn_MN': 'Mongolian',\n 'mr_IN': 'Marathi',\n 'my_MM': 'Burmese',\n 'ne_NP': 'Nepali',\n 'nl_XX': 'Dutch',\n 'pl_PL': 'Polish',\n 'ps_AF': 'Pashto',\n 'pt_XX': 'Portuguese',\n 'ro_RO': 'Romanian',\n 'ru_RU': 'Russian',\n 'si_LK': 'Sinhala',\n 'sl_SI': 'Slovene',\n 'sv_SE': 'Swedish',\n 'sw_KE': 'Swahili',\n 'ta_IN': 'Tamil',\n 'te_IN': 'Telugu',\n 'th_TH': 'Thai',\n 'tl_XX': 'Tagalog',\n 'tr_TR': 'Turkish',\n 'uk_UA': 'Ukrainian',\n 'ur_PK': 'Urdu',\n 'vi_VN': 'Vietnamese',\n 'xh_ZA': 'Xhosa',\n 'zh_CN': 'Chinese'\n}\nt2t.Handler([\"I would like to go hiking tomorrow.\"], \n src_lang=\"en_XX\"\n ).translate(tgt_lang='zh_CN')\n", "_____no_output_____" ], [ "### Question Answering. Question must follow context with ` [SEP] ` in between.\nt2t.Handler([\n \"Hello, this is Text2Text! [SEP] What is this?\", \n \"It works very well. It's awesome! [SEP] How is it?\"\n ]).answer()", "_____no_output_____" ], [ "t2t.Handler([\"很喜欢陈慧琳唱歌。[SEP] 喜欢做什么?\"], \n src_lang=\"zh\",\n ).answer()", "_____no_output_____" ], [ "### Question Generation\nt2t.Handler([\n bio_str,\n bio_str,\n bio_str,\n bio_str,\n bio_str,\n \"I will go to school today to take my math exam.\",\n \"I will go to school today to take my math exam.\",\n \"Tomorrow is my cousin's birthday. He will turn 24 years old.\",\n notre_dame_str,\n bacteria_str,\n bacteria_str,\n bacteria_str,\n \"I will go to school today to take my math exam. [SEP] school\",\n \"I will go to school today to take my math exam. [SEP] exam\",\n \"I will go to school today to take my math exam. [SEP] math\",\n ], src_lang='en').question()", "_____no_output_____" ], [ "t2t.Handler([\"很喜欢陈慧琳唱歌。\"], src_lang='zh').question()", "_____no_output_____" ], [ "### Summarization\nt2t.Handler([notre_dame_str, bacteria_str, bio_str], src_lang='en').summarize()", "_____no_output_____" ], [ "### Variation\n# Useful for augmenting training data\nt2t.Handler([bacteria_str], src_lang='en').variate()", "_____no_output_____" ], [ "### Training / Fine-tuning\n# Finetune cross-lingual model on your data\nresult = t2t.Handler([\"Hello, World! 
[TGT] 你好,世界!\"], \n src_lang=\"en\",\n tgt_lang=\"zh\",\n num_epochs=10, \n save_directory=\"model_dir\"\n ).fit()\n\n# load and use model from saved directory\nt2t.Transformer.PRETRAINED_TRANSLATOR = \"model_dir\"\nt2t.Handler(\"Hello, World!\").translate(tgt_lang=\"zh\")", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec592b19f58ea79f4edd8645aca2042552bc6c6d
92,102
ipynb
Jupyter Notebook
6/old/Mnist_final.ipynb
isaiah-li/Building-Machine-Learning-Projects-with-TensorFlow
d01acb378a74d6589f01adf6bfbc4fe20ad4f0b4
[ "MIT" ]
270
2016-11-21T13:54:44.000Z
2022-02-18T01:50:49.000Z
6/old/Mnist_final.ipynb
isaiah-li/Building-Machine-Learning-Projects-with-TensorFlow
d01acb378a74d6589f01adf6bfbc4fe20ad4f0b4
[ "MIT" ]
12
2016-12-28T00:06:09.000Z
2021-09-29T06:19:24.000Z
6/old/Mnist_final.ipynb
isaiah-li/Building-Machine-Learning-Projects-with-TensorFlow
d01acb378a74d6589f01adf6bfbc4fe20ad4f0b4
[ "MIT" ]
192
2016-11-21T13:54:47.000Z
2021-11-25T08:33:18.000Z
239.225974
5,360
0.888927
[ [ [ "import tensorflow as tf\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n# Import MINST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n#Show the first training image\nplt.imshow(mnist.train.images[0].reshape((28, 28), order='C'), cmap='Greys', interpolation='nearest')", "Extracting /tmp/data/train-images-idx3-ubyte.gz\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\n" ], [ "# Parameters\nbatch_size = 128\nlearning_rate = 0.05\nnumber_iterations = 2000\nsteps = 10\n\n# Network Parameters\nn_input = 784 # 28x28 images\nn_classes = 10 # 10 digit classes\ndropout = 0.80 # Dropout probability\n\n# tf Graph input\nX = tf.placeholder(tf.float32, [None, n_input])\nY = tf.placeholder(tf.float32, [None, n_classes])\nkeep_prob = tf.placeholder(tf.float32) #dropout (keep probability)", "_____no_output_____" ], [ "# Create some wrappers for simplicity\ndef conv2d(x, W, b, strides=1):\n # Conv2D wrapper, with bias and relu activation\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n\ndef subsampling(x, k=2):\n # MaxPool2D wrapper\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\n padding='SAME')\n\n\n# Create model\ndef conv_net(x_in, weights, biases, dropout):\n # Reshape input picture\n x_in = tf.reshape(x_in, shape=[-1, 28, 28, 1])\n\n # Convolution Layer 1\n conv_layer_1 = conv2d(x_in, weights['wc1'], biases['bc1'])\n # Subsampling\n conv_layer_1 = subsampling(conv_layer_1, k=2)\n\n # Convolution Layer 2\n conv_layer_2 = conv2d(conv_layer_1, weights['wc2'], biases['bc2'])\n # Subsampling\n conv_layer_2 = subsampling(conv_layer_2, k=2)\n\n # Fully connected layer\n # Reshape conv_layer_2 output to fit fully connected layer input\n fully_connected_layer = tf.reshape(conv_layer_2, [-1, weights['wd1'].get_shape().as_list()[0]])\n fully_connected_layer = tf.add(tf.matmul(fully_connected_layer, weights['wd1']), biases['bd1'])\n fully_connected_layer = tf.nn.relu(fully_connected_layer)\n # Apply Dropout\n fully_connected_layer = tf.nn.dropout(fully_connected_layer, dropout)\n\n # Output, class prediction\n prediction_output = tf.add(tf.matmul(fully_connected_layer, weights['out']), biases['out'])\n return prediction_output\n\n# Store layers weight & bias\nweights = {\n # 5x5 convolutional units, 1 input, 32 outputs\n 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),\n # 5x5 convolutional units, 32 inputs, 64 outputs\n 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),\n # fully connected, 7*7*64 inputs, 1024 outputs\n 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),\n # 1024 inputs, 10 outputs (class prediction)\n 'out': tf.Variable(tf.random_normal([1024, n_classes]))\n}\n\nbiases = {\n 'bc1': tf.Variable(tf.random_normal([32])),\n 'bc2': tf.Variable(tf.random_normal([64])),\n 'bd1': tf.Variable(tf.random_normal([1024])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n# Construct model\npred = conv_net(X, weights, biases, keep_prob)\n\n# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = 
tf.initialize_all_variables()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n while step * batch_size < number_iterations:\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n test = batch_x[0]\n fig = plt.figure()\n plt.imshow(test.reshape((28, 28), order='C'), cmap='Greys',\n interpolation='nearest')\n # Run optimization op (backprop)\n sess.run(optimizer, feed_dict={X: batch_x, Y: batch_y,\n keep_prob: dropout})\n if step % steps == 0:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([cost, accuracy], feed_dict={X: batch_x,\n Y: batch_y,\n keep_prob: 1.})\n print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc)\n step += 1\n\n # Calculate accuracy for 256 mnist test images\n print \"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: mnist.test.images[:256],\n Y: mnist.test.labels[:256],\n keep_prob: 1.})", "Iter 1280, Minibatch Loss= 37.628696, Training Accuracy= 0.17188\nTesting Accuracy: 0.136719\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
ec594889b15702aa1f3b71ecac84931962a33ca1
16,750
ipynb
Jupyter Notebook
.ipynb_checkpoints/Structural Dynamics-checkpoint.ipynb
mjamezquidilla/ModalAnalysis
826e04b70600ccda543ebcb794076b405806a6c2
[ "MIT" ]
1
2022-03-01T23:33:56.000Z
2022-03-01T23:33:56.000Z
Structural Dynamics.ipynb
mjamezquidilla/ModalAnalysis
826e04b70600ccda543ebcb794076b405806a6c2
[ "MIT" ]
null
null
null
Structural Dynamics.ipynb
mjamezquidilla/ModalAnalysis
826e04b70600ccda543ebcb794076b405806a6c2
[ "MIT" ]
null
null
null
60.251799
3,460
0.756597
[ [ [ "# Computation for the EigenValues of a 3DOF System in Structural Dynamics\n\nIn this example, we will look on how to solve a 3DOF system using python.\nThis uses the library of Sympy (Symbolic Python) to solve the equations in terms of variables (letters)", "_____no_output_____" ], [ "$$ m \\ddot(x) + c \\dot(x) + k (x) = 0 $$", "_____no_output_____" ], [ "## Step 1: Loading libraries", "_____no_output_____" ] ], [ [ "# imports numpy and matplotlib\n%pylab inline\n\n# imports sympy\nimport sympy as sm\n\n# prints output using LaTeX for pretty viewing\nsm.init_printing(use_latex=True)", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "## Step 2: Establish Mass Matrix", "_____no_output_____" ] ], [ [ "# assigning masses\nm1 = 1000 #(kg)\nm2 = 1500 #(kg)\nm3 = 2000 #(kg)\n\n# making the mass matrix\nm = sm.Matrix([[m1,0,0],[0,m2,0],[0,0,m3]])\n\n# printing the mass matrix\nm", "_____no_output_____" ] ], [ [ "\n## Step 3: Establish Stiffness Matrix", "_____no_output_____" ] ], [ [ "# computing for the stiffnesses\nk1 = 600000 #(N/m)\nk2 = 1200000 #(N/m)\nk3 = 1800000 #(N/m)\n\n# making the stiffness matrix\nk = sm.Matrix([[k1,-k1,0],[-k1,k1+k2,-k2],[0,-k2,k2+k3]])\n\n# printing the stiffness matrix\nk", "_____no_output_____" ] ], [ [ "## Step 4: Solving the EigenValue Problem\nMaking the Matrix", "_____no_output_____" ] ], [ [ "# assigning the \"x\" variable as a symbol\nx = sm.Symbol('x')\n\n# making the Eigen matrix\nA = k-m*x**2\n\n# printing the matrix\nA", "_____no_output_____" ] ], [ [ "Getting the Determinant of the Matrix", "_____no_output_____" ] ], [ [ "sm.det(A).simplify()", "_____no_output_____" ] ], [ [ "Solving the determinants and listing the types of modes", "_____no_output_____" ] ], [ [ "# This step solves the equation in line\nB = sm.solve(sm.det(A),x**2)\n\n# This line converts the array into real number\nC = real(B)\n\n# This line makes an array into a complex numbers\nD = array((C.astype(complex)))\n\n# Gets the squareroot of the real part of the complex number\nE = 1/(sqrt(D.real) / 2 / math.pi)\n\n# printing the Modes\nprint(\"Period 1 = {0}, Period 2 = {1}, Period 3 = {2}\".format(E[0],E[1],E[2]))", "Period 1 = 0.2023720283163326, Period 2 = 0.43267656159437806, Period 3 = 0.13629624070103266\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec5959131fe3a9e70f716c0148419334af4ae3d8
18,867
ipynb
Jupyter Notebook
notebooks/session1and2.ipynb
kwk38kh/03-simple-predictions
4097d45d9a25a261dc1591da1dd1ca641e6868ae
[ "MIT" ]
null
null
null
notebooks/session1and2.ipynb
kwk38kh/03-simple-predictions
4097d45d9a25a261dc1591da1dd1ca641e6868ae
[ "MIT" ]
null
null
null
notebooks/session1and2.ipynb
kwk38kh/03-simple-predictions
4097d45d9a25a261dc1591da1dd1ca641e6868ae
[ "MIT" ]
null
null
null
49.912698
639
0.670377
[ [ [ "## Seaborn Refresher\n\nLet's review using Seaborn and Pandas to load up some data and then pair plot it.\n\nWe'll be using the same tools that we used last week for this \n- [pandas](pandas.pydata.org) for data handling (our dataframe library)\n- [seaborn](seaborn.pydata.org) for _nice_ data visualization\n\nShortly we'll also by trying out:\n\n- [scikit-learn](scikit-learn.org) an extensive machine learning library.\n- [numpy](numpy.org) - a fundamental maths library best used by people with a strong maths background. We won't explore it much today, but it does have some useful methods that we'll need. It underlies all other mathematical and plotting tools that we use in Python.\n\nWe'll be using scikit-learn over the next few weeks, and it's well worth reading the documentation and high level descriptions.\n\nAs before, the aim is to get familiar with code-sharing workflows - so we will be doing pair programming for the duration of the day! _You will probably want to take a moment to look at the documentation of the libraries above - especially pandas_\n\nThe other useful resource is Stack Overflow - if you have a question that sounds like 'how do I do {x}' then someone will probably have answered it on SO. Questions are also tagged by library so if you have a particular pandas question you can do something like going to https://stackoverflow.com/questions/tagged/pandas (just replace the 'pandas' in the URL with whatever library you're trying to use.\n\nGenerally answers on SO are probably a lot closer to getting you up and running than the documentation. Once you get used to the library then the documentation is generally a quicker reference. We will cover strategies for getting help in class.\n\n## Git links\n\nWe will be working through using GitHub and GitKraken to share code between pairs. We will go through all the workflow in detail in class but here are some useful links for reference:\n\n- GitKraken interface basics: https://support.gitkraken.com/start-here/interface\n- Staging and committing (save current state -> local history): https://support.gitkraken.com/working-with-commits/commits\n- Pushing and pulling (sync local history <-> GitHub history): https://support.gitkraken.com/working-with-repositories/pushing-and-pulling\n- Forking and pull requests (request to sync your GitHub history <-> someone else's history - requires a _review_):\n - https://help.github.com/articles/about-forks/\n - https://help.github.com/articles/creating-a-pull-request-from-a-fork/\n\n## Step 1: Read in the dataset and pairplot\n\nFor this exercise, we will be using the Tips dataset that you can find in the same directory as this notebook. This is a widely used dataset in machine learning, and while not related to minerals and energy, it is sufficient for our purpose. The dataset relates total bills at US restaurants to tip size, as well as the sex of the tipper, whether they smoke, the day of the week, the kind of meal, and the number of people.\n\nIn pairs work out how to read this data into a pandas dataframe, then use Seaborn to pairplot the species in the dataset.\n\nSeaborn happens to have this dataset built in. Run the next cell to see the built in data. Then modify the code to open the dataset from a CSV file. 
The dataset can take a little while to load, so be patient - the dataset will appear.", "_____no_output_____" ] ], [ [ "import seaborn as sns\ntips = sns.load_dataset('tips')\ntips", "_____no_output_____" ] ], [ [ "## Step 2 : Find a linear regression with Seaborn\n\nNow that you've seen some pairplots (tips vs tip size are the most meaningful comparisons), use Seaborn to find lines of best fit in this dataset.\n\nThere are a few different ways to do this. Try using regplot.\n\nYou may notice a \"FutureWarning\". Ignore this - Python is often in a state of flux and these types of warning are common. Often with major packages like Seaborn you'll find that a soon-to-be-released version of the library will not create these warnings.", "_____no_output_____" ], [ "## Step 3: Linear regression with scikit-learn\n\nScikit-learn provides machine learning tools in several categories. These include supervised learning and unsupervised learning. We'll start working with unsupervised learning next week. Supervised learning is about finding a model for features that can be measured and some labelling that we have for the available data. If, for example, we have lithium assays and we want to try to predict lithium based on sensor data from a portable spectrometer, then the lithium assays are the labels and the measured intensities at different wavelengths are the measured features. This kind of supervised learning is called regression.\n\nThere's another kind of supervised learned which is called classification, this is what we're doing when we want to assign observed data to different discrete classes. Regression can sometimes be used, with minor additions, to classify data as well. For example, with our lithium spectral regression model we could classify samples as being high in lithium or low in lithium simply by using a threshold value that we set. There are more sophisticated ways to classify, which will be covered in later weeks.\n\nWe use the estimator API of scikit-learn to do regression.\n\n### The Estimator API of scikit-learn\n\nThere are a few steps to follow when using the estimator API. These steps are the same for all methods that scikit-learn implements, not just for linear regression.\n\n1. Choose a class of model by importing the appropriate estimator class. In our case we want to import Linear Regression. Here's how we can do it.\n\nFirst import LinearRegression from scikit-learn. Use this code:\n\n```from sklearn.linear_model import LinearRegression```", "_____no_output_____" ], [ "Now create an \"instance\" of the LinearRegression class. We can do it like this:\n\n```model = LinearRegression(fit_intercept=True)```\n\nTo check that this has worked look at the model object after it's created. It should tell you about some of its settings.\n\n```model```", "_____no_output_____" ], [ "These settings are also called hyperparameters. We'll encounter hyperparameters again next week, and will talk about them in more detail then. They're often very important in working out whether our model is well fitted to the data.\n\n2. Next we need to arrange a pandas dataframe (like \"tips\") into a features matrix and a target vector.\n\nSearch on the Internet for this. I know that Stack Overflow will be helpful. You will need to look at the column names in the dataframe to find the names of the two columns that are important to us. Do this in the next cell.\n\nThe notation is a bit strange! The two pairs of \"[ ]\" as \"[[ ]]\" that you will see is correct.", "_____no_output_____" ], [ "3. 
Fit the model to your data by using the fit() method of the LinearRegression object.\n\nAgain, look at the documentation for how to apply this. You'll need to provide your features matrix (X) and target vector (y) as parameters to the fit method.\n\n#### Congratulations you've trained your first machine learning model!\n\nAs this is a two dimensional linear model, it has two parameters. The line's intercept and slope. The notation that scikit-learn uses is a little unfriendly. Its convention is to add underscores to the names of the parameters it finds. Also, it calls the slope \"coef\".\n\nAfter fitting the model, find the coefficients with ```model.coef_``` and ```model._intercept_```.", "_____no_output_____" ], [ "#### Now that we've trained a model, we should make predictions!\n\n6. Make predictions!\n\nThis is also more complicated with scikit-learn than it is with Seaborn.\n\nFor a given, single value for a feature (ie a meal cost) we can predict a label. For example, for a meal cost of $20, we could make a prediction with:\n\n```predicted_tip = model.predict(20)```\n\nBut to find the smooth line that seaborn finds we need to explicitly tell scikit-learn that we want to do a prediction for all of the meal costs that we're interested in. To do this we\nuse a new library called \"numpy\" and a method called linspace (which is short for linear spacing).\n\nFirst we need to import numpy.\n\n```import numpy as np```\n\nWhile I used predicted_tip above as an example of a predicted target array, and 20 is an example of x, I'll now switch to the usual y and x conventions used in tutorials with scikit-learn. You can of course use any variables names you, and in your own code it's best to use descriptive names that mean something in the domain of your industry, like 'predicted_tip\", or \"octane_rating\".\n\nWe need to use the linspace method in numpy. Use it like this:\n\n```xfit = np.linspace(0, 60)```\n\nThis will create a collection of meal costs, in order, starting from 0 dollars up to 60 dollars. This is what we need, but this collection isn't formatted correctly for scikit-learn. To make it work with scikit-learn we next have to adjust the format with this instruction:\n\n```xfit_reshaped = xfit[:, np.newaxis]\nyfit = model.predict(xfit_reshaped).```\n\nyfit now contains our predicted tips. Type ```yfit``` to see them numerically.\n\nTry this all out in the next cell. Take it step by step. Don't try to run this all in one go, but build it up line by line, checking that you do not get errors after each line.", "_____no_output_____" ], [ "We can also plot these results, but lets stop here.\n\nWe could use the default plotting functions that Pandas provides for this. But for report purposes you may, in future, want to find out how to use Seaborn for this.\n\nWe can create dataframes from this data with code like:\n\n```\nDataFrame({'meal_cost':xfit, 'tips':yfit})\n```", "_____no_output_____" ], [ "### Exercise: Create a linear function with scikit-learn. Then add noise.\n\n1. Scikit-learn includes a built in function to quickly create datasets for experimenting with the estimator API.\n\nFind out about the make_regression method in sklearn.datasets.\n\nUse this to make a noisy line with 100 samples. Use n_features to set the number of features, and use noise to adjust gaussian noise that is added.\n\nAdd outliers and see how the fitting is impacted. LinearRegression can report R^2 values. Use the \"score\" method. 
Google and Stack Overflow will help with usage.", "_____no_output_____" ], [ "### Exercise: Create a linear function with seaborn. Then add noise.\n\nWhile we'll be using scikit-learn over the next few weeks and it's helpful to keep working with it, it's really quite painful compared to Seaborn. Looking at residuals is an exploratory task that Seaborn is better suited to than scikit-learn.\n\nTry this code, which plots the residuals after gaussian noise is added to a simple y = x line. Try to get the gist of how it works.\n\n```\nimport numpy as np\nimport seaborn as sns\nsns.set(style=\"whitegrid\")\n\nrs = np.random.RandomState(7)\nx = rs.uniform(0, 100, 10000)\ny = x + rs.normal(0, 1, 10000)\n\nsns.residplot(x, y, lowess=True, color=\"g\")\n```\n\nThe RandomState object is part of the Numpy numerical package which we won't explore in detail at this time. It is a collection of mathematics functions which underlies all other mathematical libraries that we've been using, such as Seaborn and scikit-learn. RandomState is used for generating random numbers from distributions.\n\nA uniform distribution means that all of the values that may be returned are equally likely. When we throw dice we are sampling from a uniform distribution. Here we tell Python that we want random numbers between 0 and 100, all equally likely, and we want 10000 of them.\n\nA normal (or gaussian) distribution returns values which are most likely to be near the mean, falling off symetrically to either side. It is the \"bell\" curve that you've seen many times. Here we say that we want the error that we add to our simple line to have a mean of zero, and a standard deviation of 1.\n\nSeaborn's residplot function plots the residuals after fitting a line to the data. With a normal distribution we expect to see these residuals evenly scattered around zero.\n\nAn example of a heavy tailed distribution is the gamma distribution. This is often used to model failure likelihood for machines. Unlike the normal distribution it is not symmetric. In quality control applications it quickly peaks after a short lifetime, but then has a long tail that extends many years into the future. This makes sense as we expect most failures to be early in the life of a machine because of manufacturing faults, after that the failure time is less predictable, but we all know of machines or gadgets that seem to last forever. Google will quickly bring up examples of the shape.\n\nTry the code above again, but substitute ```rs.normal (0, 1, 10000)``` with ```rs.gamma(2, 2, 10000)```.\n\nHow would you change this code to create a heteroscadistic error?", "_____no_output_____" ], [ "### Exercise: Examine the linear dataset that you have brought\n\nFind a least-squares fit using scikit-learn, and plot the residuals. Are the residuals gaussian? Is there homoscedasticity? Do you have outliers?", "_____no_output_____" ], [ "### Exercise: Metal recovery vs %sulphur in feed\n\nIn this exercise we're going to look at a typical minerals engineering problem. We have data collected in laboratory batch floatation tests on samples taken from different parts of a base metal orebody. It appears that there is a simple relationship between metal recovery and the percent of sulphur in the sample. We can see that recovery is increasing with sulphur.\n\n1. Open and scatterplot the file metal_recovery_vs_sulphur.csv", "_____no_output_____" ], [ "2. Find the least-squares linear fit for this data, without using any data transformations. Plot the results. 
Using Pandas and Seaborn may be the easiest way to approach this.\n\nThe results aren't terrible, but there are some problems. There is a definite curve in the data and the line is unable to fit through all points. It also poor at extrapolating. This curve will happily predict greater than 100% recovery at feed sulphur beyond around 2%. It's also happy to advise metal recoveries of around 40% with no feed sulphur. That may seem reasonable to a data scientist, but domain experts will regard that as ridiculous.\n\n3. Try transforming the sulphur feed percentages before fitting. We'd like to know if the data can be made to look more linear through a simple algebraic relationship. Domain knowledge may help here. The general shape of the curve suggests that there may be a power relationship here. What happens if you regress again the square root of feed sulphur? What kind of transformation could lead a metal recovery that is limited below 100%? Maybe it's worth trying the reciprocal of feed sulphur?", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec59856727e80de2724594a76d50c361d7887f5b
41,648
ipynb
Jupyter Notebook
Week 6/SLU09 - Linear Algebra & NumPy, Part 1/Solutions notebook.ipynb
Juliana-R/ds-prep-course
cc8e50e3aae8023cdb0281d30cc4793e9ca7801e
[ "MIT" ]
26
2020-03-23T19:34:02.000Z
2021-03-03T23:02:38.000Z
Week 6/SLU09 - Linear Algebra & NumPy, Part 1/Solutions notebook.ipynb
ecarlosfonseca/ds-prep-course
cb0527748763b2ffa8852de66e22d45dcd4c55fb
[ "MIT" ]
30
2020-03-15T20:37:50.000Z
2022-03-12T00:27:09.000Z
Week 6/SLU09 - Linear Algebra & NumPy, Part 1/Solutions notebook.ipynb
ecarlosfonseca/ds-prep-course
cb0527748763b2ffa8852de66e22d45dcd4c55fb
[ "MIT" ]
36
2020-03-21T12:44:08.000Z
2021-04-02T21:56:32.000Z
27.876841
271
0.548862
[ [ [ "# SLU09 - Linear Algebra & NumPy, Part 1: Exercise notebook", "_____no_output_____" ], [ "Welcome! Let's check you Linear Algebra and NumPy knowledge on:\n\n- Vectors and basic operations;\n- Matrices and basic operations;\n- NumPy arrays.", "_____no_output_____" ] ], [ [ "# run this cell before anything else\n\n# numpy\nimport numpy as np\n\n# auxiliary stuff (don't worry about it)\nfrom utils import *\n\n# for evaluation purposes\nimport hashlib\nfrom math import isclose\ndef _hash(s):\n return hashlib.blake2b(\n bytes(str(s), encoding='utf8'),\n digest_size=5\n ).hexdigest()", "_____no_output_____" ] ], [ [ "<img src=\"./media/what_if.png\" width=\"500\">", "_____no_output_____" ], [ "## 1 - Vectors", "_____no_output_____" ], [ "In this section of exercises, you'll apply what you learned about vectors definition, basic properties and operations.", "_____no_output_____" ], [ "### Exercise 1.1\n\nWhich **two** of the following objects **do NOT** represent vectors in Python?\n\na) The integer `1`;\n\nb) The list `[1, 2, 3, 4]`;\n\nc) The NumPy array `np.array([[2,0], [1,0]])`;\n\nd) The NumPy array `np.array([[0], [1]])`;\n\ne) The NumPy array `np.array([[2]])`.", "_____no_output_____" ] ], [ [ "# Uncomment the correct answer\n#answer = \"a and b\"\n#answer = \"a and e\"\n#answer = \"a and c\"\n#answer = \"c and d\"\n#answer = \"a and d\"\n\n### BEGIN SOLUTION\nanswer = \"a and c\"\n### END SOLUTION", "_____no_output_____" ], [ "assert _hash(answer) == 'a2b790bda1', \"Something's not right! Check if all the quantities are vectors and if the shape of the arrays makes sense. Remember all the ways in which you can represent a vector.\"", "_____no_output_____" ] ], [ [ "### Exercise 1.2\n\nWhich **one** of the following sentences is **correct**?\n\na) The vectors $[0, 1]$ and $[2, 0]$ are not orthogonal.\n\nb) The vectors $[0, 1]$ and $[2, 0]$ are collinear (linearly dependent).\n\nc) The vectors $[1, 1]$ and $[2, 2]$ are non-collinear (linearly independent).\n\nd) We can describe any 2D vector we want with some linear combination of the vectors $[0, 1]$ and $[2, 0]$.\n\ne) We can describe any 2D vector we want with some linear combination of the vectors $[1, 1]$ and $[2, 2]$.", "_____no_output_____" ] ], [ [ "# Uncomment the correct answer\n#my_answer = \"a\"\n#my_answer = \"b\"\n#my_answer = \"c\"\n#my_answer = \"d\"\n#my_answer = \"e\"\n\n### BEGIN SOLUTION\nmy_answer = \"d\"\n### END SOLUTION", "_____no_output_____" ], [ "if _hash(my_answer) == \"f4e169f8ee\":\n print(\"Review section 1.7 of Learning Notebook 1! What's the angle between these vectors?\")\nelif _hash(my_answer) == \"9350f68d6b\" or _hash(my_answer) == \"2add4c06d4\":\n print(\"Boris Johnson says no... Check sections 1.4-1.5 of Learning Notebook 1!!\")\nelif _hash(my_answer) == \"94ca9f1720\":\n print(\"Go check sections 1.4-1.5 of Learning Notebook 1. Being 'independent' matters.\")\nelse:\n assert _hash(my_answer) == '838cd7f570', \\\n \"Did you just make up an answer?\\n You're only supposed to uncomment the correct answer.\"", "_____no_output_____" ] ], [ [ "Oh and by the way... 
zero velocity kitten is still waiting for a refill.\n<img src=\"./media/kitten_stopped.png\" width=\"600\"/>", "_____no_output_____" ], [ "## 2 - Vectors in NumPy", "_____no_output_____" ], [ "### Exercise 2.1\n\nLook at the NumPy array below:\n\n```Python\n np.array([[1, 0, 0, 0]])\n```\n\nAssign the shape and number of array dimensions, respectively, to variables `array_shape` and `array_ndim`.\n\nThe ideia is that you should be able to assign the correct tuple to `array_shape` and the correct integer to `array_ndim`, without using any auxiliary functions.", "_____no_output_____" ] ], [ [ "# Assign the shape of array to array_shape (remember that the shape is represented by a tuple!!)\n# array_shape = ...\n\n# Assign the number of array dimensions to array_ndim\n# array_ndim = ...\n\n### BEGIN SOLUTION\narray_shape = (1, 4)\narray_ndim = 2\n### END SOLUTION", "_____no_output_____" ], [ "assert isinstance(array_shape, tuple), \"The shape of an array should by a tuple!\"\nassert isinstance(array_ndim, int), \"The number of array dimensions should be an integer!\"\nassert _hash(array_shape) != '61dcee3c2e', (\n \"The shape is almost right! You just got the order of the axes wrong!\") \nassert array_ndim != 4, \"Wrong dimensions! You can say that vector is 4D, but the number of array dimensions is not 4!...\\n\\\n Check section 2.2.2 of Learning Notebook 1 if you get stuck.\"\nassert _hash(array_shape) == '56c0c2870d', (\n \"The shape is wrong. Check section 2.2.2 of Learning Notebook 1 if you get stuck.\")\nassert _hash(array_ndim) == 'cf2d85ea1d', (\n \"The number of array dimensions is wrong. Check section 2.2.2 of Learning Notebook 1 if you get stuck.\")", "_____no_output_____" ] ], [ [ "### Exercise 2.2\n\n**(i)** Use the method [`ndarray.reshape()`](https://numpy.org/doc/1.18/reference/generated/numpy.ndarray.reshape.html) to convert `u` to a **column vector** `u_column`, represented by a 2D array;\n\n**(ii)** Use the transpose attribute (or use `reshape` again) to get the transpose of `u_column` and assign it to `u_row`.", "_____no_output_____" ] ], [ [ "# run this cell first\nu = np.array([0, 1, .5, .25])", "_____no_output_____" ], [ "# Convert u to a column vector represented by a 2D array \n# u_column = ...\n\n# Convert u_column to a row vector represented by a 2D array\n# u_row = ...\n\n### BEGIN SOLUTION\nu_column = u.reshape((-1, 1))\nu_row = u_column.T\n### END SOLUTION", "_____no_output_____" ], [ "assert _hash(u_column) != '047849a4d8', \"u_column should be a column vector and not a row vector!\"\nassert _hash(u_row) != '56cc2df426', \"u_row should be a row vector and not a column vector!\"\nassert u_column.ndim == 2 and u_row.ndim == 2, \"Your arrays need to be 2D!\"\nassert (_hash(u_row) == '047849a4d8') and (_hash(u_column) == '56cc2df426'), \"Did you change the content of the arrays?\"", "_____no_output_____" ] ], [ [ "### Exercise 2.3\n\n#### 2.3.1)\n\nFind the dot product between vectors `s` and `t` and assign the result to `scalar`:", "_____no_output_____" ] ], [ [ "# run this cell first\ns = np.array([1, -2, -2, 2])\nt = np.array([-6, -3, 1, 1])", "_____no_output_____" ], [ "# use numpy to determine the dot product between s and t\n# scalar = ...\n\n### BEGIN SOLUTION\nscalar = np.dot(s, t)\n### END SOLUTION", "_____no_output_____" ], [ "assert not isinstance(scalar, np.ndarray), \"The result should be a scalar, not a numpy array!\"\nassert _hash(scalar) == '5b4838043f', \"Wrong! 
:(\"", "_____no_output_____" ] ], [ [ "#### 2.3.2)\n\nBased on the result, what can you conclude about the vectors `s` and `t`? (uncomment the correct answer)\n\na) `s` is the transpose of `t`;\n\nb) `s` and `t` are orthogonal;\n\nc) `s` and `t` can describe the space of all 4D vectors;\n\nd) `s` and `t` are collinear.", "_____no_output_____" ] ], [ [ "# Uncommment the correct answer\n#correct_answer = \"a\"\n#correct_answer = \"b\"\n#correct_answer = \"c\"\n#correct_answer = \"d\"\n\n### BEGIN SOLUTION\ncorrect_answer = \"b\"\n### END SOLUTION", "_____no_output_____" ], [ "assert _hash(correct_answer) != 'f4e169f8ee', \"That's not correct! What is the transpose of a vector?\"\nassert _hash(correct_answer) != '2add4c06d4', \"Are you sure? Read section 1.5 of Learning Notebook 1...\"\nassert _hash(correct_answer) != '838cd7f570', \"Not really. Can you find a scalar that transforms s into t, or vice-versa?\"\nassert _hash(correct_answer) == '9350f68d6b', \"Don't write anything, you just need to uncomment the correct answer!'\"", "_____no_output_____" ] ], [ [ "## 3 - Matrices", "_____no_output_____" ], [ "### Exercise 3.1\n\nWhich **two** of the following sentences are **not correct**?\n\na) A symmetric matrix is always equal to its transpose;\n\nb) An identity matrix of size $n\\times n$ is a square matrix where all entries are zero;\n\nc) A square matrix is a matrix whose elements are square roots;\n\nd) Given any number of matrices of the same size, we can add them in any order we want.", "_____no_output_____" ] ], [ [ "# Uncommment the correct answer\n#me_answers = \"a and b\"\n#me_answers = \"b and c\"\n#me_answers = \"a and d\"\n#me_answers = \"c and d\"\n\n### BEGIN SOLUTION\nme_answers = \"b and c\"\n### END SOLUTION", "_____no_output_____" ], [ "assert _hash(me_answers) != 'f9f1efd901', \"That's not right. One of the chosen sentences is actually correct.\\nYou need to choose 2 incorrect sentences.\"\nassert _hash(me_answers) != '153d3d6799', \"That's not right. Both sentences are actually correct. Check section 3 of Learning Notebook 2 if you don't understand why!\"\nassert _hash(me_answers) != 'd5ea9fb2d7', \"That's not right. Remember that you can add matrices in any order you want,\\n as long as they have the same size, so the last sentence is true.\"\nassert _hash(me_answers) == '3f46356968', \"Don't write any code! Just uncomment the correct answer!\"", "_____no_output_____" ] ], [ [ "### Exercise 3.2\n\nWhat's the result of the following operation? (don't write any code!!)\n\n$$2 \\cdot \\begin{bmatrix}0 & 1\\\\ 1 & 0\\end{bmatrix}\n- 1 \\cdot \\begin{bmatrix}1 & 0\\\\ 0 & 1\\end{bmatrix}$$\n\na) $\\begin{bmatrix}1 & 0\\\\ 0 & 1\\end{bmatrix}$\n\nb) $\\begin{bmatrix}2 & 0\\\\ 0 & 2\\end{bmatrix}$\n\nc) $\\begin{bmatrix}-1 & 2\\\\ 2 & -1\\end{bmatrix}$\n\nd) $\\begin{bmatrix}1 & 1\\\\ 1 & 1\\end{bmatrix}$", "_____no_output_____" ] ], [ [ "# Uncommment the correct answer\n#correct_matrix = \"a\"\n#correct_matrix = \"b\"\n#correct_matrix = \"c\"\n#correct_matrix = \"d\"\n\n### BEGIN SOLUTION\ncorrect_matrix = \"c\"\n### END SOLUTION", "_____no_output_____" ], [ "assert _hash(correct_matrix) != 'f4e169f8ee', \"Wrong! Check section 3.3 of Learning Notebook 2 if you don't understand why.\"\nassert _hash(correct_matrix) != '9350f68d6b', \"Wrong! Check section 3.3 of Learning Notebook 2 if you don't understand why.\"\nassert _hash(correct_matrix) != '838cd7f570', \"Wrong! 
Check section 3.3 of Learning Notebook 2 if you don't understand why.\"\nassert _hash(correct_matrix) == '2add4c06d4', \"Don't write any code, you just need to uncomment the correct answer!'\"", "_____no_output_____" ] ], [ [ "## 4 - NumPy arrays and matrices", "_____no_output_____" ], [ "*Everything* is about matrices! Images are nothing but matrices.\n\nOn the cell below we'll open a greyscale image of a cute panda, reading it into a matrix of pixels.\n\nBecause we're dealing with a greyscale image, we can represent it by a 2D ndarray of shape `(height, width)`, where each entry is a value in the range `0` to `255`, corresponding to a pixel in the image.", "_____no_output_____" ] ], [ [ "# read cute panda image into 2D numpy array\npanda = load_panda()\n\n# show image\nplot_img(panda);\n\n# print array shape (image size in pixels)\nprint(\"panda greyscale image array:\", panda.shape)\n\n# preview some rows\nprint(\"\\nFirst 5 rows and 5 columns:\", panda[:5, :5])", "_____no_output_____" ] ], [ [ "### Exercise 4.1\n\nLet's invert the panda colours!! 🐼\n\nInvert the image colours by performing the operation `255 - value` for each element `value` in the array `panda`.\n\n**Hint**: NumPy will perform the addition (or subtraction) between scalars and arrays in an element-wise fashion. In Linear Algebra terms, this is the equivalent of subtracting the matrix `panda` from a matrix of the same size, where all entries are equal to `255`.", "_____no_output_____" ] ], [ [ "# invert the panda!\n# opposite_panda = ...\n\n### BEGIN SOLUTION\nopposite_panda = 255 - panda\n### END SOLUTION", "_____no_output_____" ], [ "assert isinstance(panda, np.ndarray) and panda.shape == (460, 460) and panda.min() == 0 \\\n and panda.max() == 255 and _hash(panda[0, 10]) == '751b860653', \\\n \"OMG you changed the panda variable!! Reload by running: panda = load_panda()\"\nassert opposite_panda.shape == (460, 460), \"The panda_inverted array should have the same shape as the panda array!\"\nassert _hash(int(opposite_panda[320, 400])) == '0d1d9d8d66', \"Something's not right.\"\nassert _hash(int(opposite_panda[10, 100])) == 'f7e25634c3', \"Something's not right.\"\nprint(\"\\n- You've just turned the panda into its negative!\\n\")\nprint(\"- But don't worry, pandas are always positive, even on the negative side!\\n\\n\")\nprint(\"(thank you for the joke, infrastructure lesser god)\")\nplot_pandas(opposite_panda)", "_____no_output_____" ] ], [ [ "### Exercise 4.2\n\nLet's transpose the panda!\n\nFind the transpose of `panda` and assign it to `transposed_panda`.", "_____no_output_____" ] ], [ [ "# find the transpose of panda\n# transposed_panda = ...\n\n### BEGIN SOLUTION\ntransposed_panda = np.transpose(panda)\n### END SOLUTION", "_____no_output_____" ], [ "assert isinstance(panda, np.ndarray) and panda.shape == (460, 460) and panda.min() == 0 \\\n and panda.max() == 255 and _hash(panda[0, 10]) == '751b860653', \\\n \"OMG you changed the panda variable!! Reload it with the code: panda = load_panda()\"\nassert _hash(panda[0, 10]) == '751b860653', \"Please don't change the panda variable!! Create a copy instead!\"\nassert _hash(transposed_panda) == '06f31209c9', \"Wrong! What did you do to the panda?\"\nprint(\"\\nCORRECT! 
Oh no look, the panda is falling!!...\")\nplot_pandas(transposed_panda)", "_____no_output_____" ] ], [ [ "### Exercise 4.3\n\nCreate a *binary* panda called `binary_panda` where:\n- You set the value of all entries that are **greater than** `100` to `255`;\n- You set the value of the remaining entries to `0`.", "_____no_output_____" ] ], [ [ "# create a boolean mask to filter all entries > 100\n# mask = ...\n\n# create a COPY of panda using the method .copy() - DO NOT USE \"binary_panda = panda\"!!\n# binary_panda = ...\n\n# use the mask to set all entries above 100 in binary_panda to 255\n# ...\n\n# set all the other values to 0 (tip: use ~mask)\n# ...\n\n### BEGIN SOLUTION\nmask = panda > 100\nbinary_panda = panda.copy()\nbinary_panda[mask] = 255\nbinary_panda[~mask] = 0\n### END SOLUTION", "_____no_output_____" ], [ "assert isinstance(panda, np.ndarray) and panda.shape == (460, 460) and panda.min() == 0 \\\n and panda.max() == 255 and _hash(panda[0, 10]) == '751b860653', \\\n \"OMG you changed the panda variable!! Reload it with the code: panda = load_panda()\"\nassert _hash(int(binary_panda[0, 10])) == '6bafb3698f', \"Wrong! What are you doing to the panda?\"\nassert _hash(int(binary_panda[0, 100])) == '5b4838043f', \"Wrong! What are you doing to the panda?\"\nprint(\"\\nCORRECT! Panda is now literally only black and white...\")\nplot_pandas(binary_panda)", "_____no_output_____" ] ], [ [ "### Exercise 4.4\n\n4 pandas is better than 1!! 🐼🐼🐼🐼\n\nBecause we like the tile effect, let's create an image with 2x2 pandas, concatenating our `panda` array into an image with 4 pandas, which should look like this:\n\n<img src=\"./media/tile_pandas.png\">", "_____no_output_____" ] ], [ [ "## create a 2D array with 4 concatenated pandas\n\n# it should have 2 pandas on the first column (first concatenation step)\n# column_pandas = ...\n\n# and 2 columns of pandas side by side (second concatenation step)\n# tile_pandas = ...\n\n### BEGIN SOLUTION\ncolumn_pandas = np.concatenate((panda, panda), axis=0)\ntile_pandas = np.concatenate((column_pandas, column_pandas), axis=1)\n### END SOLUTION", "_____no_output_____" ], [ "assert isinstance(panda, np.ndarray) and panda.shape == (460, 460) and panda.min() == 0 \\\n and panda.max() == 255 and _hash(panda[0, 10]) == '751b860653', \\\n \"OMG you changed the panda variable!! Reload it with the code: panda = load_panda()\"\nassert _hash(column_pandas[720,400]) == \"9963b5511e\", \"Wrong. Check the concatenation step for column_pandas!\"\nassert tile_pandas.shape == (920, 920), \"The image size is not right!\"\nassert tile_pandas.shape != (1840, 460), \"Did you concatenate along the correct axis?\"\nassert _hash(tile_pandas[300,400]) == '6c5db95765', \"Wrong! What did you do to the panda?\"\nassert _hash(tile_pandas[260,720]) == 'f4bbfd7be2', \"Wrong! What did you do to the panda?\"\nassert _hash(tile_pandas[600,420]) == '0d1d9d8d66', \"Wrong! What did you do to the panda?\"\nassert _hash(tile_pandas[600,800]) == 'ede52244b6', \"Wrong! What did you do to the panda?\"\nprint(\"\\nCORRECT! 
Tiled pandas!\")\nplot_pandas(tile_pandas)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Dizzy panda?\n\nIt's a linear combination of 3 pandas...", "_____no_output_____" ] ], [ [ "# plot panda matrix and a linear combination of two panda matrices!!\npanda_combination = 0.5*panda + 0.5*panda.T + 0.5*panda[::-1]\nplot_pandas(panda_combination)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "### Exercise 4.5 (boss level!!)\n\nDo you like puzzles? \n\nLet's create a 100-piece puzzle together with our panda image! We'll split the tasks:\n\n- Your task will be to slice the image into 100 pieces!\n\n- My task will be to randomly mix the pieces you've built and display the final result.\n\nWhat I need from you is:\n\n- `pieces` - a **list** with all the 100 pieces inside it, represented as 2D array slices of our `panda` array, each of shape `(side, side)` (I don't care about the order of the pieces inside the list...).\n\nYou will also need to calculate the following variables first:\n\n- `area` - to get a 100-piece puzzle, how many pixels will we have per **square** piece;\n\n- `side` - the side of each of the 100 square pieces.\n\n\nI know I know, your part is harder! 😁😝\n\nNote the following:\n\n- The end size of your array should be the same as the original `panda`;\n\n- Each pixel is simply an element of the array.\n\nLet's do this!", "_____no_output_____" ] ], [ [ "# Your turn\n\n#### how many pixels will each of the square pieces have? (just some simple arithmetic)\n# area = ...\n\n#### what's the side length per square piece?\n# convert the result to an INTEGER!!\n# side = ...\n\n\n#### now slice the image into 100 square pieces!\n\n# initialize a list to store the 100 numpy array pieces\npieces = []\n\n# this is where you get creative!\n# try to find a way to iterate through the panda array\n# and save all the different 100 array pieces in the pieces list\n# - you could use a for loop, range, list comprehension, append,... whatever you want!\n# - all you've learned in the previous SLUs is enough to solve this exercise!\n# - if you can, avoid using any NumPy methods\n# make sure you save all the 100 different pieces (numpy arrays) inside the list pieces\n#\n# ...\n# pieces...\n\n\n### BEGIN SOLUTION\narea = (panda.shape[0] * panda.shape[1]) / 100\nside = int(area ** .5) # side = int(panda.shape[0] / 10)\n\n# pieces list with 2 for loops\nimg_side = panda.shape[0]\nfor h_index in range(0, img_side, side): # loop horizontally (h_index)\n for v_index in range(0, img_side, side): # loop vertically (v_index)\n piece = panda[h_index:h_index+side, v_index:v_index+side] # slice next piece of shape (46, 46)\n pieces.append(piece) # save piece in pieces list\n### END SOLUTION", "_____no_output_____" ], [ "import math\n\nassert isinstance(panda, np.ndarray) and panda.shape == (460, 460) and panda.min() == 0 \\\n and panda.max() == 255 and _hash(panda[0, 10]) == '751b860653', \\\n \"OMG did you change the panda variable? How dare you do that to a panda! Reload it with the code: panda = load_panda()\"\nassert isinstance(pieces, list), \"Wrong! The variable pieces should be a list of numpy arrays!\"\nassert len(pieces) == 100, \"Wrong! 
You should have 100 pieces, no less no more!\"\n\n# check if all pieces are in the list\npiece_hashes = set([_hash(piece) for piece in pieces])\nassert piece_hashes == {\n '02be9d6ce2', '07848c537e', '0a2e28f440', '0af647d93c', '11d3b39a80', '13cfdfa2e9', '1a8344eb52', '1bd4a336b6',\n '1c601bdd89', '22bd495b3c', '26c7dc1d4c', '289180411b', '28a6e32b35', '2a6c71b8df', '2a729e70c9', '2b02c76a03',\n '2bfa86fc88', '2e95608216', '2f248cb1c8', '35fb7da6db', '3697193023', '376315dbbd', '3eb8f59cd2', '40128ced6e',\n '40d7edd99f', '41badc88cd', '430813f00e', '4397304874', '44c70a7d78', '470fb34b6d', '4979a9b607', '4e5bd0b84b',\n '4fe0fd7b10', '503419682b', '5048f5fd22', '51857ecdd5', '51a28f3aca', '57da59b8f8', '5835ab0b91', '58600ed82f',\n '5ac9427587', '5c84000093', '5d45dcf52a', '5d62bde0ab', '6756b0fc03', '68eb166d6a', '6ac4fb00e9', '6ddbc03b88',\n '706b4ae8ab', '747944d48d', '7675185716', '79af3029d6', '7caa71e4f3', '7da3a9d8c2', '847027c964', '84cea37281',\n '86457b6483', '89a69e1c4c', '8a052d1712', '8b8d7648ac', '97862ad710', '983979c8d2', '9989f29398', '9b82e1e2ee',\n '9c867619b4', 'a026d50550', 'a7f7329ad3', 'a94443d646', 'acf93bc106', 'b0e0eb6ba7', 'b572037421', 'b6be9e18e8',\n 'b922487e57', 'bb418e046e', 'bedf071ed4', 'bf00c8999b', 'bfe2706cfc', 'c48ec85d2f', 'c4ae780575', 'c5b00870fb',\n 'c7901354c2', 'cfc67efe75', 'd7251c83d4', 'd88fc392a9', 'da29f647d5', 'db3609fbb9', 'dcb1466706', 'de172beccc',\n 'de79a642a2', 'deeee80582', 'df8e136de6', 'e15c1c9bc9', 'e5642145ee', 'e806e500ee', 'e975f82de7', 'ed3651f66f',\n 'f0236ab443', 'f12bd5363a', 'f2e8138d40', 'fe3fbd881d'}, \"No! :( Something's not right... The panda is crying.\"\n\nprint(\"You did it, you're awesome! Let me shuffle the pieces now. :)\")", "_____no_output_____" ], [ "# My turn - run this cell!!\n\n# shuffle the pieces\nimport random\npieces_shuffled = pieces.copy()\nrandom.shuffle(pieces_shuffled)", "_____no_output_____" ] ], [ [ "The moment of truth...", "_____no_output_____" ] ], [ [ "# SHOW THE PUZZLE!!\ntry:\n plot_img(np.concatenate([np.concatenate(pieces_shuffled[n:n+10], axis=1) for n in range(0, 100, 10)]))\n print(\"\\nYey!! There it is. Our 100-piece puzzle!\")\nexcept:\n print(\"Did you pass all the asserts? Either you didn't or I didn't catch your errors.\")\n print(\"Now our panda is sad...\")\n plot_sad_panda()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ] ]
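A quick aside on Exercise 3.2 above: scalar multiplication and matrix subtraction are both element-wise, so the hand calculation can be checked directly with NumPy. The sketch below is illustrative only; the two matrices come from the exercise statement, and the variable names (`exchange`, `identity`, `result`) are ours, not the course's.

```python
import numpy as np

# The two matrices from Exercise 3.2: the 2x2 exchange matrix and the identity.
exchange = np.array([[0, 1],
                     [1, 0]])
identity = np.eye(2, dtype=int)

# 2*A - 1*I scales every entry by 2, then subtracts position by position,
# which is exactly how the calculation proceeds on paper.
result = 2 * exchange - 1 * identity
print(result)  # -> [[-1  2]
               #     [ 2 -1]]
```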
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec59a58ef2357a4d914d8d8a0d889a09912786a0
7,326
ipynb
Jupyter Notebook
2/dogs_and_cats.ipynb
aykuli/stepic-ml-courses
83f079e71dde288d13f211eb4f12fa231e750d20
[ "MIT" ]
null
null
null
2/dogs_and_cats.ipynb
aykuli/stepic-ml-courses
83f079e71dde288d13f211eb4f12fa231e750d20
[ "MIT" ]
null
null
null
2/dogs_and_cats.ipynb
aykuli/stepic-ml-courses
83f079e71dde288d13f211eb4f12fa231e750d20
[ "MIT" ]
null
null
null
32.131579
85
0.355037
[ [ [ "from sklearn import tree\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import cross_val_score\n\nfrom IPython.display import SVG\nfrom IPython.display import display\nfrom IPython.display import HTML\nfrom graphviz import Source\n\nstyle=\"<style>{width: 70% !important; height: 70% !important;}</style>\"\n%matplotlib inline", "_____no_output_____" ], [ "dac_test = pd.read_json('../data/dataset_209691_15.txt')\ndac_train = pd.read_csv('../data/dogs_n_cats.csv')\n\nprint(dac_train)\nprint(dac_test)", " Длина Высота Шерстист Гавкает Лазает по деревьям Вид\n0 15 18.0 1 1 0 собачка\n1 20 24.0 1 0 0 собачка\n2 34 40.8 1 1 0 собачка\n3 26 31.2 1 1 0 собачка\n4 16 19.2 1 1 0 собачка\n.. ... ... ... ... ... ...\n995 8 3.2 1 0 1 котик\n996 7 2.8 1 0 1 котик\n997 6 2.4 1 0 1 котик\n998 7 2.8 1 0 1 котик\n999 11 4.4 1 0 1 котик\n\n[1000 rows x 6 columns]\n Длина Высота Шерстист Гавкает Лазает по деревьям\n0 25 30.0 1 1 0\n1 17 20.4 1 1 0\n2 16 19.2 1 1 0\n3 16 19.2 1 1 0\n4 24 28.8 1 1 0\n.. ... ... ... ... ...\n95 12 4.8 1 0 1\n96 14 5.6 1 0 1\n97 13 5.2 1 0 1\n98 9 3.6 1 0 1\n99 11 4.4 1 0 1\n\n[100 rows x 5 columns]\n" ], [ "X_train = dac_train.drop(['Вид'], axis = 1)\ny_train = dac_train.Вид\nprint(X_train)\nprint(y_train)\nX_test = dac_test\nprint(X_test)", " Длина Высота Шерстист Гавкает Лазает по деревьям\n0 15 18.0 1 1 0\n1 20 24.0 1 0 0\n2 34 40.8 1 1 0\n3 26 31.2 1 1 0\n4 16 19.2 1 1 0\n.. ... ... ... ... ...\n995 8 3.2 1 0 1\n996 7 2.8 1 0 1\n997 6 2.4 1 0 1\n998 7 2.8 1 0 1\n999 11 4.4 1 0 1\n\n[1000 rows x 5 columns]\n0 собачка\n1 собачка\n2 собачка\n3 собачка\n4 собачка\n ... \n995 котик\n996 котик\n997 котик\n998 котик\n999 котик\nName: Вид, Length: 1000, dtype: object\n Длина Высота Шерстист Гавкает Лазает по деревьям\n0 25 30.0 1 1 0\n1 17 20.4 1 1 0\n2 16 19.2 1 1 0\n3 16 19.2 1 1 0\n4 24 28.8 1 1 0\n.. ... ... ... ... ...\n95 12 4.8 1 0 1\n96 14 5.6 1 0 1\n97 13 5.2 1 0 1\n98 9 3.6 1 0 1\n99 11 4.4 1 0 1\n\n[100 rows x 5 columns]\n" ], [ "np.random.seed(0) \nclf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=max_depth)\nclf.fit(X_train, y_train)", "_____no_output_____" ], [ "scores_data = pd.melt(scores_data, id_vars=['max_depth'], \n value_vars=['train_score', 'test_score'], \n var_name='set_type', value_name='score')", "_____no_output_____" ], [ "sns.lineplot(x=\"max_depth\", y =\"score\", hue=\"set_type\", data=scores_data)", "_____no_output_____" ], [ "pred = clf.predict(X_test)\ndogs = list(pred)\ndogs.count('собачка')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ec59a5cbc5086f7740711dd6c3d4c33d7efe6490
8,640
ipynb
Jupyter Notebook
pyVNA/_take_VNA_sweep.ipynb
nikitakuklev/ADRNX
b8a07f50788bbbfd36a8f956ca70cf8ed2bd95ee
[ "Apache-2.0" ]
null
null
null
pyVNA/_take_VNA_sweep.ipynb
nikitakuklev/ADRNX
b8a07f50788bbbfd36a8f956ca70cf8ed2bd95ee
[ "Apache-2.0" ]
null
null
null
pyVNA/_take_VNA_sweep.ipynb
nikitakuklev/ADRNX
b8a07f50788bbbfd36a8f956ca70cf8ed2bd95ee
[ "Apache-2.0" ]
null
null
null
29.090909
131
0.474074
[ [ [ "import serial,sys,time,math,datetime,os, numpy as np, matplotlib.pyplot as plt\nfrom timeit import default_timer as timer", "_____no_output_____" ], [ "def setup_adapter():\n ser = serial.Serial()\n ser.port = \"/dev/ttyUSB2\"\n ser.baudrate = 115200\n ser.timeout = 2.0\n ser.open()\n time.sleep(0.2)\n ser.flushInput()\n ser.write('++savecfg 0\\n'.encode('ascii'))\n ser.write('++auto 0\\n'.encode('ascii'))\n set_tm()\n \ndef read_line_gpib():\n ser.write('++read eoi\\n'.encode('ascii'))\n return ser.readline().decode().rstrip()\n\ndef read_all_gpib():\n time.sleep(5)\n ser.write('++read eoi\\n'.encode('ascii'))\n ser.timeout = 0.5 \n resp = bytes()\n cnt = 0\n lentot = 0\n for i in range(1,5000):\n resppart = ser.read(10000)\n time.sleep(0.01)\n #print(len(resppart))\n if (len(resppart) == 0):\n cnt += 1\n else:\n lentot += len(resppart)\n cnt = 0\n resp += resppart\n if (cnt>200):\n break \n print('Read {} bytes'.format(lentot))\n return resp\n\ndef read_all_gpib_withtarget(amt):\n time.sleep(5) \n ser.timeout = 0.5 \n resp = bytes()\n cnt = 0\n lentot = 0\n st = timer()\n ser.write('++read eoi\\n'.encode('ascii'))\n for i in range(1,5000):\n if (timer()-st > 3):\n ser.write('++read eoi\\n'.encode('ascii'))\n st = timer()\n time.sleep(0.01)\n resppart = ser.read(100000)\n time.sleep(0.01)\n print(len(resppart))\n if (len(resppart) == 0):\n cnt += 1\n else:\n lentot += len(resppart)\n cnt = 0\n resp += resppart\n if (cnt>200):\n break \n if (lentot>=amt):\n break\n print('Read {} bytes'.format(lentot))\n ser.timeout = 0.5\n return resp\n \ndef wtf(): \n return ser.read(64000).decode('ascii').rstrip()\n\ndef flush_gpib(): \n ser.write('++read eoi\\n'.encode('ascii'))\n time.sleep(3.1)\n return ser.read(512000).decode('ascii').rstrip()\n\ndef set_tm(tm=3000):\n ser.write('++read_tmo_ms {}\\n'.format(tm).encode('ascii'))\n\ndef send_command_with_resp(cmd):\n ser.write('{}\\n'.format(cmd).encode('ascii'))\n time.sleep(0.05)\n return read_line_gpib()\n \ndef send_command_no_resp(cmd):\n ser.write('{}\\n'.format(cmd).encode('ascii'))\n \ndef await_confirmation(tm):\n start = timer();\n while (read_line_gpib() != '1' and timer()-start < tm):\n time.sleep(1)\n print(\"Confirmation received in {:3.2f}s\".format(timer()-start))\n \ndef get_value_lists(text):\n ans3 = text.split('\\n')\n ilist = []\n qlist = []\n for s in ans3:\n if s != '':\n #print(s)\n strarr = s.split(',') \n ilist.append(float(strarr[0]))\n qlist.append(float(strarr[1]))\n return [ilist,qlist]\n\ndef get_current_vna_state():\n flush_gpib()\n start = float(send_command_with_resp('STAR?;'))\n npoints = int(float(send_command_with_resp('POIN?;')))\n span = float(send_command_with_resp('SPAN?;'))\n ifbw = int(float(send_command_with_resp('IFBW?;')))\n #pwr = float(send_command_with_resp('POWE?;'))\n pwr = -10.0\n print(\"Current settings: [{} to {}] with {} points, IFBW of {} at {} dBm\".format(start,start+span,npoints,ifbw,pwr))\n \ndef get_new_log_name():\n date = datetime.datetime.now()\n logroot = '/home/pi/Desktop/DataNX/vna/'\n logdir = logroot+date.strftime('%Y%m%d')\n #if not os.path.isdir(logdir):\n # print(\"Directory |{}| not there, making\".format(logdir))\n # os.mkdir(logdir)\n dirs = os.listdir(logroot)\n print(dirs)\n logfile = logdir+os.path.sep+date.strftime('vnasweep-%H-%M-%S.txt')\n print(logfile)\n \ndef save_to_log(data):\n fl = open(logfile, \"w\")\n time.sleep(0.05)\n fl.write('DATE: {}\\n'.format(date.strftime('%Y/%m/%d')))\n fl.write('TIME: {}\\n'.format(date.strftime('%H:%M:%S')))\n fl.write('POWER: 
{}\\n'.format(pwr))\n fl.write('IFBW: {}\\n'.format(ifbw))\n fl.write('STIMULUS, REAL, IMAGINARY\\n')\n fl.write('\\n')\n freqs = list(np.linspace(start,start+span,npoints))\n [il,ql] = get_value_lists(response.decode('ascii'))\n if len(freqs) != len(il):\n print(\"BAD\")\n else:\n for i in range(0,len(freqs)):\n try:\n fl.write('{:12.10E}, {:12.10E}, {:12.10E}\\n'.format(freqs[i],il[i],ql[i]))\n print(i)\n except Exception as e:\n print(e)", "_____no_output_____" ], [ "setup_adapter()\nget_current_vna_state()", "_____no_output_____" ], [ "class VNACONST:\n S11 = 'S11'\n S21 = 'S21'\n ", "_____no_output_____" ], [ "VNACONST.S21", "_____no_output_____" ], [ "# set measurement to S21\nsend_command_no_resp('S21;')\n# choose polar such that we get I/Q back\nsend_command_no_resp('POLA;')\n# select form 4 (ASCII) output\nsend_command_no_resp('FORM4;')\n# setup scan parameters\nsend_command_no_resp('IFBW10;')\nsend_command_no_resp('POIN1601;')\nsend_command_no_resp('POWE-10.0;')", "_____no_output_____" ], [ "#initiate sweep and wait for response\nsend_command_no_resp('OPC?;SING;')\nawait_confirmation(250.0)\nsend_command_no_resp('LOGM;')", "_____no_output_____" ], [ "send_command_no_resp('OUTPFORM;')\nresponse = read_all_gpib_withtarget(80050)", "_____no_output_____" ], [ "[il,ql] = get_value_lists(response.decode('ascii'))\ndatatoplot = [10.0*math.log10((il[i]**2+ql[i]**2)/(50.0*.001)) for i in range(0,len(il))]", "_____no_output_____" ] ] ]
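One caveat when re-running the notebook above from a fresh kernel: `setup_adapter()` assigns `ser = serial.Serial()` to a local variable, while every other helper (`read_line_gpib`, `send_command_no_resp`, ...) reads a module-level `ser`. It presumably worked because `ser` already existed in the original session. A minimal fix is to rebind the shared handle explicitly; this sketch keeps the behaviour otherwise unchanged.

```python
ser = None  # shared serial handle used by all of the GPIB helpers

def setup_adapter():
    global ser  # rebind the module-level handle instead of a function-local one
    ser = serial.Serial()
    ser.port = "/dev/ttyUSB2"
    ser.baudrate = 115200
    ser.timeout = 2.0
    ser.open()
    time.sleep(0.2)
    ser.flushInput()
    # '++' commands are Prologix-style adapter configuration, as in the cells above
    ser.write('++savecfg 0\n'.encode('ascii'))
    ser.write('++auto 0\n'.encode('ascii'))
    set_tm()
```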
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec59dcdfd9e27bb72cf3d8c4430b27afae7b0796
164,976
ipynb
Jupyter Notebook
week_3/TimeSeriesPredictor.ipynb
technogleb/ts_summer
bb2c92d541a7b361232ece5a9fb5931388604275
[ "MIT" ]
2
2021-05-17T09:45:16.000Z
2021-08-11T11:58:09.000Z
week_3/TimeSeriesPredictor.ipynb
technogleb/ts_summer
bb2c92d541a7b361232ece5a9fb5931388604275
[ "MIT" ]
null
null
null
week_3/TimeSeriesPredictor.ipynb
technogleb/ts_summer
bb2c92d541a7b361232ece5a9fb5931388604275
[ "MIT" ]
2
2021-05-24T18:50:59.000Z
2021-05-30T19:30:56.000Z
120.684711
53,957
0.786605
[ [ [ "%load_ext autoreload\n%autoreload 2", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "import sys\nfrom pathlib import Path\n\nsys.path.append(str(Path().cwd().parent))", "_____no_output_____" ], [ "from plotting import plot_ts", "_____no_output_____" ] ], [ [ "### Возьмем временной ряд", "_____no_output_____" ] ], [ [ "from load_dataset import Dataset", "_____no_output_____" ], [ "ds = Dataset('../data/dataset/')", "_____no_output_____" ], [ "ts = ds['daily-min-temperatures.csv']", "_____no_output_____" ] ], [ [ "### Cоздание инстанса", "_____no_output_____" ] ], [ [ "from model import TimeSeriesPredictor", "_____no_output_____" ] ], [ [ "в качестве обязательных параметров принимает частоту ряда в формате iso8601\nи количество лагов для построения модели - granularity и num_lags", "_____no_output_____" ] ], [ [ "predictor = TimeSeriesPredictor('P1D', 365)", "_____no_output_____" ] ], [ [ "также predictor'y можно передать параметр model, чтобы использовать конкретную модель для прогнозирования, модель может быть любым sklearn-совместимым эстиматором", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import GradientBoostingRegressor\n\npredictor = TimeSeriesPredictor('P1D', 365, model=GradientBoostingRegressor)", "_____no_output_____" ] ], [ [ "model по умолчанию - LinearRegression", "_____no_output_____" ] ], [ [ "predictor = TimeSeriesPredictor('P1D', 365)\npredictor.model", "_____no_output_____" ] ], [ [ "также для передачи дополнительной информации в модель вы можете использовать параметр mappers куда нужно передать ваши функции, принимающие timestamp и возвращающие для него значение вашего доп признака", "_____no_output_____" ] ], [ [ "def get_hour(timestamp):\n return timestamp.hour\n\nexternal_features = {\n 'hour': get_hour\n}\n\npredictor = TimeSeriesPredictor('P1D', 365, mappers=external_features)", "_____no_output_____" ] ], [ [ "Помимо основных параметровв модель также можно передать любое произвольно количество именованных аргументов **kwargs, которые будут переданы в конструктор `model`", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import GradientBoostingRegressor\n\npredictor = TimeSeriesPredictor('P1D', 365, model=GradientBoostingRegressor, max_depth=6, n_estimators=1000)", "_____no_output_____" ] ], [ [ "### Получение и установка параметров и функционал get/set params", "_____no_output_____" ], [ "классическое получение и задание аттрибутов", "_____no_output_____" ] ], [ [ "predictor.num_lags\npredictor.model\npredictor.model.learning_rate", "_____no_output_____" ], [ "predictor.model.learning_rate = 0.2", "_____no_output_____" ], [ "predictor.model.learning_rate", "_____no_output_____" ] ], [ [ "метод get_params получает словарь всех параметров модели включая sub-параметры модели, в таком случае они идут с префиксом\n`model__`", "_____no_output_____" ] ], [ [ "predictor.get_params()", "_____no_output_____" ] ], [ [ "метод set_params реализует обратную логику, принимая набор параметров для установки, включая sub-параметры модели, в таком случае их нужно указывать с префиксом `model__`", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\n\n\nparams = {\n 'num_lags': 730,\n 'model': RandomForestRegressor(), # обратите внимание, что в данном случае требуется создать инстанс модели!!\n 'model__max_depth': 3,\n 'model__n_estimators': 400,\n}\n\npredictor.set_params(**params) # параметры необходимо распаковать", "_____no_output_____" ], [ "predictor.get_params()", 
"_____no_output_____" ] ], [ [ "### Обучение модели", "_____no_output_____" ], [ "в данном случае на вход подается временной ряд в формате pd.Series", "_____no_output_____" ] ], [ [ "split_idx = int(len(ts) * 0.7)\n\nts_train, ts_test = ts[:split_idx], ts[split_idx:]", "_____no_output_____" ], [ "predictor.fit(ts_train)", "_____no_output_____" ] ], [ [ "### Получение out-of-sample прогноза", "_____no_output_____" ], [ "на вход подаем временной ряд от которого нужно сделать прогноз (длинной минимум num_lags) + горизонт прогнозирования", "_____no_output_____" ] ], [ [ "from sklearn.metrics import mean_squared_error as mse", "_____no_output_____" ], [ "preds = predictor.predict_next(ts_train, n_steps=len(ts_test))", "_____no_output_____" ], [ "plot_ts(ts_test, preds)", "_____no_output_____" ], [ "mse(ts_test, preds)", "_____no_output_____" ] ], [ [ "### Получение in-sample (когда каждая точка нам известна) прогноза", "_____no_output_____" ], [ "в таком случае мы должны вторым аргументов передать реальный тест, такм образом этим методом мы смотрим как бы мы предсказали ts_test если бы мы предсказывали на одну точку вперед, т.е. нам всегда известна реальная предыдущая точка", "_____no_output_____" ] ], [ [ "preds = predictor.predict_batch(ts_train, ts_test)", "_____no_output_____" ], [ "plot_ts(ts_test, preds)", "_____no_output_____" ], [ "mse(ts_test, preds)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
ec59fc32cff3fdb9b51d6a2dcb911e21b5ca64e5
516,261
ipynb
Jupyter Notebook
notebooks/XGBoost_region_analysis_cortex_striatum.ipynb
dash2927/diff_predictor
a189e7a4e0fd29d16cbd533008d1abb9a2e6898c
[ "MIT" ]
1
2020-10-20T23:32:25.000Z
2020-10-20T23:32:25.000Z
notebooks/XGBoost_region_analysis_cortex_striatum.ipynb
dash2927/diff_predictor
a189e7a4e0fd29d16cbd533008d1abb9a2e6898c
[ "MIT" ]
null
null
null
notebooks/XGBoost_region_analysis_cortex_striatum.ipynb
dash2927/diff_predictor
a189e7a4e0fd29d16cbd533008d1abb9a2e6898c
[ "MIT" ]
1
2020-10-07T22:54:10.000Z
2020-10-07T22:54:10.000Z
110.880799
57,032
0.809221
[ [ [ "# Region Based Data Analysis", "_____no_output_____" ], [ "The following notebook will go through prediction analysis for region based Multiple Particle Tracking (MPT) using OGD severity datasets for non-treated (NT) hippocampus, ganglia, thalamus, cortex, and striatum.\n", "_____no_output_____" ], [ "## Table of Contents\n\n\n [1. Load Data](#1.-load-data)<br />\n [2. Analysis](#2.-analysis)<br />\n [3. Modelling](#modelling)<br />\n [4. Evaluate Results](#evaluate-results)<br />", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## 1. Load Data", "_____no_output_____" ], [ "Loading feature dataset from OGD folders:\n\nThere are 15 total videos from each age group.", "_____no_output_____" ] ], [ [ "# libraries used\nimport boto3\nimport diff_classifier.aws as aws\nimport pandas as pd\nimport seaborn as sn\nimport numpy as np\nimport matplotlib.pyplot as pl\n\n\nfrom os import listdir, getcwd, chdir\nfrom os.path import isfile, join\nimport os\n\nfrom matplotlib import colors as plt_colors\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn import preprocessing\n\nimport xgboost as xgb\n# from xgboost import cv\n\nimport shap", "_____no_output_____" ], [ "workbookDir = getcwd()\nprint('Current Notebook Dir: ' + workbookDir)\nchdir(workbookDir) # Go to current workbook Dir\nchdir('..') # Go up one\nprint(f'Using current directory for loading data: {getcwd()}')\nworkbookDir = getcwd()", "Current Notebook Dir: C:\\Users\\david\\Documents\\nancework\\source\\diff_predictor\\notebooks\nUsing current directory for loading data: C:\\Users\\david\\Documents\\nancework\\source\\diff_predictor\n" ], [ "!pwd", "/c/Users/david/Documents/nancework/source/diff_predictor\n" ], [ "dataset_path = workbookDir + '/region_feature_folder/'\nfilelist = [f for f in listdir(dataset_path) if isfile(join(dataset_path, f)) and 'feat' in f and 'ganglia' not in f and 'hippocampus' not in f and 'thalamus' not in f]", "_____no_output_____" ], [ "filelist", "_____no_output_____" ], [ "fstats_tot = None\nvideo_num = 0\nfor filename in filelist:\n# try:\n fstats = pd.read_csv(dataset_path + filename, encoding = \"ISO-8859-1\", index_col='Unnamed: 0')\n print('{} size: {}'.format(filename, fstats.shape))\n if 'cortex' in filename:\n fstats['region'] = pd.Series(fstats.shape[0]*['cortex'], index=fstats.index)\n elif 'striatum' in filename:\n fstats['region'] = pd.Series(fstats.shape[0]*['striatum'], index=fstats.index)\n elif 'ganglia' in filename:\n fstats['region'] = pd.Series(fstats.shape[0]*['ganglia'], index=fstats.index)\n elif 'thalamus' in filename:\n fstats['region'] = pd.Series(fstats.shape[0]*['thalamus'], index=fstats.index)\n elif 'hippocampus' in filename:\n fstats['region'] = pd.Series(fstats.shape[0]*['hippocampus'], index=fstats.index)\n else:\n print('Error, no target')\n fstats['Video Number'] = pd.Series(fstats.shape[0]*[video_num], index=fstats.index)\n if fstats_tot is None:\n fstats_tot = fstats\n else:\n fstats_tot = fstats_tot.append(fstats, ignore_index=True)\n video_num += 1\n# except Exception:\n# print('Skipped!: {}'.format(filename))", "feat_NT_slice_1_cortex_vid_10.csv size: (4832, 67)\nfeat_NT_slice_1_cortex_vid_6.csv size: (7990, 67)\nfeat_NT_slice_1_cortex_vid_7.csv size: (4159, 67)\nfeat_NT_slice_1_cortex_vid_8.csv size: (1984, 67)\nfeat_NT_slice_1_cortex_vid_9.csv size: (6506, 
67)\nfeat_NT_slice_1_striatum_vid_1.csv size: (2431, 67)\nfeat_NT_slice_1_striatum_vid_2.csv size: (2240, 67)\nfeat_NT_slice_1_striatum_vid_3.csv size: (1536, 67)\nfeat_NT_slice_1_striatum_vid_4.csv size: (2177, 67)\nfeat_NT_slice_1_striatum_vid_5.csv size: (2169, 67)\nfeat_NT_slice_2_cortex_vid_1.csv size: (1388, 67)\nfeat_NT_slice_2_cortex_vid_2.csv size: (1784, 67)\nfeat_NT_slice_2_cortex_vid_3.csv size: (3520, 67)\nfeat_NT_slice_2_cortex_vid_4.csv size: (1429, 67)\nfeat_NT_slice_2_cortex_vid_5.csv size: (2210, 67)\nfeat_NT_slice_2_striatum_vid_1.csv size: (8314, 67)\nfeat_NT_slice_2_striatum_vid_2.csv size: (10500, 67)\nfeat_NT_slice_2_striatum_vid_3.csv size: (11355, 67)\nfeat_NT_slice_2_striatum_vid_4.csv size: (10237, 67)\nfeat_NT_slice_2_striatum_vid_5.csv size: (13938, 67)\n" ] ], [ [ "## 2. Analysis", "_____no_output_____" ], [ "The following columns are present within the downloaded datasets:", "_____no_output_____" ] ], [ [ "fstats_tot.columns", "_____no_output_____" ] ], [ [ "Many of these features are not useful for prediction or have data which may negatively impact classification. The following features and the target feature are defined in the following cell. We also remove any datapoints that are empty or infinite:", "_____no_output_____" ] ], [ [ "fstats_tot\nfeatures = [\n 'alpha', # Fitted anomalous diffusion alpha exponenet\n 'D_fit', # Fitted anomalous diffusion coefficient\n 'kurtosis', # Kurtosis of track\n 'asymmetry1', # Asymmetry of trajecory (0 for circular symmetric, 1 for linear)\n 'asymmetry2', # Ratio of the smaller to larger principal radius of gyration\n 'asymmetry3', # An asymmetric feature that accnts for non-cylindrically symmetric pt distributions\n 'AR', # Aspect ratio of long and short side of trajectory's minimum bounding rectangle\n 'elongation', # Est. 
of amount of extension of trajectory from centroid\n 'boundedness', # How much a particle with Deff is restricted by a circular confinement of radius r\n 'fractal_dim', # Measure of how complicated a self similar figure is\n 'trappedness', # Probability that a particle with Deff is trapped in a region\n 'efficiency', # Ratio of squared net displacement to the sum of squared step lengths\n 'straightness', # Ratio of net displacement to the sum of squared step lengths\n 'MSD_ratio', # MSD ratio of the track\n 'frames', # Number of frames the track spans\n 'Deff1', # Effective diffusion coefficient at 0.33 s\n 'Deff2', # Effective diffusion coefficient at 3.3 s\n# 'angle_mean', # Mean turning angle which is counterclockwise angle from one frame point to another\n# 'angle_mag_mean', # Magnitude of the turning angle mean\n# 'angle_var', # Variance of the turning angle\n# 'dist_tot', # Total distance of the trajectory\n# 'dist_net', # Net distance from first point to last point\n# 'progression', # Ratio of the net distance traveled and the total distance\n 'Mean alpha', \n 'Mean D_fit', \n 'Mean kurtosis', \n 'Mean asymmetry1', \n 'Mean asymmetry2',\n 'Mean asymmetry3', \n 'Mean AR',\n 'Mean elongation', \n 'Mean boundedness',\n 'Mean fractal_dim', \n 'Mean trappedness', \n 'Mean efficiency',\n 'Mean straightness', \n 'Mean MSD_ratio', \n 'Mean Deff1', \n 'Mean Deff2',\n ]\n\ntarget = 'region' # prediction target (y)\n\necm = fstats_tot[features + [target] + ['X'] + ['Y']]\necm = ecm[~ecm.isin([np.nan, np.inf, -np.inf]).any(1)] # Removing nan and inf data points", "_____no_output_____" ], [ "# Showing a piece of our data:\necm[target].unique()", "_____no_output_____" ] ], [ [ "Before prediction, the data needs to be balanced. As shown, the current dataset is highly imbalanced, with far more datapoints belonging to the striatum category than to the cortex category. 
The dataset is reduced using random sampling of each target category.", "_____no_output_____" ] ], [ [ "#--------------NOT-ADDED-----------------------------\ndef balance_data(df, target, **kwargs):\n if 'random_state' not in kwargs:\n random_state = 1\n else:\n random_state = kwargs['random_state']\n if isinstance(target, list):\n target = target[0]\n df_target = []\n bal_df = []\n for name in df[target].unique():\n df_target.append((name, df[df[target] == name]))\n print(f\"Ratio before data balance ({':'.join([str(i[0]) for i in df_target])}) = {':'.join([str(len(i[1])) for i in df_target])}\")\n for i in range(len(df_target)):\n ratio = min([len(i[1]) for i in df_target])/len(df_target[i][1])\n bal_df.append(df_target[i][1].sample(frac=ratio, random_state=random_state))\n print(f\"Ratio after balance ({':'.join([str(i[0]) for i in df_target])}) = {':'.join([str(len(i)) for i in bal_df])}\")\n return pd.concat(bal_df)\nbal_ecm = balance_data(ecm, target, random_state=1)", "Ratio before data balance (cortex:striatum) = 6575:17401\nRatio after balance (cortex:striatum) = 6575:6575\n" ], [ "# ecm_14 = ecm[ecm[target] == 14]\n# ecm_21 = ecm[ecm[target] == 21]\n# ecm_28 = ecm[ecm[target] == 28]\n# ecm_35 = ecm[ecm[target] == 35]\n# print(f\"Ratio before data balance (P14:P21:P28:P35) = {len(ecm_14)}:{len(ecm_21)}:{len(ecm_28)}:{len(ecm_35)}\")\n# ecm_list = [ecm_14, ecm_21, ecm_28, ecm_35]\n# for i in range(len(ecm_list)):\n# ratio = min([len(i) for i in ecm_list])/len(ecm_list[i])\n# ecm_list[i] = ecm_list[i].sample(frac=ratio, random_state=1)\n# print(f\"Ratio after balance (P14:P21:P28:P35) = {len(ecm_list[0])}:{len(ecm_list[1])}:{len(ecm_list[2])}:{len(ecm_list[3])}\")\n# bal_ecm = pd.concat(ecm_list)", "_____no_output_____" ] ], [ [ "## 3. Modelling", "_____no_output_____" ], [ "The model used for this study is extreme gradient boosting (XGBoost), an ensemble method built on boosted decision trees. This model was chosen due to its strong past results in competitions and research.", "_____no_output_____" ], [ "Because our feature analysis uses statistics of each trajectory's spatial surroundings, binning is required in order to avoid data leakage between training and testing. 
The following code implements binning and a checkerboard pattern to select certain bins for the training dataset.", "_____no_output_____" ] ], [ [ "# Using checkerboard binning for data split:\ndef checkerboard(size):\n rows = int(size/2)\n checks = list(range(0, size*size, size+1))\n for i in range(1, rows):\n ssize = size - 2*i\n for j in range(0, ssize):\n checks.append(2*i + (size+1)*j)\n for i in range(1, rows):\n ssize = size - 2*i\n for j in range(0, ssize):\n checks.append(size*size - 1 - (2*i + (size+1)*j))\n checks.sort()\n return checks", "_____no_output_____" ], [ "# Old method\n# bins = list(range(0, 2048+1, 256))\n# bal_ecm['binx'] = pd.cut(bal_ecm.X, bins, labels=[0, 1, 2, 3, 4, 5, 6, 7], include_lowest=True)\n# bal_ecm['biny'] = pd.cut(bal_ecm.Y, bins, labels=[0, 1, 2, 3, 4, 5, 6, 7], include_lowest=True)\n# bal_ecm['bins'] = 8*bal_ecm['binx'].astype(np.int8) + bal_ecm['biny'].astype(np.int8)\n# bal_ecm = bal_ecm[np.isfinite(bal_ecm['bins'])]\n# bal_ecm['bins'] = bal_ecm['bins'].astype(int)\n\n# cols = bal_ecm.columns.tolist()\n# cols = cols[-3:] + cols[:-3]\n# bal_ecm = bal_ecm[cols]", "_____no_output_____" ], [ "# def bin_data(data, ):\n# pass\n\nresolution = 128\nassert not 2048%resolution and resolution >= 128, \"resolution needs to be a factor of 2048 and > 128\"\nbins = list(range(0, 2048+1, resolution))\nbin_labels = [int(i/resolution) for i in bins][:-1]\nbal_ecm['binx'] = pd.cut(bal_ecm.X, bins, labels=bin_labels, include_lowest=True)\nbal_ecm['biny'] = pd.cut(bal_ecm.Y, bins, labels=bin_labels, include_lowest=True)\nbal_ecm['bins'] = (len(bins)-1)*bal_ecm['binx'].astype(np.int32) + bal_ecm['biny'].astype(np.int32)\nbal_ecm = bal_ecm[np.isfinite(bal_ecm['bins'])]\nbal_ecm['bins'] = bal_ecm['bins'].astype(int)\n\n# cols = bal_ecm.columns.tolist()\n# cols = cols[-3:] + cols[:-3]\n# bal_ecm = bal_ecm[cols]", "_____no_output_____" ], [ "# Checkerboard method\n# seed = 1234\n# np.random.seed(seed)\n# test_val_split = 0.5\n\n# le = preprocessing.LabelEncoder()\n# bal_ecm['encoded_target'] = le.fit_transform(bal_ecm[target])\n# X_train = bal_ecm[~bal_ecm.bins.isin(checkerboard((len(bins)-1)))].reset_index()\n# X_test_val = bal_ecm[bal_ecm.bins.isin(checkerboard((len(bins)-1)))].reset_index()\n# y_train = X_train['encoded_target']\n# X_val, X_test = train_test_split(X_test_val, test_size=test_val_split, random_state=seed)\n# y_test = X_test['encoded_target']\n# y_val = X_val['encoded_target']\n# dtrain = xgb.DMatrix(X_train[features], label=y_train)\n# dtest = xgb.DMatrix(X_test[features], label=y_test)\n# dval = xgb.DMatrix(X_val[features], label=y_val)", "_____no_output_____" ], [ "# Regular split\n\nseed = 1234\nnp.random.seed(seed)\ntrain_split = 0.8\ntest_split = 0.5\n\nle = preprocessing.LabelEncoder()\nbal_ecm['encoded_target'] = le.fit_transform(bal_ecm[target])\ntraining_bins = np.random.choice(bal_ecm.bins.unique(), int(len(bal_ecm.bins.unique())*train_split), replace=False)\nX_train = bal_ecm[bal_ecm.bins.isin(training_bins)]\nX_test_val = bal_ecm[~bal_ecm.bins.isin(training_bins)]\nX_val, X_test = train_test_split(X_test_val, test_size=test_split, random_state=seed)\ny_train = X_train['encoded_target']\ny_test = X_test['encoded_target']\ny_val = X_val['encoded_target']\ndtrain = xgb.DMatrix(X_train[features], label=y_train)\ndtest = xgb.DMatrix(X_test[features], label=y_test)\ndval = xgb.DMatrix(X_val[features], label=y_val)", "_____no_output_____" ], [ "#Check lengths of datasets:\ndef get_lengths(df, X_train, X_test, X_val=None):\n print(f'Tot 
before split: {len(df)}')\n print(f'Training: {len(X_train)} ({len(X_train)/len(bal_ecm):.3f}%)')\n print(f'Testing: {len(X_test)} ({len(X_test)/len(bal_ecm):.3f}%)')\n try:\n print(f'Evaluation: {len(X_val)} ({len(X_val)/len(bal_ecm):.3f}%)')\n except:\n pass\nget_lengths(bal_ecm, X_train, X_test, X_val)", "Tot before split: 13150\nTraining: 10232 (0.778%)\nTesting: 1459 (0.111%)\nEvaluation: 1459 (0.111%)\n" ], [ "from xgboost.libpath import find_lib_path\nimport ctypes", "_____no_output_____" ], [ "lib_path = find_lib_path()\nlib = ctypes.cdll.LoadLibrary(lib_path[0])", "_____no_output_____" ] ], [ [ "Model parameters are based on the best possible XGBoost parameters to minimize logloss error.", "_____no_output_____" ] ], [ [ "# Init_params for binary logistic classification\ninit_param = {'max_depth': 3,\n 'eta': 0.005,\n 'min_child_weight': 0,\n 'verbosity': 0,\n 'objective': 'binary:logistic',\n 'silent': 'True',\n 'gamma': 5,\n 'subsample': 0.15,\n 'colsample_bytree': 0.8,\n 'eval_metric': 'logloss'}", "_____no_output_____" ], [ "# from xgboost import XGBClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n# model = XGBClassifier()", "_____no_output_____" ], [ "# model.predict(X_test[features])", "_____no_output_____" ], [ "features", "_____no_output_____" ], [ "from xgboost.training import CVPack\nfrom xgboost import callback\nfrom xgboost.core import CallbackEnv\nfrom xgboost.core import EarlyStopException\n\ndef cv(params, X_train, y_train, features=None, num_boost_round=20, nfold=3, stratified=False, folds=None,\n metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None,\n fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,\n seed=0, callbacks=None, shuffle=True):\n # pylint: disable = invalid-name\n \"\"\"Cross-validation with given parameters.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : DMatrix\n Data to be trained.\n num_boost_round : int\n Number of boosting iterations.\n nfold : int\n Number of folds in CV.\n stratified : bool\n Perform stratified sampling.\n folds : a KFold or StratifiedKFold instance or list of fold indices\n Sklearn KFolds or StratifiedKFolds object.\n Alternatively may explicitly pass sample indices for each fold.\n For ``n`` folds, **folds** should be a length ``n`` list of tuples.\n Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used\n as the training samples for the ``n`` th fold and ``out`` is a list of\n indices to be used as the testing samples for the ``n`` th fold.\n metrics : string or list of strings\n Evaluation metrics to be watched in CV.\n obj : function\n Custom objective function.\n feval : function\n Custom evaluation function.\n maximize : bool\n Whether to maximize feval.\n early_stopping_rounds: int\n Activates early stopping. 
Cross-Validation metric (average of validation\n metric computed over CV folds) needs to improve at least once in\n every **early_stopping_rounds** round(s) to continue training.\n The last entry in the evaluation history will represent the best iteration.\n If there's more than one metric in the **eval_metric** parameter given in\n **params**, the last metric will be used for early stopping.\n fpreproc : function\n Preprocessing function that takes (dtrain, dtest, param) and returns\n transformed versions of those.\n as_pandas : bool, default True\n Return pd.DataFrame when pandas is installed.\n If False or pandas is not installed, return np.ndarray\n verbose_eval : bool, int, or None, default None\n Whether to display the progress. If None, progress will be displayed\n when np.ndarray is returned. If True, progress will be displayed at\n boosting stage. If an integer is given, progress will be displayed\n at every given `verbose_eval` boosting stage.\n show_stdv : bool, default True\n Whether to display the standard deviation in progress.\n Results are not affected, and always contains std.\n seed : int\n Seed used to generate the folds (passed to numpy.random.seed).\n callbacks : list of callback functions\n List of callback functions that are applied at end of each iteration.\n It is possible to use predefined callbacks by using\n :ref:`Callback API <callback_api>`.\n Example:\n\n .. code-block:: python\n\n [xgb.callback.reset_learning_rate(custom_rates)]\n shuffle : bool\n Shuffle data before creating folds.\n\n Returns\n -------\n evaluation history : list(string)\n \"\"\"\n if stratified is True and not SKLEARN_INSTALLED:\n raise XGBoostError('sklearn needs to be installed in order to use stratified cv')\n if isinstance(metrics, str):\n metrics = [metrics]\n if not features:\n features = X_train.columns\n if isinstance(params, list):\n _metrics = [x[1] for x in params if x[0] == 'eval_metric']\n params = dict(params)\n if 'eval_metric' in params:\n params['eval_metric'] = _metrics\n else:\n params = dict((k, v) for k, v in params.items())\n if (not metrics) and 'eval_metric' in params:\n if isinstance(params['eval_metric'], list):\n metrics = params['eval_metric']\n else:\n metrics = [params['eval_metric']]\n params.pop(\"eval_metric\", None)\n results = {}\n # create folds in data\n cvfolds, wt_list = mknfold(X_train, y_train, nfold, params, metrics, features)\n \n # setup callbacks\n callbacks = [] if callbacks is None else callbacks\n if early_stopping_rounds is not None:\n callbacks.append(callback.early_stop(early_stopping_rounds,\n maximize=maximize,\n verbose=False))\n if isinstance(verbose_eval, bool) and verbose_eval:\n callbacks.append(callback.print_evaluation(show_stdv=show_stdv))\n elif isinstance(verbose_eval, int):\n callbacks.append(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))\n callbacks_before_iter = [\n cb for cb in callbacks if\n cb.__dict__.get('before_iteration', False)]\n callbacks_after_iter = [\n cb for cb in callbacks if\n not cb.__dict__.get('before_iteration', False)]\n for i in range(num_boost_round):\n for cb in callbacks_before_iter:\n cb(CallbackEnv(model=None,\n cvfolds=cvfolds,\n iteration=i,\n begin_iteration=0,\n end_iteration=num_boost_round,\n rank=0,\n evaluation_result_list=None))\n for fold in cvfolds:\n fold.update(i, obj)\n res = aggcv([f.eval(i, feval) for f in cvfolds], wt_list)\n for key, mean, std in res:\n if key + '-mean' not in results:\n results[key + '-mean'] = []\n if key + '-std' not in results:\n results[key + 
'-std'] = []\n results[key + '-mean'].append(mean)\n results[key + '-std'].append(std)\n try:\n for cb in callbacks_after_iter:\n cb(CallbackEnv(model=None,\n cvfolds=cvfolds,\n iteration=i,\n begin_iteration=0,\n end_iteration=num_boost_round,\n rank=0,\n evaluation_result_list=res))\n except EarlyStopException as e:\n for k in results:\n results[k] = results[k][:(e.best_iteration + 1)]\n break\n \n if as_pandas:\n try:\n import pandas as pd\n results = pd.DataFrame.from_dict(results)\n except ImportError:\n pass\n return results", "_____no_output_____" ], [ "def bin_fold(X_train, nfold):\n bin_list = [X_train[X_train['bins'] == i_bin].index.to_numpy() for i_bin in X_train.bins.unique()]\n bin_list = sorted(bin_list, key=len)\n i = 0\n while(len(bin_list) > nfold):\n if (i >= len(bin_list)-1):\n i = 0\n bin_list[i] = np.concatenate([bin_list[i], bin_list.pop()])\n i += 1\n wt_list = [len(i)/sum(len(s) for s in bin_list) for i in bin_list]\n return bin_list, wt_list\n\ndef mknfold(X_train, y_train, nfold, param, evals=(), features=None):\n if not features:\n features = X_train.columns\n dall = xgb.DMatrix(X_train[features], label=y_train)\n out_idset, wt_list = bin_fold(X_train, nfold)\n in_idset = [np.concatenate([out_idset[i] for i in range(nfold) if k != i]) for k in range(nfold)]\n evals = list(evals)\n ret = []\n for k in range(nfold):\n # perform the slicing using the indexes determined by the above methods\n x_train_snip = X_train.loc[in_idset[k]][features]\n y_train_snip = X_train.loc[in_idset[k]]['encoded_target'] \n x_test_snip = X_train.loc[out_idset[k]][features]\n y_test_snip = X_train.loc[out_idset[k]]['encoded_target']\n dtrain = xgb.DMatrix(x_train_snip, label=y_train_snip)\n dtest = xgb.DMatrix(x_test_snip, label=y_test_snip)\n tparam = param\n plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]\n ret.append(CVPack(dtrain, dtest, plst))\n return ret, wt_list", "_____no_output_____" ], [ "from xgboost.core import STRING_TYPES\n\ndef aggcv(rlist, wt_list):\n # pylint: disable=invalid-name\n \"\"\"\n Aggregate cross-validation results.\n\n If verbose_eval is true, progress is displayed in every call. 
If\n verbose_eval is an integer, progress will only be displayed every\n `verbose_eval` trees, tracked via trial.\n \"\"\"\n cvmap = {}\n idx = rlist[0].split()[0]\n for line in rlist:\n arr = line.split()\n assert idx == arr[0]\n for metric_idx, it in enumerate(arr[1:]):\n if not isinstance(it, STRING_TYPES):\n it = it.decode()\n k, v = it.split(':')\n if (metric_idx, k) not in cvmap:\n cvmap[(metric_idx, k)] = []\n cvmap[(metric_idx, k)].append(float(v))\n msg = idx\n results = []\n for (metric_idx, k), v in sorted(cvmap.items(), key=lambda x: x[0][0]):\n v = np.array(v)\n if not isinstance(msg, STRING_TYPES):\n msg = msg.decode()\n mean = np.average(v, weights=wt_list)\n std = np.average((v-mean)**2, weights=wt_list)\n results.extend([(k, mean, std)])\n return results", "_____no_output_____" ], [ "cv(init_param, X_train, y_train, features, num_boost_round=10, nfold=5, early_stopping_rounds=3, metrics={'logloss', 'error'})", "_____no_output_____" ], [ "from scipy.stats import skewnorm\na=10\ndata = [round(i, 3) for i in skewnorm.rvs(a, size=10, random_state=seed)*0.3]\ndata", "_____no_output_____" ], [ "seed = 1234\nnp.random.seed(seed)", "_____no_output_____" ], [ "import operator\nimport numpy as np\n\ndef xgb_paramsearch(X_train, y_train, features, init_params, nfold=5, num_boost_round=2000, early_stopping_rounds=3, metrics=None, **kwargs):\n params = {**init_params}\n if 'use_gpu' in kwargs and kwargs['use_gpu']:\n # GPU integration will cut cv time in ~half:\n params.update({'gpu_id' : 0,\n 'tree_method': 'gpu_hist',\n 'predictor': 'gpu_predictor'})\n if 'metrics' not in kwargs:\n metrics = {params['eval_metric']}\n else:\n metrics.add(params['eval_metric'])\n if params['eval_metric'] in ['map', 'auc', 'aucpr']:\n eval_f = operator.gt\n else: \n eval_f = operator.lt\n if 'early_break' not in kwargs:\n early_break = 5\n else: \n early_break = kwargs['early_break']\n if 'thresh' not in kwargs:\n thresh = 0.01\n else: \n thresh = kwargs['thresh']\n if 'seed' not in kwargs:\n seed = 1111\n else: \n seed = kwargs['seed']\n best_param = params\n best_model = cv(params, \n X_train, \n y_train, \n features, \n nfold=nfold, \n num_boost_round=num_boost_round, \n early_stopping_rounds=early_stopping_rounds, \n metrics=metrics)\n best_eval = best_model[f\"test-{params['eval_metric']}-mean\"].min()\n best_boost_rounds = best_model[f\"test-{params['eval_metric']}-mean\"].idxmin()\n \n def _gs_helper(var1n, var2n, best_model, best_param, best_eval, best_boost_rounds):\n local_param = {**best_param}\n for var1, var2 in gs_params:\n print(f\"Using CV with {var1n}={{{var1}}}, {var2n}={{{var2}}}\")\n local_param[var1n] = var1\n local_param[var2n] = var2\n cv_model = cv(local_param, \n X_train, \n y_train, \n features, \n nfold=nfold, \n num_boost_round= num_boost_round, \n early_stopping_rounds=early_stopping_rounds, \n metrics=metrics)\n cv_eval = cv_model[f\"test-{local_param['eval_metric']}-mean\"].min()\n boost_rounds = cv_model[f\"test-{local_param['eval_metric']}-mean\"].idxmin()\n if(eval_f(cv_eval, best_eval)):\n best_model = cv_model\n best_param[var1n] = var1\n best_param[var2n] = var2\n best_eval = cv_eval\n best_boost_rounds = boost_rounds\n print(f\"New best param found: \"\n f\"{local_param['eval_metric']} = {{{best_eval}}}, \"\n f\"boost_rounds = {{{best_boost_rounds}}}\")\n return best_model, best_param, best_eval, best_boost_rounds\n \n while(early_break >= 0):\n np.random.seed(seed)\n best_eval_init = best_eval\n gs_params = {\n (subsample, colsample)\n for subsample in 
np.random.choice([i/10. for i in range(5,11)], 3)\n for colsample in np.random.choice([i/10. for i in range(5,11)], 3)\n }\n best_model, best_param, best_eval, best_boost_rounds = _gs_helper('subsample', \n 'colsample_bytree', \n best_model, \n best_param, \n best_eval, \n best_boost_rounds)\n gs_params = {\n (max_depth, min_child_weight)\n for max_depth in [10] + list(np.random.randint(1, 10, 3))\n for min_child_weight in [0, 10] + list(np.random.randint(0, 10, 3))\n }\n best_model, best_param, best_eval, best_boost_rounds = _gs_helper('max_depth', \n 'min_child_weight', \n best_model, \n best_param, \n best_eval, \n best_boost_rounds)\n gs_params = {\n (eta, gamma)\n for eta in np.random.choice([.005, .01, .05, .1, .2, .3], 3)\n for gamma in [0] + list(np.random.choice([0.01, 0.001, 0.2, 0.5, 1.0, 2.0, 3.0, 5.0, 10.0], 3))\n }\n best_model, best_param, best_eval, best_boost_rounds = _gs_helper('eta', \n 'gamma', \n best_model, \n best_param, \n best_eval, \n best_boost_rounds)\n if (abs(best_eval_init - best_eval) < thresh):\n early_break-=1\n seed+=1\n return best_model, best_param, best_eval, best_boost_rounds\n", "_____no_output_____" ], [ "best_model, best_param, best_eval, best_boost_rounds = xgb_paramsearch(X_train, y_train, features, init_params=init_param, nfold=5, num_boost_round=2000, early_stopping_rounds=3, metrics={'logloss', 'error'}, use_gpu='True')", "Using CV with subsample={0.9}, colsample_bytree={0.9}\nNew best param found: logloss = {0.21495537353401092}, boost_rounds = {1755}\nUsing CV with subsample={0.9}, colsample_bytree={0.7}\nUsing CV with subsample={0.6}, colsample_bytree={0.5}\nNew best param found: logloss = {0.21234662412040659}, boost_rounds = {1999}\nUsing CV with subsample={0.6}, colsample_bytree={0.9}\nNew best param found: logloss = {0.20908726641907738}, boost_rounds = {1934}\nUsing CV with subsample={0.6}, colsample_bytree={0.7}\nNew best param found: logloss = {0.2090441665363565}, boost_rounds = {1990}\nUsing CV with subsample={1.0}, colsample_bytree={0.5}\nUsing CV with subsample={1.0}, colsample_bytree={0.8}\nUsing CV with subsample={1.0}, colsample_bytree={0.9}\nUsing CV with max_depth={10}, min_child_weight={8}\nNew best param found: logloss = {0.20562004182955432}, boost_rounds = {1200}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={4}, min_child_weight={10}\nUsing CV with max_depth={7}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={4}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={4}, min_child_weight={5}\nUsing CV with max_depth={9}, min_child_weight={8}\nUsing CV with max_depth={4}, min_child_weight={0}\nUsing CV with max_depth={7}, min_child_weight={7}\nUsing CV with max_depth={9}, min_child_weight={10}\nUsing CV with max_depth={7}, min_child_weight={10}\nUsing CV with max_depth={9}, min_child_weight={4}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={7}, min_child_weight={2}\nUsing CV with max_depth={4}, min_child_weight={1}\nUsing CV with max_depth={10}, min_child_weight={9}\nUsing CV with max_depth={7}, min_child_weight={9}\nUsing CV with eta={0.05}, gamma={0}\nNew best param found: logloss = {0.19985461004691163}, boost_rounds = {128}\nUsing CV with eta={0.3}, gamma={0}\nUsing CV with eta={0.05}, gamma={0.2}\nNew best param found: logloss = {0.1987166149335418}, boost_rounds = {135}\nUsing CV with eta={0.005}, gamma={0.01}\nUsing CV with eta={0.005}, gamma={5.0}\nUsing CV with eta={0.3}, gamma={5.0}\nUsing CV with 
eta={0.05}, gamma={10.0}\nUsing CV with eta={0.3}, gamma={3.0}\nUsing CV with eta={0.3}, gamma={2.0}\nUsing CV with eta={0.005}, gamma={0}\nUsing CV with subsample={0.9}, colsample_bytree={1.0}\nUsing CV with subsample={0.9}, colsample_bytree={0.7}\nUsing CV with subsample={0.9}, colsample_bytree={0.6}\nUsing CV with subsample={0.9}, colsample_bytree={0.5}\nUsing CV with subsample={0.8}, colsample_bytree={0.5}\nUsing CV with subsample={0.8}, colsample_bytree={0.7}\nUsing CV with subsample={0.8}, colsample_bytree={0.9}\nUsing CV with max_depth={9}, min_child_weight={1}\nUsing CV with max_depth={4}, min_child_weight={8}\nUsing CV with max_depth={10}, min_child_weight={6}\nUsing CV with max_depth={4}, min_child_weight={0}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={4}, min_child_weight={9}\nUsing CV with max_depth={1}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={1}\nUsing CV with max_depth={4}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={4}\nUsing CV with max_depth={4}, min_child_weight={5}\nUsing CV with max_depth={9}, min_child_weight={10}\nUsing CV with max_depth={1}, min_child_weight={9}\nUsing CV with max_depth={1}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={9}, min_child_weight={2}\nUsing CV with max_depth={9}, min_child_weight={9}\nUsing CV with max_depth={1}, min_child_weight={8}\nUsing CV with max_depth={10}, min_child_weight={2}\nUsing CV with eta={0.1}, gamma={1.0}\nUsing CV with eta={0.01}, gamma={2.0}\nUsing CV with eta={0.1}, gamma={0}\nUsing CV with eta={0.01}, gamma={0.2}\nUsing CV with eta={0.01}, gamma={0.5}\nUsing CV with eta={0.01}, gamma={0}\nUsing CV with eta={0.01}, gamma={0.001}\nUsing CV with eta={0.01}, gamma={10.0}\nUsing CV with eta={0.1}, gamma={3.0}\nUsing CV with subsample={0.9}, colsample_bytree={1.0}\nUsing CV with subsample={1.0}, colsample_bytree={0.9}\nUsing CV with subsample={0.9}, colsample_bytree={0.5}\nUsing CV with subsample={0.9}, colsample_bytree={0.8}\nUsing CV with subsample={1.0}, colsample_bytree={0.6}\nUsing CV with subsample={1.0}, colsample_bytree={0.8}\nUsing CV with subsample={1.0}, colsample_bytree={0.7}\nUsing CV with max_depth={9}, min_child_weight={1}\nUsing CV with max_depth={3}, min_child_weight={0}\nUsing CV with max_depth={3}, min_child_weight={7}\nUsing CV with max_depth={2}, min_child_weight={5}\nUsing CV with max_depth={10}, min_child_weight={3}\nUsing CV with max_depth={10}, min_child_weight={8}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={3}, min_child_weight={3}\nUsing CV with max_depth={10}, min_child_weight={7}\nUsing CV with max_depth={3}, min_child_weight={10}\nUsing CV with max_depth={2}, min_child_weight={2}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={2}, min_child_weight={10}\nUsing CV with max_depth={9}, min_child_weight={10}\nUsing CV with max_depth={9}, min_child_weight={6}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={9}, min_child_weight={2}\nUsing CV with max_depth={2}, min_child_weight={0}\nUsing CV with max_depth={2}, min_child_weight={4}\nUsing CV with eta={0.005}, gamma={3.0}\nUsing CV with eta={0.3}, gamma={0}\nUsing CV with eta={0.3}, gamma={0.2}\nUsing CV with eta={0.005}, gamma={0.2}\nUsing CV with eta={0.005}, gamma={0.01}\nUsing CV with eta={0.005}, gamma={1.0}\nUsing CV with eta={0.005}, gamma={0}\nUsing CV with 
subsample={0.9}, colsample_bytree={1.0}\nUsing CV with subsample={0.8}, colsample_bytree={0.7}\nUsing CV with subsample={0.6}, colsample_bytree={0.6}\nUsing CV with subsample={0.8}, colsample_bytree={0.6}\nUsing CV with subsample={0.6}, colsample_bytree={0.7}\nUsing CV with subsample={0.6}, colsample_bytree={0.5}\nNew best param found: logloss = {0.1983395301016419}, boost_rounds = {153}\nUsing CV with subsample={0.8}, colsample_bytree={0.5}\nUsing CV with subsample={0.9}, colsample_bytree={0.6}\nUsing CV with subsample={0.9}, colsample_bytree={0.7}\nUsing CV with max_depth={5}, min_child_weight={9}\nUsing CV with max_depth={6}, min_child_weight={9}\nUsing CV with max_depth={9}, min_child_weight={1}\nUsing CV with max_depth={10}, min_child_weight={3}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={6}, min_child_weight={7}\nUsing CV with max_depth={6}, min_child_weight={10}\nUsing CV with max_depth={5}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={9}\nNew best param found: logloss = {0.1975226303752932}, boost_rounds = {156}\nUsing CV with max_depth={5}, min_child_weight={4}\nUsing CV with max_depth={9}, min_child_weight={3}\nUsing CV with max_depth={6}, min_child_weight={0}\nUsing CV with max_depth={9}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={5}\nUsing CV with max_depth={5}, min_child_weight={10}\nUsing CV with max_depth={6}, min_child_weight={8}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={9}, min_child_weight={2}\nUsing CV with max_depth={5}, min_child_weight={7}\nUsing CV with eta={0.2}, gamma={1.0}\nUsing CV with eta={0.2}, gamma={0}\nUsing CV with eta={0.3}, gamma={0}\nUsing CV with eta={0.2}, gamma={0.5}\nUsing CV with eta={0.3}, gamma={0.01}\nUsing CV with eta={0.3}, gamma={5.0}\nUsing CV with eta={0.3}, gamma={10.0}\nUsing CV with eta={0.3}, gamma={3.0}\nUsing CV with subsample={0.5}, colsample_bytree={1.0}\nUsing CV with subsample={0.5}, colsample_bytree={0.5}\nUsing CV with subsample={0.5}, colsample_bytree={0.8}\nUsing CV with subsample={0.5}, colsample_bytree={0.7}\nUsing CV with subsample={0.5}, colsample_bytree={0.9}\nUsing CV with subsample={0.7}, colsample_bytree={0.6}\nUsing CV with subsample={0.7}, colsample_bytree={0.7}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={3}\nUsing CV with max_depth={7}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={7}, min_child_weight={6}\nUsing CV with max_depth={10}, min_child_weight={6}\n" ],
[ "param['alpha'] = 50", "_____no_output_____" ],
[ "cv_model[f\"test-merror-mean\"].min()", "_____no_output_____" ],
[ "best_param = {'max_depth': 7,\n 'eta': 0.05,\n 'min_child_weight': 9,\n 'verbosity': 0,\n 'objective': 'binary:logistic',\n 'silent': 'True',\n 'gamma': 0.2,\n 'subsample': 0.6,\n 'colsample_bytree': 0.5,\n 'eval_metric': 'logloss',\n 'gpu_id': 0,\n 'tree_method': 'gpu_hist',\n 'predictor': 'gpu_predictor'}", "_____no_output_____" ],
[ "best_boost_rounds = 178", "_____no_output_____" ],
[ "# Scratch notes:\n# *** only use PEG (try to find 100nm)\n# *** maybe look at different features (poor distributions)\n#     heterogeneous in different ways\n#     different features are responsible for the accuracies\n# *** think about how to present code/results!", "_____no_output_____" ],
[ "evals = [(dtrain, 'train'), (dval, 'eval')]\nnum_round = best_boost_rounds\n# retrain with the tuned parameters, early-stopping on the validation fold\nbst = xgb.train(best_param, dtrain, num_round, 
evals, early_stopping_rounds=3, )\n\n\n######\nlabel = dtest.get_label()\nypred1 = bst.predict(dtest)\n# by default, we predict using all the trees\nalpha = 0.62\npred = [0 if i < alpha else 1 for i in ypred1]\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, pred))\n", "[0]\ttrain-logloss:0.66096\teval-logloss:0.66415\nMultiple eval metrics have been passed: 'eval-logloss' will be used for early stopping.\n\nWill train until eval-logloss hasn't improved in 3 rounds.\n[1]\ttrain-logloss:0.63409\teval-logloss:0.63898\n[2]\ttrain-logloss:0.60689\teval-logloss:0.61444\n[3]\ttrain-logloss:0.57992\teval-logloss:0.59109\n[4]\ttrain-logloss:0.55720\teval-logloss:0.57081\n[5]\ttrain-logloss:0.53419\teval-logloss:0.55110\n[6]\ttrain-logloss:0.51256\teval-logloss:0.53224\n[7]\ttrain-logloss:0.49381\teval-logloss:0.51522\n[8]\ttrain-logloss:0.47719\teval-logloss:0.50082\n[9]\ttrain-logloss:0.46127\teval-logloss:0.48681\n[10]\ttrain-logloss:0.44381\teval-logloss:0.47183\n[11]\ttrain-logloss:0.42847\teval-logloss:0.45926\n[12]\ttrain-logloss:0.41541\teval-logloss:0.44833\n[13]\ttrain-logloss:0.40073\teval-logloss:0.43644\n[14]\ttrain-logloss:0.38986\teval-logloss:0.42731\n[15]\ttrain-logloss:0.37715\teval-logloss:0.41623\n[16]\ttrain-logloss:0.36523\teval-logloss:0.40740\n[17]\ttrain-logloss:0.35411\teval-logloss:0.39920\n[18]\ttrain-logloss:0.34310\teval-logloss:0.39036\n[19]\ttrain-logloss:0.33471\teval-logloss:0.38334\n[20]\ttrain-logloss:0.32682\teval-logloss:0.37653\n[21]\ttrain-logloss:0.31813\teval-logloss:0.36913\n[22]\ttrain-logloss:0.31082\teval-logloss:0.36357\n[23]\ttrain-logloss:0.30223\teval-logloss:0.35635\n[24]\ttrain-logloss:0.29307\teval-logloss:0.34974\n[25]\ttrain-logloss:0.28598\teval-logloss:0.34393\n[26]\ttrain-logloss:0.27761\teval-logloss:0.33683\n[27]\ttrain-logloss:0.27006\teval-logloss:0.33129\n[28]\ttrain-logloss:0.26472\teval-logloss:0.32721\n[29]\ttrain-logloss:0.25911\teval-logloss:0.32279\n[30]\ttrain-logloss:0.25410\teval-logloss:0.31836\n[31]\ttrain-logloss:0.24859\teval-logloss:0.31423\n[32]\ttrain-logloss:0.24414\teval-logloss:0.31040\n[33]\ttrain-logloss:0.23960\teval-logloss:0.30643\n[34]\ttrain-logloss:0.23320\teval-logloss:0.30310\n[35]\ttrain-logloss:0.22793\teval-logloss:0.29915\n[36]\ttrain-logloss:0.22342\teval-logloss:0.29633\n[37]\ttrain-logloss:0.21896\teval-logloss:0.29290\n[38]\ttrain-logloss:0.21496\teval-logloss:0.28910\n[39]\ttrain-logloss:0.21217\teval-logloss:0.28696\n[40]\ttrain-logloss:0.20936\teval-logloss:0.28432\n[41]\ttrain-logloss:0.20534\teval-logloss:0.28112\n[42]\ttrain-logloss:0.20147\teval-logloss:0.27887\n[43]\ttrain-logloss:0.19827\teval-logloss:0.27669\n[44]\ttrain-logloss:0.19454\teval-logloss:0.27381\n[45]\ttrain-logloss:0.19080\teval-logloss:0.27209\n[46]\ttrain-logloss:0.18713\teval-logloss:0.26946\n[47]\ttrain-logloss:0.18381\teval-logloss:0.26748\n[48]\ttrain-logloss:0.18121\teval-logloss:0.26665\n[49]\ttrain-logloss:0.17857\teval-logloss:0.26455\n[50]\ttrain-logloss:0.17652\teval-logloss:0.26381\n[51]\ttrain-logloss:0.17391\teval-logloss:0.26247\n[52]\ttrain-logloss:0.17046\teval-logloss:0.26055\n[53]\ttrain-logloss:0.16751\teval-logloss:0.25920\n[54]\ttrain-logloss:0.16501\teval-logloss:0.25737\n[55]\ttrain-logloss:0.16240\teval-logloss:0.25603\n[56]\ttrain-logloss:0.16045\teval-logloss:0.25479\n[57]\ttrain-logloss:0.15800\teval-logloss:0.25293\n[58]\ttrain-logloss:0.15594\teval-logloss:0.25116\n[59]\ttrain-logloss:0.15399\teval-logloss:0.24963\n[60]\ttrain-logloss:0.15209\teval-logloss:0.24808\n[61]\ttrain-logloss:0.15091\t
eval-logloss:0.24761\n[62]\ttrain-logloss:0.14887\teval-logloss:0.24625\n[63]\ttrain-logloss:0.14653\teval-logloss:0.24585\n[64]\ttrain-logloss:0.14473\teval-logloss:0.24474\n[65]\ttrain-logloss:0.14256\teval-logloss:0.24362\n[66]\ttrain-logloss:0.14111\teval-logloss:0.24270\n[67]\ttrain-logloss:0.13922\teval-logloss:0.24144\n[68]\ttrain-logloss:0.13763\teval-logloss:0.24094\n[69]\ttrain-logloss:0.13645\teval-logloss:0.23972\n[70]\ttrain-logloss:0.13537\teval-logloss:0.23850\n[71]\ttrain-logloss:0.13438\teval-logloss:0.23795\n[72]\ttrain-logloss:0.13304\teval-logloss:0.23698\n[73]\ttrain-logloss:0.13132\teval-logloss:0.23604\n[74]\ttrain-logloss:0.12975\teval-logloss:0.23530\n[75]\ttrain-logloss:0.12843\teval-logloss:0.23461\n[76]\ttrain-logloss:0.12704\teval-logloss:0.23455\n[77]\ttrain-logloss:0.12606\teval-logloss:0.23366\n[78]\ttrain-logloss:0.12531\teval-logloss:0.23295\n[79]\ttrain-logloss:0.12422\teval-logloss:0.23306\n[80]\ttrain-logloss:0.12325\teval-logloss:0.23264\n[81]\ttrain-logloss:0.12255\teval-logloss:0.23203\n[82]\ttrain-logloss:0.12191\teval-logloss:0.23130\n[83]\ttrain-logloss:0.12116\teval-logloss:0.23029\n[84]\ttrain-logloss:0.12005\teval-logloss:0.23016\n[85]\ttrain-logloss:0.11907\teval-logloss:0.22959\n[86]\ttrain-logloss:0.11791\teval-logloss:0.22936\n[87]\ttrain-logloss:0.11715\teval-logloss:0.22858\n[88]\ttrain-logloss:0.11635\teval-logloss:0.22792\n[89]\ttrain-logloss:0.11501\teval-logloss:0.22733\n[90]\ttrain-logloss:0.11442\teval-logloss:0.22685\n[91]\ttrain-logloss:0.11376\teval-logloss:0.22633\n[92]\ttrain-logloss:0.11307\teval-logloss:0.22594\n[93]\ttrain-logloss:0.11221\teval-logloss:0.22588\n[94]\ttrain-logloss:0.11128\teval-logloss:0.22583\n[95]\ttrain-logloss:0.11022\teval-logloss:0.22569\n[96]\ttrain-logloss:0.10917\teval-logloss:0.22537\n[97]\ttrain-logloss:0.10825\teval-logloss:0.22490\n[98]\ttrain-logloss:0.10752\teval-logloss:0.22410\n[99]\ttrain-logloss:0.10695\teval-logloss:0.22379\n[100]\ttrain-logloss:0.10626\teval-logloss:0.22334\n[101]\ttrain-logloss:0.10548\teval-logloss:0.22308\n[102]\ttrain-logloss:0.10480\teval-logloss:0.22331\n[103]\ttrain-logloss:0.10419\teval-logloss:0.22361\n[104]\ttrain-logloss:0.10324\teval-logloss:0.22416\nStopping. 
Best iteration:\n[101]\ttrain-logloss:0.10548\teval-logloss:0.22308\n\nAccuracy: 0.906100068540096\n" ],
[ "bst = xgb.XGBClassifier()\nbooster = xgb.Booster({'nthread':4})", "_____no_output_____" ],
[ "from datetime import date\nimport json\nbst.save_model(f'./saved_models/model_xgboost_region_based_cortex_striatum_80_20_split_2020-08-02')\nwith open(f'./saved_models/config_xgboost_region_based_cortex_striatum_80_20_split_2020-08-02', 'w', encoding='utf-8') as f:\n    json.dump(bst.save_config(), f, ensure_ascii=False, indent=4)", "_____no_output_____" ],
[ "from datetime import date\nimport json\nbooster.load_model(f'./saved_models/model_xgboost_region_based_cortex_striatum_80_20_split_2020-08-02')\n# read the booster configuration back from the config file written above\n# (the model file holds the binary model, not the JSON config)\nwith open(f'./saved_models/config_xgboost_region_based_cortex_striatum_80_20_split_2020-08-02', 'r', encoding='utf-8') as f:\n    config = f.read()\n    config = json.loads(config)\nbooster.load_config(config)\nbst._Booster = booster", "_____no_output_____" ],
[ "ypred1 = bst.predict(dtest)\n# by default, we predict using all the trees\nalpha = 0.62\npred = [0 if i < alpha else 1 for i in ypred1]\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, pred))", "Accuracy: 0.906100068540096\n" ],
[ "def perf_meas(y_actual, y_pred, cls):\n    # tally TP/FP/TN/FN for the given class label\n    TP = 0\n    FP = 0\n    TN = 0\n    FN = 0\n    for i in range(len(y_actual)): \n        if (y_actual[i]==y_pred[i]) and (y_pred[i]==cls):\n            TP += 1\n        if (y_pred[i]==cls) and (y_actual[i]!=y_pred[i]):\n            FP += 1\n        if (y_actual[i]==y_pred[i]) and (y_pred[i]!=cls):\n            TN += 1\n        if (y_pred[i]!=cls) and (y_actual[i]!=y_pred[i]):\n            FN += 1\n    return(TP, FP, TN, FN)\nfor i in range(2):\n    print(perf_meas(list(y_test), pred, i))", "(696, 71, 638, 54)\n(638, 54, 696, 71)\n" ],
[ "# repeat the bin-based split 50 times with shifted seeds to gauge accuracy stability\nacc = []\nfor i in range(50):\n    seed = 1234\n    np.random.seed(seed)\n    train_split = 0.8\n    test_split = 0.5\n    le = preprocessing.LabelEncoder()\n    bal_ecm['encoded_target'] = le.fit_transform(bal_ecm[target])\n    training_bins = np.random.choice(bal_ecm.bins.unique(), int(len(bal_ecm.bins.unique())*train_split), replace=False)\n    X_train = bal_ecm[bal_ecm.bins.isin(training_bins)]\n    X_test_val = bal_ecm[~bal_ecm.bins.isin(training_bins)]\n    seed+=i\n    np.random.seed(seed)\n    X_val, X_test = train_test_split(X_test_val, test_size=test_split, random_state=seed)\n    y_train = X_train['encoded_target']\n    y_test = X_test['encoded_target']\n    y_val = X_val['encoded_target']\n    dtrain = xgb.DMatrix(X_train[features], label=y_train)\n    dtest = xgb.DMatrix(X_test[features], label=y_test)\n    dval = xgb.DMatrix(X_val[features], label=y_val)\n    ypred1 = bst.predict(dtest)\n    # by default, we predict using all the trees\n    alpha = 0.62\n    pred = [0 if i < alpha else 1 for i in ypred1]\n    acc.append(metrics.accuracy_score(y_test, pred))\nacc = np.array(acc)", "_____no_output_____" ],
[ "acc.mean()", "_____no_output_____" ],
[ "acc.std()", "_____no_output_____" ],
[ "# workaround so that shap can parse the booster: strip the 4-byte header\n# from the raw model and monkey-patch save_raw to return it\nmodel_bytearray = bst.save_raw()[4:]\ndef myfun(self=None):\n    return model_bytearray\nbst.save_raw = myfun", "_____no_output_____" ],
[ "# import ctypes\n\n# def c_array(ctype, values):\n#     \"\"\"Convert a python string to c array.\"\"\"\n#     if (isinstance(values, np.ndarray)\n#             and values.dtype.itemsize == ctypes.sizeof(ctype)):\n#         return (ctype * len(values)).from_buffer_copy(values)\n#     return (ctype * len(values))(*values)\n\n# mats = c_array(ctypes.c_void_p, [dtrain.handle])\n", "_____no_output_____" ],
[ "# tst = X_test[features + [target]]\n# tst['tst'] = y_test", "_____no_output_____" ],
[ "results = X_test[features]\nresults['predicted'] = pred\nresults['actual'] = y_test", "\nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n\nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n" ] ],
[ [ "## 4. Evaluate Results", "_____no_output_____" ] ],
[ [ "print('0 == {}'.format(le.inverse_transform([0])))\nprint('1 == {}'.format(le.inverse_transform([1])))\n\n\nclass_names = ['cortex', 'striatum']\nclass_results = classification_report(y_test, pred, digits=4, target_names = class_names)\nprint(str(class_results))", "0 == ['cortex']\n1 == ['striatum']\n              precision    recall  f1-score   support\n\n      cortex     0.9074    0.9280    0.9176       750\n    striatum     0.9220    0.8999    0.9108       709\n\n    accuracy                         0.9143      1459\n   macro avg     0.9147    0.9139    0.9142      1459\nweighted avg     0.9145    0.9143    0.9143      1459\n\n" ],
[ "confusion_matrix(y_test, pred)\npl.figure(figsize=(12,10))\ncm_array = confusion_matrix(y_test, pred)\ndf_cm = pd.DataFrame(cm_array, index = class_names, columns = class_names)\n\nsn.set(font_scale=1.4) # for label size\nax = sn.heatmap(df_cm, annot=True, annot_kws={\"size\": 16}, cmap=\"YlGnBu\")\nax.set(xlabel='Predicted', ylabel='Actual')\n\npl.show()", "_____no_output_____" ],
[ "explainer = shap.TreeExplainer(bst)\nshap_values = explainer.shap_values(X_test[features])", "_____no_output_____" ],
[ "%matplotlib inline", "_____no_output_____" ],
[ "cortex = '#77AADD'\nstriatum = '#44BB99'\ncolors = [cortex]\nclass_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])\ncmap = plt_colors.ListedColormap(np.array(colors)[class_inds])", "_____no_output_____" ],
[ "# sn.reset_orig() # Reset matplot lib to no longer use seaborn", "_____no_output_____" ],
[ "shap.summary_plot(shap_values, X_test[features], class_names=np.array(class_names), title='Total SHAP Values', plot_type='bar',max_display=15, color=cortex)", "_____no_output_____" ],
[ "pl.ioff()\n%matplotlib inline\n\n#------SHAP-FILE--------------\nimport random\n\ndef get_cmap(shap_values):\n    class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])\n    cmap = plt_colors.ListedColormap(np.array(colors)[class_inds])\n    return cmap\n\ndef plot_dependency(feature_name, shap_values, X_df, fig_dim, color, figsize=None, y_range=None, alpha=None):\n    if len(list(color)) != 1:\n        color = get_cmap(shap_values)\n    colors = enumerate(color)\n    fig, axs = pl.subplots(*fig_dim, figsize=figsize)\n#     ax = axs.ravel()\n    cnt = 0\n    if (fig_dim == (1, 1)):\n        # single-axes case: pl.subplots returns one Axes object, not a grid\n        if y_range is not None:\n            axs.set_ylim(*y_range)\n        shap.dependence_plot(feature_name, shap_values, X_df, interaction_index=None, color=next(colors)[1], ax=axs)\n    else:\n        for x in range(fig_dim[0]):\n            for y in range(fig_dim[1]):\n                if y_range is not None:\n                    axs[x][y].set_ylim(*y_range)\n                shap.dependence_plot(feature_name, shap_values, X_df, interaction_index=None, color=next(colors)[1], ax=axs[x][y])\n                cnt+=1\nfeat = ['Mean fractal_dim', 'Mean kurtosis', 'straightness', 'Mean MSD_ratio', 'Mean efficiency']\nname = ['Mean Fractal Dimension', 'Mean Kurtosis', 'Straightness', 'Mean MSD Ratio', 'Mean Efficiency']\nfigsize = (7.5, 5)\nbottom = -3.0\ntop = 3.0\nfor i in range(len(name)):\n    fig = pl.figure(figsize=figsize)\n    ax = fig.gca()\n    ax.set_ylim(bottom, top)\n    shap.dependence_plot(feat[i], shap_values, X_test[features], interaction_index=None, color=[cortex], alpha=0.5, ax=ax, show=False)\n    ax.set_xlabel(name[i])\n    ax.set_ylabel('SHAP Value')", "_____no_output_____" ],
[ "plot_dependency(\"Mean fractal_dim\", shap_values, X_test[features], (1,1), ['#999999'])", "_____no_output_____" ],
[ "plot_dependency(\"Mean kurtosis\", shap_values, X_test[features], (1,1), ['#999999'])", "_____no_output_____" ],
[ "plot_dependency(\"straightness\", shap_values, X_test[features], (1,1), ['#999999'])", "_____no_output_____" ],
[ "plot_dependency(\"Mean alpha\", shap_values, X_test[features], (1,1), ['#999999'])", "_____no_output_____" ],
[ "shap.summary_plot(shap_values, X_test[features], max_display=5, class_names = class_names, title = 'SHAP Value cortex')", "_____no_output_____" ],
[ "from modules import anim_plot_changed\nfrom importlib import reload\nreload(anim_plot_changed)\n\n_ = anim_plot_changed.rotate_3d(results, [top_feat[0], top_feat[1], top_feat[2]])\n_ = anim_plot_changed.rotate_3d(results, [top_feat[0], top_feat[2], top_feat[3]])\n_ = anim_plot_changed.rotate_3d(results, [top_feat[1], top_feat[2], top_feat[3]])", "_____no_output_____" ],
[ "from modules import anim_plot_changed\nfrom importlib import reload\nreload(anim_plot_changed)\n\n_ = anim_plot_changed.rotate_3d(results, [top_feat[0], top_feat[1], top_feat[2]], anim_param={'frames':np.arange(0,720,1)}, save_param={'filename':'This_is_a_test.gif','fps':50})", "_____no_output_____" ],
[ "from matplotlib import animation\nfrom matplotlib.animation import PillowWriter", "_____no_output_____" ],
[ "# from sklearn import model  # sklearn has no 'model' module; 'model' below must be an already-fitted XGBClassifier", "_____no_output_____" ],
[ "print(model.feature_importances_)", "_____no_output_____" ],
[ "# Feature search (new) -------not in file--------: \nimport operator\nfrom sklearn.metrics import accuracy_score\n\ndef feature_thresholding_helper(X_train, X_test, X_val, new_feat):\n    dtrain = xgb.DMatrix(X_train[new_feat], label=y_train)\n    dtest = xgb.DMatrix(X_test[new_feat], label=y_test)\n    dval = xgb.DMatrix(X_val[new_feat], label=y_val)\n    return dtrain, dtest, dval\n\ndef feature_thresholding(X_train, y_train, X_test, y_test, X_val, y_val, params, features, nfold=5, num_boost_round=2000, early_stopping_rounds=3, metrics={'mlogloss', 'merror'}, thresh=np.arange(0,.1,.002)):\n    best_thresh = -1\n    if params['eval_metric'] in ['map', 'auc', 'aucpr']:\n        best_eval = -np.inf\n        eval_f = operator.gt\n    else:\n        best_eval = np.inf\n        eval_f = operator.lt\n    \n    # accuracy is used for selection below, so always maximize\n    best_eval = -np.inf\n    eval_f = operator.gt\n    \n    for t in thresh:\n        print(f\"Using thresh = {t} \",end = '| ')\n        new_feat = list(np.array(features)[np.array(model.feature_importances_ > t)])\n\n#         cv_model = cv(params, \n#                       X_train, \n#                       y_train, \n#                       features=new_feat, \n#                       nfold=nfold, \n#                       num_boost_round=num_boost_round, \n#                       early_stopping_rounds=early_stopping_rounds, \n#                       metrics=metrics) \n#         cv_eval = cv_model[f\"test-{'merror'}-mean\"].min()\n#         print(f\"Eval = {cv_eval} \", end = '| ')\n#         if eval_f(cv_eval, best_eval):\n#             best_thresh = t\n#             best_eval = cv_eval\n\n        dtrain, dtest, dval = feature_thresholding_helper(X_train, X_test, X_val, new_feat)\n        evals = [(dtrain, 'train'), (dval, 'eval')]\n        bst2 = xgb.train(best_param, dtrain, 1500, evals, early_stopping_rounds=3, verbose_eval=False)\n        ######\n        label = dtest.get_label()\n        ypred1 = bst2.predict(dtest)\n        # by default, we predict using all the trees\n        pred2 = [np.where(x == np.max(x))[0][0] for x in ypred1]\n\n        cv_eval = accuracy_score(y_test, pred2)\n        if eval_f(cv_eval, best_eval):\n            best_thresh = t\n            best_eval = cv_eval\n    \n    print(f\"Best eval = {best_eval}, Best threshold = {best_thresh}\")\n    print(f\"Features used:\\n{np.array(features)[np.array(model.feature_importances_ > best_thresh)]}\")\n    return list(np.array(features)[np.array(model.feature_importances_ > best_thresh)])\n\nnew_feat = feature_thresholding(X_train, y_train, X_test, y_test, X_val, y_val, best_param, features)", "Using thresh = 0.0 | " ],
[ "# new_feat was already returned by feature_thresholding above;\n# 'best_thresh' is local to that function, so recomputing here would raise a NameError\n# new_feat = list(np.array(features)[np.array(model.feature_importances_ > best_thresh)])", "_____no_output_____" ],
[ "cv_model = cv(best_param, \n              X_train, \n              y_train, \n              features=new_feat, \n              nfold=5, \n              num_boost_round=best_boost_rounds, \n              early_stopping_rounds=3, \n              metrics={'mlogloss', 'merror'}) \ncv_model", "_____no_output_____" ],
[ "dtrain = xgb.DMatrix(X_train[new_feat], label=y_train)\ndtest = xgb.DMatrix(X_test[new_feat], label=y_test)\ndval = xgb.DMatrix(X_val[new_feat], label=y_val)", "_____no_output_____" ],
[ "evals = [(dtrain, 'train'), (dval, 'eval')]\nnum_round = best_boost_rounds\nbst = xgb.train(best_param, dtrain, num_round, evals, early_stopping_rounds=3, )\n\n\n######\nlabel = dtest.get_label()\nypred1 = bst.predict(dtest)\n# by default, we predict using all the trees\npred = [np.where(x == np.max(x))[0][0] for x in ypred1]", "[0]\ttrain-mlogloss:1.38480\teval-mlogloss:1.38538\nMultiple eval metrics have been passed: 'eval-mlogloss' will be used for early stopping.\n\nWill train until eval-mlogloss hasn't improved in 3 rounds.\n[1]\ttrain-mlogloss:1.38298\teval-mlogloss:1.38424\n[2]\ttrain-mlogloss:1.38080\teval-mlogloss:1.38266\n[3]\ttrain-mlogloss:1.37877\teval-mlogloss:1.38131\n[4]\ttrain-mlogloss:1.37662\teval-mlogloss:1.37985\n[5]\ttrain-mlogloss:1.37462\teval-mlogloss:1.37853\n[6]\ttrain-mlogloss:1.37239\teval-mlogloss:1.37699\n[7]\ttrain-mlogloss:1.37065\teval-mlogloss:1.37585\n[8]\ttrain-mlogloss:1.36888\teval-mlogloss:1.37481\n[9]\ttrain-mlogloss:1.36682\teval-mlogloss:1.37343\n[10]\ttrain-mlogloss:1.36465\teval-mlogloss:1.37196\n[11]\ttrain-mlogloss:1.36287\teval-mlogloss:1.37079\n[12]\ttrain-mlogloss:1.36106\teval-mlogloss:1.36964\n[13]\ttrain-mlogloss:1.35940\teval-mlogloss:1.36858\n[14]\ttrain-mlogloss:1.35725\teval-mlogloss:1.36707\n[15]\ttrain-mlogloss:1.35526\teval-mlogloss:1.36570\n[16]\ttrain-mlogloss:1.35396\teval-mlogloss:1.36495\n[17]\ttrain-mlogloss:1.35199\teval-mlogloss:1.36365\n[18]\ttrain-mlogloss:1.34997\teval-mlogloss:1.36234\n[19]\ttrain-mlogloss:1.34821\teval-mlogloss:1.36126\n[20]\ttrain-mlogloss:1.34642\teval-mlogloss:1.36003\n[21]\ttrain-mlogloss:1.34493\teval-mlogloss:1.35906\n[22]\ttrain-mlogloss:1.34351\teval-mlogloss:1.35830\n[23]\ttrain-mlogloss:1.34152\teval-mlogloss:1.35686\n[24]\ttrain-mlogloss:1.33953\teval-mlogloss:1.35557\n[25]\ttrain-mlogloss:1.33786\teval-mlogloss:1.35444\n[26]\ttrain-mlogloss:1.33628\teval-mlogloss:1.35341\n[27]\ttrain-mlogloss:1.33442\teval-mlogloss:1.35211\n[28]\ttrain-mlogloss:1.33282\teval-mlogloss:1.35121\n[29]\ttrain-mlogloss:1.33109\teval-mlogloss:1.35013\n[30]\ttrain-mlogloss:1.32912\teval-mlogloss:1.34883\n[31]\ttrain-mlogloss:1.32723\teval-mlogloss:1.34753\n[32]\ttrain-mlogloss:1.32546\teval-mlogloss:1.34625\n[33]\ttrain-mlogloss:1.32376\teval-mlogloss:1.34505\n[34]\ttrain-mlogloss:1.32190\teval-mlogloss:1.34380\n[35]\ttrain-mlogloss:1.32005\teval-mlogloss:1.34255\n[36]\ttrain-mlogloss:1.31822\teval-mlogloss:1.34130\n[37]\ttrain-mlogloss:1.31655\teval-mlogloss:1.34020\n[38]\ttrain-mlogloss:1.31468\teval-mlogloss:1.33885\n[39]\ttrain-ml
ogloss:1.31304\teval-mlogloss:1.33773\n[40]\ttrain-mlogloss:1.31120\teval-mlogloss:1.33645\n[41]\ttrain-mlogloss:1.30951\teval-mlogloss:1.33536\n[42]\ttrain-mlogloss:1.30781\teval-mlogloss:1.33422\n[43]\ttrain-mlogloss:1.30609\teval-mlogloss:1.33309\n[44]\ttrain-mlogloss:1.30458\teval-mlogloss:1.33224\n[45]\ttrain-mlogloss:1.30300\teval-mlogloss:1.33131\n[46]\ttrain-mlogloss:1.30143\teval-mlogloss:1.33035\n[47]\ttrain-mlogloss:1.29969\teval-mlogloss:1.32917\n[48]\ttrain-mlogloss:1.29807\teval-mlogloss:1.32805\n[49]\ttrain-mlogloss:1.29651\teval-mlogloss:1.32701\n[50]\ttrain-mlogloss:1.29489\teval-mlogloss:1.32593\n[51]\ttrain-mlogloss:1.29336\teval-mlogloss:1.32490\n[52]\ttrain-mlogloss:1.29175\teval-mlogloss:1.32385\n[53]\ttrain-mlogloss:1.29009\teval-mlogloss:1.32271\n[54]\ttrain-mlogloss:1.28849\teval-mlogloss:1.32153\n[55]\ttrain-mlogloss:1.28704\teval-mlogloss:1.32054\n[56]\ttrain-mlogloss:1.28577\teval-mlogloss:1.31979\n[57]\ttrain-mlogloss:1.28400\teval-mlogloss:1.31861\n[58]\ttrain-mlogloss:1.28238\teval-mlogloss:1.31764\n[59]\ttrain-mlogloss:1.28078\teval-mlogloss:1.31665\n[60]\ttrain-mlogloss:1.27939\teval-mlogloss:1.31577\n[61]\ttrain-mlogloss:1.27807\teval-mlogloss:1.31501\n[62]\ttrain-mlogloss:1.27644\teval-mlogloss:1.31394\n[63]\ttrain-mlogloss:1.27489\teval-mlogloss:1.31297\n[64]\ttrain-mlogloss:1.27340\teval-mlogloss:1.31201\n[65]\ttrain-mlogloss:1.27189\teval-mlogloss:1.31098\n[66]\ttrain-mlogloss:1.27027\teval-mlogloss:1.30995\n[67]\ttrain-mlogloss:1.26880\teval-mlogloss:1.30897\n[68]\ttrain-mlogloss:1.26744\teval-mlogloss:1.30810\n[69]\ttrain-mlogloss:1.26581\teval-mlogloss:1.30714\n[70]\ttrain-mlogloss:1.26427\teval-mlogloss:1.30604\n[71]\ttrain-mlogloss:1.26297\teval-mlogloss:1.30515\n[72]\ttrain-mlogloss:1.26169\teval-mlogloss:1.30440\n[73]\ttrain-mlogloss:1.26013\teval-mlogloss:1.30332\n[74]\ttrain-mlogloss:1.25889\teval-mlogloss:1.30264\n[75]\ttrain-mlogloss:1.25760\teval-mlogloss:1.30177\n[76]\ttrain-mlogloss:1.25639\teval-mlogloss:1.30107\n[77]\ttrain-mlogloss:1.25520\teval-mlogloss:1.30035\n[78]\ttrain-mlogloss:1.25363\teval-mlogloss:1.29936\n[79]\ttrain-mlogloss:1.25216\teval-mlogloss:1.29843\n[80]\ttrain-mlogloss:1.25058\teval-mlogloss:1.29744\n[81]\ttrain-mlogloss:1.24912\teval-mlogloss:1.29654\n[82]\ttrain-mlogloss:1.24767\teval-mlogloss:1.29553\n[83]\ttrain-mlogloss:1.24632\teval-mlogloss:1.29471\n[84]\ttrain-mlogloss:1.24519\teval-mlogloss:1.29405\n[85]\ttrain-mlogloss:1.24379\teval-mlogloss:1.29318\n[86]\ttrain-mlogloss:1.24246\teval-mlogloss:1.29243\n[87]\ttrain-mlogloss:1.24110\teval-mlogloss:1.29160\n[88]\ttrain-mlogloss:1.23964\teval-mlogloss:1.29063\n[89]\ttrain-mlogloss:1.23854\teval-mlogloss:1.29004\n[90]\ttrain-mlogloss:1.23731\teval-mlogloss:1.28923\n[91]\ttrain-mlogloss:1.23585\teval-mlogloss:1.28833\n[92]\ttrain-mlogloss:1.23471\teval-mlogloss:1.28767\n[93]\ttrain-mlogloss:1.23330\teval-mlogloss:1.28679\n[94]\ttrain-mlogloss:1.23201\teval-mlogloss:1.28604\n[95]\ttrain-mlogloss:1.23078\teval-mlogloss:1.28527\n[96]\ttrain-mlogloss:1.22957\teval-mlogloss:1.28455\n[97]\ttrain-mlogloss:1.22829\teval-mlogloss:1.28374\n[98]\ttrain-mlogloss:1.22732\teval-mlogloss:1.28320\n[99]\ttrain-mlogloss:1.22603\teval-mlogloss:1.28236\n[100]\ttrain-mlogloss:1.22466\teval-mlogloss:1.28151\n[101]\ttrain-mlogloss:1.22331\teval-mlogloss:1.28070\n[102]\ttrain-mlogloss:1.22186\teval-mlogloss:1.27978\n[103]\ttrain-mlogloss:1.22059\teval-mlogloss:1.27900\n[104]\ttrain-mlogloss:1.21939\teval-mlogloss:1.27824\n[105]\ttrain-mlogloss:1.21816\teval-mlogloss:1.27757\n[106]\ttrain
-mlogloss:1.21684\teval-mlogloss:1.27668\n[107]\ttrain-mlogloss:1.21545\teval-mlogloss:1.27587\n[108]\ttrain-mlogloss:1.21440\teval-mlogloss:1.27526\n[109]\ttrain-mlogloss:1.21319\teval-mlogloss:1.27457\n[110]\ttrain-mlogloss:1.21212\teval-mlogloss:1.27390\n[111]\ttrain-mlogloss:1.21086\teval-mlogloss:1.27316\n[112]\ttrain-mlogloss:1.20958\teval-mlogloss:1.27235\n[113]\ttrain-mlogloss:1.20826\teval-mlogloss:1.27164\n[114]\ttrain-mlogloss:1.20696\teval-mlogloss:1.27085\n[115]\ttrain-mlogloss:1.20571\teval-mlogloss:1.27002\n[116]\ttrain-mlogloss:1.20441\teval-mlogloss:1.26923\n[117]\ttrain-mlogloss:1.20316\teval-mlogloss:1.26848\n[118]\ttrain-mlogloss:1.20193\teval-mlogloss:1.26771\n[119]\ttrain-mlogloss:1.20063\teval-mlogloss:1.26687\n[120]\ttrain-mlogloss:1.19929\teval-mlogloss:1.26613\n[121]\ttrain-mlogloss:1.19836\teval-mlogloss:1.26556\n[122]\ttrain-mlogloss:1.19726\teval-mlogloss:1.26497\n[123]\ttrain-mlogloss:1.19600\teval-mlogloss:1.26419\n[124]\ttrain-mlogloss:1.19485\teval-mlogloss:1.26341\n[125]\ttrain-mlogloss:1.19365\teval-mlogloss:1.26272\n[126]\ttrain-mlogloss:1.19237\teval-mlogloss:1.26185\n[127]\ttrain-mlogloss:1.19111\teval-mlogloss:1.26106\n[128]\ttrain-mlogloss:1.19005\teval-mlogloss:1.26035\n[129]\ttrain-mlogloss:1.18881\teval-mlogloss:1.25961\n[130]\ttrain-mlogloss:1.18778\teval-mlogloss:1.25900\n[131]\ttrain-mlogloss:1.18670\teval-mlogloss:1.25827\n[132]\ttrain-mlogloss:1.18547\teval-mlogloss:1.25753\n[133]\ttrain-mlogloss:1.18435\teval-mlogloss:1.25694\n[134]\ttrain-mlogloss:1.18317\teval-mlogloss:1.25627\n[135]\ttrain-mlogloss:1.18211\teval-mlogloss:1.25568\n[136]\ttrain-mlogloss:1.18103\teval-mlogloss:1.25509\n[137]\ttrain-mlogloss:1.17983\teval-mlogloss:1.25438\n[138]\ttrain-mlogloss:1.17873\teval-mlogloss:1.25370\n[139]\ttrain-mlogloss:1.17752\teval-mlogloss:1.25301\n[140]\ttrain-mlogloss:1.17630\teval-mlogloss:1.25223\n[141]\ttrain-mlogloss:1.17516\teval-mlogloss:1.25153\n[142]\ttrain-mlogloss:1.17404\teval-mlogloss:1.25087\n[143]\ttrain-mlogloss:1.17303\teval-mlogloss:1.25027\n[144]\ttrain-mlogloss:1.17189\teval-mlogloss:1.24961\n[145]\ttrain-mlogloss:1.17070\teval-mlogloss:1.24895\n[146]\ttrain-mlogloss:1.16969\teval-mlogloss:1.24840\n[147]\ttrain-mlogloss:1.16891\teval-mlogloss:1.24799\n[148]\ttrain-mlogloss:1.16782\teval-mlogloss:1.24737\n[149]\ttrain-mlogloss:1.16668\teval-mlogloss:1.24668\n[150]\ttrain-mlogloss:1.16558\teval-mlogloss:1.24604\n[151]\ttrain-mlogloss:1.16450\teval-mlogloss:1.24536\n[152]\ttrain-mlogloss:1.16352\teval-mlogloss:1.24490\n[153]\ttrain-mlogloss:1.16239\teval-mlogloss:1.24422\n[154]\ttrain-mlogloss:1.16138\teval-mlogloss:1.24361\n[155]\ttrain-mlogloss:1.16036\teval-mlogloss:1.24308\n[156]\ttrain-mlogloss:1.15929\teval-mlogloss:1.24242\n[157]\ttrain-mlogloss:1.15838\teval-mlogloss:1.24198\n[158]\ttrain-mlogloss:1.15730\teval-mlogloss:1.24127\n[159]\ttrain-mlogloss:1.15627\teval-mlogloss:1.24067\n" ], [ "# print('0 == {}'.format(le.inverse_transform([0])))\n# print('1 == {}'.format(le.inverse_transform([1])))\n# print('2 == {}'.format(le.inverse_transform([2])))\n# print('3 == {}'.format(le.inverse_transform([3])))\nclass_names = ['P14', 'P21', 'P28', 'P35']\nclass_results = classification_report(y_test, pred, digits=4, target_names = ['P14', 'P21', 'P28', 'P35'])\nprint(str(class_results))", " precision recall f1-score support\n\n P14 0.6306 0.7071 0.6667 898\n P21 0.4256 0.5091 0.4636 876\n P28 0.3953 0.2662 0.3181 943\n P35 0.5202 0.5352 0.5276 938\n\n accuracy 0.5018 3655\n macro avg 0.4929 0.5044 0.4940 3655\nweighted avg 0.4924 
0.5018 0.4924 3655\n\n" ], [ "# Running CV with newly thresholded features; using new seed of 123 to get different unique GS hyperparams\nbest_model2, best_param2, best_eval2, best_boost_rounds2 = xgb_paramsearch(X_train, y_train, new_feat, init_params=best_param, nfold=5, num_boost_round=2000, early_stopping_rounds=3, metrics={'mlogloss', 'merror'}, use_gpu='True', seed=123)", "Using CV with subsample={0.7}, colsample_bytree={0.7}\nNew best param found: mlogloss = {1.05563476842045}, boost_rounds = {1116}\nUsing CV with subsample={0.7}, colsample_bytree={0.8}\nUsing CV with subsample={0.7}, colsample_bytree={0.6}\nUsing CV with subsample={0.9}, colsample_bytree={0.6}\nUsing CV with subsample={0.9}, colsample_bytree={0.5}\nUsing CV with subsample={1.0}, colsample_bytree={0.8}\nUsing CV with subsample={1.0}, colsample_bytree={0.7}\nUsing CV with subsample={1.0}, colsample_bytree={0.6}\nUsing CV with max_depth={1}, min_child_weight={2}\nUsing CV with max_depth={4}, min_child_weight={10}\nUsing CV with max_depth={4}, min_child_weight={7}\nUsing CV with max_depth={1}, min_child_weight={3}\nUsing CV with max_depth={10}, min_child_weight={4}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={4}, min_child_weight={4}\nUsing CV with max_depth={1}, min_child_weight={4}\nUsing CV with max_depth={1}, min_child_weight={7}\nUsing CV with max_depth={1}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={0}\nUsing CV with max_depth={4}, min_child_weight={2}\nUsing CV with max_depth={1}, min_child_weight={1}\nUsing CV with max_depth={4}, min_child_weight={0}\nUsing CV with eta={0.005}, gamma={3.0}\nUsing CV with eta={0.3}, gamma={2.0}\nUsing CV with eta={0.3}, gamma={0}\nUsing CV with eta={0.005}, gamma={0.2}\nUsing CV with eta={0.005}, gamma={0.001}\nUsing CV with eta={0.005}, gamma={5.0}\nUsing CV with eta={0.3}, gamma={3.0}\nUsing CV with eta={0.3}, gamma={0.001}\nUsing CV with eta={0.005}, gamma={1.0}\nUsing CV with eta={0.005}, gamma={0}\nUsing CV with eta={0.005}, gamma={0.5}\nUsing CV with subsample={0.6}, colsample_bytree={1.0}\nUsing CV with subsample={0.7}, colsample_bytree={1.0}\nUsing CV with subsample={0.9}, colsample_bytree={0.6}\nUsing CV with subsample={0.9}, colsample_bytree={0.9}\nUsing CV with subsample={0.9}, colsample_bytree={0.5}\nUsing CV with subsample={0.6}, colsample_bytree={0.9}\nUsing CV with subsample={0.7}, colsample_bytree={0.9}\nUsing CV with subsample={0.7}, colsample_bytree={0.8}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={7}, min_child_weight={0}\nUsing CV with max_depth={9}, min_child_weight={1}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={7}, min_child_weight={1}\nUsing CV with max_depth={9}, min_child_weight={9}\nUsing CV with max_depth={9}, min_child_weight={10}\nUsing CV with max_depth={1}, min_child_weight={9}\nUsing CV with max_depth={7}, min_child_weight={10}\nNew best param found: mlogloss = {1.0555034434075687}, boost_rounds = {1116}\nUsing CV with max_depth={9}, min_child_weight={4}\nUsing CV with max_depth={1}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={0}\nUsing CV with max_depth={7}, min_child_weight={2}\nUsing CV with max_depth={1}, min_child_weight={1}\nUsing CV with max_depth={10}, min_child_weight={9}\nUsing CV with eta={0.3}, gamma={1.0}\nUsing CV with eta={0.3}, gamma={0}\nUsing CV with eta={0.1}, 
gamma={0}\nUsing CV with eta={0.005}, gamma={0.2}\nUsing CV with eta={0.005}, gamma={2.0}\nUsing CV with eta={0.3}, gamma={0.01}\nUsing CV with eta={0.1}, gamma={5.0}\nUsing CV with eta={0.3}, gamma={10.0}\nUsing CV with eta={0.005}, gamma={0.5}\nUsing CV with eta={0.1}, gamma={2.0}\nUsing CV with eta={0.005}, gamma={0}\nUsing CV with subsample={0.7}, colsample_bytree={1.0}\nUsing CV with subsample={0.7}, colsample_bytree={0.7}\nUsing CV with subsample={0.7}, colsample_bytree={0.8}\nUsing CV with subsample={1.0}, colsample_bytree={0.5}\nUsing CV with subsample={1.0}, colsample_bytree={0.8}\nUsing CV with subsample={1.0}, colsample_bytree={0.9}\nUsing CV with subsample={1.0}, colsample_bytree={1.0}\nUsing CV with max_depth={5}, min_child_weight={9}\nUsing CV with max_depth={1}, min_child_weight={3}\nUsing CV with max_depth={6}, min_child_weight={7}\nNew best param found: mlogloss = {1.0553094309841415}, boost_rounds = {1143}\nUsing CV with max_depth={5}, min_child_weight={10}\nUsing CV with max_depth={5}, min_child_weight={5}\nNew best param found: mlogloss = {1.0552115049521955}, boost_rounds = {1169}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={6}, min_child_weight={10}\nUsing CV with max_depth={5}, min_child_weight={7}\nUsing CV with max_depth={6}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={5}\nUsing CV with max_depth={6}, min_child_weight={2}\nUsing CV with max_depth={1}, min_child_weight={9}\nUsing CV with max_depth={5}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={9}\nUsing CV with max_depth={10}, min_child_weight={2}\nUsing CV with eta={0.2}, gamma={1.0}\nUsing CV with eta={0.2}, gamma={0}\nUsing CV with eta={0.01}, gamma={1.0}\nUsing CV with eta={0.01}, gamma={0.5}\nUsing CV with eta={0.01}, gamma={0}\nUsing CV with eta={0.01}, gamma={5.0}\nUsing CV with eta={0.2}, gamma={5.0}\nUsing CV with eta={0.01}, gamma={0.001}\nUsing CV with eta={0.2}, gamma={3.0}\nUsing CV with eta={0.01}, gamma={3.0}\nUsing CV with subsample={0.7}, colsample_bytree={1.0}\nUsing CV with subsample={0.7}, colsample_bytree={0.5}\nUsing CV with subsample={0.6}, colsample_bytree={0.6}\nNew best param found: mlogloss = {1.0548073140779755}, boost_rounds = {1220}\nUsing CV with subsample={0.7}, colsample_bytree={0.9}\nUsing CV with subsample={0.6}, colsample_bytree={0.5}\nUsing CV with subsample={0.8}, colsample_bytree={0.7}\nUsing CV with subsample={0.8}, colsample_bytree={0.8}\nUsing CV with max_depth={7}, min_child_weight={3}\nNew best param found: mlogloss = {1.0541575875651228}, boost_rounds = {1220}\nUsing CV with max_depth={7}, min_child_weight={0}\nUsing CV with max_depth={10}, min_child_weight={10}\nNew best param found: mlogloss = {1.053628974237133}, boost_rounds = {1222}\nUsing CV with max_depth={7}, min_child_weight={6}\nUsing CV with max_depth={10}, min_child_weight={6}\nUsing CV with max_depth={7}, min_child_weight={4}\nUsing CV with max_depth={7}, min_child_weight={5}\nUsing CV with max_depth={1}, min_child_weight={6}\nUsing CV with max_depth={7}, min_child_weight={10}\nNew best param found: mlogloss = {1.0535128991813134}, boost_rounds = {1220}\nUsing CV with max_depth={1}, min_child_weight={10}\nUsing CV with max_depth={10}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={0}\nUsing CV with max_depth={1}, min_child_weight={1}\nUsing CV with 
max_depth={7}, min_child_weight={2}\nUsing CV with max_depth={10}, min_child_weight={2}\nUsing CV with eta={0.005}, gamma={3.0}\nUsing CV with eta={0.05}, gamma={0}\nUsing CV with eta={0.05}, gamma={0.5}\nUsing CV with eta={0.01}, gamma={0}\nUsing CV with eta={0.01}, gamma={0.01}\nUsing CV with eta={0.005}, gamma={5.0}\nUsing CV with eta={0.01}, gamma={0.001}\nUsing CV with eta={0.05}, gamma={10.0}\nUsing CV with eta={0.01}, gamma={10.0}\nUsing CV with eta={0.05}, gamma={3.0}\nUsing CV with eta={0.005}, gamma={10.0}\nUsing CV with eta={0.005}, gamma={0}\nUsing CV with subsample={0.9}, colsample_bytree={1.0}\nUsing CV with subsample={0.9}, colsample_bytree={0.8}\nUsing CV with subsample={0.6}, colsample_bytree={0.9}\nUsing CV with subsample={0.6}, colsample_bytree={0.8}\nUsing CV with subsample={0.9}, colsample_bytree={0.6}\nUsing CV with subsample={0.7}, colsample_bytree={0.9}\nUsing CV with subsample={0.7}, colsample_bytree={0.6}\nUsing CV with max_depth={4}, min_child_weight={10}\nUsing CV with max_depth={3}, min_child_weight={5}\nUsing CV with max_depth={9}, min_child_weight={0}\nUsing CV with max_depth={3}, min_child_weight={3}\nUsing CV with max_depth={9}, min_child_weight={1}\nUsing CV with max_depth={10}, min_child_weight={10}\nUsing CV with max_depth={3}, min_child_weight={0}\nUsing CV with max_depth={9}, min_child_weight={2}\nUsing CV with max_depth={3}, min_child_weight={10}\nUsing CV with max_depth={4}, min_child_weight={4}\nUsing CV with max_depth={4}, min_child_weight={0}\nUsing CV with max_depth={9}, min_child_weight={10}\nNew best param found: mlogloss = {1.0529924391137573}, boost_rounds = {1220}\nUsing CV with max_depth={9}, min_child_weight={4}\n" ], [ "seed = 1234\nnp.random.seed(seed)\ntrain_split = 0.7\ntest_split = 0.5\n\nle = preprocessing.LabelEncoder()\nbal_ecm['encoded_target'] = le.fit_transform(bal_ecm[target])\ntraining_bins = np.random.choice(bal_ecm.bins.unique(), int(len(bal_ecm.bins.unique())*train_split), replace=False)\nX_train = bal_ecm[bal_ecm.bins.isin(training_bins)]\nX_test_val = bal_ecm[~bal_ecm.bins.isin(training_bins)]\nX_val, X_test = train_test_split(X_test_val, test_size=test_split, random_state=seed)\ny_train = X_train['encoded_target']\ny_test = X_test['encoded_target']\ny_val = X_val['encoded_target']\ndtrain = xgb.DMatrix(X_train[new_feat], label=y_train)\ndtest = xgb.DMatrix(X_test[new_feat], label=y_test)\ndval = xgb.DMatrix(X_val[new_feat], label=y_val)", "_____no_output_____" ], [ "best_param2={'max_depth': 5,\n 'eta': 0.005,\n 'min_child_weight': 10,\n 'verbosity': 0,\n 'objective': 'multi:softprob',\n 'num_class': 4,\n 'silent': 'True',\n 'gamma': 5,\n 'subsample': 0.6,\n 'colsample_bytree': 0.5,\n 'eval_metric': 'mlogloss',\n 'gpu_id': 0,\n 'tree_method': 'gpu_hist',\n 'predictor': 'gpu_predictor'}", "_____no_output_____" ], [ "evals = [(dtrain, 'train'), (dval, 'eval')]\nnum_round = best_boost_rounds\nbst2 = xgb.train(best_param, dtrain, num_round, evals, early_stopping_rounds=3, )\n\n\n######\nlabel = dtest.get_label()\nypred1 = bst2.predict(dtest)\n# by default, we predict using all the trees\npred2 = [np.where(x == np.max(x))[0][0] for x in ypred1]\n\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, pred2))", "[0]\ttrain-mlogloss:1.38480\teval-mlogloss:1.38538\nMultiple eval metrics have been passed: 'eval-mlogloss' will be used for early stopping.\n\nWill train until eval-mlogloss hasn't improved in 3 
rounds.\n[1]\ttrain-mlogloss:1.38298\teval-mlogloss:1.38424\n[2]\ttrain-mlogloss:1.38080\teval-mlogloss:1.38266\n[3]\ttrain-mlogloss:1.37877\teval-mlogloss:1.38131\n[4]\ttrain-mlogloss:1.37662\teval-mlogloss:1.37985\n[5]\ttrain-mlogloss:1.37462\teval-mlogloss:1.37853\n[6]\ttrain-mlogloss:1.37239\teval-mlogloss:1.37699\n[7]\ttrain-mlogloss:1.37065\teval-mlogloss:1.37585\n[8]\ttrain-mlogloss:1.36888\teval-mlogloss:1.37481\n[9]\ttrain-mlogloss:1.36682\teval-mlogloss:1.37343\n[10]\ttrain-mlogloss:1.36465\teval-mlogloss:1.37196\n[11]\ttrain-mlogloss:1.36287\teval-mlogloss:1.37079\n[12]\ttrain-mlogloss:1.36106\teval-mlogloss:1.36964\n[13]\ttrain-mlogloss:1.35940\teval-mlogloss:1.36858\n[14]\ttrain-mlogloss:1.35725\teval-mlogloss:1.36707\n[15]\ttrain-mlogloss:1.35526\teval-mlogloss:1.36570\n[16]\ttrain-mlogloss:1.35396\teval-mlogloss:1.36495\n[17]\ttrain-mlogloss:1.35199\teval-mlogloss:1.36365\n[18]\ttrain-mlogloss:1.34997\teval-mlogloss:1.36234\n[19]\ttrain-mlogloss:1.34821\teval-mlogloss:1.36126\n[20]\ttrain-mlogloss:1.34642\teval-mlogloss:1.36003\n[21]\ttrain-mlogloss:1.34493\teval-mlogloss:1.35906\n[22]\ttrain-mlogloss:1.34351\teval-mlogloss:1.35830\n[23]\ttrain-mlogloss:1.34152\teval-mlogloss:1.35686\n[24]\ttrain-mlogloss:1.33953\teval-mlogloss:1.35557\n[25]\ttrain-mlogloss:1.33786\teval-mlogloss:1.35444\n[26]\ttrain-mlogloss:1.33628\teval-mlogloss:1.35341\n[27]\ttrain-mlogloss:1.33442\teval-mlogloss:1.35211\n[28]\ttrain-mlogloss:1.33282\teval-mlogloss:1.35121\n[29]\ttrain-mlogloss:1.33109\teval-mlogloss:1.35013\n[30]\ttrain-mlogloss:1.32912\teval-mlogloss:1.34883\n[31]\ttrain-mlogloss:1.32723\teval-mlogloss:1.34753\n[32]\ttrain-mlogloss:1.32546\teval-mlogloss:1.34625\n[33]\ttrain-mlogloss:1.32376\teval-mlogloss:1.34505\n[34]\ttrain-mlogloss:1.32190\teval-mlogloss:1.34380\n[35]\ttrain-mlogloss:1.32005\teval-mlogloss:1.34255\n[36]\ttrain-mlogloss:1.31822\teval-mlogloss:1.34130\n[37]\ttrain-mlogloss:1.31655\teval-mlogloss:1.34020\n[38]\ttrain-mlogloss:1.31468\teval-mlogloss:1.33885\n[39]\ttrain-mlogloss:1.31304\teval-mlogloss:1.33773\n[40]\ttrain-mlogloss:1.31120\teval-mlogloss:1.33645\n[41]\ttrain-mlogloss:1.30951\teval-mlogloss:1.33536\n[42]\ttrain-mlogloss:1.30781\teval-mlogloss:1.33422\n[43]\ttrain-mlogloss:1.30609\teval-mlogloss:1.33309\n[44]\ttrain-mlogloss:1.30458\teval-mlogloss:1.33224\n[45]\ttrain-mlogloss:1.30300\teval-mlogloss:1.33131\n[46]\ttrain-mlogloss:1.30143\teval-mlogloss:1.33035\n[47]\ttrain-mlogloss:1.29969\teval-mlogloss:1.32917\n[48]\ttrain-mlogloss:1.29807\teval-mlogloss:1.32805\n[49]\ttrain-mlogloss:1.29651\teval-mlogloss:1.32701\n[50]\ttrain-mlogloss:1.29489\teval-mlogloss:1.32593\n[51]\ttrain-mlogloss:1.29336\teval-mlogloss:1.32490\n[52]\ttrain-mlogloss:1.29175\teval-mlogloss:1.32385\n[53]\ttrain-mlogloss:1.29009\teval-mlogloss:1.32271\n[54]\ttrain-mlogloss:1.28849\teval-mlogloss:1.32153\n[55]\ttrain-mlogloss:1.28704\teval-mlogloss:1.32054\n[56]\ttrain-mlogloss:1.28577\teval-mlogloss:1.31979\n[57]\ttrain-mlogloss:1.28400\teval-mlogloss:1.31861\n[58]\ttrain-mlogloss:1.28238\teval-mlogloss:1.31764\n[59]\ttrain-mlogloss:1.28078\teval-mlogloss:1.31665\n[60]\ttrain-mlogloss:1.27939\teval-mlogloss:1.31577\n[61]\ttrain-mlogloss:1.27807\teval-mlogloss:1.31501\n[62]\ttrain-mlogloss:1.27644\teval-mlogloss:1.31394\n[63]\ttrain-mlogloss:1.27489\teval-mlogloss:1.31297\n[64]\ttrain-mlogloss:1.27340\teval-mlogloss:1.31201\n[65]\ttrain-mlogloss:1.27189\teval-mlogloss:1.31098\n[66]\ttrain-mlogloss:1.27027\teval-mlogloss:1.30995\n[67]\ttrain-mlogloss:1.26880\teval-mlogloss:1.30897\n[68]
\ttrain-mlogloss:1.26744\teval-mlogloss:1.30810\n[69]\ttrain-mlogloss:1.26581\teval-mlogloss:1.30714\n[70]\ttrain-mlogloss:1.26427\teval-mlogloss:1.30604\n[71]\ttrain-mlogloss:1.26297\teval-mlogloss:1.30515\n[72]\ttrain-mlogloss:1.26169\teval-mlogloss:1.30440\n[73]\ttrain-mlogloss:1.26013\teval-mlogloss:1.30332\n[74]\ttrain-mlogloss:1.25889\teval-mlogloss:1.30264\n[75]\ttrain-mlogloss:1.25760\teval-mlogloss:1.30177\n[76]\ttrain-mlogloss:1.25639\teval-mlogloss:1.30107\n[77]\ttrain-mlogloss:1.25520\teval-mlogloss:1.30035\n[78]\ttrain-mlogloss:1.25363\teval-mlogloss:1.29936\n[79]\ttrain-mlogloss:1.25216\teval-mlogloss:1.29843\n[80]\ttrain-mlogloss:1.25058\teval-mlogloss:1.29744\n[81]\ttrain-mlogloss:1.24912\teval-mlogloss:1.29654\n[82]\ttrain-mlogloss:1.24767\teval-mlogloss:1.29553\n[83]\ttrain-mlogloss:1.24632\teval-mlogloss:1.29471\n[84]\ttrain-mlogloss:1.24519\teval-mlogloss:1.29405\n[85]\ttrain-mlogloss:1.24379\teval-mlogloss:1.29318\n[86]\ttrain-mlogloss:1.24246\teval-mlogloss:1.29243\n[87]\ttrain-mlogloss:1.24110\teval-mlogloss:1.29160\n[88]\ttrain-mlogloss:1.23964\teval-mlogloss:1.29063\n[89]\ttrain-mlogloss:1.23854\teval-mlogloss:1.29004\n[90]\ttrain-mlogloss:1.23731\teval-mlogloss:1.28923\n[91]\ttrain-mlogloss:1.23585\teval-mlogloss:1.28833\n[92]\ttrain-mlogloss:1.23471\teval-mlogloss:1.28767\n[93]\ttrain-mlogloss:1.23330\teval-mlogloss:1.28679\n[94]\ttrain-mlogloss:1.23201\teval-mlogloss:1.28604\n[95]\ttrain-mlogloss:1.23078\teval-mlogloss:1.28527\n[96]\ttrain-mlogloss:1.22957\teval-mlogloss:1.28455\n[97]\ttrain-mlogloss:1.22829\teval-mlogloss:1.28374\n[98]\ttrain-mlogloss:1.22732\teval-mlogloss:1.28320\n[99]\ttrain-mlogloss:1.22603\teval-mlogloss:1.28236\n[100]\ttrain-mlogloss:1.22466\teval-mlogloss:1.28151\n[101]\ttrain-mlogloss:1.22331\teval-mlogloss:1.28070\n[102]\ttrain-mlogloss:1.22186\teval-mlogloss:1.27978\n[103]\ttrain-mlogloss:1.22059\teval-mlogloss:1.27900\n[104]\ttrain-mlogloss:1.21939\teval-mlogloss:1.27824\n[105]\ttrain-mlogloss:1.21816\teval-mlogloss:1.27757\n[106]\ttrain-mlogloss:1.21684\teval-mlogloss:1.27668\n[107]\ttrain-mlogloss:1.21545\teval-mlogloss:1.27587\n[108]\ttrain-mlogloss:1.21440\teval-mlogloss:1.27526\n[109]\ttrain-mlogloss:1.21319\teval-mlogloss:1.27457\n[110]\ttrain-mlogloss:1.21212\teval-mlogloss:1.27390\n[111]\ttrain-mlogloss:1.21086\teval-mlogloss:1.27316\n[112]\ttrain-mlogloss:1.20958\teval-mlogloss:1.27235\n[113]\ttrain-mlogloss:1.20826\teval-mlogloss:1.27164\n[114]\ttrain-mlogloss:1.20696\teval-mlogloss:1.27085\n[115]\ttrain-mlogloss:1.20571\teval-mlogloss:1.27002\n[116]\ttrain-mlogloss:1.20441\teval-mlogloss:1.26923\n[117]\ttrain-mlogloss:1.20316\teval-mlogloss:1.26848\n[118]\ttrain-mlogloss:1.20193\teval-mlogloss:1.26771\n[119]\ttrain-mlogloss:1.20063\teval-mlogloss:1.26687\n[120]\ttrain-mlogloss:1.19929\teval-mlogloss:1.26613\n[121]\ttrain-mlogloss:1.19836\teval-mlogloss:1.26556\n[122]\ttrain-mlogloss:1.19726\teval-mlogloss:1.26497\n[123]\ttrain-mlogloss:1.19600\teval-mlogloss:1.26419\n[124]\ttrain-mlogloss:1.19485\teval-mlogloss:1.26341\n[125]\ttrain-mlogloss:1.19365\teval-mlogloss:1.26272\n[126]\ttrain-mlogloss:1.19237\teval-mlogloss:1.26185\n[127]\ttrain-mlogloss:1.19111\teval-mlogloss:1.26106\n[128]\ttrain-mlogloss:1.19005\teval-mlogloss:1.26035\n[129]\ttrain-mlogloss:1.18881\teval-mlogloss:1.25961\n[130]\ttrain-mlogloss:1.18778\teval-mlogloss:1.25900\n[131]\ttrain-mlogloss:1.18670\teval-mlogloss:1.25827\n[132]\ttrain-mlogloss:1.18547\teval-mlogloss:1.25753\n[133]\ttrain-mlogloss:1.18435\teval-mlogloss:1.25694\n[134]\ttrain-mlogloss:1.183
17\teval-mlogloss:1.25627\n[135]\ttrain-mlogloss:1.18211\teval-mlogloss:1.25568\n[136]\ttrain-mlogloss:1.18103\teval-mlogloss:1.25509\n[137]\ttrain-mlogloss:1.17983\teval-mlogloss:1.25438\n[138]\ttrain-mlogloss:1.17873\teval-mlogloss:1.25370\n[139]\ttrain-mlogloss:1.17752\teval-mlogloss:1.25301\n[140]\ttrain-mlogloss:1.17630\teval-mlogloss:1.25223\n[141]\ttrain-mlogloss:1.17516\teval-mlogloss:1.25153\n[142]\ttrain-mlogloss:1.17404\teval-mlogloss:1.25087\n[143]\ttrain-mlogloss:1.17303\teval-mlogloss:1.25027\n[144]\ttrain-mlogloss:1.17189\teval-mlogloss:1.24961\n[145]\ttrain-mlogloss:1.17070\teval-mlogloss:1.24895\n[146]\ttrain-mlogloss:1.16969\teval-mlogloss:1.24840\n[147]\ttrain-mlogloss:1.16891\teval-mlogloss:1.24799\n[148]\ttrain-mlogloss:1.16782\teval-mlogloss:1.24737\n[149]\ttrain-mlogloss:1.16668\teval-mlogloss:1.24668\n[150]\ttrain-mlogloss:1.16558\teval-mlogloss:1.24604\n[151]\ttrain-mlogloss:1.16450\teval-mlogloss:1.24536\n[152]\ttrain-mlogloss:1.16352\teval-mlogloss:1.24490\n[153]\ttrain-mlogloss:1.16239\teval-mlogloss:1.24422\n[154]\ttrain-mlogloss:1.16138\teval-mlogloss:1.24361\n[155]\ttrain-mlogloss:1.16036\teval-mlogloss:1.24308\n[156]\ttrain-mlogloss:1.15929\teval-mlogloss:1.24242\n[157]\ttrain-mlogloss:1.15838\teval-mlogloss:1.24198\n[158]\ttrain-mlogloss:1.15730\teval-mlogloss:1.24127\n[159]\ttrain-mlogloss:1.15627\teval-mlogloss:1.24067\n" ], [ "class_names = ['P14', 'P21', 'P28', 'P35']\nclass_results = classification_report(y_test, pred2, digits=4, target_names = ['P14', 'P21', 'P28', 'P35'])\nprint(str(class_results))", " precision recall f1-score support\n\n P14 0.6306 0.7071 0.6667 898\n P21 0.4256 0.5091 0.4636 876\n P28 0.3953 0.2662 0.3181 943\n P35 0.5202 0.5352 0.5276 938\n\n accuracy 0.5018 3655\n macro avg 0.4929 0.5044 0.4940 3655\nweighted avg 0.4924 0.5018 0.4924 3655\n\n" ], [ "# param2 = {'max_depth': 2,\n# 'eta': 0.005,\n\n# 'min_child_weight': 0,\n# 'verbosity': 0,\n# 'objective': 'multi:softprob',\n# 'num_class': 4,\n# 'silent': 'True',\n# 'gamma': 5,\n# 'subsample': 0.25,\n# 'colsample_bytree': 0.3,\n# 'colsample_bynode':.5,\n# 'reg_alpha': 0}", "_____no_output_____" ], [ "from sklearn.metrics import accuracy_score\n\nmodel_final = XGBClassifier(**param2)\nnew_feat = np.array(features)[np.array(model.feature_importances_ > t)]\neval_set = [(X_train[new_feat], y_train), (X_test[new_feat], y_test)]\nmodel_final.fit(X_train[new_feat], y_train, verbose=False, eval_set=eval_set, eval_metric=[\"merror\", 'mlogloss'])\ny_pred_f = model_final.predict(X_test[new_feat])\naccuracy = accuracy_score(y_test, y_pred_f)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\nresults = model_final.evals_result()\nepochs = len(results['validation_0']['merror'])\nx_axis = range(0, epochs)\n\nfig, ax = pl.subplots(figsize=(12,12))\nax.plot(x_axis, results['validation_0']['mlogloss'], label='Train')\nax.plot(x_axis, results['validation_1']['mlogloss'], label='Test')\nax.legend()\n\npl.ylabel('Log Loss')\npl.title('XGBoost Log Loss')\npl.show()", "_____no_output_____" ], [ "sorted(dict_importance, key=dict_importance.get, reverse=True)[:5]", "_____no_output_____" ], [ " new_feat = np.array(features)[np.array(model.feature_importances_ > best_thresh)]\nmodel2.fit(X_train[new_feat], y_train, verbose=False, eval_set=[(X_val[new_feat],y_val)], eval_metric='mlogloss')\npred3 = model2.predict(X_test[new_feat])\nacc = metrics.accuracy_score(y_test, pred3)\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, pred3))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec59fc8ba915fb2f70832fde7152970734af2996
9,421
ipynb
Jupyter Notebook
basic/.ipynb_checkpoints/record_io-checkpoint.ipynb
xiandong79/mxnet-tutorial
a775a9da2ef23c22482f1b345fca54dddd4923bb
[ "Apache-2.0" ]
2
2017-04-29T16:37:08.000Z
2017-05-24T13:18:14.000Z
basic/.ipynb_checkpoints/record_io-checkpoint.ipynb
xiandong79/mxnet-tutorial
a775a9da2ef23c22482f1b345fca54dddd4923bb
[ "Apache-2.0" ]
null
null
null
basic/.ipynb_checkpoints/record_io-checkpoint.ipynb
xiandong79/mxnet-tutorial
a775a9da2ef23c22482f1b345fca54dddd4923bb
[ "Apache-2.0" ]
3
2017-06-10T14:58:35.000Z
2020-03-25T03:47:02.000Z
28.548485
1,520
0.550154
[ [ [ "# Python Record IO\n\nIn [image_io](image_io.ipynb) we already learned how to pack images into the standard RecordIO format and load them with ImageRecordIter. This tutorial walks through the Python interface for reading and writing RecordIO files. It can be useful when you need more control over the details of the data pipeline, for example when you need to augment image and label together for detection and segmentation, or when you need a custom data iterator for triplet sampling and negative sampling.\n\nSetup environment first:", "_____no_output_____" ] ],
[ [ "%matplotlib inline\nfrom __future__ import print_function\nimport mxnet as mx\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ],
[ [ "The relevant code is under `mx.recordio`. There are two classes: `MXRecordIO`, which supports sequential read and write, and `MXIndexedRecordIO`, which supports random read and sequential write.\n\n## MXRecordIO\nFirst let's take a look at `MXRecordIO`. We open a file `tmp.rec` and write 5 strings to it:", "_____no_output_____" ] ],
[ [ "record = mx.recordio.MXRecordIO('tmp.rec', 'w')\nfor i in range(5):\n    record.write('record_%d'%i)\nrecord.close()", "_____no_output_____" ] ],
[ [ "Then we can read it back by opening the same file with 'r':", "_____no_output_____" ] ],
[ [ "record = mx.recordio.MXRecordIO('tmp.rec', 'r')\nwhile True:\n    item = record.read()\n    if not item:\n        break\n    print(item)\nrecord.close()", "record_0\nrecord_1\nrecord_2\nrecord_3\nrecord_4\n" ] ],
[ [ "## MXIndexedRecordIO\nSometimes you need random access for more complex tasks. `MXIndexedRecordIO` is designed for this. Here we create an indexed record `tmp.rec` and a corresponding index file `tmp.idx`:", "_____no_output_____" ] ],
[ [ "record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w')\nfor i in range(5):\n    record.write_idx(i, 'record_%d'%i)\nrecord.close()", "_____no_output_____" ] ],
[ [ "We can then access records with keys:", "_____no_output_____" ] ],
[ [ "record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'r')\nrecord.read_idx(3)", "_____no_output_____" ] ],
[ [ "You can list all keys with:", "_____no_output_____" ] ],
[ [ "record.keys", "_____no_output_____" ] ],
[ [ "## Packing and Unpacking Data\n\nEach record in a .rec file can contain arbitrary binary data, but machine learning data typically has a label/data structure.
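\n\nConceptually, each packed record couples a small header with a binary payload. As a rough sketch of the two pieces involved (the header fields follow the `mx.recordio.IRHeader` namedtuple used in the examples below; the payload value here is just a placeholder):\n\n```python\n# flag/label/id/id2 form the header; the payload can be any binary data,\n# e.g. an encoded image or serialized features.\nheader = mx.recordio.IRHeader(flag=0, label=1.0, id=0, id2=0)\npayload = 'arbitrary binary data'\n```\n\n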
`mx.recordio` also contains a few utility functions for packing such data, namely: `pack`, `unpack`, `pack_img`, and `unpack_img`.\n\n### Binary Data\n`pack` and `unpack` are used for storing float (or 1d array of float) label and binary data:", "_____no_output_____" ] ], [ [ "# pack\ndata = 'data'\nlabel1 = 1.0\nheader1 = mx.recordio.IRHeader(flag=0, label=label1, id=1, id2=0)\ns1 = mx.recordio.pack(header1, data)\nprint('float label:', repr(s1))\nlabel2 = [1.0, 2.0, 3.0]\nheader2 = mx.recordio.IRHeader(flag=0, label=label2, id=2, id2=0)\ns2 = mx.recordio.pack(header2, data)\nprint('array label:', repr(s2))", "float label '\\x00\\x00\\x00\\x00\\x00\\x00\\x80?\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00data'\narray label '\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x80?\\x00\\x00\\x00@\\x00\\x00@@data'\n" ], [ "# unpack\nprint(*mx.recordio.unpack(s1))\nprint(*mx.recordio.unpack(s2))", "(HEADER(flag=0, label=1.0, id=1, id2=0), 'data')\n(HEADER(flag=3, label=array([ 1., 2., 3.], dtype=float32), id=2, id2=0), 'data')\n" ] ], [ [ "### Image Data\n\n`pack_img` and `unpack_img` are used for packing image data. Records packed by `pack_img` can be loaded by `mx.io.ImageRecordIter`.", "_____no_output_____" ] ], [ [ "# pack_img\ndata = np.ones((3,3,1), dtype=np.uint8)\nlabel = 1.0\nheader = mx.recordio.IRHeader(flag=0, label=label, id=0, id2=0)\ns = mx.recordio.pack_img(header, data, quality=100, img_fmt='.jpg')\nprint(repr(s))", "'\\x00\\x00\\x00\\x00\\x00\\x00\\x80?\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\xff\\xdb\\x00C\\x00\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\x01\\xff\\xc0\\x00\\x0b\\x08\\x00\\x03\\x00\\x03\\x01\\x01\\x11\\x00\\xff\\xc4\\x00\\x1f\\x00\\x00\\x01\\x05\\x01\\x01\\x01\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\xff\\xc4\\x00\\xb5\\x10\\x00\\x02\\x01\\x03\\x03\\x02\\x04\\x03\\x05\\x05\\x04\\x04\\x00\\x00\\x01}\\x01\\x02\\x03\\x00\\x04\\x11\\x05\\x12!1A\\x06\\x13Qa\\x07\"q\\x142\\x81\\x91\\xa1\\x08#B\\xb1\\xc1\\x15R\\xd1\\xf0$3br\\x82\\t\\n\\x16\\x17\\x18\\x19\\x1a%&\\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9\\xaa\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\\xb8\\xb9\\xba\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\\xc8\\xc9\\xca\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xff\\xda\\x00\\x08\\x01\\x01\\x00\\x00?\\x00\\xfe\\x01\\xeb\\xff\\xd9'\n" ], [ "# unpack_img\nprint(*mx.recordio.unpack_img(s))", "HEADER(flag=0, label=1.0, id=0, id2=0) [[1 1 1]\n [1 1 1]\n [1 1 1]]\n" ] ], [ [ "## Next Step\n- [Advanced Image IO](advanced_img_io.ipynb) Advanced image IO for detection, segmentation, etc...", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec5a15c1ed180f3e567529558997ba227dd70484
18,418
ipynb
Jupyter Notebook
week7/NLP_C2_W3_lecture_nb_02.ipynb
TipsyPixie/natural-language-processing
bd4f139649d63a457aace103270ed30f92135d31
[ "Beerware" ]
null
null
null
week7/NLP_C2_W3_lecture_nb_02.ipynb
TipsyPixie/natural-language-processing
bd4f139649d63a457aace103270ed30f92135d31
[ "Beerware" ]
null
null
null
week7/NLP_C2_W3_lecture_nb_02.ipynb
TipsyPixie/natural-language-processing
bd4f139649d63a457aace103270ed30f92135d31
[ "Beerware" ]
null
null
null
34.297952
405
0.504832
[ [ [ "<a name=\"building-language-model\"></a>\n# Building the language model\n\n<a name=\"count-matrix\"></a>\n### Count matrix\n\nTo calculate the n-gram probability, you will need to count frequencies of n-grams and n-gram prefixes in the training dataset. In some of the code assignment exercises, you will store the n-gram frequencies in a dictionary. \n\nIn other parts of the assignment, you will build a count matrix that keeps counts of (n-1)-gram prefix followed by all possible last words in the vocabulary.\n\nThe following code shows how to check, retrieve and update counts of n-grams in the word count dictionary.", "_____no_output_____" ] ], [ [ "# manipulate n_gram count dictionary\n\nn_gram_counts = {(\"i\", \"am\", \"happy\"): 2, (\"am\", \"happy\", \"because\"): 1}\n\n# get count for an n-gram tuple\nprint(f\"count of n-gram {('i', 'am', 'happy')}: {n_gram_counts[('i', 'am', 'happy')]}\")\n\n# check if n-gram is present in the dictionary\nif (\"i\", \"am\", \"learning\") in n_gram_counts:\n print(f\"n-gram {('i', 'am', 'learning')} found\")\nelse:\n print(f\"n-gram {('i', 'am', 'learning')} missing\")\n\n# update the count in the word count dictionary\nn_gram_counts[(\"i\", \"am\", \"learning\")] = 1\nif (\"i\", \"am\", \"learning\") in n_gram_counts:\n print(f\"n-gram {('i', 'am', 'learning')} found\")\nelse:\n print(f\"n-gram {('i', 'am', 'learning')} missing\")", "count of n-gram ('i', 'am', 'happy'): 2\nn-gram ('i', 'am', 'learning') missing\nn-gram ('i', 'am', 'learning') found\n" ] ], [ [ "The next code snippet shows how to merge two tuples in Python. That will be handy when creating the n-gram from the prefix and the last word.", "_____no_output_____" ] ], [ [ "# concatenate tuple for prefix and tuple with the last word to create the n_gram\nprefix = (\"i\", \"am\", \"happy\")\nword = \"because\"\n\n# note here the syntax for creating a tuple for a single word\nn_gram = prefix + (word,)\nprint(n_gram)", "('i', 'am', 'happy', 'because')\n" ] ], [ [ "In the lecture, you've seen that the count matrix could be made in a single pass through the corpus. Here is one approach to do that.", "_____no_output_____" ] ], [ [ "from collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\n\n\ndef single_pass_trigram_count_matrix(corpus):\n \"\"\"\n Creates the trigram count matrix from the input corpus in a single pass through the corpus.\n\n Args:\n corpus: Pre-processed and tokenized corpus.\n\n Returns:\n bigrams: list of all bigram prefixes, row index\n vocabulary: list of all found words, the column index\n count_matrix: pandas dataframe with bigram prefixes as rows,\n vocabulary words as columns\n and the counts of the bigram/word combinations (i.e. 
trigrams) as values\n    \"\"\"\n    bigrams = []\n    vocabulary = []\n    count_matrix_dict = defaultdict(dict)\n\n    # go through the corpus once with a sliding window\n    for i in range(len(corpus) - 3 + 1):\n        # the sliding window starts at position i and contains 3 words\n        trigram = tuple(corpus[i : i + 3])\n\n        bigram = trigram[0:-1]\n        if bigram not in bigrams:\n            bigrams.append(bigram)\n\n        last_word = trigram[-1]\n        if last_word not in vocabulary:\n            vocabulary.append(last_word)\n\n        if (bigram, last_word) not in count_matrix_dict:\n            count_matrix_dict[bigram, last_word] = 0\n\n        count_matrix_dict[bigram, last_word] += 1\n\n    # convert the count_matrix to np.array to fill in the blanks\n    count_matrix = np.zeros((len(bigrams), len(vocabulary)))\n    for trigram_key, trigram_count in count_matrix_dict.items():\n        count_matrix[bigrams.index(trigram_key[0]), vocabulary.index(trigram_key[1])] = trigram_count\n\n    # np.array to pandas dataframe conversion\n    count_matrix = pd.DataFrame(count_matrix, index=bigrams, columns=vocabulary)\n    return bigrams, vocabulary, count_matrix\n\n\ncorpus = [\"i\", \"am\", \"happy\", \"because\", \"i\", \"am\", \"learning\", \".\"]\n\nbigrams, vocabulary, count_matrix = single_pass_trigram_count_matrix(corpus)\n\ncount_matrix", "_____no_output_____" ] ], [ [ "<a name=\"probability-matrix\"></a>\n### Probability matrix\nThe next step is to build a probability matrix from the count matrix. \n\nYou can use a pandas dataframe object and its methods [sum](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sum.html?highlight=sum#pandas.DataFrame.sum) and [div](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.div.html) to normalize the cell counts with the sum of the respective rows. ", "_____no_output_____" ] ], [ [ "# create the probability matrix from the count matrix\nrow_sums = count_matrix.sum(axis=1)\n# divide each row by its sum\nprob_matrix = count_matrix.div(row_sums, axis=0)\n\nprint(row_sums)\nprint(prob_matrix)", "(i, am)             2.0\n(am, happy)         1.0\n(happy, because)    1.0\n(because, i)        1.0\n(am, learning)      1.0\ndtype: float64\n                  happy  because    i   am  learning    .\n(i, am)             0.5      0.0  0.0  0.0       0.5  0.0\n(am, happy)         0.0      1.0  0.0  0.0       0.0  0.0\n(happy, because)    0.0      0.0  1.0  0.0       0.0  0.0\n(because, i)        0.0      0.0  0.0  1.0       0.0  0.0\n(am, learning)      0.0      0.0  0.0  0.0       0.0  1.0\n" ] ], [ [ "The probability matrix now helps you to find the probability of an input trigram. ", "_____no_output_____" ] ], [ [ "# find the probability of a trigram in the probability matrix\ntrigram = (\"i\", \"am\", \"happy\")\n\n# find the prefix bigram\nbigram = trigram[:-1]\nprint(f\"bigram: {bigram}\")\n\n# find the last word of the trigram\nword = trigram[-1]\nprint(f\"word: {word}\")\n\n# we are using the pandas dataframes here, column with vocabulary word comes first, row with the prefix bigram second\ntrigram_probability = prob_matrix[word][bigram]\nprint(f\"trigram_probability: {trigram_probability}\")", "bigram: ('i', 'am')\nword: happy\ntrigram_probability: 0.5\n" ] ], [ [ "In the code assignment, you will be searching for the most probable words starting with a prefix. You can use the method [str.startswith](https://docs.python.org/3/library/stdtypes.html#str.startswith) to test if a word starts with a prefix. 
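Putting the two ideas together, here is a minimal sketch (a hypothetical helper, not part of the assignment) that picks the most probable vocabulary word completing a given bigram prefix, optionally restricted to words starting with a typed prefix:\n\n```python\ndef most_probable_word(prob_matrix, bigram, starts_with=\"\"):\n    best_word, best_prob = None, 0.0\n    for word in prob_matrix.columns:\n        if word.startswith(starts_with) and prob_matrix[word][bigram] > best_prob:\n            best_word, best_prob = word, prob_matrix[word][bigram]\n    return best_word, best_prob\n\nprint(most_probable_word(prob_matrix, (\"i\", \"am\")))  # -> ('happy', 0.5); 'learning' ties at 0.5\n```\n\n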
\n\nHere is a code snippet showing how to use this method.", "_____no_output_____" ] ], [ [ "# lists all words in vocabulary starting with a given prefix\nvocabulary = [\"i\", \"am\", \"happy\", \"because\", \"learning\", \".\", \"have\", \"you\", \"seen\", \"it\", \"?\"]\nstarts_with = \"ha\"\n\nprint(f\"words in vocabulary starting with prefix: {starts_with}\\n\")\nfor word in vocabulary:\n if word.startswith(starts_with):\n print(word)", "words in vocabulary starting with prefix: ha\n\nhappy\nhave\n" ] ], [ [ "<a name=\"language-model-evaluation\"></a>\n## Language model evaluation\n<a name=\"train-validation-test-split\"></a>\n### Train/validation/test split\nIn the videos, you saw that to evaluate language models, you need to keep some of the corpus data for validation and testing.\n\nThe choice of the test and validation data should correspond as much as possible to the distribution of the data coming from the actual application. If nothing but the input corpus is known, then random sampling from the corpus is used to define the test and validation subset. \n\nHere is a code similar to what you'll see in the code assignment. The following function allows you to randomly sample the input data and return train/validation/test subsets in a split given by the method parameters.", "_____no_output_____" ] ], [ [ "# we only need train and validation %, test is the remainder\nimport random\n\n\ndef train_validation_test_split(data, train_percent, validation_percent):\n \"\"\"\n Splits the input data to train/validation/test according to the percentage provided\n\n Args:\n data: Pre-processed and tokenized corpus, i.e. list of sentences.\n train_percent: integer 0-100, defines the portion of input corpus allocated for training\n validation_percent: integer 0-100, defines the portion of input corpus allocated for validation\n\n Note: train_percent + validation_percent need to be <=100\n the reminder to 100 is allocated for the test set\n\n Returns:\n train_data: list of sentences, the training part of the corpus\n validation_data: list of sentences, the validation part of the corpus\n test_data: list of sentences, the test part of the corpus\n \"\"\"\n # fixed seed here for reproducibility\n random.seed(87)\n\n # reshuffle all input sentences\n random.shuffle(data)\n\n train_size = int(len(data) * train_percent / 100)\n train_data = data[0:train_size]\n\n validation_size = int(len(data) * validation_percent / 100)\n validation_data = data[train_size : train_size + validation_size]\n\n test_data = data[train_size + validation_size :]\n\n return train_data, validation_data, test_data\n\n\ndata = [x for x in range(0, 100)]\n\ntrain_data, validation_data, test_data = train_validation_test_split(data, 80, 10)\nprint(\n \"split 80/10/10:\\n\",\n f\"train data:{train_data}\\n\",\n f\"validation data:{validation_data}\\n\",\n f\"test data:{test_data}\\n\",\n)\n\ntrain_data, validation_data, test_data = train_validation_test_split(data, 98, 1)\nprint(\n \"split 98/1/1:\\n\", f\"train data:{train_data}\\n\", f\"validation data:{validation_data}\\n\", f\"test data:{test_data}\\n\"\n)", "split 80/10/10:\n train data:[28, 76, 5, 0, 62, 29, 54, 95, 88, 58, 4, 22, 92, 14, 50, 77, 47, 33, 75, 68, 56, 74, 43, 80, 83, 84, 73, 93, 66, 87, 9, 91, 64, 79, 20, 51, 17, 27, 12, 31, 67, 81, 7, 34, 45, 72, 38, 30, 16, 60, 40, 86, 48, 21, 70, 59, 6, 19, 2, 99, 37, 36, 52, 61, 97, 44, 26, 57, 89, 55, 53, 85, 3, 39, 10, 71, 23, 32, 25, 8]\n validation data:[78, 65, 63, 11, 49, 98, 1, 46, 15, 41]\n test data:[90, 96, 82, 
42, 35, 13, 69, 24, 94, 18]\n\nsplit 98/1/1:\n train data:[66, 23, 29, 28, 52, 87, 70, 13, 15, 2, 62, 43, 82, 50, 40, 32, 30, 79, 71, 89, 6, 10, 34, 78, 11, 49, 39, 42, 26, 46, 58, 96, 97, 8, 56, 86, 33, 93, 92, 91, 57, 65, 95, 20, 72, 3, 12, 9, 47, 37, 67, 1, 16, 74, 53, 99, 54, 68, 5, 18, 27, 17, 48, 36, 24, 45, 73, 19, 41, 59, 21, 98, 0, 31, 4, 85, 80, 64, 84, 88, 25, 44, 61, 22, 60, 94, 76, 38, 77, 81, 90, 69, 63, 7, 51, 14, 55, 83]\n validation data:[35]\n test data:[75]\n\n" ] ], [ [ "<a name=\"perplexity\"></a>\n### Perplexity\n\nIn order to implement the perplexity formula, you'll need to know how to implement m-th order root of a variable.\n\n\\begin{equation*}\nPP(W)=\\sqrt[M]{\\prod_{i=1}^{m}{\\frac{1}{P(w_i|w_{i-1})}}}\n\\end{equation*}\n\nRemember from calculus:\n\n\\begin{equation*}\n\\sqrt[M]{\\frac{1}{x}} = x^{-\\frac{1}{M}}\n\\end{equation*}\n\nHere is a code that will help you with the formula.", "_____no_output_____" ] ], [ [ "# to calculate the exponent, use the following syntax\np = 10 ** (-250)\nM = 100\nperplexity = p ** (-1 / M)\nprint(perplexity)", "316.22776601683796\n" ] ], [ [ "That's all for the lab for \"N-gram language model\" lesson of week 3.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5a251f6d716a16871c03fe735c98fad1297f2e
421,120
ipynb
Jupyter Notebook
assets/notebooks/advanced-muller-plots.ipynb
phylo-baltic/baltic-gallery
58a76935c814b286eb1c9fb12c09d7dc5b6276d2
[ "MIT" ]
2
2021-01-10T11:10:28.000Z
2021-06-24T01:17:27.000Z
assets/notebooks/advanced-muller-plots.ipynb
phylo-baltic/baltic-gallery
58a76935c814b286eb1c9fb12c09d7dc5b6276d2
[ "MIT" ]
1
2021-09-01T20:26:30.000Z
2021-09-01T20:26:30.000Z
assets/notebooks/advanced-muller-plots.ipynb
phylo-baltic/baltic-gallery
58a76935c814b286eb1c9fb12c09d7dc5b6276d2
[ "MIT" ]
1
2020-11-01T10:27:34.000Z
2020-11-01T10:27:34.000Z
744.028269
174,332
0.945977
[ [ [ "# Muller plot\n\n-------\n\nMuller plots are an intuitive visualisation of a tree that utilises the unused y-axis of phylogenetic trees to display the frequency of a lineage over time. The relationships between lineages are visualised as nesting of frequencies. Depending on how frequencies are computed Muller plots can make things apparent that weren't before when data were visualised as trees.\n\nIn this example notebook we'll do two examples. One is an influenza B virus dataset where frequencies are computed using augur (the pipeline nextstrain uses to process data) which gives us normalised and smoothed trajectories of descendant sequence counts for nodes. These get computed for every node so in order to reduce clutter in the Muller plot we'll reduce the tree down to only a couple of specified nodes using a custom `skeleton_tree` function.\n\nThe other example is a small handmade tree with frequencies that are unique to a branch (i.e. frequency values correspond to the branch itself but do not include its descendants), where we'll sum the frequencies across offspring. We'll then plot the Muller plot with both unnormalised and normalised frequencies.", "_____no_output_____" ] ], [ [ "import matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\nfrom Bio import Phylo\nimport numpy as np\n\nimport glob,os,copy,requests\nfrom io import StringIO\n\nimport baltic as bt\nfrom augur import frequencies,frequency_estimators\n\n# address='https://raw.githubusercontent.com/evogytis/fluB/master/data/mcc%20trees/InfB_NPt_ALLs1.mcc.tre' ## address of example tree\naddress='https://raw.githubusercontent.com/evogytis/fluB/master/data/mcc%20trees/InfB_PAt_ALLs1.mcc.tre' ## address of example tree\n\nfetch_tree = requests.get(address) ## fetch tree\ntreeFile=StringIO(fetch_tree.text) ## stream from repo copy\n\nll=bt.loadNexus(treeFile,tip_regex='_([0-9\\-]+)$') ## treeFile here can alternatively be a path to a local file\n\nll.treeStats()\nprint('Done!')", "\nTree height: 29.500153\nTree length: 458.766950\nstrictly bifurcating tree\nannotations present\n\nNumbers of objects in tree: 903 (451 nodes and 452 leaves)\n\nDone!\n" ] ], [ [ "### Muller plot code\n\nThis cell defines the function used to plot Muller plots. 
Its operation is quite simple: it traverses the tree, clips each frequency trajectory to the interval between when it first appears and when it is last seen, computes the padding available between offspring frequencies, and plots them, with normalisation if necessary.", "_____no_output_____" ] ], [ [ "def clip_freq(ys):\n    \"\"\"\n    Clip a list of frequencies to the first non-zero and the last non-zero values.\n    \"\"\"\n    if sum(ys)==0.0: return []\n    xs=range(len(ys))\n    try:\n        start=next((i for i, y in enumerate(ys) if y), None)\n        end=len(xs)-next((i for i, y in enumerate(ys[::-1]) if y), None)\n    except:\n        print(xs,ys)\n    \n    if start==0: start=1\n    reindex=xs[start-1:end+1]\n    return reindex\n\nfrom collections import Counter\n\ndef draw(ax,node,size,timeline,bottom=None,facecolour='lightgrey',edgecolour='k',level=None,normalise=None):\n    \"\"\"\n    Plot a Muller plot on ax starting from a node and accessing its frequency using the size function along a timeline grid.\n    \"\"\"\n    \n    if level==None: level=1\n    \n    ys=size(node) ## returns values of a node over time\n    if normalise: ## if normalise function available - normalise frequencies\n        ys=normalise(ys)\n    \n    bottom=[0-y/2 for y in ys] if bottom==None else bottom ## if no bottom provided - compute it \n    \n    xs=range(len(ys)) ## indices of frequencies\n    xx=clip_freq(ys) ## reindex frequencies to clip zeroes at the beginning and end\n    \n    fc=facecolour(node) if callable(facecolour) else facecolour ## get colours\n    ec=edgecolour(node) if callable(edgecolour) else edgecolour\n    \n    clipped_timeline=[timeline[t] for t in xx]\n    clipped_bottom=[bottom[t] for t in xx]\n    clipped_values=[bottom[t]+ys[t] for t in xx]\n    \n    ax.fill_between(clipped_timeline,clipped_bottom,clipped_values,facecolor=fc,\n                    edgecolor=ec,alpha=1.0,zorder=level) ## plot frequency\n    \n    if node.is_node(): ## if node has children\n        children=[ch for ch in node.children if (ch.is_node() or size(ch))] ## children of node\n        clipped_children=[list(clip_freq(size(ch))) for ch in children] ## clip frequencies of child\n        clipped_freqs=Counter(sum(clipped_children,[])) ## count how many timeline indices are left\n        \n        N_children=[clipped_freqs[t]+1 if clipped_freqs[t]>0 else 1 for t in xs] ## count+1 of indices = number of children at any point\n        \n        children_sum=[sum([size(ch)[t] for ch in children]) for t in xs] ## total frequency of children\n        \n        if normalise:\n            children_sum=normalise(children_sum) ## if normalise available - normalise child frequencies\n        \n        available_space=[(ys[t]-children_sum[t]) for t in xs] ## node frequency - all children frequencies = padding space left\n        \n        unique_space=[available_space[t]/N_children[t] for t in xs] ## how much padding to add\n        \n        temp_bottom=bottom ## start with bottom\n        \n        for c,child in enumerate(children): ## iterate over children\n            child_freq=clip_freq(size(child)) ## clip child frequencies\n            \n            padded_bottom=[temp_bottom[t]+unique_space[t] if t in child_freq else temp_bottom[t] for t in xs] ## pad bottom with space available, if a child is present\n            \n            temp_bottom=draw(ax,child,size,timeline,padded_bottom,facecolour=facecolour,\n                             edgecolour=edgecolour,level=level+1,normalise=normalise) ## draw frequency for each child, padding as you go\n    \n    return [bottom[t]+ys[t] for t in xs] ## new bottom is bottom+this node's values", "_____no_output_____" ] ], [ [ "### augur clade frequencies\n\nThis cell will convert the baltic tree into a BioPython tree on which augur clade frequencies can be computed.", "_____no_output_____" ] ], [ [ "tips=[k.name for k in ll.Objects if 
k.branchType=='leaf'] ## get tip names\ntipDates=[k.split('_')[-1] for k in tips] ## isolate collection date from tip name\ndates=[bt.decimalDate(k,variable=True) for k in tipDates] ## convert dates into decimal dates\n\ntr = Phylo.read(StringIO(ll.toString()), \"newick\") ## create a Bio.Phylo tree from the baltic tree\n\nfor k in ll.Objects:\n if k.length<0.0:\n k.length=0.0 ## convert negative branch lengths to 0.0\n\nfor n,nd in enumerate(tr.get_terminals()):\n if hasattr(nd,'attr')==False:\n setattr(nd,'attr',{})\n nd.attr['num_date'] = bt.decimalDate(nd.name.split('_')[-1],variable=True) ## assign collection date to Bio.Phylo tips\n\npivots = np.array(frequency_estimators.make_pivots(200,np.array(dates))) ## create time grid for clade frequencies\nbfq=frequencies.tree_frequencies(tr,min_clades=50,node_filter=None,pivots=pivots,verbose=False) ## create a frequencies object\n\nbfq.estimate_clade_frequencies() ## estimate smoothed trajectories\n\nfor n,nd in enumerate(ll.traverse_tree(include_condition=lambda k:True)): ## frequencies are estimated in traversal order\n if nd.is_node():\n nd.traits['cf']=bfq.frequencies[n] ## assign frequency vector to node", "_____no_output_____" ] ], [ [ "### Plotting augur frequencies\n\nReal trees are large and clade frequencies are computed for either all or a small number of branches with augur. Traversing trees where some branches don't have frequency vectors would cause plotting issues, so in order to make everything look neat we'll reduce the tree to just the nodes whose frequencies we care about while preserving their ultimate relationships.", "_____no_output_____" ] ], [ [ "def skeleton_tree(nodes,tree):\n \"\"\"\n Reduce the tree to specified nodes, preserving their relative relationships and all the tips. 
\n \"\"\"\n skeleton=copy.deepcopy(tree) ## deepcopy tree\n indices=set([n.index for n in nodes]) ## index nodes we want to keep\n relations={} ## will map node to parent\n for k in [nd for nd in skeleton.getInternal() if nd.index in indices]+skeleton.getExternal(): ## iterate over nodes we want to keep and all tips\n cur_node=k\n while cur_node.parent and cur_node.parent.index not in indices: ## descend to root for as long as a parental node we want to keep is encountered\n cur_node=cur_node.parent\n else:\n relations[k]=cur_node.parent ## remember (simplified) relationship\n \n remove=[]\n for k in skeleton.Objects: ## iterate over all branches\n if k not in relations and k not in list(relations.values()): ## branch is not one we want to keep\n remove.append(k) ## remember to remove\n \n for r in remove:\n skeleton.Objects.remove(r) ## remove unwanted nodes\n \n for k in skeleton.getInternal():\n k.children=[] ## reset children of all nodes\n \n for k in relations: ## iterate over nodes we kept\n parent=relations[k] ## get the node's new parent\n \n if parent: parent.children.append(k) ## parent is available - add node to parent's children\n k.parent=parent ## assign parent\n \n for k in skeleton.Objects:\n k.length=k.height-k.parent.height if k.parent else 0.0 ## set new branch lengths now that a lot of intervening branches have been removed\n \n return skeleton\n\nfig = plt.subplots(figsize=(20,10),facecolor='w')\n\ngs = gridspec.GridSpec(1, 1,hspace=0.0,wspace=0.0)\nax = plt.subplot(gs[0])\n\nsignificant=lambda k: [len(ch.leaves)>70 for ch in k.children if ch.is_node()].count(True)>=2 or k==ll.root or ('PB1' in k.parent.traits and k.traits['PB1']!=k.parent.traits['PB1']) ## will highlight nodes with children that are nodes themselves, each with >70 descendant tips, or root, or mismatched PB1 states\n\ntarget=lambda k: k.is_node() and significant(k)\n\ntimeline=pivots\n\n################ colour assignment\nkeep=[]\n\ncolours={}\nVs=1\nYs=1\nfor k in sorted(ll.getBranches(target),key=lambda w: w.height): ## get colours for each clade\n keep.append(ll.traverse_tree(k)[0])\n if k.traits['PB1']=='V':\n c=mpl.cm.Reds(Vs/9)\n colours[k.index]=c\n Vs+=1\n elif k.traits['PB1']=='Y':\n c=mpl.cm.Blues(Ys/8)\n colours[k.index]=c\n Ys+=1\n \nsimple_ll=skeleton_tree(ll.getBranches(target),ll) ## reduce the tree to certain nodes, preserving relationships but keeping all tips\n################\n\nsize=lambda k: k.traits['cf'] if 'cf' in k.traits else None ## access frequency values\nfacecolour=lambda k: colours[k.index] if k.index in colours else 'lightgrey' ## get colour\n\ndraw(ax,simple_ll.root,size,timeline,bottom=[0 for x in timeline],facecolour=facecolour,edgecolour='k') ## draw Muller plot\n\nx_attr=lambda k: k.absoluteTime ## plot in absolute time\ny_attr=lambda k: k.y/ll.ySpan ## constrain tree y dimension to be between 0 and 1\n\nll.plotTree(ax,x_attr=x_attr,y_attr=y_attr,zorder=100000) ## plot tree\nll.plotPoints(ax,target=target,x_attr=x_attr,y_attr=y_attr,size=100,colour=facecolour,zorder=100000) ## plot preserved nodes\n\nax.vlines(ax.get_xticks(),0,1,color='w',ls='--',lw=1,zorder=1000) ## add vertical lines for dates\nax.set_xlabel('time',size=32)\nax.set_ylabel('frequency',size=32)\nax.tick_params(labelsize=26)\nax.set_xlim(min(timeline),max(timeline))\nax.set_ylim(0,1)\n\nplt.show()", "_____no_output_____" ] ], [ [ "This cell just shows the tree that is the source of Muller plot frequencies.", "_____no_output_____" ] ], [ [ "fig = plt.subplots(figsize=(20,15),facecolor='w')\n\ngs = 
gridspec.GridSpec(1, 1,hspace=0.0,wspace=0.0)\nax = plt.subplot(gs[0])\n\nskeleton=skeleton_tree(ll.getBranches(target),ll)\nskeleton.sortBranches()\n\nx_attr=lambda k: k.absoluteTime\ny_attr=lambda k: k.y/ll.ySpan\n\nskeleton.plotTree(ax,x_attr=x_attr,y_attr=y_attr,width=1,zorder=100000)\nskeleton.plotPoints(ax,target=lambda k: k.index in [w.index for w in ll.getBranches(target)],x_attr=x_attr,y_attr=y_attr,size=100,colour=facecolour,zorder=100000)\n\nax.set_yticks([])\nax.set_yticklabels([])\n[ax.spines[loc].set_visible(False) for loc in ax.spines if loc not in ['bottom']]\nax.set_ylim(-0.01,1.01)\n\nplt.show()", "_____no_output_____" ] ], [ [ "### Custom frequencies\n\nIf you have computed frequencies using some other method other than augur's clade frequencies or you're working with counts rather than frequencies this code will help you plot your data - normalised or unnormalised.\n\n", "_____no_output_____" ] ], [ [ "def sum_values(node,attribute):\n \"\"\"\n Collect a node's children attributes, add to parent's attribute (assumes it's a list).\n \"\"\"\n if node.is_node(): ## has children\n for child in node.children: ## iterate over children\n values=sum_values(child,attribute) ## call on children\n node.traits[attribute]=[cur_val+ch_val for cur_val,ch_val in zip(node.traits[attribute],values)] ## add children values to current node\n \n return node.traits[attribute] ## return node's values\n \n\nexample='(((\"A1\"[&freqs={0,0,0,0,1,0},fc=\"red\"],\"A2\"[&freqs={0,0,0,0,1,2},fc=\"indianred\"])[&freqs={0,0,0,1,2,2},fc=\"indianred\"],(\"B1\"[&freqs={0,0,0,0,0,1},fc=\"steelblue\"],\"B2\"[&freqs={0,0,0,0,0,3},fc=\"skyblue\"],\"B3\"[&freqs={0,0,0,0,1,0},fc=\"steelblue\"])[&freqs={0,0,0,0,1,1},fc=\"steelblue\"])[&freqs={0,0,0,2,3,1},fc=\"dimgrey\"],\"C\"[&freqs={0,0,1,1,0,0},fc=\"grey\"])[&freqs={0,1,1,1,2,4},fc=\"lightgrey\"];' ## small tree with node labels that are counts and colours\n\nll=bt.make_tree(example) ## turn string into baltic tree\nsum_values(ll.root,'freqs') ## add children's values to parent's value\n\nfig = plt.subplots(figsize=(20,10),facecolor='w')\n\ngs = gridspec.GridSpec(2, 1,hspace=0.1,wspace=0.0)\nax = plt.subplot(gs[0])\n\nfor k in ll.Objects: ## iterate over branches\n for i,x in enumerate(k.traits['freqs']): ## iterate over \"time points\"\n if x>0 and k.height==None: ## first time point where frequency is not zero and height not set yet\n k.height=i ## branch height is first non-zero frequency value\n \nfor k in ll.Objects:\n k.length=k.height-k.parent.height ## length is difference in heights\n \nll.sortBranches()\nll.plotTree(ax) ## plot tree\nll.plotPoints(ax,target=lambda k: True, colour=lambda k: k.traits['fc'],size=100) ## plot points for every branch\nll.addText(ax,x_attr=lambda k: k.height-0.02,y_attr=lambda k: k.y+0.2,target=lambda k: True,text=lambda k: k.traits['freqs'],ha='right') ## add text that shows the frequency of each branch\n\nax.set_yticks([])\nax.set_yticklabels([])\nax.tick_params(size=0,labelsize=0)\n[ax.spines[loc].set_visible(False) for loc in ax.spines]\n\nax2 = plt.subplot(gs[1],sharex=ax)\n\nsize=lambda k: k.traits['freqs'] ## get frequencies of branch\nfacecolour=lambda k: k.traits['fc'] ## get colour of branch\n\ndraw(ax2,ll.root,size,range(6),facecolour=facecolour,edgecolour='k') ## draw Muller plot\n\n[ax2.spines[loc].set_visible(False) for loc in ax2.spines if loc not in 
['left','bottom']]\nax2.tick_params(labelsize=24)\nax2.set_ylabel('value',size=30)\nax2.set_xlabel('time',size=30)\n\nax2.set_xlim(0,5.4)\nax2.set_ylim(-7,7)\n\nplt.show()", "_____no_output_____" ], [ "## repeat same procedure as before\n\nll=bt.make_tree(example)\nsum_values(ll.root,'freqs')\n\nfig = plt.subplots(figsize=(20,10),facecolor='w')\n\ngs = gridspec.GridSpec(2, 1,hspace=0.1,wspace=0.0)\nax = plt.subplot(gs[0])\n\nfor k in ll.Objects:\n for i,x in enumerate(k.traits['freqs']):\n if x>0 and k.height==None:\n k.height=i\n \nfor k in ll.Objects:\n k.length=k.height-k.parent.height\n \nll.sortBranches()\nll.plotTree(ax)\nll.plotPoints(ax,target=lambda k: True, colour=lambda k: k.traits['fc'],size=100)\nll.addText(ax,x_attr=lambda k: k.height-0.02,y_attr=lambda k: k.y+0.2,target=lambda k: True,text=lambda k: k.traits['freqs'],ha='right')\n\nax.set_yticks([])\nax.set_yticklabels([])\nax.tick_params(size=0,labelsize=0)\n[ax.spines[loc].set_visible(False) for loc in ax.spines]\n\nax2 = plt.subplot(gs[1],sharex=ax)\n\nsize=lambda k: k.traits['freqs']\nfacecolour=lambda k: k.traits['fc']\nnormalise=lambda ys: [y/Max if Max!=0.0 else 0.0 for y,Max in zip(ys,ll.root.traits['freqs'])] ## normalise values of all frequencies to be between 0 and 1 by dividing by root's frequency\n\ndraw(ax2,ll.root,size,range(6),facecolour=facecolour,edgecolour='k',normalise=normalise) ## this time we're adding normalisation\n\n[ax2.spines[loc].set_visible(False) for loc in ax2.spines if loc not in ['left','bottom']]\nax2.tick_params(labelsize=24)\nax2.set_ylabel('frequency',size=30)\nax2.set_xlabel('time',size=30)\n\nax2.set_xlim(0,5.4)\nax2.set_ylim(-0.5,0.5)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec5a328a36d48c8683fbf3e696d995fce458197e
38,885
ipynb
Jupyter Notebook
Georgia_Automator.ipynb
Grassroots-Democrats-HQ/Primitive-Postcard-Sorter
270635e274763f47a6e596504dbbeacf3df9f644
[ "MIT" ]
1
2020-11-12T16:16:25.000Z
2020-11-12T16:16:25.000Z
Postcard-Autosorter/Georgia_Automator.ipynb
ShreyJ1729/Grassroots-Democrats
e3db94abdd961cbae289a43a0019247a90672f68
[ "MIT" ]
null
null
null
Postcard-Autosorter/Georgia_Automator.ipynb
ShreyJ1729/Grassroots-Democrats
e3db94abdd961cbae289a43a0019247a90672f68
[ "MIT" ]
null
null
null
35.871771
146
0.372663
[ [ [ "import pandas as pd\nimport os\n\n# excel stuff doesn't work without this:\nimport sys\nsys.path.append(\"C:/Users/shrey/anaconda3/Lib/site-packages\")\n# if someone else is using this replace the above path with the base dir of 'openpyxl'.", "_____no_output_____" ], [ "# read in main csv file\nsignups = pd.read_excel(os.path.join(\"data_input\", \"signups.xlsx\"))\n\n# filter by done/not done\nsignups = signups.loc[pd.isna(signups['Emailed'])]\nsignups.reset_index(drop = True, inplace=True)\n\n# remove irrelevant/already done requests\nsignups = signups[[\"Ready\",\n \"First Name\",\n \"Last Name\",\n \"Email\",\n \"Phone\",\n \"Are you part of a volunteer group?\",\n \"Group Name\",\n \"How many postcards are you requesting?\",\n \"Comments / Feedback\"]]\n\n# strip dangling spaces in all columns\nsignups.columns = signups.columns.str.strip()\n\n# remove duplicate email rows\nsignups.drop_duplicates(subset=[\"Email\"], keep='first', inplace=True)\nsignups.reset_index(drop=True, inplace=True)\n\nsignups", "_____no_output_____" ], [ "import numpy as np\nflagged_indices = []\nover_400 = []\n\n# flag rows with invalid postcard numbers\nfor row in signups.iterrows():\n idx = row[0]\n row = row[1]\n num_cards = row[\"How many postcards are you requesting?\"]\n if not isinstance(num_cards, int):\n flagged_indices.append(idx)\n else:\n if num_cards >= 400:\n over_400.append(idx)\n\nif (not os.path.exists(\"data_output\")):\n os.mkdir(\"data_output\")\n\n# save flagged rows\nsignup_flags = signups.iloc[flagged_indices]\nif os.path.isfile(\"data_output/flags.xlsx\"):\n os.remove(\"data_output/flags.xlsx\")\n\n# save over 400 rows\nsignup_400 = signups.iloc[over_400]\nif os.path.isfile(\"data_output/over_400.xlsx\"):\n os.remove(\"data_output/over_400.xlsx\")\n\n# save rows that have comments\nsignup_comments = signups.loc[signups['Comments / Feedback'].notna()]\nif os.path.isfile(\"data_output/comments.xlsx\"):\n os.remove(\"data_output/comments.xlsx\")\n\n# save groups too bc y not\nsignup_groups = signups.loc[signups['Group Name'].notna()]\nif os.path.isfile(\"data_output/groups.xlsx\"):\n os.remove(\"data_output/groups.xlsx\")\n\n# save all as excel\nsignup_flags.to_excel(os.path.join(\"data_output\", \"flags.xlsx\"), index=False)\nsignup_400.to_excel(os.path.join(\"data_output\", \"over_400.xlsx\"), index=False)\nsignup_comments.to_excel(os.path.join(\"data_output\", \"comments.xlsx\"), index=False)\nsignup_groups.to_excel(os.path.join(\"data_output\", \"groups.xlsx\"), index=False)\n\n# remove from signups dataframe\nsignups.drop(flagged_indices, inplace = True)\nsignups.drop(over_400, inplace = True)\nsignups.reset_index(drop=True, inplace=True)\n\nprint(\"Invalid postcard number: \", len(signup_flags), \" rows\")\nprint(\"Over 400: \", len(over_400), \" rows\")", "Invalid postcard number: 2 rows\nOver 400: 2 rows\n" ], [ "# final preprocessing signups:\nsignups = signups[[\"First Name\", \"Last Name\", \"Email\", \"How many postcards are you requesting?\"]]\nsignups", "_____no_output_____" ], [ "# read in addresses csv file\naddresses = pd.read_excel(os.path.join(\"data_input\", \"addresses.xlsx\"))\n\n# filter by done/not done\naddresses = addresses.loc[addresses['Assigned'].isna()]\naddresses = addresses[[\"Name\", \"Address 1\", \"Address 2\"]]\naddresses.reset_index(drop = True, inplace=True)\naddresses.insert(0, \"Group/volunteer\", \"\")\naddresses.insert(0, \"Assigned\", \"\")\n\nprint(\"length: \", len(addresses))\ndisplay(addresses.head())", "length: 10005\n" ], [ "# compute 
how many people can be mailed\n\nrunning_card_num = 0\ntotal_card_num = len(addresses)\ntotal_needed = sum(signups[\"How many postcards are you requesting?\"])\nbreaked = False\n\nfor row in signups.iterrows():\n idx = row[0]\n row = row[1]\n num_cards = row[\"How many postcards are you requesting?\"]\n running_card_num += num_cards\n # if sum > total, then go back an index and break --> go back bc this one is too much\n if (running_card_num > total_card_num):\n idx -=1\n breaked = True\n break;\n\nprint(\"total addresses needed: \", total_needed)\nprint(\"we have: \", total_card_num)\nif total_needed <= total_card_num:\n print(\"leftover: \", total_card_num - total_needed)\n\nif breaked:\n print(\"Not enough names for all volunteers. Only \", idx+1, \" will be sent.\")", "total addresses needed: 13090\nwe have: 10005\nNot enough names for all volunteers. Only 119 will be sent.\n" ], [ "# sort\nif not os.path.exists(os.path.join(\"data_output\", \"sorted_addresses\")):\n os.mkdir(os.path.join(\"data_output\", \"sorted_addresses\"))\n\nimport os\ncurr_index = 0\nfor row in signups.iterrows():\n idx = row[0]\n row = row[1]\n fname = row['First Name']\n lname = row['Last Name']\n email = row['Email']\n fullname = fname.strip() + \" \" + lname.strip()\n num_cards = row['How many postcards are you requesting?']\n if curr_index + num_cards > len(addresses):\n print(\"Not enough addresses. Stopping.\")\n break;\n \n personCards = addresses.iloc[curr_index: curr_index+num_cards]\n for i in range(num_cards):\n addresses.loc[curr_index, 'Assigned'] = \"SJ\"\n addresses.loc[curr_index, 'Group/volunteer'] = fullname\n curr_index += 1\n signups.at[idx, \"Ready\"] = \"SJ\"\n personCards = personCards.drop(columns=['Assigned', \"Group/volunteer\"])\n personCards = pd.concat([pd.DataFrame({'Name': [email]}), personCards])\n personCards.to_excel(os.path.join(\"data_output\", \"sorted_addresses\", fullname + \" \" + str(num_cards) + \".xlsx\"), index=False)\n \n if (idx % (len(signups)//10)) == 0:\n print(100*idx/len(signups), \"%\", \"Done\")\n\n# replace all \"\"s in \"Assigned\" to NaNs\n# addresses[\"Assigned\"].loc[addresses['Assigned'] == \"\", ] = np.nan\n\naddresses[\"Assigned\"].loc[addresses['Assigned'] == \"\"] = np.nan\n\naddresses.to_excel(os.path.join(\"data_output\", \"addresses.xlsx\"))\ndisplay(addresses)\nsignups.to_excel(os.path.join(\"data_output\", \"signups.xlsx\"))", "0.0 % Done\n9.49367088607595 % Done\n18.9873417721519 % Done\n28.481012658227847 % Done\n37.9746835443038 % Done\n47.46835443037975 % Done\n56.962025316455694 % Done\n66.45569620253164 % Done\nNot enough addresses. Stopping.\n" ], [ "signups", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5a4220de7146ae8da34ea8b4522c8af5de126d
191,566
ipynb
Jupyter Notebook
10_Error_handling.ipynb
AyTanase/ILAS-
436dfc416a9ece37f2e9dde8036232e9ae48f89b
[ "MIT" ]
null
null
null
10_Error_handling.ipynb
AyTanase/ILAS-
436dfc416a9ece37f2e9dde8036232e9ae48f89b
[ "MIT" ]
null
null
null
10_Error_handling.ipynb
AyTanase/ILAS-
436dfc416a9ece37f2e9dde8036232e9ae48f89b
[ "MIT" ]
null
null
null
50.451936
15,418
0.69124
[ [ [ "from IPython.core.display import HTML\ndef css_styling():\n styles = open(\"./styles/custom.css\", \"r\").read()\n return HTML(styles)\ncss_styling()", "_____no_output_____" ] ], [ [ "### BEFORE YOU DO ANYTHING...\nIn the terminal:\n1. Navigate to __inside__ your ILAS_Python repository.\n2. __COMMIT__ any un-commited work on your personal computer.\n3. __PULL__ any changes *you* have made using another computer.\n4. __PULL__ textbook updates (including homework answers).\n\nThen:\n1. __Open Jupyter notebook:__ Start >> Programs (すべてのプログラム) >> Programming >> Anaconda3 >> JupyterNotebook\n1. __Navigate to the ILAS_Python folder__. \n1. __Open today's seminar__ by clicking on 10_Error_Handling.", "_____no_output_____" ], [ "A note on coursework assigment:\n\nPlease complete your assigment in a __seperate__ folder to your ILAS_python repository.", "_____no_output_____" ], [ "Start by running the cell below to import some packages that we will use today...", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "# Error Handling\n\n", "_____no_output_____" ], [ "# Lesson Goal\n__Understand the meaning__ of errors generated by your programs and take logical steps to solve them.\n\nBe able to __write exceptions__ to prevent your program from allowing erroneous code to proceed undetected. ", "_____no_output_____" ], [ "# Objectives\n - Understand the diffrence between *syntax* errors and *exceptions*.\n - Understand how to interpret an error message.\n - Be able to generate exceptions of your own.\n - Be able to write code that *catches* exceptions to prevent your program from quitting if something unexpected happens.\n \n", "_____no_output_____" ], [ "# Why we are studying this\n\nWhen writing code, you occaisionally make mistakes; it happens to everyone.\n\nAn important part of learing to program is learning to:\n- fix things when they go wrong.\n- anticipate things that might go wrong and prepare for them.\n", "_____no_output_____" ], [ "# Lesson Structure\n- Syntax errors\n- Exceptions\n - Raising your own exceptions\n - Catching and handling exceptions\n - Checking user input\n - Checking automatically generated values\n - Real world case study: Integer storage and overflow\n- Review Exercises", "_____no_output_____" ], [ "To identify errors (or bugs), it often helps to:\n - test small parts of the code separately (Jupyter notebook is very useful forn this).\n - write lots of print statements. ", "_____no_output_____" ], [ "Let's look at an example error message:", "_____no_output_____" ] ], [ [ "for i in range(4):\n print i", "_____no_output_____" ] ], [ [ "*First*, error messages show you __where__ the error occurred.\n<br>Python prints the line(s) in which the error occurred. ", "_____no_output_____" ], [ "*Second*, error messages print information that is designed to tell you __what__ you are doing wrong. \n<br>The strategy to find out what is going on is to read the last sentence of the error message. ", "_____no_output_____" ], [ "Sometimes it is easy for Python to determine what is wrong and the error message is very informative. \n<br>Other times you make a more confusing error.\n<br>In this case Python often generates an error message gives little explanation of what you did wrong. 
", "_____no_output_____" ], [ "Let's look at some examples of error messages that you are likely to encounter and may have already encountered.", "_____no_output_____" ], [ "Errors (or *bugs*) can be divided into two types: *syntax errors* and *exceptions*...\n\n", "_____no_output_____" ], [ "## Syntax errors\n\nSyntax errors occur when the code you write does not conform to the rules of the language. \n<br>You will probably have seen many of syntax error messages by now!", "_____no_output_____" ], [ "### `invalid syntax`\nA common error message is `invalid syntax`. \n\nThis means you have coded something that Python doesn't understand. \n\nFor example, this is often:\n - a typo, which you can often spot by looking carefully at the code.\n - a missing symbol (e.g. when expressing a conditional or a loop) ", "_____no_output_____" ], [ "The code below should:\n - check the value of `a`\n - print the message if the value of `a` is 7. \n \nWhat's wrong with the code below?", "_____no_output_____" ] ], [ [ "a = 7 \n\nif a = 7:\n print('the value of a equals 7')", "_____no_output_____" ] ], [ [ "Python shows with the `^` symbol to point to which part of your line of code it doesn't understand. ", "_____no_output_____" ], [ "__Try it yourself__\n<br>Write the corrected code in the cell below and run it again:", "_____no_output_____" ] ], [ [ "a = 7 \n\nif a == 7:\n print('the value of a equals 7')", "the value of a equals 7\n" ] ], [ [ "Use the `^` symbol to work out what is wrong with the code below:", "_____no_output_____" ] ], [ [ "avalue = 7 \nif avalue < 10\n print('the value of avalue is smaller than 10')", "_____no_output_____" ] ], [ [ "__Try it yourself__\n<br>Fix the code and re-run it in the cell below", "_____no_output_____" ] ], [ [ "avalue = 7 \nif avalue < 10:\n print('the value of avalue is smaller than 10')", "the value of avalue is smaller than 10\n" ] ], [ [ "Other times, the syntax error message may be less obvious... \n\nWhat is wrong with this code?", "_____no_output_____" ] ], [ [ "plt.plot([1,2,3]\nplt.title('Nice plot')", "_____no_output_____" ] ], [ [ "Python reads `plt.title('Nice plot')` as part of the `plt.plot` function. In this context, `plt.title('Nice plot')` makes no sense so the position of the error `^` is indicated here.", "_____no_output_____" ], [ "__Try it yourself__\n<br>Fix the code and re-run it in the cell below", "_____no_output_____" ] ], [ [ "plt.plot([1,2,3])\nplt.title('Nice plot')", "_____no_output_____" ] ], [ [ "## Exceptions\n\nExceptions are when the *syntax* is correct but something unexpected or anomalous occurs during the execution of a program. 
\n\nPython detects some instances of this automatically, e.g.: \n- attempting to divide by zero.\n- attempting to compute the dot product between two vectors of different lengths.", "_____no_output_____" ], [ "Attempting to divide by zero:", "_____no_output_____" ] ], [ [ "a = 1/0", "_____no_output_____" ] ], [ [ "Attempting to compute the dot product of two vectors of different lengths.", "_____no_output_____" ] ], [ [ "\na = [1, 2, 3]\nb = [1, 2, 3, 4]\nc = np.dot(a, b)", "_____no_output_____" ] ], [ [ "### Exception types\nThe error message contains:\n - the __exception type__, designed to tell you the nature of the problem.\n - a message, designed to tell you what you are doing wrong.\n\n<br>A full list of Python exception types can be found here: https://docs.python.org/3/library/exceptions.html", "_____no_output_____" ], [ "Here are a few definitions of exception types:\n\n - `ValueError` : when a function argument has the right type but an inappropriate value, and the situation is not described by a more precise exception such as IndexError.\n \n <br> \n - `TypeError` : when an operation or function is applied to an object of inappropriate type. The associated value is a string giving details about the type mismatch.\n \n <br>\n - `IndexError` : when a sequence subscript is out of range.\n \n <br>\n - `SyntaxError` : when the syntax used is not recognised by Python.", "_____no_output_____" ], [ "Let's look at a few examples of errors generated by Python automatically.", "_____no_output_____" ], [ "### `IndexError: list index out of range`", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\nfor i in range(4):\n    print(x[i])", "1\n2\n3\n" ] ], [ [ "Error message:\n<br>`IndexError: list index out of range`\n\nThe length of the list `x` is 3 (so `x[0]`, `x[1]`, and `x[2]`), while you are trying to print `x[3]`. \n\nAn ----> arrow points to where this problem was encountered.", "_____no_output_____" ], [ "__Try it yourself__\n<br>In the cell below, fix the code and run it again.", "_____no_output_____" ] ], [ [ "x = [1, 2, 3]\nfor i in range(len(x)):\n    print(x[i])", "1\n2\n3\n" ] ], [ [ "### Longer error messages\n\nRemember that error messages *first* show you __where__ the error occurred.\n\nIf the code you write contains imported modules, this message appears as a *traceback* from the function that generates the error, all the way down to the code that you wrote. \n\nPython will show the step that was violated in every file between the original function and your code.\n\nIf the code you write contains imported modules that themselves import modules, this message can be very long. \n\n", "_____no_output_____" ], [ "For each file, it prints a few lines of the code to the screen and points to the line where the error occurred with an ---> arrow. \n\nIn the code below, the error occurs in the line `plt.plot(xdata, ydata)`, which calls a function in the `matplotlib` package.\n\nThe matplotlib function generates the error when it tries to plot `y` vs. `x`. \n<br>*Note:* this is a generic error message from `matplotlib`; it doesn't substitute the names of the arrays you have assigned in your code.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\ndef func(x, a=2, b=3):\n    y = b * np.exp(-a * x)\n\nxdata = np.linspace(0, 4, 10)\n\nydata = func(xdata, b=4, a=1)\n\nplt.plot(xdata, ydata);", "_____no_output_____" ] ], [ [ "The problem is that `x and y must not be None`. 
\n\nIn this case `x` and `y` refer to `xdata` and `ydata`, because that is what the variables are called inside the `matplotlib` function. \n\nLet's print `xdata` and `ydata` to see what is wrong:", "_____no_output_____" ] ], [ [ "print(xdata)\nprint(ydata)", "[ 0.          0.44444444  0.88888889  1.33333333  1.77777778  2.22222222\n  2.66666667  3.11111111  3.55555556  4.        ]\nNone\n" ] ], [ [ "`xdata` is indeed an array with 10 values.\n\n`ydata` is equal to `None`, i.e. it exists but has no value assigned to it.", "_____no_output_____" ], [ "Why is `ydata` equal to `None`?\n\nLook carefully at the function again to find what needs correcting:", "_____no_output_____" ], [ "```Python\ndef func(x, a=2, b=3):\n    y = b * np.exp(-a * x)\n\nxdata = np.linspace(0, 4, 10)\n\nydata = func(xdata, b=4, a=1)\n\nplt.plot(xdata, ydata);\n```", "_____no_output_____" ], [ "__Try it yourself__\n<br>Re-write the function in the cell below and run the code again: ", "_____no_output_____" ] ], [ [ "def func(x, a=2, b=3):\n    y = b * np.exp(-a * x)\n    return y\n\nxdata = np.linspace(0, 4, 10)\n\nydata = func(xdata, b=4, a=1)\n\nplt.plot(xdata, ydata);", "_____no_output_____" ] ], [ [ "When you have resolved all the errors that Python has detected, your code will run.\n\nUnfortunately, this doesn't necessarily mean that your program will do what you want it to...", "_____no_output_____" ], [ "## Raising exceptions\n\nBecause the intended functionality of the program is only known by the programmer, exceptions can require more effort to detect than syntax errors.", "_____no_output_____" ], [ "Examples where the code will run but the output will be *incorrect*: \n- receiving negative data when only positive data is permitted, e.g. a negative integer for the number of students in a class.\n- unexpected integer overflows\n\n\n", "_____no_output_____" ], [ "If invalid data is encountered, the program should output an informative message, just like when an error is detected automatically. ", "_____no_output_____" ], [ "Let's look at an example from __Seminar 5: Functions__ \n\n", "_____no_output_____" ], [ "### Parameter validity checking\n", "_____no_output_____" ], [ "__Hydrostatic Pressure 静水圧__\n\nWe defined a function to calculate the hydrostatic pressure.\n\nThe hydrostatic pressure (Pa = Nm$^{-2}$ = kg m$^{-1}$s$^{-2}$) is the pressure on a submerged object due to the overlying fluid:\n\n$$\nP = \\\rho g h\n$$\n\n$g$ = acceleration due to gravity, m s$^{-2}$\n<br> $\\\rho $ = fluid density, kg m$^{-3}$\n<br> $h$ = height of the fluid above the object, m. 
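\n\nFor example, an object 10 m below the surface of fresh water ($\\\rho \\\approx 1000$ kg m$^{-3}$) experiences\n\n$$\nP = 1000 \\\times 9.81 \\\times 10 \\\approx 9.8 \\\times 10^{4} \\; \\\mathrm{Pa} \\\approx 98 \\; \\\mathrm{kPa}\n$$\n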
\n\n<img src=\"img/HydrostaticPressure.png\" alt=\"Drawing\" style=\"width: 350px;\"/>", "_____no_output_____" ] ], [ [ "def hp(h, rho = 1000, g = 9.81):\n \"\"\"\n Computes the hydrostatic pressure acting on a submerged object given:\n - the density of the fluid in which is it submerged, rho\n - the acceleration due to garvity, g\n - the height of fluid above the object, h\n \"\"\"\n return rho * g * h", "_____no_output_____" ] ], [ [ "This expression makes sense only for $\\rho g$ and $h > 0$.\n\nHowever, we can input negative values for h of these parameters without raising an error.", "_____no_output_____" ] ], [ [ "hp(-300, -20)", "_____no_output_____" ] ], [ [ "It is easy to input negative values by mistake, for example :\n - *the user makes a mistake*\n \n <br>\n - *another function takes the same quantity expressed using the opposite sign.* <br>Example: in Seminar 5: Functions we also consider a function to the height of a particle at time t, when falling from rest at height $r(0)$ with constant acceleration in which acceleration due to gravity is a negative value. \n \n```Python \n def position(t, r0, v0=0.0, a=-9.81):\n return r0 + (v0 * t) + (0.5 * a * t**2)\n ```", "_____no_output_____" ], [ "Rather than return an incorrect result, which could easily be overlooked, we can raise an exception in the case of invalid data.", "_____no_output_____" ], [ "### How to raise an exception\n - The keyword `raise` \n - The type of the exception\n - A string saying what caused it in () parentheses.\n", "_____no_output_____" ] ], [ [ "def hp(h, rho = 1000, g = 9.81):\n \"\"\"\n Computes the hydrostatic pressure acting on a submerged object.\n h = height of fluid above object, rho = fluid density, g = gravity \n \"\"\"\n if h < 0:\n raise ValueError(\"Height of fluid, h, must be greater than or equal to zero\")\n if rho < 0:\n raise ValueError(\"Density of fluid, rho, must be greater than or equal to zero\")\n if g < 0:\n raise ValueError(\"Acceleeration due to gravity, g, must be greater than or equal to zero\")\n\n return rho * g * h\n", "_____no_output_____" ] ], [ [ "The type of exception must be one that Python recognises.\n<br>i.e. it must appear of the list of built-in Python exceptions: https://docs.python.org/3/library/exceptions.html\n<br>(You can even write your own exception types but that is outside the scope of this course.)", "_____no_output_____" ], [ "There are no fixed rules about which error type to use. Choose the one that is most appropriate. Above, we have used the exception type `ValueError`. \n\n - `ValueError` : when a function argument that has the right type but an inappropriate value, and the situation is not described by a more precise exception such as IndexError.\n", "_____no_output_____" ], [ "Note: These are the same types that are generated when Python automatically raises an error.", "_____no_output_____" ], [ "Now if we run the same function again...", "_____no_output_____" ] ], [ [ "hp(-300, -20)", "_____no_output_____" ] ], [ [ "Note that only the *first* exception that Python encounters gets raised. 
\n\nThe program exits at the first error, just like automatically generated errors.", "_____no_output_____" ], [ "### Catching and handling exceptions\nWe don't always want the programs we write to exit when an error is encountered.\n<br>Sometimes we want the program to 'catch' the exception and then continue to do something else.", "_____no_output_____" ], [ "Let's use a real-world example to illustrate this:", "_____no_output_____" ], [ "USS Yorktown was a US Navy \"Smart Ship\" with a computer system fitted to operate a control centre from the ship's bridge. \n<br>In 1997, a crew member entered data into the system that led to an attempted division by zero. <br>The program exited, causing the ship's computer systems and the ship's propulsion systems to shut down. \n\n\n<img src=\"img/USS_Yorktown.jpg\" alt=\"Drawing\" style=\"width: 30%;\"/>", "_____no_output_____" ], [ "Code similar to that shown in the following cell would have been used to accept a user input and divide a number by that input. \n\nIf we input a non-zero numerical value, the code works.\n\nIf we enter zero, it generates an error.", "_____no_output_____" ] ], [ [ "# Input a value and convert it from a string to a numerical type\nval = int(input(\"input a number\"))\n\nnew_val = 1 / val", "input a number0\n" ] ], [ [ "It is undesirable for the ship's software to:\n - __stop__ if input data leads to a divide-by-zero.\n - __proceed erroneously__ and without warning.\n \nThe software needs to 'catch' the divide-by-zero exception, and do something else.\n", "_____no_output_____" ], [ "What could we make the program do instead of exiting?", "_____no_output_____" ], [ "One solution might be to:\n - reduce the propulsion force.\n - ask for revised input.", "_____no_output_____" ], [ "### `try` and `except`\nIn Python, the keywords `try` and `except` are used to catch errors:\n```python\ntry:\n    # Attempt to do something here that might raise an exception\n    # If no 'FooError' exception is raised:\n    # - Run this indented code.\n    # - Skip the indented code after except \nexcept FooError:\n    # If a 'FooError' exception is raised above:\n    # - Skip the indented code after try.\n    # - Run this indented code.\n    \n    # For exception types other than FooError:\n    # - the exception will not be caught.\n    # - the program will stop.\n    # - the error message will be printed.\n    # If FooError is omitted, ANY exception type will be caught\n```", "_____no_output_____" ], [ "So for the Smart Ship, `try` and `except` could have been used to prevent the program exiting if a `ZeroDivisionError` was generated:", "_____no_output_____" ] ], [ [ "val = 0\n\ntry:\n    new_val = 1 / val\n    \n    print(f\"new number = {new_val}\")\n\n\nexcept ZeroDivisionError: \n    print(\"Zero is not a valid input. Reducing propulsion force...\")", "Zero is not a valid input. 
Reducing propulsion force...\n" ] ], [ [ "Several `except` statements can be used to take care of different errors.\n\nThis can include assigning several exception types to a single `except` statement by placing them inside a tuple.", "_____no_output_____" ], [ "The following pseudo-code shows an example with a series of `except` statements.", "_____no_output_____" ] ], [ [ "try:\n    # do something\n    pass\n\nexcept ValueError:\n    # handle ValueError exception\n    pass\n\nexcept (TypeError, ZeroDivisionError):\n    # handle multiple exceptions\n    # TypeError and ZeroDivisionError\n    pass\n\nexcept:\n    # handle all other exceptions\n    pass", "_____no_output_____" ] ], [ [ "### Checking interactive user input\nIn the case of the smart ship, the input value is given by the user:", "_____no_output_____" ] ], [ [ "try:\n    # Ship's computer system requests number from user\n    val = int(input(\"input a number \"))\n    \n    new_val = 1 / val\n    \n    print(f\"new number = {new_val}\")\n\n\nexcept ZeroDivisionError:\n    \n    print(\"Zero is not a valid input. Reducing propulsion force...\")\n", "input a number 0\nZero is not a valid input. Reducing propulsion force...\n" ] ], [ [ "By catching the exception, we avoid running the part of the code that would generate the error and stop the program.\n\nHowever, that means we have not created a variable called new_val, which is what the code in the `try` block was intended to do.\n\nThis can cause problems later in the program.", "_____no_output_____" ], [ "### Re-requesting user input\n\nRecall our example error-catching solution for the smart ship - if an error is generated:\n - reduce the propulsion force.\n - __ask for revised input.__\n ", "_____no_output_____" ], [ "One way to do this is to use a `while` loop with a `break` statement.\n\nWe keep requesting user input until valid input is given.\n\nAt that point, the `break` statement exits the loop.", "_____no_output_____" ] ], [ [ "while True:\n    try:\n        x = int(input(\"Please enter an even number: \"))\n        \n        if (x % 2 != 0):\n            raise ValueError(\"Odd number entered\")\n        \n        break\n        \n    except ValueError:\n        print(\"Not a valid number. Try again...\")", "Please enter an even number: 3\nNot a valid number. Try again...\nPlease enter an even number: 3\nNot a valid number. Try again...\nPlease enter an even number: four\nNot a valid number. Try again...\nPlease enter an even number: 2\n" ] ], [ [ "To make our program more readable we can also encapsulate the code in a __recursive__ function.\n\nFor example, for the smart ship:", "_____no_output_____" ] ], [ [ "def SmartShip():\n    try:\n        # Ship's computer system requests number from user\n        val = int(input(\"input a number \"))\n        new_val = 1 / val\n        print(f\"new number = {new_val}\")\n        # Return the successfully computed value to the caller.\n        return new_val\n\n    except ZeroDivisionError:\n        print(\"Zero is not a valid input. Reducing propulsion force...\")\n        # Request new input by re-running the function.\n        return SmartShip()\n\n\nnew_val = SmartShip()\nprint(f\"new_val = {new_val}\")", "input a number 0\nZero is not a valid input. Reducing propulsion force...\ninput a number 0\nZero is not a valid input. Reducing propulsion force...\ninput a number 0\nZero is not a valid input. Reducing propulsion force...\ninput a number 0\nZero is not a valid input. Reducing propulsion force...\ninput a number 1\nnew number = 1.0\nnew_val = 1.0\n" ] ], [ [ "This first example features an exception that *prevents* Python's default response to the error (i.e. 
exiting the code).", "_____no_output_____" ], [ "__Try it yourself__\n<br>Using the same format as the `SmartShip` example:\n```python\ntry:\n    # Attempt to do something here that might raise an exception\n    # If no 'FooError' exception is raised:\n    # - Run this indented code.\n    # - Skip the indented code after except \nexcept FooError:\n    # If a 'FooError' exception is raised above:\n    # - Skip the indented code after try.\n    # - Run this indented code.\n    \n    # For exception types other than FooError:\n    # - the exception will not be caught.\n    # - the program will stop.\n    # - the error message will be printed.\n    # If FooError is omitted, ANY exception type will be caught\n```\nwrite a function that:\n- asks the user to input their age.\n- returns the user's age.\n- raises an exception if the user's age is <0 and asks the user to try again.", "_____no_output_____" ] ], [ [ "def ask_age():\n    age = 0\n    while True:\n        try:\n            age = int(input(\"How old are you?: \"))\n            if age >= 0:\n                return age\n            else:\n                print(\"Really?\")\n        except:\n            print(\"Please input a whole number, 0 or greater\")\nprint(ask_age())", "_____no_output_____" ] ], [ [ "### Automatically checking parameters within code\n\nIt can also be useful to check values that are generated automatically (e.g. due to imported data such as files or sensor readings).\n", "_____no_output_____" ], [ "#### Background: bits and bytes\n\nThe smallest unit of computer memory is the *bit*; each bit can take on one of two values: 0 or 1. \n\nFor many computer architectures the smallest usable 'block' is a *byte*.\n\nOne byte is made up of 8 bits. \n\n(e.g. a 64-bit operating system, a 32-bit operating system ... the number of bits will almost always be a multiple of 8 (one byte).)\n\n\n", "_____no_output_____" ], [ "The 'bigger' a thing we want to store, the more bytes we need. \n\nIn calculations, 'bigger' can mean:\n- how large or small the number can be.\n- the accuracy with which we want to store a number. \n", "_____no_output_____" ], [ "### Binary Numbers\n\nWhen using the binary system each number is represented by summing a combination of base 2 numbers ($2^0, 2^1, 2^2....$).\n", "_____no_output_____" ], [ "For example, the table shows the binary representation of numbers 0 to 15 (the maximum number that can be represented by 4 bits).\n\nThe sum of the base 2 columns marked with a 1 is found as the decimal number in the left hand column.\n\nThe combination of 1s and 0s used to generate this decimal number is its binary representation.\n\n|Decimal| Binary ||||\n|:------------:|:-----------:|:-----------:|:-----------:|:---------:|\n| |$2^3=8$ |$2^2=4$ |$2^1=2$ |$2^0=1$ | \n|0 |0 |0 |0 |0 | \n|1 |0 |0 |0 |1 | \n|2 |0 |0 |1 |0 | \n|3 |0 |0 |1 |1 | \n|4 |0 |1 |0 |0 | \n|5 |0 |1 |0 |1 | \n|6 |0 |1 |1 |0 | \n|7 |0 |1 |1 |1 | \n|8 |1 |0 |0 |0 | \n|9 |1 |0 |0 |1 | \n|10 |1 |0 |1 |0 | \n|11 |1 |0 |1 |1 | \n|12 |1 |1 |0 |0 | \n|13 |1 |1 |0 |1 | \n|14 |1 |1 |1 |0 | \n|15 |1 |1 |1 |1 | \n\n\n\n \n\n", "_____no_output_____" ], [ "The __largest number__ that can be represented by $n$ bits is:\n\n$2^{n} - 1$\n\nThe -1 comes from the fact that we start counting at 0 (i.e. $2^0$), rather than at 1 (i.e. 
$2^{1}$).", "_____no_output_____" ], [ "If, for example, we had 5 bits, 10000 would equal $2^4$.\n\nFrom the pattern of 1s and 0s in the table, we can see that by subtracting 1:\n\n$2^4-1$ \n\nwe should get the 4 bit number 1111.\n", "_____no_output_____" ], [ "The __largest positive integer__ that can be represented by $n$ bits is:\n\n$2^{n-1} - 1$\n\nThe power $n-1$ is because there is one less bit available when storing a *signed* integer.\n\nOne bit is used to store the sign: + positive or - negative (represented as a 0 or a 1).", "_____no_output_____" ], [ "The __largest negative integer__ that can be represented by $n$ bits is:\n\n$2^{n-1}$ \n\nThe first number when counting in the positive direction (0000 in the 4 bit example above) is zero.\n<br>Zero does not need a second representation in the negative scale.\n<br>Therefore, when counting in the negative direction:\n - 0000 = -1 (not 0)\n - 0001 = -2 \n - ....", "_____no_output_____" ], [ "The __largest unsigned integer__ that can be represented by 4 bits is:\n\n$2^{4} - 1 = 15$\n\nThe __largest positive signed integer__ that can be represented by 4 bits is:\n\n$2^{4-1} - 1 = 7$\n\nThe __largest negative signed integer__ that can be represented by 4 bits is:\n\n$2^{4-1} = 8$", "_____no_output_____" ], [ "### Integer storage and overflow\n\nIn most languages (C, C++ etc), a default number of bits is used to store a given type of number.\n\nPython is different in that it *automatically* assigns a variable type to a variable. \n\nTherefore it also automatically assigns the number of bits used to store the variable.\n\n\n\n", "_____no_output_____" ], [ "This means it will assign as many bytes as needed to represent the number entered by the user. \n\nIt starts with a 32 bit number and assigns more bytes as needed. \n\nThe largest (and smallest! - we will see how decimals are stored in next week's seminar) number that Python can store is theoretically infinite. \n\nThe number size is, however, limited by the computer's memory.", "_____no_output_____" ], [ "However, when using NumPy, it is possible for an integer to *overflow*, because NumPy uses C-style fixed precision integers.\n\nIn this case, a maximum size of 64 bits is used.\n\n$2^{64-1} - 1 = 9.223372037 \\times 10^{18}$\n\nSo if we use a number greater than $2^{64-1} - 1$ the integer will *overflow*.", "_____no_output_____" ], [ "### Example: NumPy integer overflow \nIn the array below:\n- The value with index `a[0]` is $2^{63} - 1$, the maximum storable value.\n- the data type is specified to make sure it is an int.", "_____no_output_____" ] ], [ [ "a = np.array([2**63 - 1], dtype=int)\n\nprint(a, a.dtype)\n", "[9223372036854775807] int64\n" ] ], [ [ "The `bin` function prints the number in binary form, as a string.\n<br>(prefix `0b` for positive numbers, prefix `-0b` for negative numbers)\n<br>It is important to note that values are represented as regular binary numbers, NOT using their signed storage representation.\n\ne.g. 
\n \n    0b101 = 5,   -0b101 = -5", "_____no_output_____" ] ], [ [ "print(bin(5), bin(-5))", "0b101 -0b101\n" ], [ "print(a, a.dtype)\n\nprint(bin(a[0]))\n\nprint(type(a[0]))\n\nprint(len(bin(a[0]))) # Number of characters in binary string representation", "[9223372036854775807] int64\n0b111111111111111111111111111111111111111111111111111111111111111\n<class 'numpy.int64'>\n65\n" ] ], [ [ " [9223372036854775807] int64\n 0b111111111111111111111111111111111111111111111111111111111111111\n <class 'numpy.int64'>\n 65\n\n\nThere are 65 characters in the string.\n<br>The first two, `0b`, tell us it is a positive binary number.\n\nThe 63 characters that follow tell us that the number is $2^{63}-1$.\n\n$2^{63}-1$ is the largest value that can be stored by a 64 bit signed integer.", "_____no_output_____" ], [ "Adding 1 to the array will cause it to overflow.\n\nOverflow means that the number's value loops round to start again from its smallest possible value.", "_____no_output_____" ] ], [ [ "a += 1\n\nprint(bin(a[0]))\n\nprint(type(a[0]))\n\nprint(len(bin(a[0]))) # Number of characters in binary string representation", "-0b1000000000000000000000000000000000000000000000000000000000000000\n<class 'numpy.int64'>\n67\n" ] ], [ [ " -0b1000000000000000000000000000000000000000000000000000000000000000\n <class 'numpy.int64'>\n 67\n\n\nThere are 67 characters in the string.\n<br>The first three, `-0b`, tell us it is a negative binary number.\n\nThe 64 characters that follow tell us that the number is $2^{63}$.\n\n$-2^{63}$ is the lowest value that can be stored by a 64 bit signed integer.", "_____no_output_____" ], [ "To see the number of bits required to store a number, use the `bit_length` method.", "_____no_output_____" ] ], [ [ "b = 8**12\nprint(b, type(b))\nprint(b.bit_length(), end=\"\\n\\n\")\n\n\nb = 8**24\nprint(b, type(b))\nprint(b.bit_length())", "68719476736 <class 'int'>\n37\n\n4722366482869645213696 <class 'int'>\n73\n" ] ], [ [ "## Example: Error handling with integer type conversion\n\nAn uncaught error due to storage limits led to the explosion of an unmanned rocket, *Ariane 5* (European Space Agency), shortly after lift-off (1996).\n\nWe will reproduce the precise mistake the developers of the Ariane 5 software made.\n\n<img src=\"img/ariane5.jpg\" alt=\"Drawing\" style=\"width: 300px;\"/>\n\n", "_____no_output_____" ], [ "The Ariane 5 rocket explosion was caused by an integer overflow. \n\nThe speed of the rocket was stored as a 64-bit float.\n\nThis was converted in the navigation software to a 16-bit integer. \n\n", "_____no_output_____" ], [ "However, the value of the float was greater than 32767, the largest number a 16-bit integer can represent.\n\nThis led to an overflow that in turn caused the navigation system to fail and the rocket to explode.\n\n", "_____no_output_____" ], [ "We can demonstrate what happened in the rocket program. \n\nConsider a speed of 40000.54 stored as a `float` (64 bits) (units are not relevant to what is being demonstrated):", "_____no_output_____" ] ], [ [ "speed_float = 40000.54", "_____no_output_____" ] ], [ [ "Let's first convert the float to a 32-bit `int`.\n\nWe can use NumPy to cast the variable as an integer with a fixed number of bits.", "_____no_output_____" ] ], [ [ "speed_int = np.int32(speed_float) \nprint(speed_int)\nprint(bin(speed_int))", "40000\n0b1001110001000000\n" ] ], [ [ "The conversion behaves as we would expect. 
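We can confirm the limits quickly with NumPy's `np.iinfo` helper (used again later in this seminar), which reports the representable range of each integer type:\n\n```python\nimport numpy as np\n\nprint(np.iinfo(np.int32).max)  # 2147483647, so 40000 fits easily\nprint(np.iinfo(np.int16).max)  # 32767, so 40000 cannot be represented\n```\n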
\n\n", "_____no_output_____" ], [ "Now, if we convert the speed from the `float` to a 16-bit integer...", "_____no_output_____" ] ], [ [ "speed_int = np.int16(speed_float)\nprint(speed_int)\nprint(bin(speed_int))", "-25536\n-0b110001111000000\n" ] ], [ [ "We see clearly the result of an integer overflow since the 16-bit integer has too few bits to represent the number 40000.", "_____no_output_____" ], [ "What might we want the program to do to avoid the integer overflow?", "_____no_output_____" ], [ "As a 16 bit integer was chosen, small memory usage was clearly an objective when writing the program.", "_____no_output_____" ], [ "One solution is to incrementally step through increasing integer sizes (16 bit, 32 bit, 64 bit ... ).\n<br>When we find an integer size that is large enough to hold the variable, we store the variable. \n\nThis means we:\n- always select the minimum possible variable size.\n- avoid overflow errors.", "_____no_output_____" ], [ "One way to do this is using `if` and `else`.\n<br>This is known as LBYL (look before you leap) programming. ", "_____no_output_____" ] ], [ [ "speed_float = 32_10.0 # (small enough for a 16-bit int)\nspeed_float = 42_767.0 # (too large for a 16-bit int)\nspeed_float = 2_147_500_000.0 # (too large for a 32-bit int)\n\n# Check if the number to store will fit in a 16 bit integer.\nif abs(speed_float) <= (2**(16-1) - 1):\n    vel = np.int16(abs(speed_float))\n    \n# Check if the number to store will fit in a 32 bit integer.\nelif abs(speed_float) <= (2**(32-1) - 1):\n    vel = np.int32(abs(speed_float))\n    \nelse:\n    raise OverflowError(\"Value too large for 32-bit int.\")", "_____no_output_____" ] ], [ [ "We can use `try` and `except` to do the same thing. \n\nIn general, the main advantages of using `try` and `except` are:\n- speed-ups (e.g. preventing extra lookups: `if...and...and...and...`)\n- cleaner code (fewer lines/easier to read)\n- jumping more than one level of logic (e.g. where a break doesn't go far enough)\n- where the outcome is likely to be unexpected (e.g. 
it is difficult to define `if` and `elif` conditional statements).\n \nThis is known as EAFP (easier to ask for forgiveness than permission) programming.\n", "_____no_output_____" ], [ "###### Remember the `try` and `except` structure:\n\n```python\ntry:\n    # Attempt to do something here that might raise an exception\n    # If no 'FooError' exception is raised:\n    # - Run this indented code.\n    # - Skip the indented code after except \nexcept FooError:\n    # If a 'FooError' exception is raised above:\n    # - Skip the indented code after try.\n    # - Run this indented code.\n    \n    # For exception types other than FooError:\n    # - the exception will not be caught.\n    # - the program will stop.\n    # - the error message will be printed.\n    # If FooError is omitted, ANY exception type will be caught\n```", "_____no_output_____" ], [ "Let's write two functions to try:", "_____no_output_____" ] ], [ [ "def cast_v_16(v):\n    \"Convert to a 16-bit int.\"\n    if abs(v) <= (2**(16-1) - 1):\n        return np.int16(v)\n    \n    else:\n        raise OverflowError(\"Cannot safely cast velocity to 16-bit int.\")\n\n    \n    \ndef cast_v_32(v):\n    \"Convert to a 32-bit int.\"\n    if abs(v) <= (2**(32-1) - 1):\n        return np.int32(v)\n    \n    else:\n        raise OverflowError(\"Value too large for 32-bit int.\")", "_____no_output_____" ] ], [ [ "Then use each of the functions in the `try`/`except` structure.", "_____no_output_____" ] ], [ [ "v = 32_10.0 # (small enough for a 16-bit int)\nv = 42_767.0 # (too large for a 16-bit int)\nv = 2_147_500_000.0 # (too large for a 32-bit int)\n\ntry:\n    # Try to cast v as 16-bit int.\n    vel = cast_v_16(v)\n    print(vel)\n\nexcept OverflowError:\n    # If cast as 16-bit int failed, raise exception.\n    # Try to cast v as 32-bit int.\n    try:\n        vel = cast_v_32(v)\n        print(vel)\n    \n    except OverflowError:\n        # If cast as 32-bit int failed, raise exception\n        raise RuntimeError(\"Could not cast velocity to an available int type.\")\n\n\nprint(type(vel)) ", "_____no_output_____" ] ], [ [ "This block of code can itself be placed inside of a function to make the code more concise.\n<br>The only change made is returning the cast variable instead of storing it as the variable `vel`.", "_____no_output_____" ] ], [ [ "def cast_velocity(v):\n    try:\n        # Try to cast v to a 16-bit int\n        return cast_v_16(v)\n    \n    except OverflowError:\n        # If cast to 16-bit int failed (and exception raised), try casting to a 32-bit int\n        try:\n            return cast_v_32(v)\n        \n        except OverflowError:\n            # If cast to 32-bit int failed, raise exception\n            raise RuntimeError(\"Could not cast v to an available int type.\")\n\n# v fits into a 16-bit int\nv_int = cast_velocity(32_10.0) \nprint(v_int, type(v_int))\n\n# v too large for a 16-bit int\n# v_int = cast_velocity(42_767.0) \n# print(v_int, type(v_int))\n\n# # v too large for a 32-bit int\n# v_int = cast_velocity(2_147_500_000.0) \n# print(v_int, type(v_int))", "3210 <class 'numpy.int16'>\n42767 <class 'numpy.int32'>\n" ] ], [ [ "### Gangnam Style\n\nIn 2014, Google switched from 32-bit integers to 64-bit integers to count views when the video \"Gangnam Style\" was viewed more than 2,147,483,647 times, the limit of 32-bit integers.\n\n<img src=\"img/gangnam.jpg\" alt=\"Drawing\" style=\"width: 400px;\"/>", "_____no_output_____" ], [ "Note: We can replace the calculation for the maximum value storable by an integer type with the method `np.iinfo(TYPE).max`, replacing `TYPE` with the integer type. 
For example:\n```python\ndef cast_v_16(v):\n    \"Convert to a 16-bit int.\"\n    if abs(v) <= (2**(16-1) - 1):\n        return np.int16(v)\n    ```\ncan be written:\n```python\ndef cast_v_16(v):\n    \"Convert to a 16-bit int.\"\n    if abs(v) <= np.iinfo(np.int16).max:\n        return np.int16(v)\n    ```\n", "_____no_output_____" ], [ "### `finally`\n\nThe `try` statement in Python can have an optional `finally` clause. \n\nThe indented code following `finally` is executed, regardless of the outcome of the preceding `try` (and `except`).", "_____no_output_____" ] ], [ [ "def cast_velocity(v):\n    try:\n        # Try to cast v to a 16-bit int\n        return cast_v_16(v)\n    \n    except OverflowError:\n        # If cast to 16-bit int failed (and exception raised), try casting to a 32-bit int\n        try:\n            return cast_v_32(v)\n        \n        except OverflowError:\n            # If cast to 32-bit int failed, raise exception\n            raise RuntimeError(\"Could not cast v to an available int type.\")\n        \n        finally:\n            print(\"32 bit integer tried\")\n        \n    finally:\n        print(\"16 bit integer tried\")\n    \n\nv_int = cast_velocity(42_767.0) \n\nv_int = cast_velocity(2_147_500_000.0) ", "32 bit integer tried\n16 bit integer tried\n32 bit integer tried\n16 bit integer tried\n" ] ], [ [ "This is often used to \"clean up\".\n\nFor example, we may be working with a file.\n\n```Python\ntry:\n    f = open(\"test.txt\")\n    # perform file operations\nfinally:\n    f.close()\n ```", "_____no_output_____" ], [ "### A final example\nAn example that combines some of the ideas in this seminar.\n\nThe following code was run at the beginning of Seminar 8: Plotting. ", "_____no_output_____" ] ], [ [ "try:\n    # If the package ipywidgets is installed it is imported\n    import ipywidgets\n\nexcept ImportError:\n\n    try:\n        # Try to install ipywidgets\n        # ! prefix runs a command as if in the terminal\n        # {sys.executable} tells python to install to the same location that the current version of python is installed\n        # -m tells python to install the version that matches the python version of the current notebook (e.g. Python 3)\n        !{sys.executable} -m pip install ipywidgets\n        import ipywidgets\n        \n    except ImportError:\n        # Try to install ipywidgets to the user directory if the user lacks permission to install the package systemwide\n        !{sys.executable} -m pip install --user ipywidgets\n        \n    finally:\n        # Run a line of code needed to use the installed package with jupyter notebook\n        !jupyter nbextension enable --py widgetsnbextension\n        print(\"You will need to refresh your browser page\")\n    ", "_____no_output_____" ] ], [ [ "### Extension Topic: A very brief introduction to the IDE debugger\n\nMany IDEs such as Spyder, MATLAB and PyCharm feature a debugger mode; a mode of running your code that is designed to make removing errors easier.\n\n\n\n", "_____no_output_____" ], [ "The underlying idea is to break your code into smaller chunks and run them sequentially.\n\nThis is a little like running a sequence of Jupyter notebook cells one after the other.\n\nRunning your code in this way can make it easier to spot where a bug occurs and what is causing it.\n\n", "_____no_output_____" ], [ "#### Breakpoints\n\nA breakpoint can be added next to a line of code.\n\nIn Spyder, and in many other IDEs, a break point is added by double clicking in the margin, to the left of the line number. 
\n\n<img src=\"img/spyder_breakpoints.png\" alt=\"Drawing\" style=\"width: 300px;\"/>", "_____no_output_____" ], [ "Every time the line with the break point is reached, the program will pause.\n\nWhen the programmer presses resume, the code will advance until the next break point.", "_____no_output_____" ], [ "You can add as many breakpoints as you like.\n\nTo remove a breakpoint, simply click on it. ", "_____no_output_____" ], [ "So that you can switch easily between running the code with and without breakpoints, there are separate buttons to run the code with and without break points.\n\nIn Spyder:\n\nthe button to run the code normally is: <br><img src=\"img/spyder_run.png\" alt=\"Drawing\" style=\"width: 50px;\"/>\n\nthe button to run the code in debugger mode is: <img src=\"img/spyder_run_debugger.png\" alt=\"Drawing\" style=\"width: 50px;\"/>\n\nthe button to advance the code to the next breakpoint is: <img src=\"img/spyder_advance.png\" alt=\"Drawing\" style=\"width: 50px;\"/>\n\n", "_____no_output_____" ], [ "All of these can be found in the toolbar at the top of the main window.\n\n<img src=\"img/spyder_toolbar.png\" alt=\"Drawing\" style=\"width: 500px;\"/>", "_____no_output_____" ], [ "One of the main advantages of running your code using breakpoints is that you can check the value of variables at different points in your program.", "_____no_output_____" ], [ "For example, as we saw earlier, the following code will automatically raise a `ZeroDivisionError`:\n    \n    a = 0\n    \n    a = 1 / a\n    \n    ", "_____no_output_____" ], [ "If we, for example, unknowingly import a variable with value zero from an external file, it can be difficult to spot the source of the error. ", "_____no_output_____" ] ], [ [ "a = np.loadtxt('sample_data/sample_data_seminar10.dat')\n\na = int(a[0][0])\n\na = 1 / a\n\nprint(a)", "_____no_output_____" ] ], [ [ "In this case, if we run the code, we can see that as `a = 0`, `a = 1 / a` raised an exception.\n\nIt does not reveal that the imported value was the origin of the `ZeroDivisionError`.", "_____no_output_____" ], [ "If we place a break point on the line:\n\n    a = int(a[0][0])\n    \nwe see that the value of `a` *immediately before* the line was run was an imported array of values equal to zero.\n\nThe line that will run when we click advance is highlighted in pink.\n\n<img src=\"img/spyder_point1.png\" alt=\"Drawing\" style=\"width: 700px;\"/>", "_____no_output_____" ], [ "Our next break point is on the line that generates the error:\n\n    a = 1 / a\n\n<img src=\"img/spyder_point2.png\" alt=\"Drawing\" style=\"width: 700px;\"/>\n\nThe value of `a` is 0.\n\nIf we click advance, we generate the error as expected; however, we now know where the zero value came from that is causing the error.
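\n\nIf you are not working in an IDE, Python's built-in debugger, `pdb`, gives similar pause-and-inspect behaviour from a terminal. A minimal sketch (since Python 3.7 the built-in `breakpoint()` function drops you into `pdb`):\n\n```python\na = 0\nbreakpoint()  # execution pauses here: print a with 'p a', step with 'n', continue with 'c'\na = 1 / a\n```\n\nInspecting `a` at the pause reveals the zero value before the division is attempted.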
", "_____no_output_____" ], [ "The Spyder debugger mode is a little dificult to use and minimal documentation is provided.\n\nFor those of you wishing to run Python using an IDE, I highly recommend PyCharm: https://www.jetbrains.com/pycharm/ \n\nIt is free to download if you have a university email address.\n\nClear, step-by-step instructions for running the PyCharm debugger mode (along with many other tutorials) can be found here: https://www.jetbrains.com/help/pycharm/step-2-debugging-your-first-python-application.html\n\n", "_____no_output_____" ], [ "## Review Exercises\nThe best way to learn how to resolve errors in your code is through practice.\n\nYou can complete the following excercises in the cells provided in the notebook.\n\nFor practise at writing and running standalone Python files, express the answers in as .py file and add them to a new folder called __Error_Handling_Excercises__.\n\nStore this folder, along with the folder __Input_and_Output_Exercises__ in the new location to created last week to \nsave your Python programs in this folder. \n", "_____no_output_____" ], [ "### Review Exercise: Identifiying and fixing syntax errors.\nEach example contains one or two syntactical errors. \n\nCopy and paste the section of code in the cell below the example (so that you retain the original version with errors for comparison).\n\nFix the error so that the code runs properly. \n\nNote that you will need to make changes to only one or two lines in each example. ", "_____no_output_____" ], [ "### Example 1", "_____no_output_____" ] ], [ [ "# Example 1\ny = (xvalues + 2) * (xvalues - 1) * (xvalues - 2)\nxvalues = linspace(-3, 3, 100)\nplt.plot(xvalues, y, 'r--')\nplt.plot([-2, 1, 2], [0 ,0, 0], 'bo', markersize=10)\nplt.xlabel('x-axis')\nplt.ylabel('y-axis')\nplt.title('Nice Python figure!')", "_____no_output_____" ], [ "# Copy and paste code here\n# Example 1\nxvalues = np.linspace(-3, 3, 100) # changed\ny = (xvalues + 2) * (xvalues - 1) * (xvalues - 2) #changed\nplt.plot(xvalues, y, 'r--')\nplt.plot([-2, 1, 2], [0 ,0, 0], 'bo', markersize=10)\nplt.xlabel('x-axis')\nplt.ylabel('y-axis')\nplt.title('Nice Python figure!')", "_____no_output_____" ], [ "# Example solution\nxvalues = np.linspace(-3, 3, 100) # xvalues should be placed before y. 
linspace should be np.linspace\ny = (xvalues + 2) * (xvalues - 1) * (xvalues - 2)\nplt.plot(xvalues, y, 'r--')\nplt.plot([-2, 1, 2], [0 ,0, 0], 'bo', markersize=10)\nplt.xlabel('x-axis')\nplt.ylabel('y-axis')\nplt.title('Nice Python figure!');", "_____no_output_____" ] ], [ [ "### Example 2", "_____no_output_____" ] ], [ [ "# Example 2\ndef test(x, alpha):\n return np.exp(-alpha * x) * np.cos(x)\nx = np.linspace(0, 10np.pi, 100)\nalpha = 0.2\ny = test(x)\nplt.plot(x, y, 'b')\nplt.xlabel('x')\nplt.ylabel('f(x)')", "_____no_output_____" ], [ "# Copy and paste code here\n# Example 2\ndef test(x, alpha):\n return np.exp(-alpha * x) * np.cos(x)\nx = np.linspace(0, 10 * np.pi, 100) # changed\nalpha = 0.2\ny = test(x, alpha) # changed\nplt.plot(x, y, 'b')\nplt.xlabel('x')\nplt.ylabel('f(x)')", "_____no_output_____" ], [ "# Example Solution\ndef test(x, alpha):\n return np.exp(-alpha * x) * np.cos(x)\nx = np.linspace(0, 10 * np.pi, 100) # * between 10 and np.pi\nalpha = 0.2\ny = test(x, alpha) # forgot to pass alpha to test\nplt.plot(x, y, 'b')\nplt.xlabel('x')\nplt.ylabel('f(x)');", "_____no_output_____" ] ], [ [ "### Example 3", "_____no_output_____" ] ], [ [ "# Example 3\na = np.array([2, 2, 4, 2, 4, 4])\n\nfor i in range(a):\n if a[i] < 3: # replace value with 77 when value equals 2\n a[i] = 77\n else: # otherwise replace value with -77\n a[i] = -77\nprint('modified a:' a)", "_____no_output_____" ], [ "# Copy and paste code here\n# Example 3\na = np.array([2, 2, 4, 2, 4, 4])\n\nfor i in range(len(a)): # changed\n if a[i] < 3: # replace value with 77 when value equals 2\n a[i] = 77\n else: # otherwise replace value with -77\n a[i] = -77\nprint('modified a:', a) # changed", "modified a: [ 77 77 -77 77 -77 -77]\n" ], [ "# Example solution\na = np.array([2, 2, 4, 2, 4, 4])\n\nfor i in range(len(a)): # range(len(a)) i.o. range(a)\n if a[i] < 3: # replace value with 77 when value equals 2\n a[i] = 77\n else: # otherwise replace value with -77\n a[i] = -77\nprint('modified a:', a) # added comma", "_____no_output_____" ] ], [ [ "### Example 4", "_____no_output_____" ] ], [ [ "# Example 4\ny = np.zeros(20, 20)\ny[8:13] = 10\nplt.matshow(y)\nplt.title(image of array y);", "_____no_output_____" ], [ "# Copy and paste code here\n# Example 4\ny = np.zeros((20, 20)) # changed\ny[8:13] = 10\nplt.matshow(y)\nplt.title(\"image of array y\"); # changed", "_____no_output_____" ], [ "# Example Solution\ny = np.zeros((20, 20)) # add parentheses around (20, 20)\ny[8:13] = 10\nplt.matshow(y)\nplt.title('image of array y'); # add quotes around text string", "_____no_output_____" ] ], [ [ "### Review Exercise: Parameter Validity Checking - Gravitational Potential. \nThe gravitational potential $V$ of a body of mass $m$ (point mass) at a distance $r$ from a body of mass $M$:\n\n$$\nV = \\frac{G M m}{r}\n$$\n\nwhere $G$ is the *gravitational constant*, $G = 6.674 × 10^{-11} m^3 kg^{-1} s^{-2}$\n\nThis expression only makes sense when $G, M, m \\ge 0$ and $r > 0$. 
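\n\nFor example, taking $M$ as the mass of the Earth ($\approx 5.97 \times 10^{24}$ kg), $m = 1$ kg and $r$ as the Earth's radius ($\approx 6.37 \times 10^{6}$ m) gives:\n\n$$\nV = \frac{6.674 \times 10^{-11} \times 5.97 \times 10^{24} \times 1}{6.37 \times 10^{6}} \approx 6.3 \times 10^{7}\n$$\n\n(The mass and radius here are approximate values, used only to illustrate the formula.)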
\n\nWrite a function to compute the gravitational potential that raises a `ValueError` if $G$, $M$ or $m$ < 0, or if $r \le 0$.\n\nUse a default argument for $G$.\n", "_____no_output_____" ] ], [ [ "def gravity(M, m, r, G=6.674e-11):\n    if M >= 0 and m >= 0 and G >= 0:\n        if r > 0:\n            return G * M * m / r\n        else:\n            raise ValueError(\"'r' should be positive.\")\n    else:\n        raise ValueError(\"'M', 'm', and 'G' should not be negative\")\n\nprint(gravity(100, 1, 1))\nprint(gravity(-100, 1, 1))", "6.673999999999999e-09\n" ], [ "# Example Solution\ndef gravity_potential(M, m, r, G=6.674e-11):\n    if G < 0:\n        raise ValueError(\"Gravitational constant must be greater than or equal to zero\")\n    if M < 0:\n        raise ValueError(\"Mass M must be greater than or equal to zero\")\n    if m < 0:\n        raise ValueError(\"Mass m must be greater than or equal to zero\")\n    if r <= 0:\n        raise ValueError(\"Distance r must be greater than zero\")\n    return G*M*m/r\n\nprint(gravity_potential(1.65e12, 6.1e2, 7e3))", "_____no_output_____" ] ], [ [ "### Review Exercise: Requesting User Input - Fibonacci Series. \n\nIn Seminar 5: Functions, we studied the use of a recursive function to generate the Fibonacci series; a number sequence characterised by the equation:\n\n$$\nf_n = f_{n-1} + f_{n-2}\n$$\n\ni.e. the $n$th term $f_{n}$ is computed from the preceding terms $f_{n-1}$ and $f_{n-2}$. \n\n| Term | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 |\n|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| Output | 0 | 1 | 1 | 2 | 3 | 5 | 8 | 13 | 21 | 34 | 55 | 89 | 144 | 233 | 377 | 610 |\n\nThe recursive function used to generate the Fibonacci series:\n\n```Python\ndef f(n): \n    \"Compute the nth Fibonacci number using recursion\"\n    if n == 0:\n        return 0 \n    \n    elif n == 1:\n        return 1 \n    \n    else:\n        return f(n - 1) + f(n - 2) \n\nprint(f(10))\n```\n\nThe Fibonacci series is valid only for $n \ge 0$. \n<br>In the cell below, copy and paste the Fibonacci function, `f`, shown above.\n<br>Using the pasted code as a starting point, write a piece of code that:\n- computes the nth Fibonacci number, where n is given interactively by the user (i.e. use the `input` keyword).\n- if n is less than zero, the function re-requests n until a valid input is given.\n<br>Try some invalid data cases to check that an exception is raised.", "_____no_output_____" ] ], [ [ "# Example Solution\n\ndef fibonacci(): \n    \"Request n from the user, then compute the nth Fibonacci number\"\n    try:\n        n = float(input(\"Enter a whole number, greater than or equal to zero, to return the nth Fibonacci number \"))\n        \n        if n < 0:\n            raise ValueError(\"Number must be $\\ge 0$\")\n        \n        if n != int(n):\n            raise ValueError(\"Number must be whole number\")\n        \n        return f(n)\n    \n    except ValueError:\n        return fibonacci()\n    \ndef f(n): \n    \"Compute the nth Fibonacci number using recursion\"\n    if n == 0:\n        return 0 \n\n    elif n == 1:\n        return 1 \n\n    else:\n        return f(n - 1) + f(n - 2) \n    \na = fibonacci()\nprint(a)", "_____no_output_____" ] ], [ [ "### Review Exercise: Raising Exceptions\n\nIn Seminar 4: Functions, you wrote a Python function to find the root of a mathematical function that lies between two limits.\n\nFind the function that you wrote and copy and paste it in the cell below. \n\nModify your function so that it raises an error if the maximum number of iterations is exceeded. 
\n\nReduce the maximum allowed iterations to test that an exception is raised.\n", "_____no_output_____" ] ], [ [ "# Copy and paste code here\ndef bisection(f, a, b, tol = 1.0e-6, nmax = 32):\n    if nmax > 1024:\n        raise ValueError(\"'nmax' should not be greater than 1024.\")\n    a = [a, f(a)]\n    b = [b, f(b)]\n    if a[1] * b[1] <= 0:\n        if a[1] > 0:\n            c = a\n            a = b\n            b = c\n        c = [0, 0]\n        for i in range(nmax):\n            if a[1] + tol > 0:\n                return a[0]\n            elif b[1] < tol:\n                return b[0]\n            else:\n                c[0] = 0.5 * (a[0] + b[0])\n                c[1] = f(c[0])\n                if c[1] > 0:\n                    b[0] = c[0]\n                    b[1] = c[1]\n                else:\n                    a[0] = c[0]\n                    a[1] = c[1]\n        raise ValueError(\"No answer was found.\")\n    else:\n        raise ValueError(\"'f(a) * f(b)' should not be positive.\")\n\ndef F(x):\n    return ((x + 2.0) * x + 3.0) * x + 4.0\n\nans = bisection(F, -100, 100)\nprint(ans, F(ans))", "-1.6506291925907135 -5.262953628459854e-09\n" ], [ "# Example Solution\n\ndef F(x):\n    return (4 * x**3) - (3 * x**2) - (25 * x) - 6\n\ndef bisection(f, a, b, tol=1e-6, nmax=10):\n    \"\"\"\n    Estimates the root of a function, F(x), using two values; x = a and x = b, where F(a)F(b) < 0\n    \"\"\"\n    if (f(a) * f(b) < 0): \n        \n        xmid = (a + b) / 2\n\n        #for i in range(nmax):\n        iterations = 0\n        while True:\n            \n            if iterations >= nmax:\n                raise RuntimeError(\"Max iterations exceeded\")\n\n            print(round(f(xmid), 5))\n\n            if (abs(f(xmid)) < 10E-6):\n                return xmid\n\n            # If F(x) changes sign between F(x_mid) and F(a), \n            # the root must lie between F(x_mid) and F(a)\n            if f(xmid) * f(a) < 0:\n                b = xmid\n                xmid = (a + b)/2\n\n\n            # If F(x) changes sign between F(x_mid) and F(b), \n            # the root must lie between F(x_mid) and F(b) \n            else:\n                a = xmid\n                xmid = (a + b)/2 \n            \n            iterations += 1\n            \nroot = bisection(F, -0.6, 0)\n\nprint(\"root = \", round(root, 4))", "_____no_output_____" ] ], [ [ "# Summary\n - Errors (or *bugs*) can be divided into two types: *syntax errors* and *exceptions*.\n - Syntax errors occur when the code you write does not conform to the rules of the Python language. \n - Exceptions occur when the *syntax* is correct but something unexpected happens during the execution of a program. \n - Python detects some instances of this automatically.\n - The keyword `raise` causes Python to stop the program and generate an error message.\n - The keywords `try` and `except` can be used to *catch* exceptions, preventing anticipated errors from stopping the program.\n - The `try` is optionally followed by the keyword `finally` (somewhere in the same block of code), which executes code regardless of the outcome of the `try` statement.", "_____no_output_____" ], [ "# Homework\n\n1. __COMPLETE__ any unfinished Review Exercises.\n1. __PUSH__ the changes you make at home to your online repository:\n    - push to your ILAS python repository if you complete the Review Questions __within cells in this notebook__.\n    - push to your *new* repository (created after Seminar 9: Input and Output) if you complete the Review Questions __as standalone Python .py files__.\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ec5a4e307f0b1b72fd8d44b4bb87879b8dd40399
29,858
ipynb
Jupyter Notebook
docs_src/basic_data.ipynb
VictorXunS/fastai
ba401da9e93f4bee3595754abe1b7c14175c8c27
[ "Apache-2.0" ]
1
2019-04-08T09:52:28.000Z
2019-04-08T09:52:28.000Z
docs_src/basic_data.ipynb
VictorXunS/fastai
ba401da9e93f4bee3595754abe1b7c14175c8c27
[ "Apache-2.0" ]
null
null
null
docs_src/basic_data.ipynb
VictorXunS/fastai
ba401da9e93f4bee3595754abe1b7c14175c8c27
[ "Apache-2.0" ]
1
2020-05-19T12:56:20.000Z
2020-05-19T12:56:20.000Z
33.81427
788
0.577668
[ [ [ "## Get your data ready for training", "_____no_output_____" ], [ "This module defines the basic [`DataBunch`](/basic_data.html#DataBunch) object that is used inside [`Learner`](/basic_train.html#Learner) to train a model. This is the generic class that can take any kind of fastai [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) or [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). You'll find helpful functions in the data module of every application to directly create this [`DataBunch`](/basic_data.html#DataBunch) for you.", "_____no_output_____" ] ], [ [ "from fastai.gen_doc.nbdoc import *\nfrom fastai.basics import * ", "_____no_output_____" ], [ "show_doc(DataBunch)", "_____no_output_____" ] ], [ [ "It also ensures all the dataloaders are on `device` and applies `tfms` to them as batches are drawn (like normalization). `path` is used internally to store temporary files, `collate_fn` is passed to the pytorch `DataLoader` (replacing the one there) to explain how to collate the samples picked for a batch. By default, it applies data to the object sent (see in [`vision.image`](/vision.image.html#vision.image) or the [data block API](/data_block.html) why this can be important). \n\n`train_dl`, `valid_dl` and optionally `test_dl` will be wrapped in [`DeviceDataLoader`](/basic_data.html#DeviceDataLoader).", "_____no_output_____" ], [ "### Factory method", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.create)", "_____no_output_____" ] ], [ [ "`num_workers` is the number of CPUs to use, `tfms`, `device` and `collate_fn` are passed to the init method.", "_____no_output_____" ] ], [ [ "jekyll_warn(\"You can pass regular pytorch Dataset here, but they'll require more attributes than the basic ones to work with the library. See below for more details.\")", "_____no_output_____" ] ], [ [ "### Visualization", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.show_batch)", "_____no_output_____" ] ], [ [ "### Grabbing some data", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.dl)", "_____no_output_____" ], [ "show_doc(DataBunch.one_batch)", "_____no_output_____" ], [ "show_doc(DataBunch.one_item)", "_____no_output_____" ], [ "show_doc(DataBunch.sanity_check)", "_____no_output_____" ] ], [ [ "### Load and save", "_____no_output_____" ], [ "You can save your [`DataBunch`](/basic_data.html#DataBunch) object for future use with this method.", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.save)", "_____no_output_____" ], [ "show_doc(load_data)", "_____no_output_____" ], [ "jekyll_important(\"The arguments you passed when you created your first `DataBunch` aren't saved, so you should pass them here if you don't want the default.\")", "_____no_output_____" ] ], [ [ "This is to allow you to easily create a new [`DataBunch`](/basic_data.html#DataBunch) with a different batch size for instance. 
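A minimal sketch (assuming the `DataBunch` was previously saved with the default filename `data_save.pkl` used by [`DataBunch.save`](/basic_data.html#DataBunch.save)):\n\n```python\ndata = load_data(path, 'data_save.pkl', bs=16)\n```\n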
You will also need to reapply any normalization (in vision) you might have done on your original [`DataBunch`](/basic_data.html#DataBunch).", "_____no_output_____" ], [ "### Empty [`DataBunch`](/basic_data.html#DataBunch) for inference", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.export)", "_____no_output_____" ], [ "show_doc(DataBunch.load_empty, full_name='load_empty')", "_____no_output_____" ] ], [ [ "This method should be used to create a [`DataBunch`](/basic_data.html#DataBunch) at inference, see the corresponding [tutorial](/tutorial.inference.html).", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.add_test)", "_____no_output_____" ] ], [ [ "### Dataloader transforms", "_____no_output_____" ] ], [ [ "show_doc(DataBunch.add_tfm)", "_____no_output_____" ] ], [ [ "Adds a transform to all dataloaders.", "_____no_output_____" ], [ "## Using a custom Dataset in fastai", "_____no_output_____" ], [ "If you want to use your pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) in fastai, you may need to implement more attributes/methods if you want to use the full functionality of the library. Some functions can easily be used with your pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) if you just add an attribute, for others, the best would be to create your own [`ItemList`](/data_block.html#ItemList) by following [this tutorial](/tutorial.itemlist.html). Here is a full list of what the library will expect.", "_____no_output_____" ], [ "### Basics", "_____no_output_____" ], [ "First of all, you obviously need to implement the methods `__len__` and `__getitem__`, as indicated by the pytorch docs. Then the most needed things would be:\n- `c` attribute: it's used in most functions that directly create a [`Learner`](/basic_train.html#Learner) ([`tabular_learner`](/tabular.data.html#tabular_learner), [`text_classifier_learner`](/text.learner.html#text_classifier_learner), [`unet_learner`](/vision.learner.html#unet_learner), [`create_cnn`](/vision.learner.html#create_cnn)) and represents the number of outputs of the final layer of your model (also the number of classes if applicable).\n- `classes` attribute: it's used by [`ClassificationInterpretation`](/train.html#ClassificationInterpretation) and also in [`collab_learner`](/collab.html#collab_learner) (best to use [`CollabDataBunch.from_df`](/collab.html#CollabDataBunch.from_df) than a pytorch [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset)) and represents the unique tags that appear in your data.\n- maybe a `loss_func` attribute: that is going to be used by [`Learner`](/basic_train.html#Learner) as a default loss function, so if you know your custom [`Dataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset) requires a particular loss, you can put it.\n", "_____no_output_____" ], [ "### For a specific application", "_____no_output_____" ], [ "In text, your dataset will need to have a `vocab` attribute that should be an instance of [`Vocab`](/text.transform.html#Vocab). 
It's used by [`text_classifier_learner`](/text.learner.html#text_classifier_learner) and [`language_model_learner`](/text.learner.html#language_model_learner) when building the model.\n\nIn tabular, your dataset will need to have a `cont_names` attribute (for the names of continuous variables) and a `get_emb_szs` method that returns a list of tuple `(n_classes, emb_sz)` representing, for each categorical variable, the number of different codes (don't forget to add 1 for nan) and the corresponding embedding size. Those two are used with the `c` attribute by [`tabular_learner`](/tabular.data.html#tabular_learner). ", "_____no_output_____" ], [ "### Functions that really won't work", "_____no_output_____" ], [ "To make those last functions work, you really need to use the [data block API](/data_block.html) and maybe write your own [custom ItemList](/tutorial.itemlist.html).", "_____no_output_____" ], [ "- [`DataBunch.show_batch`](/basic_data.html#DataBunch.show_batch) (requires `.x.reconstruct`, `.y.reconstruct` and `.x.show_xys`)\n- [`Learner.predict`](/basic_train.html#Learner.predict) (requires `x.set_item`, `.y.analyze_pred`, `.y.reconstruct` and maybe `.x.reconstruct`)\n- [`Learner.show_results`](/basic_train.html#Learner.show_results) (requires `x.reconstruct`, `y.analyze_pred`, `y.reconstruct` and `x.show_xyzs`)\n- `DataBunch.set_item` (requires `x.set_item`)\n- [`Learner.backward`](/basic_train.html#Learner.backward) (uses `DataBunch.set_item`)\n- [`DataBunch.export`](/basic_data.html#DataBunch.export) (requires `export`)", "_____no_output_____" ] ], [ [ "show_doc(DeviceDataLoader)", "_____no_output_____" ] ], [ [ "Put the batches of `dl` on `device` after applying an optional list of `tfms`. `collate_fn` will replace the one of `dl`. All dataloaders of a [`DataBunch`](/basic_data.html#DataBunch) are of this type. ", "_____no_output_____" ], [ "### Factory method", "_____no_output_____" ] ], [ [ "show_doc(DeviceDataLoader.create)", "_____no_output_____" ] ], [ [ "The given `collate_fn` will be used to put the samples together in one batch (by default it grabs their data attribute). `shuffle` means the dataloader will take the samples randomly if that flag is set to `True`, or in the right order otherwise. `tfms` are passed to the init method. All `kwargs` are passed to the pytorch [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) class initialization.", "_____no_output_____" ], [ "### Methods", "_____no_output_____" ] ], [ [ "show_doc(DeviceDataLoader.add_tfm)", "_____no_output_____" ], [ "show_doc(DeviceDataLoader.remove_tfm)", "_____no_output_____" ], [ "show_doc(DeviceDataLoader.new)", "_____no_output_____" ], [ "show_doc(DeviceDataLoader.proc_batch)", "_____no_output_____" ], [ "show_doc(DatasetType, doc_string=False)", "_____no_output_____" ] ], [ [ "Internal enumerator to name the training, validation and test dataset/dataloader.", "_____no_output_____" ], [ "## Undocumented Methods - Methods moved below this line will intentionally be hidden", "_____no_output_____" ] ], [ [ "show_doc(DeviceDataLoader.collate_fn)", "_____no_output_____" ] ], [ [ "## New Methods - Please document or move to the undocumented section", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec5a51fe6afb8a06730849a72fc65b7044b58736
40,734
ipynb
Jupyter Notebook
Chapter5/question8.ipynb
wllgrnt/islr-python-examples
2dfaef6e43f3472ba3df0e1060623181b40a53ff
[ "MIT" ]
2
2019-12-17T22:14:59.000Z
2020-03-24T21:10:10.000Z
Chapter5/question8.ipynb
wllgrnt/islr-python-examples
2dfaef6e43f3472ba3df0e1060623181b40a53ff
[ "MIT" ]
null
null
null
Chapter5/question8.ipynb
wllgrnt/islr-python-examples
2dfaef6e43f3472ba3df0e1060623181b40a53ff
[ "MIT" ]
null
null
null
65.7
16,000
0.637772
[ [ [ "# Chapter 5\n\n## Question 8\n\nIn which we do cross-validation on a simulated data set.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport statsmodels.formula.api as smf\nsns.set(style=\"whitegrid\")", "_____no_output_____" ] ], [ [ "### (a) Generate a simulated data set.", "_____no_output_____" ] ], [ [ "x = np.random.normal(size=100)\ny = x-2*np.square(x) + np.random.normal(size=100)", "_____no_output_____" ], [ "x", "_____no_output_____" ], [ "y", "_____no_output_____" ] ], [ [ "### (b) Create a scatterplot of X against Y", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(figsize=(15,10))\nplt.scatter(x,y)", "_____no_output_____" ] ], [ [ "As you'd expect, most of the points occur around x=0 (as we are drawing from a normal distribution with mean 0 and standard deviation 1). The plot is indeed x(1-2x), a quadratic passing through 0 with negative curvature.", "_____no_output_____" ], [ "### (c) Set a seed, then compute the LOOCV errors that result from fitting the following four models:\n\n1) $ Y = \\beta_0 + \\beta_1X + \\epsilon$\n\n2) $ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\epsilon$\n\n3) $ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\epsilon$\n\n4) $ Y = \\beta_0 + \\beta_1X + \\beta_2X^2 + \\beta_3X^3 + \\beta_4X^4 + \\epsilon$", "_____no_output_____" ] ], [ [ "df = pd.DataFrame(data=[x, y]).T\ndf.columns = [\"x\", \"y\"]\n", "_____no_output_____" ], [ "def leaveOneOut(index, *arrays):\n \"\"\"\n Given array1, array2, etc, (which should be DataFrames)\n return the row at specified index, and the remainder, for each array\n \"\"\"\n objects = []\n for array in arrays:\n row = array.iloc[index]\n objects.append(row)\n remainder = array.drop(index)\n objects.append(remainder)\n return objects\n", "_____no_output_____" ], [ "square_errors = []\n\nnp.random.seed(10)\nfor i in range(len(df)):\n row, remainder = leaveOneOut(i, df)\n\n x_test = row.x\n x_train = remainder.x\n y_test = row.y\n y_train = remainder.y\n\n model = smf.ols('y ~ x ', data=remainder)\n results = model.fit()\n y_pred = results.predict(exog=dict(x=x_test))\n square_error = np.square(y_pred - y_test)\n square_error = square_error[0] # Take the first element, as we're only predicting one point\n square_errors.append(square_error)\nprint(f\"Linear model error: {np.mean(square_errors):.2f}\")", "Linear model error: 4.57\n" ], [ "square_errors = []\n\nnp.random.seed(10)\nfor i in range(len(df)):\n row, remainder = leaveOneOut(i, df)\n\n x_test = row.x\n x_train = remainder.x\n y_test = row.y\n y_train = remainder.y\n\n model = smf.ols('y ~ x + np.square(x)', data=remainder)\n results = model.fit()\n y_pred = results.predict(exog=dict(x=x_test))\n square_error = np.square(y_pred - y_test)\n square_error = square_error[0] # Take the first element, as we're only predicting one point\n square_errors.append(square_error)\nprint(f\"Square model error: {np.mean(square_errors):.2f}\")", "Square model error: 1.13\n" ], [ "square_errors = []\n\nnp.random.seed(10)\nfor i in range(len(df)):\n row, remainder = leaveOneOut(i, df)\n\n x_test = row.x\n x_train = remainder.x\n y_test = row.y\n y_train = remainder.y\n\n model = smf.ols('y ~ x + np.square(x) + np.power(x,3)', data=remainder)\n results = model.fit()\n y_pred = results.predict(exog=dict(x=x_test))\n square_error = np.square(y_pred - y_test)\n square_error = square_error[0] # Take the first element, as we're only predicting one point\n 
square_errors.append(square_error)\nprint(f\"Cubic model error: {np.mean(square_errors):.2f}\")", "Cubic model error: 1.14\n" ], [ "square_errors = []\n\nnp.random.seed(10)\nfor i in range(len(df)):\n row, remainder = leaveOneOut(i, df)\n\n x_test = row.x\n x_train = remainder.x\n y_test = row.y\n y_train = remainder.y\n\n model = smf.ols('y ~ x + np.square(x) + np.power(x,3) + np.power(x,4)',\n data=remainder)\n results = model.fit()\n y_pred = results.predict(exog=dict(x=x_test))\n square_error = np.square(y_pred - y_test)\n square_error = square_error[0] # Take the first element, as we're only predicting one point\n square_errors.append(square_error)\nprint(f\"Quartic model error: {np.mean(square_errors):.2f}\")", "Quartic model error: 1.16\n" ] ], [ [ "We can check all the above using the formula:\n\n$$ CV = \\frac{1}{n} \\sum_{i=1}^{n}{\\left( \\frac{y_i-\\hat{y}_i}{1-h_i} \\right)^2} $$\n\nWhere $h_i$ is the leverage.", "_____no_output_____" ] ], [ [ "n = len(df)\nleverage = 1/n + np.square(df.x - np.mean(df.x))/np.sum(np.square(df.x - np.mean(df.x)))\ndf['leverage'] = leverage\n", "_____no_output_____" ], [ "summaries = []\nmodel = smf.ols('y ~ x ', data=df)\nresults = model.fit()\nsummaries.append(results.summary())\ny_pred = results.predict(df.x)\nerr = 1/n * np.sum( np.square( (df.y - y_pred)/(1-df.leverage) ) )\nprint(f\"Linear model error (analytic): {err:.2f}\")\n\n\n\nmodel = smf.ols('y ~ x + np.square(x)', data=df)\nresults = model.fit()\ny_pred = results.predict(df.x)\nsummaries.append(results.summary())\n\ninfl = results.get_influence()\nleverage = infl.hat_matrix_diag\nerr = 1/n * np.sum( np.square( (df.y - y_pred)/(1-leverage) ) )\nprint(f\"Quadratic model error (analytic): {err:.2f}\")\n\nmodel = smf.ols('y ~ x + np.square(x) + np.power(x,3)', data=df)\nresults = model.fit()\ny_pred = results.predict(df.x)\nsummaries.append(results.summary())\n\ninfl = results.get_influence()\nleverage = infl.hat_matrix_diag\nerr = 1/n * np.sum( np.square( (df.y - y_pred)/(1-leverage) ) )\nprint(f\"Cubic model error (analytic): {err:.2f}\")\n\nmodel = smf.ols('y ~ x + np.square(x) + np.power(x,3) + np.power(x,4)', data=df)\nresults = model.fit()\ny_pred = results.predict(df.x)\nsummaries.append(results.summary())\n\ninfl = results.get_influence()\nleverage = infl.hat_matrix_diag\nerr = 1/n * np.sum( np.square( (df.y - y_pred)/(1-leverage) ) )\nprint(f\"Quartic model error (analytic): {err:.2f}\")", "Linear model error (analytic): 4.57\nQuadratic model error (analytic): 1.13\nCubic model error (analytic): 1.14\nQuartic model error (analytic): 1.16\n" ] ], [ [ "Note I don't know how to compute the leverage for the polynomial cases, so I pulled these obscure methods from the statsmodels source code.", "_____no_output_____" ], [ "### (e) Which of the models has the smallest LOOCV error? \n\nThe quadratic model has the smallest error, as expected since y is generated as a quadratic function of x", "_____no_output_____" ], [ "### (f) Do the statistical significances of the coefficients in each model agree with the above conclusions?", "_____no_output_____" ] ], [ [ "print(summaries)", "[<class 'statsmodels.iolib.summary.Summary'>\n\"\"\"\n OLS Regression Results \n==============================================================================\nDep. Variable: y R-squared: 0.164\nModel: OLS Adj. R-squared: 0.156\nMethod: Least Squares F-statistic: 19.28\nDate: Sun, 26 May 2019 Prob (F-statistic): 2.85e-05\nTime: 16:48:20 Log-Likelihood: -214.23\nNo. 
Observations: 100 AIC: 432.5\nDf Residuals: 98 BIC: 437.7\nDf Model: 1 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nIntercept -1.6901 0.209 -8.088 0.000 -2.105 -1.275\nx 1.0363 0.236 4.391 0.000 0.568 1.505\n==============================================================================\nOmnibus: 19.671 Durbin-Watson: 2.146\nProb(Omnibus): 0.000 Jarque-Bera (JB): 24.058\nSkew: -1.069 Prob(JB): 5.97e-06\nKurtosis: 4.098 Cond. No. 1.16\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\"\"\", <class 'statsmodels.iolib.summary.Summary'>\n\"\"\"\n OLS Regression Results \n==============================================================================\nDep. Variable: y R-squared: 0.791\nModel: OLS Adj. R-squared: 0.786\nMethod: Least Squares F-statistic: 183.3\nDate: Sun, 26 May 2019 Prob (F-statistic): 1.12e-33\nTime: 16:48:20 Log-Likelihood: -145.00\nNo. Observations: 100 AIC: 296.0\nDf Residuals: 97 BIC: 303.8\nDf Model: 2 \nCovariance Type: nonrobust \n================================================================================\n coef std err t P>|t| [0.025 0.975]\n--------------------------------------------------------------------------------\nIntercept -0.2008 0.137 -1.469 0.145 -0.472 0.070\nx 0.9059 0.119 7.617 0.000 0.670 1.142\nnp.square(x) -1.8871 0.111 -17.041 0.000 -2.107 -1.667\n==============================================================================\nOmnibus: 0.389 Durbin-Watson: 1.933\nProb(Omnibus): 0.823 Jarque-Bera (JB): 0.546\nSkew: 0.112 Prob(JB): 0.761\nKurtosis: 2.716 Cond. No. 2.22\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\"\"\", <class 'statsmodels.iolib.summary.Summary'>\n\"\"\"\n OLS Regression Results \n==============================================================================\nDep. Variable: y R-squared: 0.792\nModel: OLS Adj. R-squared: 0.785\nMethod: Least Squares F-statistic: 121.5\nDate: Sun, 26 May 2019 Prob (F-statistic): 1.45e-32\nTime: 16:48:20 Log-Likelihood: -144.82\nNo. Observations: 100 AIC: 297.6\nDf Residuals: 96 BIC: 308.1\nDf Model: 3 \nCovariance Type: nonrobust \n==================================================================================\n coef std err t P>|t| [0.025 0.975]\n----------------------------------------------------------------------------------\nIntercept -0.2072 0.138 -1.506 0.135 -0.480 0.066\nx 1.0214 0.229 4.452 0.000 0.566 1.477\nnp.square(x) -1.8895 0.111 -16.994 0.000 -2.110 -1.669\nnp.power(x, 3) -0.0595 0.101 -0.589 0.557 -0.260 0.141\n==============================================================================\nOmnibus: 0.359 Durbin-Watson: 1.931\nProb(Omnibus): 0.836 Jarque-Bera (JB): 0.514\nSkew: 0.110 Prob(JB): 0.773\nKurtosis: 2.727 Cond. No. 5.05\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\"\"\", <class 'statsmodels.iolib.summary.Summary'>\n\"\"\"\n OLS Regression Results \n==============================================================================\nDep. Variable: y R-squared: 0.792\nModel: OLS Adj. 
R-squared: 0.783\nMethod: Least Squares F-statistic: 90.30\nDate: Sun, 26 May 2019 Prob (F-statistic): 1.66e-31\nTime: 16:48:20 Log-Likelihood: -144.76\nNo. Observations: 100 AIC: 299.5\nDf Residuals: 95 BIC: 312.6\nDf Model: 4 \nCovariance Type: nonrobust \n==================================================================================\n coef std err t P>|t| [0.025 0.975]\n----------------------------------------------------------------------------------\nIntercept -0.2395 0.172 -1.391 0.168 -0.581 0.102\nx 1.0195 0.231 4.421 0.000 0.562 1.477\nnp.square(x) -1.7828 0.357 -4.988 0.000 -2.492 -1.073\nnp.power(x, 3) -0.0603 0.102 -0.594 0.554 -0.262 0.141\nnp.power(x, 4) -0.0339 0.108 -0.314 0.754 -0.248 0.180\n==============================================================================\nOmnibus: 0.393 Durbin-Watson: 1.942\nProb(Omnibus): 0.822 Jarque-Bera (JB): 0.555\nSkew: 0.101 Prob(JB): 0.758\nKurtosis: 2.697 Cond. No. 13.9\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n\"\"\"]\n" ] ], [ [ "We see from the above that the cubic and above terms are not statistically significant, so there is agreement between the cross-validation results and the p-values.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec5a59664760d2db3980ee7f1e897b23de9de070
2,948
ipynb
Jupyter Notebook
notebooks/Using OpenCV/ip-camera-streaming-into-opencv.ipynb
jeffskinnerbox/computer-vision
e689680ee067c357275eb6fef53e95e92d5829dd
[ "Unlicense", "MIT" ]
1
2021-01-18T10:30:48.000Z
2021-01-18T10:30:48.000Z
notebooks/Using OpenCV/ip-camera-streaming-into-opencv.ipynb
jeffskinnerbox/computer-vision
e689680ee067c357275eb6fef53e95e92d5829dd
[ "Unlicense", "MIT" ]
1
2021-10-12T22:07:50.000Z
2021-10-12T22:07:50.000Z
notebooks/Using OpenCV/ip-camera-streaming-into-opencv.ipynb
jeffskinnerbox/computer-vision
e689680ee067c357275eb6fef53e95e92d5829dd
[ "Unlicense", "MIT" ]
null
null
null
27.296296
126
0.507802
[ [ [ "# IP Camera Streaming into OpenCV\nAs getting vision from an IP camera into OpenCV is an unnecessarily tricky stumbling block,\nwe’ll only concentrate on the code that streams vision from an IP camera to\nOpenCV which then simply displays that stream. ", "_____no_output_____" ] ], [ [ "import time\n\nimport cv2\nimport numpy as np\nimport requests\nfrom threading import Thread, ThreadError\n\n\nclass Cam():\n\n    def __init__(self, url):\n        self.stream = requests.get(url, stream=True)\n        self.thread_cancelled = False\n        self.thread = Thread(target=self.run)\n        print(\"camera initialised\")\n\n    def start(self):\n        self.thread.start()\n        print(\"camera stream started\")\n\n    def run(self):\n        # Accumulate the MJPEG byte stream and carve out single JPEG frames,\n        # delimited by the SOI (0xffd8) and EOI (0xffd9) markers.\n        buf = b''\n        while not self.thread_cancelled:\n            try:\n                buf += self.stream.raw.read(1024)\n                a = buf.find(b'\\xff\\xd8')\n                b = buf.find(b'\\xff\\xd9')\n                if a != -1 and b != -1:\n                    jpg = buf[a:b + 2]\n                    buf = buf[b + 2:]\n                    img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)\n                    cv2.imshow('cam', img)\n                    if cv2.waitKey(1) == 27:  # stop displaying on ESC\n                        break\n            except ThreadError:\n                self.thread_cancelled = True\n\n    def is_running(self):\n        return self.thread.is_alive()\n\n    def shut_down(self):\n        self.thread_cancelled = True\n        # block while waiting for thread to terminate\n        while self.thread.is_alive():\n            time.sleep(1)\n        return True\n\n\nif __name__ == \"__main__\":\n    url = 'http://192.168.2.1/?action=stream'\n    cam = Cam(url)\n    cam.start()", "_____no_output_____" ] ], [ [ "## Sources\n* [OpenCV and IP camera streaming with Python](http://benhowell.github.io/guide/2015/03/09/opencv-and-web-cam-streaming)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5a5dac2c487aae3581309d27f8db07cdffd8f7
21,794
ipynb
Jupyter Notebook
scripts/amazon.ipynb
Conni2461/book_reviews
cc39cde559ee599c1e4841e71ef304f74686ed58
[ "MIT" ]
null
null
null
scripts/amazon.ipynb
Conni2461/book_reviews
cc39cde559ee599c1e4841e71ef304f74686ed58
[ "MIT" ]
null
null
null
scripts/amazon.ipynb
Conni2461/book_reviews
cc39cde559ee599c1e4841e71ef304f74686ed58
[ "MIT" ]
null
null
null
66.243161
1,671
0.596999
[ [ [ "from sqlalchemy import create_engine, Column, Date, String, Integer, Float, exc, and_, or_\nfrom sqlalchemy.orm import Session, declarative_base\n\nimport requests\n\nfrom selectorlib import Extractor\nimport json\nfrom time import sleep\nimport random\nimport re", "_____no_output_____" ], [ "def scrape(url, file):\n    e = Extractor.from_yaml_file(file)\n    headers = {\n        'authority': 'www.amazon.com',\n        'pragma': 'no-cache',\n        'cache-control': 'no-cache',\n        'dnt': '1',\n        'upgrade-insecure-requests': '1',\n        'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n        'sec-fetch-site': 'none',\n        'sec-fetch-mode': 'navigate',\n        'sec-fetch-dest': 'document',\n        'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n    }\n    # Download the page using requests\n    r = requests.get(url, headers=headers)\n    # Simple check to check if page was blocked (Usually 503)\n    if r.status_code > 500:\n        return None\n\n    # Pass the HTML of the page and create\n    return e.extract(r.text)", "_____no_output_____" ], [ "engine = create_engine(\"sqlite+pysqlite:///books.sqlite\")\nBase = declarative_base()\nBase.metadata.create_all(engine)\nsession = Session(engine)", "_____no_output_____" ], [ "class Amazon(Base):\n    __tablename__ = \"Amazon\"\n\n    isbn = Column(String, primary_key=True, nullable=False)\n    title = Column(String, nullable=True)\n    subtitle = Column(String, nullable=True)\n    author = Column(String, nullable=True)\n    description = Column(String, nullable=True)\n    book_type = Column(String, nullable=True)\n    price = Column(Float, nullable=True)\n    rating = Column(Float, nullable=True)\n    review_count = Column(Integer, nullable=True)\n    page_count = Column(Integer, nullable=True)\n    language = Column(String, nullable=True)\n    publisher = Column(String, nullable=True)\n    published_date = Column(String, nullable=True)\n    url = Column(String, nullable=True)\n\n    def __repr__(self) -> str:\n        return (\n            f\"Amazon(isbn={self.isbn!r}, \"\n            f\"title={self.title!r}, \"\n            f\"subtitle={self.subtitle!r}, \"\n            f\"author={self.author!r}, \"\n            f\"description={self.description!r}, \"\n            f\"book_type={self.book_type!r}, \"\n            f\"price={self.price!r}, \"\n            f\"rating={self.rating!r}, \"\n            f\"review_count={self.review_count!r}, \"\n            f\"page_count={self.page_count!r}, \"\n            f\"language={self.language!r}, \"\n            f\"publisher={self.publisher!r}, \"\n            f\"published_date={self.published_date!r}, \"\n            f\"url={self.url!r})\"\n        )\n", "_____no_output_____" ], [ "def scrapeAmazon(level):\n    # Note: the title column doubles as a retry counter here; after a failed\n    # scrape it stores the negative retry level (-10, -11, ...).\n    print(f\"level : {-level}\")\n    books = (\n        session.query(Amazon).filter(((Amazon.title == None) | (Amazon.title == -level)) & (Amazon.rating > 0)).all()\n    )\n    print(len(books))\n    for book in books:\n        try:\n            amazon = scrape(book.url, \"definitions/amazon.yml\")\n            print(amazon, book.url)\n\n            if amazon is not None:\n                changed_flag = False\n\n                try:\n                    if amazon[\"title\"] is not None:\n                        book.title = amazon[\"title\"]\n                        changed_flag = True\n\n                    if amazon[\"subtitle\"] is not None:\n                        book.subtitle = amazon[\"subtitle\"]\n                        changed_flag = True\n\n                    if amazon[\"author\"] is not None:\n                        book.author = amazon[\"author\"]\n                        changed_flag = True\n\n                    if amazon[\"description\"] is not None:\n                        book.description = amazon[\"description\"]\n                        changed_flag = True\n\n                    if amazon[\"price\"] is not None:\n                        book.price = re.sub(r'^.*?\\$', '', amazon[\"price\"])\n                        changed_flag = True\n\n                    if amazon[\"page_count\"] is not None:\n                        book.page_count = amazon[\"page_count\"]\n                        changed_flag = True\n\n                    if amazon[\"language\"] is not None:\n                        book.language = amazon[\"language\"]\n                        changed_flag = True\n\n                    if amazon[\"publisher\"] is not None:\n                        book.publisher = amazon[\"publisher\"]\n                        changed_flag = True\n\n                    if amazon[\"published_date\"] is not None:\n                        book.published_date = amazon[\"published_date\"]\n                        changed_flag = True\n\n                    if amazon[\"book_type\"] is not None:\n                        book.book_type = amazon[\"book_type\"]\n                        changed_flag = True\n                except:\n                    session.rollback()\n\n                if not changed_flag:\n                    try:\n                        # nothing scraped: bump the retry counter stored in title\n                        book.title = book.title - 1\n                    except:  # does the same thing, but separated for semantic reasons\n                        session.rollback()\n\n                session.commit()\n        except:\n            pass\n", "_____no_output_____" ], [ "# start i at 10\nfor i in range(20):\n\n    scrapeAmazon(i + 10)\n\n    print(\"FAILED\")", "level : -10\n296\n{'title': None, 'subtitle': None, 'author': None, 'description': None, 'price': None, 'rating': None, 'review_count': None, 'page_count': None, 'language': None, 'publisher': None, 'published_date': None, 'book_type': None} https://www.amazon.com/Good-Omens/dp/B002WEBBBO?tag=NYTBSREV-20\nFAILED\nlevel : -11\n" ], [ "books = session.query(Amazon).filter(((Amazon.rating == None) | (Amazon.rating <= -2)) & (Amazon.url != None)).all()", "_____no_output_____" ], [ "session.rollback()", "_____no_output_____" ], [ "amazon = scrape(\"https://www.amazon.com/dp/1984818503?tag=NYTBSREV-20\", \"definitions/amazon.yml\")", "_____no_output_____" ], [ "amazon\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5a7b39da69f91c6d80f897d037a9baefdff078
43,369
ipynb
Jupyter Notebook
pittsburgh-bridges-data-set-analysis/models-analyses/merge_analyses/latex_merge/Section 1.2 - Learning Process and Pca Analysis.ipynb
franec94/Pittsburgh-Bridge-Dataset
682ff0e3979ca565637e858cc36dc07c2aeda7d6
[ "MIT" ]
null
null
null
pittsburgh-bridges-data-set-analysis/models-analyses/merge_analyses/latex_merge/Section 1.2 - Learning Process and Pca Analysis.ipynb
franec94/Pittsburgh-Bridge-Dataset
682ff0e3979ca565637e858cc36dc07c2aeda7d6
[ "MIT" ]
7
2021-02-02T22:51:40.000Z
2022-03-12T00:39:08.000Z
pittsburgh-bridges-data-set-analysis/models-analyses/merge_analyses/latex_merge/Section 1.2 - Learning Process and Pca Analysis.ipynb
franec94/Pittsburgh-Bridge-Dataset
682ff0e3979ca565637e858cc36dc07c2aeda7d6
[ "MIT" ]
null
null
null
97.458427
2,232
0.709585
[ [ [ "## Learning Process <a class=\"anchor\" id=\"learning-models\"></a>\n\nIn this section we describe, and then test and evaluate the performance of, the various machine learning models that we selected and adopted to build learning models for the supervised classification task at hand. More precisely, we focus on a binary classification problem, since the target variable, the T-OR-D feature among the 12 features describing the roughly one hundred records of the dataset, is a binary categorical feature. It takes the two values represented by the labels DECK and THROUGH, which describe in two distinct manners the system used for constructing the bridge surface, commonly called the deck, that lets vehicles or trains cross one of three distinct rivers: A, M, O, where A stands for the Allegheny river, M for the Monongahela river and O for the Ohio river.\n\nBefore describing the fine-tuning process applied to the different models according to their own major properties, characteristics and features, we decided to test each model's performance with its default settings, running a cross-validation protocol, also referred to as a policy, to obtain a baseline for the accuracy level and some other metrics.\n\nIn more detail, we follow the common machine learning workflow: after preprocessing the dataset properly, in a manner suitable for the machine learning models' needs, we split it into subsets commonly referred to as the training set and the test set. The former is exploited to build an inference model, while the latter is used to check the model's performance and behavior on held-out samples never seen before, that is, examples the learning model was not provided with while learning weights and selecting the right hyper-parameters to plug back into the model at the end of the training procedure. If the model, tested with the found weights and hyper-parameters, meets our requirements in terms of performance values reached at test time, then it is ready for deployment.\n\n<img src=\"images/machine_learning_workflow_2.png\" alt=\"machine learning workflow\" style=\"width: 400px;\"/>\n\\begin{figure}[h]\n\\caption{Machine Learning Workflow Example}\n\\centering\n\\includegraphics[width=0.5\\textwidth]{images/machine_learning_workflow_2.png}\n\\end{figure}\n\nAs we can see from the image just above, the proposed machine learning scheme, or workflow, consists of the subsequent steps:\n\n__(1) Split Dataset into Train and Test sets__:\nOnce the preprocessing phase is done, we separate the dataset into a training set and a test set, where usually the training set is bigger than the test set; in some cases the test set used for recording performance measures can even be made of just a single instance against which the model is tested and checked.
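\n\nAs a minimal sketch of this first step (using the feature matrix `rescaledX` and target `y` that the notebook prepares further below; the 25% test size and the fixed seed are illustrative choices of ours, not the only reasonable ones):\n\n```python\nfrom sklearn.model_selection import train_test_split\n\n# Hold out 25% of the samples for the final test phase; stratifying on y\n# keeps the DECK/THROUGH proportions similar in both subsets.\nX_train, X_test, y_train, y_test = train_test_split(\n    rescaledX, y, test_size=0.25, stratify=y, random_state=42)\n```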
\n\n__(2) Split Train set into smaller Train set and a Validation set__:\nOnce the first split is made, we hold out the test set for the final check and focus on the training set. In fact, once we have selected a machine learning model among those we want to adopt, we try to identify the best-fitting parameters for it. To reach this goal we can split the training set further into a validation set and a smaller train set, in order to emulate, before the actual test phase, the possible behavior of the model once we think it is ready for testing.\nThere are several procedures for establishing how to divide the training set into these two new subsets, all of them connected to a learning scheme called cross-validation, which roughly speaking consists in testing the model against smaller portions of the training set in order to record and measure the performance of a model before declaring it ready for the test phase. Among the existing cross-validation procedures, we adopt and briefly describe the following:\n\n- **K-fold Cross-Validation**: a cross-validation protocol in which we split the training set into K folds, in other words K subsets all of the same size, apart possibly from the last one, which collects the remaining samples. One at a time, each of the K folds is left out: the model is first trained against the other K-1 subsets (folds) and then tested against the left-out fold to record its performance. At the end of the K rounds we can average the recorded measures and understand how well the model does on average. In other words, we can either take the mean value as the driving value for judging whether the model satisfies our constraints on the performance measures, or adopt the best result among the K trainings as the hyper-parameter settings. This procedure is feasible and suitable if we do not need to worry about whether, in classification tasks, the classes are balanced in terms of number of instances; it also allows us to draw a learning curve and other performance graphics, such as confusion matrices and ROC or precision-recall curves. Usual values for K are 5, 10 and 20.\n\n<img src=\"images/machine_learning_workflow/cross_validation.png\" alt=\"cross validation machine learning workflow\" style=\"width: 400px;\"/>\n\\begin{figure}[h]\n\\caption{Cross Validation Example}\n\\centering\n\\includegraphics[width=0.5\\textwidth]{images/machine_learning_workflow/cross_validation.png}\n\\end{figure}
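\n\nA minimal sketch of K-fold scoring (a default logistic regression stands in here for any of our candidate estimators; `X_train`/`y_train` come from the split sketched above):\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import KFold, cross_val_score\n\n# 10-fold CV: train on 9 folds and score on the held-out fold, 10 times over,\n# then summarise the per-fold accuracies.\nscores = cross_val_score(LogisticRegression(), X_train, y_train,\n                         cv=KFold(n_splits=10, shuffle=True, random_state=42),\n                         scoring='accuracy')\nprint(scores.mean(), scores.std())\n```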
\n\n- **Leave One Out Cross-Validation**: a special case of K-fold Cross-Validation. In fact, instead of adopting the usual values 5, 10 or 20 for K, we let each single instance constitute its own fold. Clearly this algorithm requires more time to complete, and it does not allow us to draw the graphics cited just above, since a single held-out point per round does not provide enough data for a confusion matrix or for ROC and precision-recall curves.\n\n- **Stratified Cross-Validation**: a good compromise when the dataset is still large enough to be exploited for training purposes, but we decide to split the training set into K folds such that each subset has the same proportion of samples coming from the different classes. This operation becomes necessary, or even mandatory, when we detect that the dataset does not contain roughly the same number of samples for each class, in other words when the dataset is unbalanced with respect to the target attribute. By mitigating the imbalance in this way, we hope to fit a model that is not heavily dominated by the most numerous class, but still learns how to classify the samples coming from the less numerous classes without too many misclassification errors. As with plain K-fold Cross-Validation, we are able to produce the same graphics; the difference is that the folds are not randomly sampled from the original training set, but sampled with the same per-class proportions, so as to have the same share of every class inside each fold.\n\nWe try out all three of the described cross-validation techniques to measure how well the default settings of the different models perform, gaining a baseline against which to compare the later results of the fine-tuning process, carried out by exploiting the grid-search technique for selecting the best combination of the proposed values for each candidate machine learning technique.
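\n\nThe three protocols differ only in the splitter object passed to `cross_val_score`; a sketch of the comparison (the SVC estimator is again just an arbitrary default choice):\n\n```python\nfrom sklearn.model_selection import (KFold, LeaveOneOut, StratifiedKFold,\n                                     cross_val_score)\nfrom sklearn.svm import SVC\n\n# Same estimator, three different ways of carving up the training set.\nfor cv in (KFold(n_splits=5), StratifiedKFold(n_splits=5), LeaveOneOut()):\n    scores = cross_val_score(SVC(), X_train, y_train, cv=cv)\n    print(type(cv).__name__, scores.mean())\n```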
\n\n__(3) Grid Search approach__: the technique adopted, taking into account one machine learning algorithm at a time, for selecting the best set of hyper-parameters. It consists in defining for each model a grid of possible values for the different hyper-parameters, which in some sense represent our degrees of freedom with respect to some of the properties that characterize the different models. A grid value may be a real number ranging within a wider or narrower interval, or a string value used to trigger a certain feature of a model in combination with other related aspects of its learning algorithm. We recall that standard grid search proceeds until all possible combinations of the provided values have been trained and tested. Opposite to classic grid search there is another technique, called Random Grid Search, which instead lets the hyper-parameters be sampled randomly within the ranges, or intervals, related to each hyper-parameter. The latter technique can be potentially less expensive, since we test a reduced number of combinations, but it might be sub-optimal, even if the results can still be acceptable and meaningful.\n\n<img src=\"images/machine_learning_workflow/grid_search.png\" alt=\"grid search\" style=\"width: 400px;\"/>\n\\begin{figure}[h]\n\\caption{Grid Search Example}\n\\centering\n\\includegraphics[width=0.5\\textwidth]{images/machine_learning_workflow/grid_search.png}\n\\end{figure}
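\n\nA sketch of an exhaustive search (the parameter grid below is a made-up example, not one of the grids we actually tuned):\n\n```python\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\n# Every combination of C and kernel is cross-validated on the training set.\nsearch = GridSearchCV(SVC(),\n                      param_grid={'C': [0.1, 1.0, 10.0],\n                                  'kernel': ['linear', 'rbf']},\n                      cv=10, scoring='accuracy')\nsearch.fit(X_train, y_train)\nprint(search.best_params_, search.best_score_)\n```\n\nThe randomized variant mentioned above is available in scikit-learn as `RandomizedSearchCV`, with essentially the same interface.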
\n\n__(4) Metrics and model performance's evaluation tools__: before deploying, we have to test our model against the test set, that is, the subset that was held out when the overall dataset, after the preprocessing phase that turns feature values into numbers, was divided into two distinct sets, generally of different size. Test-set evaluation implies exploiting some metrics, such as Accuracy, but several others exist, partly derived from the confusion matrix evaluation tool, such as Precision, Recall, F1-score and the like.\n\nSo, rather than using those metrics directly and in isolation, we can explore the model's performance by means of more useful tools such as the Confusion Matrix and the ROC curve, in order to better understand the model's behavior when fed with new, unobserved samples, as well as how to set the threshold that determines whether the target variable suggests the classified sample belongs to one class or the other. Here we briefly describe the instruments we exploit to measure the model's performance, starting from the confusion matrix and moving on towards the ROC curve.\n\n**Confusion Matrix**: in statistics, a confusion matrix is a grid, or matrix, of numbers which, in the simplest scenario corresponding to a binary classification task, aims at showing how well a model performed when applied to unknown or previously unseen data points, in the following manner. We arbitrarily establish that along the rows we have the fraction of samples that the model has classified, or assigned, to a given class, so the rows account for the *Predicted values*; vice versa, along the columns we have the total number of samples of each class, which all together resemble the so-called *Actual Values*, as we illustrate in the picture just below:\n\n<img src=\"images/machine_learning_workflow/confusion_matrix.png\" alt=\"confusion matrix example\" style=\"width: 400px;\"/>\n\\begin{figure}[h]\n\\caption{Confusion Matrix Example}\n\\centering\n\\includegraphics[width=0.5\\textwidth]{images/machine_learning_workflow/confusion_matrix.png}\n\\end{figure}\n\nSuch a table of numbers allows us to measure the fraction of correctly classified examples belonging to the Positive class, also referred to as *True Positives (TP)*, and to the Negative class, also named *True Negatives (TN)*; at the same time we can derive the fractions of wrongly classified Positive and Negative samples, respectively known as *False Positives (FP)* and *False Negatives (FN)*. Clearly, looking at the matrix diagonal, the larger the values along the diagonal, the better the model was able to correctly identify the samples according to their own actual class.\nFrom the four basic statistical measures depicted above, that is TP, TN, FP and FN, other useful metrics have been derived over time. They can be exploited when the most used and well-known performance measure, accuracy, is not enough, for instance because we want to analyze our model's behavior more deeply while optimizing some goal, or because we have to deal with a dataset whose classes are not balanced and we therefore want other metrics to ensure the goodness of our solutions.
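\n\nA sketch of how the matrix and its derived metrics could be obtained (assuming `clf` is any classifier already fitted on the training set, e.g. `clf = search.best_estimator_` from the grid-search sketch above):\n\n```python\nfrom sklearn.metrics import confusion_matrix, classification_report\n\ny_pred = clf.predict(X_test)\n# Note: scikit-learn places the actual classes on the rows and the\n# predicted classes on the columns, i.e. the transpose of the picture above.\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))  # precision, recall, F1-score\n```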
\n\n**Roc Curve**: for the reasons cited a few lines earlier, the four basic measures have been exploited to develop other useful evaluation tools; among them we decide to adopt the ROC curve, whose acronym stands for Receiver Operating Characteristic. It is largely employed in the field of *Decision Theory* and aims at finding, suggesting or showing how the model's performance varies as we set different thresholds, in the simple scenario in which we solve a binary classification problem. Such a curve shows on the x-axis the fraction of samples corresponding to the *False Positive Rate (FPR)*, at the different values of the classification threshold set at inference time, and on the y-axis the fraction of samples corresponding to the *True Positive Rate (TPR)*, so as to plot a curve that originates at coordinates (0,0) and terminates at coordinates (1,1), varying in the middle according to the (FPR, TPR) pairs recorded at each given threshold. We also report two driving curves: the curve of the *Random Classifier*, which corresponds to a classifier that just randomly selects the predicted class for each instance, and that of the *Perfect Classifier*, which always classifies all the samples correctly. Our goal when analysing such a graphic is to identify a threshold value whose point on the curve lies close to the upper-left corner, so as to maximise the TPR while minimising the FPR. Another useful quantity related to the ROC curve is the so-called Area Under the Curve (AUC), which tells us how much area under the ROC curve was accounted for while varying the threshold for classifying the test samples; in particular, the higher the value, the better the classifier does across the thresholds. Lastly, we notice that the Random Classifier accounts for an AUC equal to 0.5, while the Perfect Classifier reaches 1.0, so we aim at obtaining a value for the AUC that is at least in between, but approaches unity. Below is presented an example of a ROC curve:\n\n<img src=\"images/machine_learning_workflow/roc_curve.jpg\" alt=\"roc curve example\" style=\"width: 400px;\"/>\n\\begin{figure}[h]\n\\caption{Roc Curve Example}\n\\centering\n\\includegraphics[width=0.5\\textwidth]{images/machine_learning_workflow/roc_curve.jpg}\n\\end{figure}\n\nLastly, before moving ahead with describing the machine learning models, we provide a brief list of other useful metrics that can be exploited if necessary during our analyses:\n\n<img src=\"images/machine_learning_workflow/summary_statistics.gif\" alt=\"summary statistics\" style=\"width: 400px;\"/>\n\\begin{figure}[h]\n\\caption{Summary Statistics Example}\n\\centering\n\\includegraphics[width=0.5\\textwidth]{images/machine_learning_workflow/summary_statistics.jpg}\n\\end{figure}
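\n\nA sketch of the threshold sweep for a fitted classifier that exposes class probabilities (`clf` as above; since our target labels are encoded as 1/2, the label treated as positive is stated explicitly):\n\n```python\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\n# Probability assigned to the positive class drives the threshold sweep.\nscores = clf.predict_proba(X_test)[:, 1]\nfpr, tpr, thresholds = roc_curve(y_test, scores, pos_label=2)\nprint(roc_auc_score(y_test, scores))  # 0.5 = random, 1.0 = perfect\n```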
\n\n**Learning Curve**:\nLearning curves constitute a great tool to diagnose bias and variance in any supervised learning algorithm, and they come in handy when we face the so-called **Bias-Variance Trade-Off**. To explain what that trade-off implies, we briefly note the following:\n- In supervised learning, we assume there is a real relationship between feature(s) and target and estimate this unknown relationship with a model. Provided the assumption is true, there really is a model, which we call $f$, that describes perfectly the relationship between features and target.\n- In practice, $f$ is almost always completely unknown, and we try to estimate it with a model $\\hat{f}$. We use a certain training set and get a certain $\\hat{f}$; if we use a different training set, we are very likely to get a different $\\hat{f}$. The amount by which $\\hat{f}$ varies as we change training sets is called **variance**.\n- For most real-life scenarios, however, the true relationship between features and target is complicated and far from linear. Simplifying assumptions give **bias** to a model: the more erroneous the assumptions with respect to the true relationship, the higher the bias, and vice-versa.\n- In practice we need to accept a trade-off. We cannot have both low bias and low variance, so we aim for something in the middle, knowing that:\n\\begin{equation}\n\\begin{cases}\nY = f(X) + \\text{irreducible error} \\\\\nf(X) = \\hat{f}(X) + \\text{reducible error} \\\\\nY = \\hat{f}(X) + \\text{reducible error} + \\text{irreducible error}\n\\end{cases}\n\\end{equation}\n\n<table><tr>\n <td><img src=\"images/machine_learning_workflow/biasvariance.png\" alt=\"bias-variance trade-off\" style=\"width: 400px;\"/> </td>\n <td> <img src=\"images/machine_learning_workflow/irr_error.png\" alt=\"irreducible error\" style=\"width: 400px;\"/> </td>\n</tr></table>\n\\begin{figure}[H]\n\t\\centering\n\t\\begin{subfigure}{.3\\textwidth}\n\t\t\\includegraphics[width=\\textwidth]{images/machine_learning_workflow/biasvariance.png}\n\t\t\\caption{Bias-Variance Trade-Off}\n\t\\end{subfigure}\n%%%%%%%%%%%%%%\n\t\\begin{subfigure}{.3\\textwidth}\n\t\t\\includegraphics[width=\\textwidth]{images/machine_learning_workflow/irr_error.png}\n\t\t\\caption{Irreducible Error}\n\t\\end{subfigure}\n%%%%%%%%%%%%%%\n\\end{figure}\n\n<table><tr>\n <td> <img src=\"images/machine_learning_workflow/add_data.png\" alt=\"add data\" style=\"width: 400px;\"/></td>\n <td> <img src=\"images/machine_learning_workflow/low_high_var.png\" alt=\"low high var\" style=\"width: 400px;\"/></td>\n</tr></table>\n\\begin{figure}[H]\n\t\\centering\n\t\\begin{subfigure}{.3\\textwidth}\n\t\t\\includegraphics[width=\\textwidth]{images/machine_learning_workflow/add_data.png}\n\t\t\\caption{Adding More Data}\n\t\\end{subfigure}\n%%%%%%%%%%%%%%\n\t\\begin{subfigure}{.3\\textwidth}\n\t\t\\includegraphics[width=\\textwidth]{images/machine_learning_workflow/low_high_var.png}\n\t\t\\caption{Low vs High Variance}\n\t\\end{subfigure}\n%%%%%%%%%%%%%%\n\\end{figure}
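\n\nA sketch of how such curves could be computed with scikit-learn (estimator and sizes are illustrative):\n\n```python\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import learning_curve\n\n# Cross-validated train/validation scores at increasing training-set sizes.\nsizes, train_scores, val_scores = learning_curve(\n    LogisticRegression(), X_train, y_train, cv=5,\n    train_sizes=np.linspace(0.1, 1.0, 5))\nprint(train_scores.mean(axis=1), val_scores.mean(axis=1))\n```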
\n\n**P-Value Analysis**: here we briefly describe the statistical tool we exploited for assessing model performance from a particular point of view, the so-called *p-value analysis*. The p-value is widely used in *statistical hypothesis testing*, specifically in *null hypothesis significance testing*. In this method, as part of the experimental design, before performing the experiment one first chooses a model (the null hypothesis) and a threshold value for p, called the *significance level* of the test, traditionally *5% or 1%* and denoted as $\\alpha$. If the p-value is less than the chosen significance level ($\\alpha$), that suggests that the observed data is sufficiently inconsistent with the null hypothesis and that the null hypothesis may be rejected. However, that does not prove that the tested hypothesis is true. When the p-value is calculated correctly, this test guarantees that the type I error rate is at most $\\alpha$. For a typical analysis, using the standard $\\alpha = 0.05$ cutoff, the null hypothesis is rejected when $p < .05$ and not rejected when $p > .05$. The p-value does not, in itself, support reasoning about the probabilities of hypotheses, but is only a tool for deciding whether to reject the null hypothesis.\n\nIn our particular case:\n\n- Null hypothesis ($H_{0}$): the given model predicts class labels with a performance that merely matches that of a uniform model with $Prob(THROUGH) = 0.5$\n\n- Test statistic: shuffling the target labels and measuring the accuracy score to determine the significance level\n\n- Alpha level ($\\alpha$, designated threshold of significance): 0.05
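\n\nA sketch of this label-shuffling test via scikit-learn's permutation utility (the estimator is again a placeholder):\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import permutation_test_score\n\n# Accuracy on the true labels versus the distribution of accuracies obtained\n# on shuffled labels; a p-value below alpha rejects the null hypothesis.\nscore, perm_scores, p_value = permutation_test_score(\n    LogisticRegression(), X_train, y_train,\n    scoring='accuracy', n_permutations=100, random_state=42)\nprint(score, p_value)\n```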
\n\n__(5) Deployment__: The last step in building a machine learning system is the deployment of one or more trained models that fit and satisfy the constraints fixed before starting the analysis. The main goal of deployment is to employ such statistical models for predicting, in other words for making inference on, new data: unknown observations for which we do not know the target class values.", "_____no_output_____" ], [ "### Principal Component Analysis\n\nAfter having investigated the data points inside the dataset, I move on to another section of my report, where I explore the examples that make up the dataset using a well-established technique of statistical analysis, namely Principal Component Analysis. The major objective of this section is to understand whether it is possible to transform, by means of a linear transformation, the original data examples into a reprojected representation that retains the most useful information, to be later exploited at training time. So, let's dive a bit into what PCA is and what its main concepts, pros and cons are.\n\nFirstly, we know that **Principal Component Analysis**, shortly PCA, is a statistical procedure that uses an orthogonal transformation to convert a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called *principal components*. This transformation is defined in such a way that:\n- the first principal component has the largest possible variance (that is, accounts for as much of the variability in the data as possible),\n- and each succeeding component in turn has the highest variance possible under the constraint that it is orthogonal to the preceding components.\n\nThe resulting vectors, each being a linear combination of the variables and containing n observations, are an uncorrelated orthogonal basis set. PCA is sensitive to the relative scaling of the original variables.\n\nPCA is mostly used as a tool in *exploratory data analysis* and for making predictive models; for these reasons I use the technique here, before going through the different learning techniques for producing my models.\n\n#### Several Different Implementations\n\nFrom theory and the field of research in statistics, we know that there exist several different implementations and ways of computing principal component analysis, each with different performance as well as numerical stability. The three major derivations are:\n- PCA by means of an iterative procedure that extracts the principal components one after the other, selecting each time, within the remaining subspace, the component that accounts for most of the variance along its own axis.\n- PCA via the calculation of the *Covariance Matrix* of the attributes, that is, the independent predictive variables used to represent the data points.\n- Lastly, the technique known as *Singular Value Decomposition* (SVD) applied to the overall data matrix.\n\nReading the scikit-learn documentation, I discovered that its PCA implementation uses the *LAPACK implementation* of the *full SVD* or a *randomized truncated SVD* by the method of *Halko et al. 2009*, depending on the shape of the input data and the number of components to extract. Therefore I will describe mainly that way of deriving the method, while the others will be described more briefly.\n\n#### PCA's Iterative based Method\nGoing in order, as outlined above, I start by describing PCA obtained by means of the iterative procedure that extracts one principal component at a time from the data points at hand.\n\nWe begin by recalling that PCA is defined as an orthogonal linear transformation that transforms the data to a new coordinate system such that the greatest variance by some scalar projection of the data comes to lie on the first coordinate (called the first principal component), the second greatest variance on the second coordinate, and so on.\n\nWe suppose we deal with a data matrix X, with column-wise zero empirical mean, where each of the n rows represents a different repetition of the experiment, and each of the p columns gives a particular kind of feature.\n\nFrom a mathematical point of view, the transformation is defined by a set of p-dimensional vectors of weights or coefficients $\\mathbf {w} _{(k)}=(w_{1},\\dots ,w_{p})_{(k)}$ that map each row vector $\\mathbf{x}_{(i)}$ of X to a new vector of principal component scores ${\\displaystyle \\mathbf {t} _{(i)}=(t_{1},\\dots ,t_{l})_{(i)}}$, given by ${\\displaystyle {t_{k}}_{(i)}=\\mathbf {x} _{(i)}\\cdot \\mathbf {w} _{(k)}\\qquad \\mathrm {for} \\qquad i=1,\\dots ,n\\qquad k=1,\\dots ,l}$.\n\nIn this way all the individual variables ${\\displaystyle t_{1},\\dots ,t_{l}}$ of t considered over the data set successively inherit the maximum possible variance from X, with each coefficient vector w constrained to be a unit vector.\n\nMore precisely, in order to maximize variance, the first component has to satisfy the following expression:\n\n${\\displaystyle \\mathbf {w} _{(1)}={\\underset {\\Vert \\mathbf {w} \\Vert =1}{\\operatorname {\\arg \\,max} }}\\,\\left\\{\\sum _{i}\\left(t_{1}\\right)_{(i)}^{2}\\right\\}={\\underset {\\Vert \\mathbf {w} \\Vert =1}{\\operatorname {\\arg \\,max} }}\\,\\left\\{\\sum _{i}\\left(\\mathbf {x} _{(i)}\\cdot \\mathbf {w} \\right)^{2}\\right\\}}$\n\nSo, with $w_{(1)}$ found, the first principal component of a data vector $x_{(i)}$ can then be given as a score $t_{1(i)} = x_{(i)} \\cdot w_{(1)}$ in the transformed co-ordinates, or as the corresponding vector in the original variables, $(x_{(i)} \\cdot w_{(1)})w_{(1)}$.\n\nThe remaining components are computed as follows. The kth component can be found by subtracting the first k − 1 principal components from X, as in the following expression:\n\n- ${\\displaystyle \\mathbf {\\hat {X}} _{k}=\\mathbf {X} -\\sum _{s=1}^{k-1}\\mathbf {X} \\mathbf {w} _{(s)}\\mathbf {w} _{(s)}^{\\rm {T}}}$\n\n- and then finding the weight vector which extracts the maximum variance from this new data matrix: ${\\mathbf {w}}_{{(k)}}={\\underset {\\Vert {\\mathbf {w}}\\Vert =1}{\\operatorname {arg\\,max}}}\\left\\{\\Vert {\\mathbf {{\\hat {X}}}}_{{k}}{\\mathbf {w}}\\Vert ^{2}\\right\\}={\\operatorname {\\arg \\,max}}\\,\\left\\{{\\tfrac {{\\mathbf {w}}^{T}{\\mathbf {{\\hat {X}}}}_{{k}}^{T}{\\mathbf {{\\hat {X}}}}_{{k}}{\\mathbf {w}}}{{\\mathbf {w}}^{T}{\\mathbf {w}}}}\\right\\}$\n\nIt turns out that:\n- from the formulas depicted above we get the remaining eigenvectors of $X^{T}X$, with the maximum values for the quantity in brackets given by their corresponding eigenvalues; thus the weight vectors are eigenvectors of $X^{T}X$.\n- The kth principal component of a data vector $x_{(i)}$ can therefore be given as a score $t_{k(i)} = x_{(i)} \\cdot w_{(k)}$ in the transformed co-ordinates, or as the corresponding vector in the space of the original variables, $(x_{(i)} \\cdot w_{(k)}) w_{(k)}$, where $w_{(k)}$ is the kth eigenvector of $X^{T}X$.\n- The full principal components decomposition of X can therefore be given as ${\\displaystyle \\mathbf {T} =\\mathbf {X} \\mathbf {W}}$, where W is a p-by-p matrix of weights whose columns are the eigenvectors of $X^{T}X$.\n\n#### Covariance Matrix for PCA analysis\n\nPCA via covariance matrix computation requires the calculation of the sample covariance matrix of the dataset, as follows: $\\mathbf{Q} \\propto \\mathbf{X}^T \\mathbf{X} = \\mathbf{W} \\mathbf{\\Lambda} \\mathbf{W}^T$.\n\nThe empirical covariance matrix between the principal components then becomes ${\\displaystyle \\mathbf {W} ^{T}\\mathbf {Q} \\mathbf {W} \\propto \\mathbf {W} ^{T}\\mathbf {W} \\,\\mathbf {\\Lambda } \\,\\mathbf {W} ^{T}\\mathbf {W} =\\mathbf {\\Lambda } }$.\n\n#### Singular Value Decomposition for PCA analysis\n\nFinally, the principal components transformation can also be associated with another matrix factorization, the singular value decomposition (SVD) of X, ${\\displaystyle \\mathbf {X} =\\mathbf {U} \\mathbf {\\Sigma } \\mathbf {W} ^{T}}$, where more precisely:\n- Σ is an n-by-p rectangular diagonal matrix of positive numbers $σ_{(k)}$, called the singular values of X;\n- U is an n-by-n matrix, the columns of which are orthogonal unit vectors of length n, called the left singular vectors of X;\n- W is a p-by-p matrix whose columns are orthogonal unit vectors of length p, called the right singular vectors of X.\n\nFactorizing the matrix ${X^{T}X}$, it can be written as:\n\n${\\begin{aligned}\\mathbf {X} ^{T}\\mathbf {X} &=\\mathbf {W} \\mathbf {\\Sigma } ^{T}\\mathbf {U} ^{T}\\mathbf {U} \\mathbf {\\Sigma } \\mathbf {W} ^{T}\\\\&=\\mathbf {W} \\mathbf {\\Sigma } ^{T}\\mathbf {\\Sigma } \\mathbf {W} ^{T}\\\\&=\\mathbf {W} \\mathbf {\\hat {\\Sigma }} ^{2}\\mathbf {W} ^{T}\\end{aligned}}$\n\nwhere we recall that ${\\displaystyle \\mathbf {\\hat {\\Sigma }} }$ is the square diagonal matrix with the singular values of X and the excess zeros chopped off, satisfying ${\\displaystyle \\mathbf {{\\hat {\\Sigma }}^{2}} =\\mathbf {\\Sigma } ^{T}\\mathbf {\\Sigma } }$. Comparison with the eigenvector factorization of $X^{T}X$ establishes that the right singular vectors W of X are equivalent to the eigenvectors of $X^{T}X$, while the singular values $σ_{(k)}$ of X are equal to the square-roots of the eigenvalues $λ_{(k)}$ of $X^{T}X$.\n\nAt this point we understand that, using the singular value decomposition, the score matrix T can be written as:\n\n${\\begin{aligned} \\mathbf{T} & = \\mathbf{X} \\mathbf{W} \\\\ & = \\mathbf{U}\\mathbf{\\Sigma}\\mathbf{W}^T \\mathbf{W} \\\\ & = \\mathbf{U}\\mathbf{\\Sigma} \\end{aligned}}$\n\nso each column of T is given by one of the left singular vectors of X multiplied by the corresponding singular value. This form is also the polar decomposition of T.\n\nEfficient algorithms exist to calculate the SVD of X, as in the scikit-learn package, without having to form the matrix $X^{T}X$, so computing the SVD is now the standard way to calculate a principal components analysis from a data matrix.
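\n\nA small numerical check of this equivalence with NumPy (here `X` stands for any data matrix, e.g. the `rescaledX` prepared below):\n\n```python\nimport numpy as np\n\nXc = X - X.mean(axis=0)  # column-wise zero empirical mean\nU, S, Wt = np.linalg.svd(Xc, full_matrices=False)\nT = U * S                # score matrix, identical to Xc @ Wt.T\nexplained_variance = S**2 / (len(Xc) - 1)\n```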
", "_____no_output_____" ] ], [ [ "from utils.all_imports import *;\n%matplotlib inline\n# Set seed for notebook repeatability\nnp.random.seed(0)", "None\n" ], [ "# =========================================================================== #\n# READ INPUT DATASET\n# =========================================================================== #\ndataset_path, dataset_name, column_names, TARGET_COL = get_dataset_location()\nestimators_list, estimators_names = get_estimators()\n\ndataset, feature_vs_values = load_brdiges_dataset(dataset_path, dataset_name)\ncolumns_2_avoid = ['ERECTED', 'LENGTH', 'LOCATION']\n# Array used for storing graphs\nplots_names = list(map(lambda xi: f\"{xi}_learning_curve.png\", estimators_names))\npca_kernels_list = ['linear', 'poly', 'rbf', 'cosine', 'sigmoid']\ncv_list = list(range(10, 1, -1))", "_____no_output_____" ], [ "# Make distinction between Target Variable and Predictors\n# --------------------------------------------------------------------------- #\nrescaledX, y, columns = prepare_data_for_train(dataset, target_col=TARGET_COL)", "Summary about Target Variable {target_col}\n--------------------------------------------------\n2 57\n1 13\nName: T-OR-D, dtype: int64\nshape features matrix X, after normalizing: (70, 11)\n" ], [ "# sns.pairplot(dataset, hue=TARGET_COL, height=2.5);", "_____no_output_____" ], [ "df = show_table_pc_analysis(X=rescaledX); df.to_csv('cumulative_varation_explained.csv') # df", "Cumulative varation explained(percentage) up to given number of pcs:\n" ], [ "n_components = rescaledX.shape[1]\npca = PCA(n_components=n_components)\n# pca = PCA(n_components=2)\n\n# X_pca = pca.fit_transform(X)\npca = pca.fit(rescaledX)\nX_pca = pca.transform(rescaledX)\n\nfig = show_cum_variance_vs_components(pca, n_components)\n\npy.sign_in('franec94', 'STMaADdoKsk66UekCPGa')\npy.iplot(fig, filename='selecting-principal-components {}'.format('Standardize'))", "_____no_output_____" ] ], [ [ "#### Major Pros & Cons of PCA\n\nAmong the major advantages of PCA we recall that it removes correlated features, can speed up training and mitigate overfitting by reducing the dimensionality of the data, and eases visualization. On the other hand, the principal components are less interpretable than the original features, the technique requires the variables to be standardized beforehand, and some information is unavoidably lost when the lowest-variance components are discarded.", "_____no_output_____" ], [ "### Test Error Rate versus Training Sample Size\n\nHere, with the following set of graphics, we aim at describing how the *Test Error Rate* changes across different *Training Set Sizes*; in particular we run the tests simply employing the different estimators, i.e. classification algorithms, with their default settings, carrying out the training phase via the cross-validation technique while adopting increasing training set sizes. 
Our main goal with this bunch of graphics is to grasp a basic idea, which might be used to guide the training phase, of how much the training set size affects the performance of a classifier, at least with the default settings of its hyper-parameters.", "_____no_output_____" ] ], [ [ "try_comparing_various_online_solvers_by_kernel(X=rescaledX, y=y, kernels=None, heldout=[.75, .7, .65, .6, .55, .5, .45, .4, .35, .33, .3, .25, .2], rounds=20, n_classes=-1, n_components=9, estimators=estimators_list, cv=StratifiedKFold(2), verbose=0, show_fig=True, save_fig=False, stratified_flag=True, n_splits=3, random_state=42, gridshape=(3, 2), figsize=(15, 15), title=\"try comparing various online solvers by kernel\", fig_name=\"try_comparing_various_online_solvers_by_kernel.png\")", "_____no_output_____" ] ], [ [ "The main results we obtain from the analysis of the graphics reported above are the following:\n\n- The *GaussianNB Classifier*, in four out of five trials (namely those employing the Linear, Polynomial, Rbf and Sigmoid kernel tricks), follows a trend of alternating increases and decreases in test error rate, with some large peaks once more than half of the samples are used for training. Moreover, the Polynomial-based estimator turns out to be the worst among them: near half of the training set size its trend even becomes increasing, departing from the decreasing pattern of alternating local maxima and minima. Finally, when trained after the Cosine kernel trick, this classifier follows a trend somewhat similar to that of the other classifiers, except for the decision-tree-based ones.\n\n- The estimators based on the *Decision Tree Classifier*, together with the GaussianNB classifiers, are the two techniques most heavily affected by the chosen training set size. Even if generally decreasing, their trend is less stable than those of the remaining classification techniques: it fluctuates strongly, with large peaks at the larger training set sizes. The worst cases occurred when the classifier was fitted on examples preprocessed with the Linear and Rbf kernel-PCA tricks, because the *Test Error Rate* turned increasing beyond a training set size larger than half of the samples. Comparing the Random Forest classifiers, as a generalization of decision trees, against the plain Decision Tree Classifier, the former follow a more stable decreasing trend of Test Error Rate versus training set size across the different kernel tricks; only in two trials out of five, those exploiting the Rbf and Sigmoid kernel-PCA tricks, do the random forest models show a more fluctuating trend in the second half of the curve.\n\n- As for all the other learning algorithms, the training runs at varying training set sizes generally follow a decreasing trend, which means either that larger training sets give the estimators more insight into how the training examples can be characterized, so as to better discriminate between the two classes, or that the models overfit the training set. All models start with a test error rate that is not too large and keep reducing it as the training set grows. More precisely, the Knn Classifier performs best across the different attempts when the training set size lies between half and *70%* of the available examples, while SVMs, Random Forests and SGD classifiers seem to reach their best performance with a training set between *70% and 80%* of the samples. Lastly, the logistic regression classifier follows the most conservative decreasing trend of Test Error Rate, with few fluctuations, which are also contained in magnitude.", "_____no_output_____" ], [ "### References \n- Metrics and Diagnostic Tools:\n  - (__Confusion Matrix__) <font color='blue'>https://en.wikipedia.org/wiki/Confusion_matrix</font>\n  - (__F1-Score, Accuracy, and Precision-Recall__) <font color='blue'>https://towardsdatascience.com/beyond-accuracy-precision-and-recall-3da06bea9f6c</font>\n  - (__Roc Curve__) <font color='blue'>https://en.wikipedia.org/wiki/Receiver_operating_characteristic</font>\n  - (__P-value__) <font color='blue'>https://en.wikipedia.org/wiki/P-value</font>\n- Statistics:\n  - (__Correlation and dependence__) <font color='blue'>https://en.wikipedia.org/wiki/Correlation_and_dependence</font>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec5a7d9a3f2d432f9de2378d379f5df4f3e82fab
7,549
ipynb
Jupyter Notebook
Missions_to_Mars/mission_to_mars_test.ipynb
hamcha555/web-scraping-challenge
d515d61467ec171f0c38bb7e37904fd7b5cd8628
[ "ADSL" ]
null
null
null
Missions_to_Mars/mission_to_mars_test.ipynb
hamcha555/web-scraping-challenge
d515d61467ec171f0c38bb7e37904fd7b5cd8628
[ "ADSL" ]
null
null
null
Missions_to_Mars/mission_to_mars_test.ipynb
hamcha555/web-scraping-challenge
d515d61467ec171f0c38bb7e37904fd7b5cd8628
[ "ADSL" ]
null
null
null
41.478022
2,032
0.552259
[ [ [ "# Dependencies\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nimport time\n\n\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n# # NASA Mars News\n# ### Scrape the \"Mars News Site\" and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.\n\n# URL of page to be scraped\nurl = 'https://redplanetscience.com/'\nbrowser.visit(url)\n\n# Give the page a moment to render before grabbing its HTML\ntime.sleep(1)\n\n# HTML object\nhtml = browser.html\n\n# Parse HTML with Beautiful Soup\nsoup = bs(html, 'html.parser')\n\n# scrape the first title\nnews_title = soup.find('div', class_='content_title').text\n\n# scrape the first paragraph\nnews_p = soup.find('div', class_='article_teaser_body').text\n\n# # JPL Mars Space Images - Featured Image\n\n# URL of page to be scraped\nurl = 'https://spaceimages-mars.com/'\nbrowser.visit(url)\n\n# HTML object\nhtml = browser.html\n\n# Parse HTML with Beautiful Soup\nsoup = bs(html, 'html.parser')\n\n# scrape the URL of the full-size featured image\nfeatured_image_url = url + soup.find('a', class_='showimg fancybox-thumbs')['href']\n\n# # Mars Facts\n# ### Visit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.\n\n# URL of page to be scraped\nurl = 'https://galaxyfacts-mars.com/'\n\ntables = pd.read_html(url)\n\n# ### Use Pandas to convert the data to a HTML table string.\n\ndf = tables[1]\n\nhtml_table = df.to_html()\n\n# # Mars Hemispheres\n\n# URL of page to be scraped\nlist_url = ['https://marshemispheres.com/cerberus.html', 'https://marshemispheres.com/schiaparelli.html', 'https://marshemispheres.com/syrtis.html', 'https://marshemispheres.com/valles.html']\n\nmars_hemi_list = []\n\nfor x in list_url:\n    browser.visit(x)\n    # HTML object\n    html = browser.html\n    # Parse HTML with Beautiful Soup\n    soup = bs(html, 'html.parser')\n\n    # scrape the title\n    title = soup.find('h2', class_='title').text\n    # scrape the full-size image url\n    href = 'https://marshemispheres.com/' + soup.find_all('a')[4]['href']\n\n    # Run only if both title and image link are available\n    if (title and href):\n        # Print results\n        print('-------------')\n        print(title)\n        print(href)\n\n        # Dictionary to be inserted as a MongoDB document\n        v = {'title': title, 'img_url': href}\n        mars_hemi_list.append(v)\n\nmars_dict = {\n    \"Title\": news_title,\n    \"Summary\": news_p,\n    \"ImageURL\": featured_image_url,\n    \"MarsStats\": html_table,\n    \"MarsHemi\": mars_hemi_list\n}\n\nprint(mars_dict)", "\n\n====== WebDriver manager ======\nCurrent google-chrome version is 97.0.4692\nGet LATEST chromedriver version for 97.0.4692 google-chrome\nDriver [C:\\Users\\Hamilton\\.wdm\\drivers\\chromedriver\\win32\\97.0.4692.71\\chromedriver.exe] found in cache\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec5a905592206ba5a32fc358ada4b130fe17567c
388,977
ipynb
Jupyter Notebook
notebooks/intake_usage/0_Check_Catalog_Data.ipynb
IOMRC/intake-aodn
cd7d4df1fafc1afd36182a5d535fe62ab3fd2643
[ "BSD-3-Clause" ]
2
2021-12-10T04:24:02.000Z
2022-02-04T14:04:29.000Z
notebooks/intake_usage/0_Check_Catalog_Data.ipynb
IOMRC/intake-aodn
cd7d4df1fafc1afd36182a5d535fe62ab3fd2643
[ "BSD-3-Clause" ]
null
null
null
notebooks/intake_usage/0_Check_Catalog_Data.ipynb
IOMRC/intake-aodn
cd7d4df1fafc1afd36182a5d535fe62ab3fd2643
[ "BSD-3-Clause" ]
null
null
null
80.317365
52,772
0.53251
[ [ [ "# Notebook to loop through the available datasets and check they open and load OK", "_____no_output_____" ] ], [ [ "import sys\nimport os\nsys.path.append('/home/jovyan/intake-aodn')\nimport intake_aodn\nimport intake", "_____no_output_____" ], [ "from intake_aodn.utils import get_local_cluster\nclient = get_local_cluster()\nclient", "_____no_output_____" ] ], [ [ "## SSTARS Climatology", "_____no_output_____" ] ], [ [ "intake_aodn.cat.aodn_s3.SSTAARS_Daily_Climatology.to_dask()", "_____no_output_____" ], [ "intake_aodn.cat.aodn_s3.SSTAARS_Monthly_Climatology.to_dask()", "_____no_output_____" ] ], [ [ "# Fuse SST and ocean colour", "_____no_output_____" ] ], [ [ "dsets = []\nfor entry in intake_aodn.cat.aodn_s3:\n if isinstance(intake_aodn.cat.aodn_s3[entry],intake_aodn.drivers.RefZarrStackSource):\n print(entry)\n ds=intake_aodn.cat.aodn_s3[entry](startdt='2021-01-01',\n enddt='2021-03-01',\n cropto=dict(latitude=-32.,longitude=115.,method='nearest')).read()\n dsets.append(ds)\n ", "SST_L3S_1d_ngt\nMODIS_1d_chl_oc3\nMODIS_1d_chl_gsm\nMODIS_1d_K_490\n" ], [ "import xarray as xr\nds = xr.merge(dsets,compat='override')", "_____no_output_____" ], [ "ds", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nvariables = ['sea_surface_temperature','chl_oc3','chl_gsm','K_490']\nfig, axs = plt.subplots(len(variables),1,sharex=True,figsize=(8,10))\nfor i,v in enumerate(variables):\n ds[v].plot(ax=axs[i],marker='.')\nfig.tight_layout()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec5a9f4b7d2f244c2b0bc6523aa2bb3fdf0c0200
91,643
ipynb
Jupyter Notebook
Covid-19 Data Analysis.ipynb
JacobSawalbil/PROJECTS
6ec27e1527f4a20c49fb1f0b673cf390ccce38b8
[ "Apache-2.0" ]
null
null
null
Covid-19 Data Analysis.ipynb
JacobSawalbil/PROJECTS
6ec27e1527f4a20c49fb1f0b673cf390ccce38b8
[ "Apache-2.0" ]
null
null
null
Covid-19 Data Analysis.ipynb
JacobSawalbil/PROJECTS
6ec27e1527f4a20c49fb1f0b673cf390ccce38b8
[ "Apache-2.0" ]
null
null
null
60.530383
44,544
0.681503
[ [ [ "# Task: Covid-19 Data Analysis\n### This notebook is used to demonstrate comprehension of Data Analysis techniques using the Pandas library.", "_____no_output_____" ], [ "### Import the necessary libraries", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "### Question 1", "_____no_output_____" ], [ "#### Read the dataset", "_____no_output_____" ] ], [ [ "path = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/01-01-2021.csv'\ndf = pd.read_csv(path)\ndf.info()\ndf.head()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4005 entries, 0 to 4004\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 FIPS 3265 non-null float64\n 1 Admin2 3270 non-null object \n 2 Province_State 3830 non-null object \n 3 Country_Region 4005 non-null object \n 4 Last_Update 4005 non-null object \n 5 Lat 3917 non-null float64\n 6 Long_ 3917 non-null float64\n 7 Confirmed 4005 non-null int64 \n 8 Deaths 4005 non-null int64 \n 9 Recovered 4005 non-null int64 \n 10 Active 4005 non-null int64 \n 11 Combined_Key 4005 non-null object \n 12 Incident_Rate 3916 non-null float64\n 13 Case_Fatality_Ratio 3957 non-null float64\ndtypes: float64(5), int64(4), object(5)\nmemory usage: 438.2+ KB\n" ] ], [ [ "#### Display the top 5 rows in the data", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "#### Show the information of the dataset", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4005 entries, 0 to 4004\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 FIPS 3265 non-null float64\n 1 Admin2 3270 non-null object \n 2 Province_State 3830 non-null object \n 3 Country_Region 4005 non-null object \n 4 Last_Update 4005 non-null object \n 5 Lat 3917 non-null float64\n 6 Long_ 3917 non-null float64\n 7 Confirmed 4005 non-null int64 \n 8 Deaths 4005 non-null int64 \n 9 Recovered 4005 non-null int64 \n 10 Active 4005 non-null int64 \n 11 Combined_Key 4005 non-null object \n 12 Incident_Rate 3916 non-null float64\n 13 Case_Fatality_Ratio 3957 non-null float64\ndtypes: float64(5), int64(4), object(5)\nmemory usage: 438.2+ KB\n" ] ], [ [ "#### Show the sum of missing values of features in the dataset", "_____no_output_____" ] ], [ [ "df.isnull().sum()", "_____no_output_____" ] ], [ [ "### Question 2", "_____no_output_____" ], [ "#### Show the number of Confirmed cases by Country", "_____no_output_____" ] ], [ [ "world = df.groupby(\"Country_Region\")['Confirmed'].sum().reset_index()\nworld.head()", "_____no_output_____" ] ], [ [ "#### Show the number of Deaths by Country", "_____no_output_____" ] ], [ [ "world = df.groupby(\"Country_Region\")['Deaths'].sum().reset_index()\nworld.head()", "_____no_output_____" ] ], [ [ "#### Show the number of Recovered cases by Country", "_____no_output_____" ] ], [ [ "world = df.groupby(\"Country_Region\")['Recovered'].sum().reset_index()\nworld.head()", "_____no_output_____" ] ], [ [ "#### Show the number of Active Cases by Country", "_____no_output_____" ] ], [ [ "world = df.groupby(\"Country_Region\")['Active'].sum().reset_index()\nworld.head()", "_____no_output_____" ] ], [ [ "#### Show the latest number of Confirmed, Deaths, Recovered and Active cases Country-wise", "_____no_output_____" ] ], [ [ "world = 
df.groupby(\"Country_Region\")['Confirmed','Deaths','Recovered','Active'].sum().reset_index()\nworld.head()", "C:\\Users\\JABS\\AppData\\Local\\Temp/ipykernel_8804/4148972617.py:1: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.\n world = df.groupby(\"Country_Region\")['Confirmed','Deaths','Recovered','Active'].sum().reset_index()\n" ] ], [ [ "### Question 3", "_____no_output_____" ], [ "### Show the countries with no recovered cases", "_____no_output_____" ] ], [ [ "world = df.groupby('Country_Region')['Recovered'].sum().reset_index()\nresult = df[df['Recovered']==0][['Country_Region','Recovered']]\nprint(result)", " Country_Region Recovered\n9 Australia 0\n23 Belgium 0\n24 Belgium 0\n25 Belgium 0\n26 Belgium 0\n... ... ...\n4000 Tonga 0\n4001 Winter Olympics 2022 0\n4002 Antarctica 0\n4003 United Kingdom 0\n4004 United Kingdom 0\n\n[3402 rows x 2 columns]\n" ] ], [ [ "#### Show the countries with no confirmed cases", "_____no_output_____" ] ], [ [ "world = df.groupby('Country_Region')['Confirmed'].sum().reset_index()\nresult = df[df['Confirmed']==0][['Country_Region','Confirmed']]\nprint(result)", " Country_Region Confirmed\n78 Canada 0\n174 Colombia 0\n265 India 0\n280 India 0\n414 Mexico 0\n485 Peru 0\n612 Spain 0\n700 US 0\n712 US 0\n721 US 0\n742 US 0\n756 US 0\n760 US 0\n894 US 0\n945 US 0\n1215 US 0\n1445 US 0\n1656 US 0\n1776 US 0\n1822 US 0\n1859 US 0\n1890 US 0\n2155 US 0\n2335 US 0\n2449 US 0\n2558 US 0\n2733 US 0\n2818 US 0\n2886 US 0\n2937 US 0\n3003 US 0\n3092 US 0\n3142 US 0\n3208 US 0\n3575 US 0\n3718 US 0\n3817 US 0\n3887 US 0\n3919 US 0\n3955 United Kingdom 0\n3965 United Kingdom 0\n3978 China 0\n3979 Kiribati 0\n3980 Palau 0\n3981 New Zealand 0\n3982 Summer Olympics 2020 0\n3999 Malaysia 0\n4000 Tonga 0\n4001 Winter Olympics 2022 0\n4002 Antarctica 0\n" ] ], [ [ "#### Show the countries with no deaths", "_____no_output_____" ] ], [ [ "world = df.groupby('Country_Region')['Deaths'].sum().reset_index()\nresult = df[df['Deaths']==0][['Country_Region','Deaths']]\nprint(result)", "Empty DataFrame\nColumns: [Country_Region, Deaths]\nIndex: []\n" ] ], [ [ "### Question 4", "_____no_output_____" ], [ "#### Show the Top 10 countries with Confirmed cases", "_____no_output_____" ] ], [ [ "world = df.groupby(\"Country_Region\")['Confirmed'].sum().reset_index()\nworld.head(10)", "_____no_output_____" ] ], [ [ "#### Show the Top 10 Countries with Active cases", "_____no_output_____" ] ], [ [ "world = df.groupby(\"Country_Region\")['Active'].sum().reset_index()\nworld.head(10)", "_____no_output_____" ] ], [ [ "### Question 5", "_____no_output_____" ], [ "#### Plot Country-wise Total deaths, confirmed, recovered and active casaes where total deaths have exceeded 50,000", "_____no_output_____" ] ], [ [ "df = df.groupby([\"Country_Region\"])[\"Deaths\", \"Confirmed\", \"Recovered\", \"Active\"].sum().reset_index()\ndf = df.sort_values(by='Deaths', ascending=False)\ndf = df[df['Deaths']>50]\nplt.figure(figsize=(15, 5))\nplt.plot(df['Country_Region'], df['Deaths'],color='red')\nplt.plot(df['Country_Region'], df['Confirmed'],color='green')\nplt.plot(df['Country_Region'], df['Recovered'], color='blue')\nplt.plot(df['Country_Region'], df['Active'], color='black')\n \nplt.title('Total Deaths(>50000), Confirmed, Recovered and Active Cases by Country')\nplt.show()", "C:\\Users\\JABS\\AppData\\Local\\Temp/ipykernel_8804/3550498885.py:1: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) 
will be deprecated, use a list instead.\n df = df.groupby([\"Country_Region\"])[\"Deaths\", \"Confirmed\", \"Recovered\", \"Active\"].sum().reset_index()\n" ] ], [ [ "### Question 6", "_____no_output_____" ], [ "### Plot Province/State wise Deaths in USA", "_____no_output_____" ] ], [ [ "import plotly.express as px\nimport plotly.io as pio", "_____no_output_____" ], [ "df = df[df['Country_Region']=='US'].drop(df.columns['Country_Region','Lat', 'Long_'],axis=1)\ndf = df[df.sum(axis = 1) > 0]\ndf = df.groupby(['Province/State'])['Deaths'].sum().reset_index()\ndf_death = df[df['Deaths'] > 0]\nstate_fig = px.bar(df_death, x='Province/State', y='Deaths', title='State wise deaths reported of COVID-19 in USA', text='Deaths')\nstate_fig.show()", "_____no_output_____" ], [ "covid_data= pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/01-09-2021.csv')", "_____no_output_____" ], [ "covid_data.columns", "_____no_output_____" ] ], [ [ "### Question 7", "_____no_output_____" ], [ "### Plot Province/State Wise Active Cases in USA", "_____no_output_____" ], [ "### Question 8", "_____no_output_____" ], [ "### Plot Province/State Wise Confirmed cases in USA", "_____no_output_____" ], [ "### Question 9", "_____no_output_____" ], [ "### Plot Worldwide Confirmed Cases over time", "_____no_output_____" ] ], [ [ "import plotly.express as px\nimport plotly.io as pio", "_____no_output_____" ], [ "import plotly.express as px", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
ec5ad9c86b4c13efadf8836299f0aa4a43ce3608
150,461
ipynb
Jupyter Notebook
Primative Text Analysis/topic_Modeling_Negative.ipynb
otrejo08/data_bootcamp_project3
2c0c0f93b7e7e7a9914b70d77724225de63dc540
[ "FTL", "CNRI-Python" ]
2
2019-05-10T00:41:14.000Z
2019-05-16T23:21:20.000Z
Primative Text Analysis/topic_Modeling_Negative.ipynb
otrejo08/data_bootcamp_project3
2c0c0f93b7e7e7a9914b70d77724225de63dc540
[ "FTL", "CNRI-Python" ]
null
null
null
Primative Text Analysis/topic_Modeling_Negative.ipynb
otrejo08/data_bootcamp_project3
2c0c0f93b7e7e7a9914b70d77724225de63dc540
[ "FTL", "CNRI-Python" ]
null
null
null
104.12526
69,406
0.581719
[ [ [ "# Importing libraries\nimport numpy as np\nfrom pandas import Series, DataFrame\nimport pandas as pd\nimport seaborn as sns\nfrom collections import Counter\nfrom numpy.random import randn\nfrom datetime import datetime\n\nfrom geopy.distance import vincenty\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom tqdm import tqdm\ntqdm.pandas()\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom collections import Counter\nfrom scipy import stats\nfrom dateutil import parser\n# import eli5\n# from eli5.sklearn import PermutationImportance\n# from skopt.space import Real\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\n\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n\n\n\n\n# Change pandas viewing options\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)", "C:\\Users\\mvesk\\Anaconda3\\lib\\site-packages\\nltk\\twitter\\__init__.py:20: UserWarning: The twython library has not been installed. Some functionality from the twitter package will not be available.\n warnings.warn(\"The twython library has not been installed. \"\nC:\\Users\\mvesk\\Anaconda3\\lib\\site-packages\\sklearn\\ensemble\\weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported. It will be removed in a future NumPy release.\n from numpy.core.umath_tests import inner1d\n" ], [ "negReviews = pd.read_csv('Review_ASCII_Negative.csv')\ndf= negReviews", "_____no_output_____" ], [ "# Seperate NLP features\nnlp_feats = ['comments']\ncorpus = df[nlp_feats]\ndf = df.drop(nlp_feats, axis = 1)\nprint(\"Dataset has {} rows, {} columns.\".format(*df.shape))", "Dataset has 6098 rows, 19 columns.\n" ], [ "import nltk\n#nltk.data.path.append(\"\\Users\\mvesk\\Project3\\all_listings.csv\")\n\n\nfrom nltk import word_tokenize\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n\nfrom gensim.corpora.dictionary import Dictionary\nfrom gensim.models.tfidfmodel import TfidfModel\nfrom gensim.models.ldamodel import LdaModel\n\nimport itertools\nfrom collections import Counter\nfrom collections import defaultdict\n\nimport json\nimport pyLDAvis.gensim\npyLDAvis.enable_notebook()", "_____no_output_____" ], [ "def preprocess_text(corpus):\n \"\"\"Takes a corpus in list format and applies basic preprocessing steps of word tokenization,\n removing of english stop words, lower case and lemmatization.\"\"\"\n processed_corpus = []\n english_words = set(nltk.corpus.words.words())\n english_stopwords = set(stopwords.words('english'))\n# customize_stop_words = [\n# 'kilda', 'u','mins','didn','myk','emma','peter','de','st','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30',\n# \t'41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57','58','59','60','61','62','63','64','65','66','67','68','69','70','.']\n# english_stopwords.extend(customize_stop_words)\n wordnet_lemmatizer = WordNetLemmatizer()\n tokenizer = RegexpTokenizer(r'[\\w|!]+')\n for row in corpus:\n word_tokens = tokenizer.tokenize(row)\n 
word_tokens_lower = [t.lower() for t in word_tokens]\n word_tokens_lower_english = [t for t in word_tokens_lower if t in english_words or not t.isalpha()]\n word_tokens_no_stops = [t for t in word_tokens_lower_english if not t in english_stopwords]\n word_tokens_no_stops_lemmatized = [wordnet_lemmatizer.lemmatize(t) for t in word_tokens_no_stops]\n processed_corpus.append(word_tokens_no_stops_lemmatized)\n return processed_corpus", "_____no_output_____" ], [ "def nlp_model_pipeline(processed_corpus):\n \"\"\"Takes a processed corpus and produces the term dictionary and document-term matrix.\"\"\"\n # Creates the term dictionary (every unique term in corpus is assigned an index)\n dictionary = Dictionary(processed_corpus)\n # Convert corpus into Document Term Matrix using dictionary prepared above\n doc_term_matrix = [dictionary.doc2bow(listing) for listing in processed_corpus] \n return dictionary, doc_term_matrix", "_____no_output_____" ], [ "def LDA_topic_modelling(doc_term_matrix, dictionary, num_topics=3, passes=2):\n # Create an object for LDA model and train it on Document-Term-Matrix\n LDA = LdaModel\n ldamodel = LDA(doc_term_matrix, num_topics=num_topics, id2word = dictionary, passes=passes)\n return ldamodel", "_____no_output_____" ], [ "def add_topics_to_df(ldamodel, doc_term_matrix, df, new_col, num_topics):\n # Convert into Per-document topic probability matrix:\n docTopicProbMat = ldamodel[doc_term_matrix]\n docTopicProbDf = pd.DataFrame(index=df.index, columns=range(0, num_topics))\n for i, doc in enumerate(docTopicProbMat):\n for topic in doc:\n docTopicProbDf.iloc[i, topic[0]] = topic[1]\n docTopicProbDf[new_col] = docTopicProbDf.idxmax(axis=1)\n df_topics = docTopicProbDf[new_col]\n # Merge with df\n df_new = pd.concat([df, df_topics], axis=1)\n return df_new", "_____no_output_____" ], [ "corpus.head()", "_____no_output_____" ], [ "%%time\ncorpus_comments= corpus['comments'].astype(str)\nprocessed_corpus_comments = preprocess_text(corpus_comments)\ndictionary_comments, doc_term_matrix_comments = nlp_model_pipeline(processed_corpus_comments)\n", "Wall time: 3.23 s\n" ], [ "%%time\nldamodel_comments = LDA_topic_modelling(doc_term_matrix_comments, dictionary_comments, num_topics=10, passes=20)", "Wall time: 37.7 s\n" ], [ "%%time\nnegative=pyLDAvis.gensim.prepare(ldamodel_comments, doc_term_matrix_comments, dictionary_comments)", "Wall time: 1.97 s\n" ], [ "negative", "_____no_output_____" ], [ "\npyLDAvis.save_html(negative,'negative10_lda.html')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5ada58fc515b5234dbf811f19e2a1f012e4101
3,809
ipynb
Jupyter Notebook
examples/python/notebooks/Merging EBM Models.ipynb
Mu-L/interpret
b7c37b71ef6917be859c4ea504ecda01ed2271b6
[ "MIT" ]
2,122
2019-05-06T23:04:04.000Z
2019-10-29T16:58:35.000Z
examples/python/notebooks/Merging EBM Models.ipynb
ruiwang1994/interpret
755963a767f2761a87f7f00345fc1532b8c66ffd
[ "MIT" ]
87
2019-05-07T23:00:32.000Z
2019-10-25T21:51:38.000Z
examples/python/notebooks/Merging EBM Models.ipynb
ruiwang1994/interpret
755963a767f2761a87f7f00345fc1532b8c66ffd
[ "MIT" ]
256
2019-05-08T14:27:48.000Z
2019-10-27T20:02:59.000Z
27.601449
101
0.57863
[ [ [ "## Setup the dataset", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv(\n \"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data\",\n header=None)\ndf.columns = [\n \"Age\", \"WorkClass\", \"fnlwgt\", \"Education\", \"EducationNum\",\n \"MaritalStatus\", \"Occupation\", \"Relationship\", \"Race\", \"Gender\",\n \"CapitalGain\", \"CapitalLoss\", \"HoursPerWeek\", \"NativeCountry\", \"Income\"\n]\n# df = df.sample(frac=0.1, random_state=1)\ntrain_cols = df.columns[0:-1]\nlabel = df.columns[-1]\nX = df[train_cols]\ny = df[label].apply(lambda x: 0 if x == \" <=50K\" else 1) #Turning response into 0 and 1\n\nseed = 1\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=seed)", "_____no_output_____" ] ], [ [ "## Explore the dataset", "_____no_output_____" ] ], [ [ "from interpret import show\nfrom interpret.data import ClassHistogram\n\nhist = ClassHistogram().explain_data(X_train, y_train, name = 'Train Data')\nshow(hist)", "_____no_output_____" ] ], [ [ "## Training multiple EBM models", "_____no_output_____" ] ], [ [ "from interpret.glassbox import ExplainableBoostingClassifier\n\n# Fitting multiple EBM models with different training datasets and random seeds\nseed =1\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=seed)\nebm1 = ExplainableBoostingClassifier(random_state=seed, n_jobs=-1)\n\nebm1.fit(X_train, y_train) \n\nseed +=10\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=seed)\n\nebm2 = ExplainableBoostingClassifier(random_state=seed, n_jobs=-1)\nebm2.fit(X_train, y_train) \n\nseed +=10\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=seed)\n\nebm3 = ExplainableBoostingClassifier(random_state=seed, n_jobs=-1)\nebm3.fit(X_train, y_train)", "_____no_output_____" ] ], [ [ "## Merging multiple trained EBM models", "_____no_output_____" ] ], [ [ "#Merging multiple EBM models\n\nfrom interpret.glassbox.ebm.utils import *\nfrom interpret import show\n\nmodels = [ebm1, ebm2 , ebm3]\nmerged_ebm = merge_ebms(models=models)\n\nebm_global = merged_ebm.explain_global(name='EBM')\nshow(ebm_global)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec5ae697ec3d0df5c6d1a0e39128e249810b15f1
27,855
ipynb
Jupyter Notebook
project/3_modelling/model_evaluation/mobilenetv2.ipynb
NgoJunHaoJason/CZ4042
7e82b23879e312c1175d2d3b49791a6ecaa76118
[ "MIT" ]
null
null
null
project/3_modelling/model_evaluation/mobilenetv2.ipynb
NgoJunHaoJason/CZ4042
7e82b23879e312c1175d2d3b49791a6ecaa76118
[ "MIT" ]
null
null
null
project/3_modelling/model_evaluation/mobilenetv2.ipynb
NgoJunHaoJason/CZ4042
7e82b23879e312c1175d2d3b49791a6ecaa76118
[ "MIT" ]
null
null
null
27,855
27,855
0.682714
[ [ [ "# Evaluate MobileNet V2", "_____no_output_____" ], [ "Install Weights and Biases since it is not in Google Colab by default", "_____no_output_____" ] ], [ [ "!pip install wandb -qqq", "_____no_output_____" ] ], [ [ "Mount Google Drive to access images", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "from wandb.keras import WandbCallback\nimport wandb", "_____no_output_____" ], [ "# Initialise Weights and Biases API\n# Hyperparameters\n\n# previous config:\n# config={\n# 'batch_size': 128, # tried 64, 256\n# 'epochs': 10, # tried 20\n# 'seed': 0, # tried 42\n# 'learning_rate': 0.0001, # tried 0.001, 0.00005\n# 'dropout_rate': 0.2, # tried 0.5 and no dropout\n# 'bn_momentum': 0.9, # tried default of 0.99\n# 'fc1_num_neurons': 1024,\n# 'fc2_num_neurons': 512,\n# 'fc3_num_neurons': 256,\n# 'hidden_activation': 'relu',\n# 'output_activation': 'sigmoid', # tried softmax\n# 'loss_function': 'binary_crossentropy',\n# 'metrics': ['accuracy'],\n# }\n\n# tried sgd\n# tried splitting data into train-val-test instead of using test set as val set\n\ndefaults = {\n 'epochs': 20,\n 'batch_size': 64,\n 'fc1_num_neurons': 1024,\n 'fc2_num_neurons': 512,\n 'fc3_num_neurons': 256,\n 'seed': 7,\n 'learning_rate': 1e-3,\n 'optimizer': 'adam',\n 'hidden_activation': 'relu',\n 'output_activation': 'sigmoid',\n 'loss_function': 'binary_crossentropy',\n 'metrics': ['accuracy'],\n}\n\nwandb.login()\n\nrun = wandb.init(\n name='mobilenet_v2',\n project='cz4042',\n config=defaults,\n)\n\nconfig = wandb.config", "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mburntice\u001b[0m (use `wandb login --relogin` to force relogin)\n" ] ], [ [ "Import relevant libraries", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nfrom tensorflow.keras.applications.resnet50 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\n\nimport pandas as pd\nimport tensorflow as tf", "_____no_output_____" ], [ "tf.random.set_seed(config.seed)", "_____no_output_____" ] ], [ [ "Load data", "_____no_output_____" ] ], [ [ "# Load dataset as dataframe\nbase_path = '/content/drive/My Drive/NN Project/'\ndf = pd.read_csv(base_path + 'aligned_gender.txt', sep='\\t')\ndf['datadir'] = base_path + df['datadir'].astype(str)\n\n# Train test split\ntrain_df, test_df = train_test_split(df, test_size=0.2, random_state=config.seed)", "_____no_output_____" ], [ "print(train_df.shape)\nprint(test_df.shape)", "(9755, 2)\n(2439, 2)\n" ], [ "train_df['gender'].value_counts() / train_df.shape[0] * 100", "_____no_output_____" ], [ "# Load images into keras image generator \ndatagen_train = ImageDataGenerator(\n preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,\n)\ndatagen_test = ImageDataGenerator(\n preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input,\n)\n\ntrain_generator = datagen_train.flow_from_dataframe(\n dataframe=train_df,\n x_col='datadir',\n y_col='gender',\n batch_size=config.batch_size,\n seed=config.seed,\n shuffle=True,\n class_mode='raw',\n target_size=(224,224),\n)\n\ntest_generator = datagen_test.flow_from_dataframe(\n dataframe=test_df,\n x_col='datadir',\n y_col='gender',\n batch_size=config.batch_size,\n seed=config.seed,\n shuffle=True,\n 
class_mode='raw',\n target_size=(224,224),\n)", "Found 9755 validated image filenames.\nFound 2439 validated image filenames.\n" ], [ "mobile_net_v2 = tf.keras.applications.MobileNetV2(\n include_top=False,\n pooling='avg',\n weights='imagenet',\n input_shape=(224,224,3),\n)\nmobile_net_v2.trainable = False\n\nfc1 = tf.keras.layers.Dense(\n config.fc1_num_neurons,\n activation=config.hidden_activation,\n)\n\nfc2 = tf.keras.layers.Dense(\n config.fc2_num_neurons,\n activation=config.hidden_activation,\n)\n\nfc3 = tf.keras.layers.Dense(\n config.fc3_num_neurons,\n activation=config.hidden_activation,\n)\n\nmodel = tf.keras.models.Sequential([\n mobile_net_v2,\n tf.keras.layers.Flatten(),\n tf.keras.layers.BatchNormalization(),\n fc1,\n tf.keras.layers.BatchNormalization(),\n fc2,\n tf.keras.layers.BatchNormalization(),\n fc3,\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(1, activation=config.output_activation),\n])\n\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nmobilenetv2_1.00_224 (Functi (None, 1280) 2257984 \n_________________________________________________________________\nflatten (Flatten) (None, 1280) 0 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 1280) 5120 \n_________________________________________________________________\ndense (Dense) (None, 1024) 1311744 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 1024) 4096 \n_________________________________________________________________\ndense_1 (Dense) (None, 512) 524800 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 512) 2048 \n_________________________________________________________________\ndense_2 (Dense) (None, 256) 131328 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 256) 1024 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 257 \n=================================================================\nTotal params: 4,238,401\nTrainable params: 1,974,273\nNon-trainable params: 2,264,128\n_________________________________________________________________\n" ], [ "# Compile model \nmodel.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=config.learning_rate),\n loss=config.loss_function,\n metrics=config.metrics,\n)", "_____no_output_____" ], [ "history = model.fit(\n train_generator,\n validation_data=test_generator,\n epochs=config.epochs,\n callbacks=[WandbCallback()],\n)", "Epoch 1/20\n153/153 [==============================] - 146s 956ms/step - loss: 0.4091 - accuracy: 0.8226 - val_loss: 0.3268 - val_accuracy: 0.8573\nEpoch 2/20\n153/153 [==============================] - 147s 958ms/step - loss: 0.2033 - accuracy: 0.9195 - val_loss: 0.3131 - val_accuracy: 0.8815\nEpoch 3/20\n153/153 [==============================] - 146s 954ms/step - loss: 0.1355 - accuracy: 0.9482 - val_loss: 0.3392 - val_accuracy: 0.8733\nEpoch 4/20\n153/153 [==============================] - 146s 957ms/step - loss: 0.0958 - accuracy: 0.9644 - val_loss: 0.3428 - val_accuracy: 0.8795\nEpoch 5/20\n153/153 [==============================] - 145s 951ms/step - loss: 0.0770 - accuracy: 0.9717 - val_loss: 0.3614 - val_accuracy: 0.8864\nEpoch 6/20\n153/153 [==============================] - 145s 
949ms/step - loss: 0.0736 - accuracy: 0.9724 - val_loss: 0.3777 - val_accuracy: 0.8868\nEpoch 7/20\n153/153 [==============================] - 146s 951ms/step - loss: 0.0524 - accuracy: 0.9823 - val_loss: 0.4463 - val_accuracy: 0.8864\nEpoch 8/20\n153/153 [==============================] - 145s 949ms/step - loss: 0.0421 - accuracy: 0.9847 - val_loss: 0.4537 - val_accuracy: 0.8831\nEpoch 9/20\n153/153 [==============================] - 144s 942ms/step - loss: 0.0483 - accuracy: 0.9818 - val_loss: 0.4483 - val_accuracy: 0.8811\nEpoch 10/20\n153/153 [==============================] - 143s 934ms/step - loss: 0.0335 - accuracy: 0.9885 - val_loss: 0.4385 - val_accuracy: 0.8795\nEpoch 11/20\n153/153 [==============================] - 143s 936ms/step - loss: 0.0377 - accuracy: 0.9869 - val_loss: 0.4255 - val_accuracy: 0.8823\nEpoch 12/20\n153/153 [==============================] - 142s 928ms/step - loss: 0.0398 - accuracy: 0.9856 - val_loss: 0.4777 - val_accuracy: 0.8897\nEpoch 13/20\n153/153 [==============================] - 143s 932ms/step - loss: 0.0304 - accuracy: 0.9891 - val_loss: 0.4720 - val_accuracy: 0.8897\nEpoch 14/20\n153/153 [==============================] - 143s 933ms/step - loss: 0.0325 - accuracy: 0.9882 - val_loss: 0.4826 - val_accuracy: 0.8844\nEpoch 15/20\n153/153 [==============================] - 144s 938ms/step - loss: 0.0333 - accuracy: 0.9878 - val_loss: 0.4911 - val_accuracy: 0.8856\nEpoch 16/20\n153/153 [==============================] - 142s 930ms/step - loss: 0.0229 - accuracy: 0.9925 - val_loss: 0.4821 - val_accuracy: 0.8926\nEpoch 17/20\n153/153 [==============================] - 143s 937ms/step - loss: 0.0229 - accuracy: 0.9913 - val_loss: 0.5285 - val_accuracy: 0.8827\nEpoch 18/20\n153/153 [==============================] - 142s 930ms/step - loss: 0.0218 - accuracy: 0.9913 - val_loss: 0.4964 - val_accuracy: 0.8823\nEpoch 19/20\n153/153 [==============================] - 142s 929ms/step - loss: 0.0170 - accuracy: 0.9937 - val_loss: 0.5245 - val_accuracy: 0.8905\nEpoch 20/20\n153/153 [==============================] - 140s 916ms/step - loss: 0.0159 - accuracy: 0.9945 - val_loss: 0.5182 - val_accuracy: 0.8827\n" ], [ "results = model.evaluate(\n test_generator,\n callbacks=[WandbCallback()],\n)", "39/39 [==============================] - 28s 707ms/step - loss: 0.5182 - accuracy: 0.8827\n" ], [ "save_path = base_path + '5. Saved Weights and Models/Evaluation/mobilenetv2_eval.h5'\nmodel.save(save_path)", "_____no_output_____" ] ], [ [ "Let Weights and Biases know that this run is complete.", "_____no_output_____" ] ], [ [ "run.finish()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec5aebd31e132ffd0e95f543426eb32f08fc74ae
6,514
ipynb
Jupyter Notebook
homeworks_basic/Lab1_ML_pipeline_and_SVM/Lab1_part1_differentiation.ipynb
9OMShitikov/ml-mipt
860bffe0be0f0c92c005eea2f7feb96ae5634638
[ "MIT" ]
440
2020-09-01T18:02:37.000Z
2022-03-31T19:08:28.000Z
homeworks_basic/Lab1_ML_pipeline_and_SVM/Lab1_part1_differentiation.ipynb
9OMShitikov/ml-mipt
860bffe0be0f0c92c005eea2f7feb96ae5634638
[ "MIT" ]
6
2020-10-05T15:04:34.000Z
2022-01-27T15:17:29.000Z
homeworks_basic/Lab1_ML_pipeline_and_SVM/Lab1_part1_differentiation.ipynb
9OMShitikov/ml-mipt
860bffe0be0f0c92c005eea2f7feb96ae5634638
[ "MIT" ]
420
2020-08-29T13:22:22.000Z
2022-03-29T19:55:14.000Z
23.017668
329
0.52487
[ [ [ "*Credits: materials from this notebook belong to YSDA [Practical DL](https://github.com/yandexdataschool/Practical_DL) course. Special thanks for making them available online.*", "_____no_output_____" ], [ "# Lab assignment №1, part 1\n\nThis lab assignment consists of several parts. You are supposed to make some transformations, train some models, estimate the quality of the models and explain your results.\n\nSeveral comments:\n* Don't hesitate to ask questions, it's a good practice.\n* No private/public sharing, please. The copied assignments will be graded with 0 points.\n* Blocks of this lab will be graded separately.", "_____no_output_____" ], [ "## Part 1. Matrix differentiation", "_____no_output_____" ], [ "Since it easy to google every task please please please try to undestand what's going on. The \"just answer\" thing will be not counted, make sure to present derivation of your solution. It is absolutely OK if you found an answer on web then just exercise in $\\LaTeX$ copying it into here.", "_____no_output_____" ] ], [ [ "# If on colab, uncomment the following lines\n\n# ! wget https://raw.githubusercontent.com/girafe-ai/ml-mipt/basic_f20/homeworks_basic/Lab1_ML_pipeline_and_SVM/grad.png", "_____no_output_____" ] ], [ [ "Useful links: \n[1](http://www.machinelearning.ru/wiki/images/2/2a/Matrix-Gauss.pdf)\n[2](http://www.atmos.washington.edu/~dennis/MatrixCalculus.pdf)\n[3](http://cal.cs.illinois.edu/~johannes/research/matrix%20calculus.pdf)\n[4](http://research.microsoft.com/en-us/um/people/cmbishop/prml/index.htm)", "_____no_output_____" ], [ "## ex. 1", "_____no_output_____" ], [ "$$ \ny = x^Tx, \\quad x \\in \\mathbb{R}^N \n$$", "_____no_output_____" ], [ "$$\n\\frac{dy}{dx} = \n$$ ", "_____no_output_____" ], [ "## ex. 2", "_____no_output_____" ], [ "$$ y = tr(AB) \\quad A,B \\in \\mathbb{R}^{N \\times N} $$ ", "_____no_output_____" ], [ "$$\n\\frac{dy}{dA} =\n$$", "_____no_output_____" ], [ "## ex. 3", "_____no_output_____" ], [ "$$ \ny = x^TAc , \\quad A\\in \\mathbb{R}^{N \\times N}, x\\in \\mathbb{R}^{N}, c\\in \\mathbb{R}^{N} \n$$", "_____no_output_____" ], [ "$$\n\\frac{dy}{dx} =\n$$", "_____no_output_____" ], [ "$$\n\\frac{dy}{dA} =\n$$ ", "_____no_output_____" ], [ "Hint for the latter (one of the ways): use *ex. 2* result and the fact \n$$\ntr(ABC) = tr (CAB)\n$$", "_____no_output_____" ], [ "## ex. 4", "_____no_output_____" ], [ "Classic matrix factorization example. Given matrix $X$ you need to find $A$, $S$ to approximate $X$. This can be done by simple gradient descent iteratively alternating $A$ and $S$ updates.\n$$\nJ = || X - AS ||_F^2 , \\quad A\\in \\mathbb{R}^{N \\times R} , \\quad S\\in \\mathbb{R}^{R \\times M}\n$$\n$$\n\\frac{dJ}{dS} = ? \n$$\n\nYou may use one of the following approaches:", "_____no_output_____" ], [ "#### First approach\nUsing ex.2 and the fact:\n$$\n|| X ||_F^2 = tr(XX^T) \n$$ \nit is easy to derive gradients (you can find it in one of the refs). ", "_____no_output_____" ], [ "#### Second approach\nYou can use *slightly different techniques* if they suits you. Take a look at this derivation:\n<img src=\"grad.png\">\n(excerpt from [Handbook of blind source separation, Jutten, page 517](https://books.google.ru/books?id=PTbj03bYH6kC&printsec=frontcover&dq=Handbook+of+Blind+Source+Separation&hl=en&sa=X&ved=0ahUKEwi-q_apiJDLAhULvXIKHVXJDWcQ6AEIHDAA#v=onepage&q=Handbook%20of%20Blind%20Source%20Separation&f=false), open for better picture).", "_____no_output_____" ], [ "#### Third approach\nAnd finally we can use chain rule! 
\nlet $ F = AS $ \n\n**Find**\n$$\n\\frac{dJ}{dF} = \n$$ \nand \n$$\n\\frac{dF}{dS} = \n$$ \n(the shape should be $ NM \\times RM )$.\n\nNow it is easy do get desired gradients:\n$$\n\\frac{dJ}{dS} = \n$$ ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec5aee436053eca228b3239125d789ef18538d70
87,240
ipynb
Jupyter Notebook
Data Wrangling/get_spotify_track_artist_data.ipynb
filichuables/Spotify-Genre-Classifier-and-Recommender-System
bce08dc827fbf7fbd152d67c8a09cc6d6d5c4aff
[ "MIT" ]
1
2020-12-02T06:00:36.000Z
2020-12-02T06:00:36.000Z
Data Wrangling/get_spotify_track_artist_data.ipynb
filichuables/spotify_recommender_engine
bce08dc827fbf7fbd152d67c8a09cc6d6d5c4aff
[ "MIT" ]
null
null
null
Data Wrangling/get_spotify_track_artist_data.ipynb
filichuables/spotify_recommender_engine
bce08dc827fbf7fbd152d67c8a09cc6d6d5c4aff
[ "MIT" ]
null
null
null
134.215385
20,370
0.617973
[ [ [ "import numpy as np\nimport pandas as pd\n\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\nimport keyring\nimport time\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)", "_____no_output_____" ] ], [ [ "## Setup Spotipy credentials and query wrapper", "_____no_output_____" ] ], [ [ "client_credentials_manager = SpotifyClientCredentials(client_id=keyring.get_password('spotify', 'cid'),\n client_secret=keyring.get_password('spotify', 'secret') )\nsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)", "_____no_output_____" ] ], [ [ "## Get sample artists data", "_____no_output_____" ] ], [ [ "artist_id = '2YZyLoL8N0Wb9xBt1NhZWg'", "_____no_output_____" ], [ "# View sp.track output\nsp.artist(artist_id)", "_____no_output_____" ] ], [ [ "## Get sample track data", "_____no_output_____" ] ], [ [ "track_id = \"74tLlkN3rgVzRqQJgPfink\"", "_____no_output_____" ], [ "# View sp.track output\nsp.track(track_id)", "_____no_output_____" ], [ "# View sp.audio_featrues output\nsp.audio_features(track_id)", "_____no_output_____" ] ], [ [ "## Read consolidated spotify daily charts", "_____no_output_____" ] ], [ [ "df = pd.read_csv('data/spotify_daily_charts.csv')\ndf.head()", "_____no_output_____" ] ], [ [ "## Get data of unique tracks in charts ", "_____no_output_____" ] ], [ [ "def get_track_data(t_id): \n track_data = sp.track(t_id)\n track_features = sp.audio_features(t_id)\n \n #get only main(first) artist\n td_list = [t_id,\\\n track_data['name'],\\\n track_data['artists'][0]['id'],\\\n track_data['artists'][0]['name'],\\\n track_data['album']['uri'].split(\":\")[2],\\\n track_data['duration_ms'],\\\n track_data['album']['release_date'],\\\n track_data['popularity']]\n data = pd.DataFrame([td_list], columns = ['track_id','track_name','artist_id','artist_name','album_id','duration','release_date','popularity'])\n\n relevant_cols = ['danceability', 'energy', 'key', 'loudness', 'mode',\\\n 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo'] \n \n tf_data = pd.DataFrame(track_features)\n tf_data = tf_data[relevant_cols]\n \n data = pd.concat([data, tf_data], axis=1)\n return data\n", "_____no_output_____" ], [ "get_track_data(track_id)", "_____no_output_____" ], [ "track_df = df[['track_id','track_name']].drop_duplicates()\ntrack_df", "_____no_output_____" ], [ "len(pd.unique(track_df['track_id'].values)),len(pd.unique(track_df['track_name'].values))", "_____no_output_____" ] ], [ [ "> Q: Why is it that we have fewer unique track names than unique track ids? Is this expected or does it indicate a data processing error?", "_____no_output_____" ] ], [ [ "track_list = track_df['track_id'].values\ndf_list=[]\n\nfor i,track_id in enumerate(track_list):\n print('[%d/%d] Fetching track data for %s... ' % \n (i+1,len(track_list),track_df[track_df['track_id']==track_id]['track_name'].values[0]), end = \" \") \n track_data = get_track_data(track_id) \n df_list.append(track_data)\n print('done!')\n \n #sleep for 100 secs per 100 requests to avoid being blocked\n if (i % 100 == 0)&(i > 0):\n time.sleep(5)", "[1973/2292] Fetching track data for 시작... done!\n[1974/2292] Fetching track data for Binibini... done!\n[1975/2292] Fetching track data for FANCY... done!\n[1976/2292] Fetching track data for THE SCOTTS... done!\n[1977/2292] Fetching track data for If You’re Too Shy (Let Me Know)... 
done!\n[1978/2292] Fetching track data for If You’re Too Shy (Let Me Know) - Edit... done!\n[1979/2292] Fetching track data for Righteous... done!\n[1980/2292] Fetching track data for Passenger Seat (Acoustic)... done!\n[1981/2292] Fetching track data for Nandito Na... done!\n[1982/2292] Fetching track data for I Choose - From The Netflix Original Film The Willoughbys... done!\n[1983/2292] Fetching track data for Fight Song... done!\n[1984/2292] Fetching track data for Savage Remix (feat. Beyoncé)... done!\n[1985/2292] Fetching track data for Moonlight... done!\n[1986/2292] Fetching track data for Play Date... done!\n[1987/2292] Fetching track data for Passenger Seat... done!\n[1988/2292] Fetching track data for Beautiful Scars... done!\n[1989/2292] Fetching track data for Toosie Slide... done!\n[1990/2292] Fetching track data for Say So (feat. Nicki Minaj)... done!\n[1991/2292] Fetching track data for Pain 1993 (with Playboi Carti)... done!\n[1992/2292] Fetching track data for Be Kind (with Halsey)... done!\n[1993/2292] Fetching track data for eight(Prod.&amp;Feat. SUGA of BTS)... done!\n[1994/2292] Fetching track data for Happy... done!\n[1995/2292] Fetching track data for Bisayang Gwapito... done!\n[1996/2292] Fetching track data for La La Lost You - Acoustic Version... done!\n[1997/2292] Fetching track data for Stuck with U (with Justin Bieber)... done!\n[1998/2292] Fetching track data for With A Smile... done!\n[1999/2292] Fetching track data for I Love You&#39;s... done!\n[2000/2292] Fetching track data for Cutie Uyyy... done!\n[2001/2292] Fetching track data for Alapaap... done!\n[2002/2292] Fetching track data for Magasin... done!\n[2003/2292] Fetching track data for Pare Ko... done!\n[2004/2292] Fetching track data for Huwag Mo Nang Itanong... done!\n[2005/2292] Fetching track data for Alapaap / Overdrive... done!\n[2006/2292] Fetching track data for Halik... done!\n[2007/2292] Fetching track data for Huling Sayaw... done!\n[2008/2292] Fetching track data for Zombie... done!\n[2009/2292] Fetching track data for Zombie - English Ver.... done!\n[2010/2292] Fetching track data for good guys... done!\n[2011/2292] Fetching track data for Zombie... done!\n[2012/2292] Fetching track data for Yours... done!\n[2013/2292] Fetching track data for Ang Huling El Bimbo... done!\n[2014/2292] Fetching track data for Daisies... done!\n[2015/2292] Fetching track data for Lose Somebody... done!\n[2016/2292] Fetching track data for Can&#39;t You See Me?... done!\n[2017/2292] Fetching track data for Drama... done!\n[2018/2292] Fetching track data for Fairy of Shampoo... done!\n[2019/2292] Fetching track data for Cornelia Street - Live From Paris... done!\n[2020/2292] Fetching track data for Puhon... done!\n[2021/2292] Fetching track data for Rain On Me (with Ariana Grande)... done!\n[2022/2292] Fetching track data for Daechwita... done!\n[2023/2292] Fetching track data for Moonlight... done!\n[2024/2292] Fetching track data for What do you think?... done!\n[2025/2292] Fetching track data for Strange... done!\n[2026/2292] Fetching track data for 28... done!\n[2027/2292] Fetching track data for Burn It... done!\n[2028/2292] Fetching track data for People... done!\n[2029/2292] Fetching track data for Honsool... done!\n[2030/2292] Fetching track data for Dear my friend (feat. Kim Jong Wan of NELL)... done!\n[2031/2292] Fetching track data for Interlude : Set me free... done!\n[2032/2292] Fetching track data for Punch... done!\n[2033/2292] Fetching track data for This Love Isn&#39;t Crazy... 
done!\n[2034/2292] Fetching track data for Tonight (I Wish I Was Your Boy)... done!\n[2035/2292] Fetching track data for In Your Eyes (with Doja Cat) - Remix... done!\n[2036/2292] Fetching track data for Party Girl... done!\n[2037/2292] Fetching track data for Marikit... done!\n[2038/2292] Fetching track data for KLWKN... done!\n[2039/2292] Fetching track data for IDK You Yet... done!\n[2040/2292] Fetching track data for Candy... done!\n[2041/2292] Fetching track data for R U Ridin’?... done!\n[2042/2292] Fetching track data for Bungee... done!\n[2043/2292] Fetching track data for Poppin’... done!\n[2044/2292] Fetching track data for Underwater... done!\n[2045/2292] Fetching track data for Love Again... done!\n[2046/2292] Fetching track data for Ghost... done!\n[2047/2292] Fetching track data for Sour Candy (with BLACKPINK)... done!\n[2048/2292] Fetching track data for 911... done!\n[2049/2292] Fetching track data for Sour Candy (with BLACKPINK)... done!\n[2050/2292] Fetching track data for Rain On Me (with Ariana Grande)... done!\n[2051/2292] Fetching track data for Stupid Love... done!\n[2052/2292] Fetching track data for Free Woman... done!\n[2053/2292] Fetching track data for Alice... done!\n[2054/2292] Fetching track data for Fun Tonight... done!\n[2055/2292] Fetching track data for Lose Somebody... done!\n[2056/2292] Fetching track data for Chromatica II... done!\n[2057/2292] Fetching track data for Enigma... done!\n[2058/2292] Fetching track data for Plastic Doll... done!\n[2059/2292] Fetching track data for Chromatica I... done!\n[2060/2292] Fetching track data for Sine From Above (with Elton John)... done!\n[2061/2292] Fetching track data for Replay... done!\n[2062/2292] Fetching track data for Babylon... done!\n[2063/2292] Fetching track data for 1000 Doves... done!\n[2064/2292] Fetching track data for 3AM... done!\n[2065/2292] Fetching track data for MORE &amp; MORE... done!\n[2066/2292] Fetching track data for OXYGEN... done!\n[2067/2292] Fetching track data for MAKE ME GO... done!\n[2068/2292] Fetching track data for FIREWORK... done!\n[2069/2292] Fetching track data for SHADOW... done!\n[2070/2292] Fetching track data for SWEET SUMMER DAY... done!\n[2071/2292] Fetching track data for DON’T CALL ME AGAIN... done!\n[2072/2292] Fetching track data for What is Love... done!\n[2073/2292] Fetching track data for MORE &amp; MORE... done!\n[2074/2292] Fetching track data for OXYGEN... done!\n[2075/2292] Fetching track data for MAKE ME GO... done!\n[2076/2292] Fetching track data for FIREWORK... done!\n[2077/2292] Fetching track data for SHADOW... done!\n[2078/2292] Fetching track data for SWEET SUMMER DAY... done!\n[2079/2292] Fetching track data for DON&#39;T CALL ME AGAIN... done!\n[2080/2292] Fetching track data for Freedom (feat. Kendrick Lamar)... done!\n[2081/2292] Fetching track data for Lifetime... done!\n[2082/2292] Fetching track data for Pagsuko... done!\n[2083/2292] Fetching track data for Mariposa... done!\n[2084/2292] Fetching track data for Pangga... done!\n[2085/2292] Fetching track data for Hurry Home (with beabadoobee &amp; Jay Som)... done!\n[2086/2292] Fetching track data for Higa... done!\n[2087/2292] Fetching track data for Binhi... done!\n[2088/2292] Fetching track data for Marikit... done!\n[2089/2292] Fetching track data for Savage Love (Did Somebody Break Your Heart)... done!\n[2090/2292] Fetching track data for WAYO... done!\n[2091/2292] Fetching track data for no song without you... 
done!\n[2092/2292] Fetching track data for Savage Love (Laxed - Siren Beat)... done!\n[2093/2292] Fetching track data for Banana (feat. Shaggy) - DJ FLe - Minisiren Remix... done!\n[2094/2292] Fetching track data for ROCKSTAR (feat. Roddy Ricch) - BLM REMIX... done!\n[2095/2292] Fetching track data for Umaasa... done!\n[2096/2292] Fetching track data for Secret Story of the Swan... done!\n[2097/2292] Fetching track data for Malayo... done!\n[2098/2292] Fetching track data for Best Friend... done!\n[2099/2292] Fetching track data for Breaking Me... done!\n[2100/2292] Fetching track data for Stay Gold... done!\n[2101/2292] Fetching track data for Chinita Girl... done!\n[2102/2292] Fetching track data for BLACK PARADE... done!\n[2103/2292] Fetching track data for Left &amp; Right... done!\n[2104/2292] Fetching track data for Fearless... done!\n[2105/2292] Fetching track data for Kidult... done!\n[2106/2292] Fetching track data for I Wish... done!\n[2107/2292] Fetching track data for How You Like That... done!\n[2108/2292] Fetching track data for Love Somebody... done!\n[2109/2292] Fetching track data for How You Like That... done!\n[2110/2292] Fetching track data for God’s Menu... done!\n[2111/2292] Fetching track data for Maria... done!\n[2112/2292] Fetching track data for if this is the last time... done!\n[2113/2292] Fetching track data for Nakikinig Ka Ba Sa Akin... done!\n[2114/2292] Fetching track data for Just Friends... done!\n[2115/2292] Fetching track data for Di Biro... done!\n[2116/2292] Fetching track data for no song without you... done!\n[2117/2292] Fetching track data for la la la that’s how it goes... done!\n[2118/2292] Fetching track data for free love... done!\n[2119/2292] Fetching track data for by my side... done!\n[2120/2292] Fetching track data for dear P... done!\n[2121/2292] Fetching track data for one way to tokyo... done!\n[2122/2292] Fetching track data for loving you is so easy... done!\n[2123/2292] Fetching track data for Go Crazy... done!\n[2124/2292] Fetching track data for can’t bear to be without you... done!\n[2125/2292] Fetching track data for lines on our faces... done!\n[2126/2292] Fetching track data for Are You Bored Yet? (feat. Clairo)... done!\n[2127/2292] Fetching track data for Past Life (with Selena Gomez)... done!\n[2128/2292] Fetching track data for Monster... done!\n[2129/2292] Fetching track data for Diamond... done!\n[2130/2292] Fetching track data for Feel Good... done!\n[2131/2292] Fetching track data for Jelly... done!\n[2132/2292] Fetching track data for Uncover (Sung by SEULGI) - Bonus Track... done!\n[2133/2292] Fetching track data for Miloves (Otw Sayo)... done!\n[2134/2292] Fetching track data for Pac-Man... done!\n[2135/2292] Fetching track data for Telephone... done!\n[2136/2292] Fetching track data for Scars To Your Beautiful... done!\n[2137/2292] Fetching track data for Life&#39;s A Mess (feat. Halsey)... done!\n[2138/2292] Fetching track data for Urong Sulong... done!\n[2139/2292] Fetching track data for Stunnin&#39; (feat. Harm Franklin)... done!\n[2140/2292] Fetching track data for I Will Be Here (Original Soundtrack from the movie &quot;Through Night And Day&quot;)... done!\n[2141/2292] Fetching track data for Come &amp; Go (with Marshmello)... done!\n[2142/2292] Fetching track data for Wishing Well... done!\n[2143/2292] Fetching track data for 1 Billion Views... done!\n[2144/2292] Fetching track data for Your eyes tell... done!\n[2145/2292] Fetching track data for INTRO : Calling... 
done!\n[2146/2292] Fetching track data for OUTRO : The Journey... done!\n[2147/2292] Fetching track data for Telephone... done!\n[2148/2292] Fetching track data for Stay Gold... done!\n[2149/2292] Fetching track data for Smile... done!\n[2150/2292] Fetching track data for Party Girl (Remix)... done!\n[2151/2292] Fetching track data for Lights... done!\n[2152/2292] Fetching track data for Love In My Pocket... done!\n[2153/2292] Fetching track data for I... done!\n[2154/2292] Fetching track data for Boy With Luv - Japanese ver.... done!\n[2155/2292] Fetching track data for Strawberries &amp; Cigarettes... done!\n[2156/2292] Fetching track data for Party Girl (Remix)... done!\n[2157/2292] Fetching track data for POPSTAR (feat. Drake)... done!\n[2158/2292] Fetching track data for I Knew I Loved You... done!\n[2159/2292] Fetching track data for WHATS POPPIN (feat. DaBaby, Tory Lanez &amp; Lil Wayne) - Remix... done!\n[2160/2292] Fetching track data for Beautiful Scars - Acoustic... done!\n[2161/2292] Fetching track data for Love Somebody... done!\n[2162/2292] Fetching track data for What You Waiting For... done!\n[2163/2292] Fetching track data for Easy... done!\n[2164/2292] Fetching track data for Selene... done!\n[2165/2292] Fetching track data for Little Things... done!\n[2166/2292] Fetching track data for What Makes You Beautiful... done!\n[2167/2292] Fetching track data for Story of My Life... done!\n[2168/2292] Fetching track data for Perfect... done!\n[2169/2292] Fetching track data for History... done!\n[2170/2292] Fetching track data for They Don&#39;t Know About Us... done!\n[2171/2292] Fetching track data for Night Changes... done!\n[2172/2292] Fetching track data for Steal My Girl... done!\n[2173/2292] Fetching track data for 18... done!\n[2174/2292] Fetching track data for Drag Me Down... done!\n[2175/2292] Fetching track data for Live While We&#39;re Young... done!\n[2176/2292] Fetching track data for Kiss You... done!\n[2177/2292] Fetching track data for Best Song Ever... done!\n[2178/2292] Fetching track data for You &amp; I... done!\n[2179/2292] Fetching track data for One Thing... done!\n[2180/2292] Fetching track data for More Than This... done!\n[2181/2292] Fetching track data for Gotta Be You... done!\n[2182/2292] Fetching track data for Midnight Memories... done!\n[2183/2292] Fetching track data for Last First Kiss... done!\n[2184/2292] Fetching track data for the 1... done!\n[2185/2292] Fetching track data for cardigan... done!\n[2186/2292] Fetching track data for exile (feat. Bon Iver)... done!\n[2187/2292] Fetching track data for my tears ricochet... done!\n[2188/2292] Fetching track data for the last great american dynasty... done!\n[2189/2292] Fetching track data for august... done!\n[2190/2292] Fetching track data for mirrorball... done!\n[2191/2292] Fetching track data for this is me trying... done!\n[2192/2292] Fetching track data for seven... done!\n[2193/2292] Fetching track data for illicit affairs... done!\n[2194/2292] Fetching track data for invisible string... done!\n[2195/2292] Fetching track data for mad woman... done!\n[2196/2292] Fetching track data for betty... done!\n[2197/2292] Fetching track data for epiphany... done!\n[2198/2292] Fetching track data for peace... done!\n[2199/2292] Fetching track data for hoax... done!\n[2200/2292] Fetching track data for Nobody&#39;s Love... done!\n[2201/2292] Fetching track data for I Don’t Wanna Live Forever (Fifty Shades Darker)... done!\n[2202/2292] Fetching track data for I Knew You Were Trouble.... 
done!\n[2203/2292] Fetching track data for Shake It Off... done!\n[2204/2292] Fetching track data for cardigan... done!\n[2205/2292] Fetching track data for my tears ricochet... done!\n[2206/2292] Fetching track data for august... done!\n[2207/2292] Fetching track data for this is me trying... done!\n[2208/2292] Fetching track data for invisible string... done!\n[2209/2292] Fetching track data for hoax... done!\n[2210/2292] Fetching track data for Heather... done!\n[2211/2292] Fetching track data for Lost In The Wild... done!\n[2212/2292] Fetching track data for my future... done!\n[2213/2292] Fetching track data for Hanggang Sa Huli... done!\n[2214/2292] Fetching track data for Alab (Burning)... done!\n[2215/2292] Fetching track data for Go Up... done!\n[2216/2292] Fetching track data for Tilaluha... done!\n[2217/2292] Fetching track data for Love Goes... done!\n[2218/2292] Fetching track data for Love Goes - EDM Version... done!\n[2219/2292] Fetching track data for Party Girl... done!\n[2220/2292] Fetching track data for Roses - Imanbek Remix... done!\n[2221/2292] Fetching track data for BOY - KR Ver.... done!\n[2222/2292] Fetching track data for Daylight... done!\n[2223/2292] Fetching track data for Smile (with The Weeknd)... done!\n[2224/2292] Fetching track data for WAP (feat. Megan Thee Stallion)... done!\n[2225/2292] Fetching track data for BOY... done!\n[2226/2292] Fetching track data for Breath... done!\n[2227/2292] Fetching track data for Electric Love... done!\n[2228/2292] Fetching track data for you!... done!\n[2229/2292] Fetching track data for When I Look At You... done!\n[2230/2292] Fetching track data for Laugh Now Cry Later (feat. Lil Durk)... done!\n[2231/2292] Fetching track data for Midnight Sky... done!\n[2232/2292] Fetching track data for Uuwian... done!\n[2233/2292] Fetching track data for Can&#39;t Get Out... done!\n[2234/2292] Fetching track data for Kabilang Buhay... done!\n[2235/2292] Fetching track data for Not Shy... done!\n[2236/2292] Fetching track data for Don’t Give A What... done!\n[2237/2292] Fetching track data for Not Shy... done!\n[2238/2292] Fetching track data for the lakes - bonus track... done!\n[2239/2292] Fetching track data for Lose... done!\n[2240/2292] Fetching track data for Dynamite... done!\n[2241/2292] Fetching track data for MORE &amp; MORE - English Version... done!\n[2242/2292] Fetching track data for Dynamite - Instrumental... done!\n[2243/2292] Fetching track data for MORE &amp; MORE - English Ver.... done!\n[2244/2292] Fetching track data for Dynamite - Acoustic Remix... done!\n[2245/2292] Fetching track data for Dynamite - EDM Remix... done!\n[2246/2292] Fetching track data for 24H... done!\n[2247/2292] Fetching track data for you broke me first... done!\n[2248/2292] Fetching track data for ILYSB - STRIPPED... done!\n[2249/2292] Fetching track data for Mood (feat. Iann Dior)... done!\n[2250/2292] Fetching track data for Our First Song... done!\n[2251/2292] Fetching track data for Ice Cream (with Selena Gomez)... done!\n[2252/2292] Fetching track data for THE BADDEST... done!\n[2253/2292] Fetching track data for Over Now (with The Weeknd)... done!\n[2254/2292] Fetching track data for Dynamite - Tropical Remix... done!\n[2255/2292] Fetching track data for Daisies... done!\n[2256/2292] Fetching track data for Never Really Over... done!\n[2257/2292] Fetching track data for Dynamite - Poolside Remix... done!\n[2258/2292] Fetching track data for Cry About It Later... done!\n[2259/2292] Fetching track data for Mood Swings (feat. 
Lil Tjay)... done!\n[2260/2292] Fetching track data for Senorita... done!\n[2261/2292] Fetching track data for Beautiful In White... done!\n[2262/2292] Fetching track data for Dynamite... done!\n[2263/2292] Fetching track data for Take You Dancing... done!\n[2264/2292] Fetching track data for A Wish On Christmas Night... done!\n[2265/2292] Fetching track data for Do You Hear What I Hear?... done!\n[2266/2292] Fetching track data for The Bells At Christmas... done!\n[2267/2292] Fetching track data for A Christmas Song for You... done!\n[2268/2292] Fetching track data for Pagdating Ng Pasko... done!\n[2269/2292] Fetching track data for It Is the Lord!... done!\n[2270/2292] Fetching track data for Let Me Be The One... done!\n[2271/2292] Fetching track data for When a Child Is Born... done!\n[2272/2292] Fetching track data for Secret Love Song (feat. Jason Derulo)... done!\n[2273/2292] Fetching track data for Head &amp; Heart (feat. MNEK)... done!\n[2274/2292] Fetching track data for At My Worst... done!\n[2275/2292] Fetching track data for Mad at Disney... done!\n[2276/2292] Fetching track data for You Got It... done!\n[2277/2292] Fetching track data for Cry About It Later... done!\n[2278/2292] Fetching track data for Castle In The Sky... done!\n[2279/2292] Fetching track data for Lose... done!\n[2280/2292] Fetching track data for Selene... done!\n[2281/2292] Fetching track data for Plot Twist... done!\n[2282/2292] Fetching track data for Wide Open (Foreword)... done!\n[2283/2292] Fetching track data for Switchblade... done!\n[2284/2292] Fetching track data for Waterfalls... done!\n[2285/2292] Fetching track data for Nightcrawlers... done!\n[2286/2292] Fetching track data for Pandemonium... done!\n[2287/2292] Fetching track data for If There&#39;s Nothing Left...... done!\n[2288/2292] Fetching track data for OK Not To Be OK... done!\n[2289/2292] Fetching track data for Kabet... done!\n[2290/2292] Fetching track data for Back Door... done!\n[2291/2292] Fetching track data for always, i&#39;ll care... done!\n[2292/2292] Fetching track data for God&#39;s Menu... done!\n" ], [ "tracks_data_df = pd.concat(df_list)\ntracks_data_df.head()", "_____no_output_____" ], [ "tracks_data_df.to_csv('data/spotify_daily_charts_tracks.csv', index=False, encoding='utf-8')", "_____no_output_____" ], [ "tracks_data_df.describe()", "_____no_output_____" ] ], [ [ "## Get data of unique artists in charts ", "_____no_output_____" ] ], [ [ "#Get unique artists id\nartist_df = tracks_data_df[['artist_id','artist_name']].drop_duplicates()\nartist_df", "_____no_output_____" ], [ "len(artist_df)", "_____no_output_____" ] ], [ [ "> Q: What does the ratio of unique artists to unique tracks tell you about the nature of the Spotify top-streamed market?", "_____no_output_____" ] ], [ [ "def get_artist_data(a_id):\n \n artist_data = sp.artist(a_id)\n\n ad_list = [a_id,\\\n artist_data['name'],\\\n artist_data['followers']['total'],\\\n artist_data['genres'],\\\n artist_data['popularity']]\n data = pd.DataFrame([ad_list], columns = ['artist_id','artist_name','total_followers','genres','popularity'])\n\n return data\n", "_____no_output_____" ], [ "get_artist_data(artist_id)", "_____no_output_____" ], [ "artist_list = artist_df['artist_id'].values\ndf_list=[]\n\nfor i,artist_id in enumerate(artist_list):\n print('[%d/%d] Fetching artist data for %s... 
' % \n (i+1,len(artist_list),artist_df[artist_df['artist_id']==artist_id]['artist_name'].values[0]), end = \" \") \n artist_data = get_artist_data(artist_id) \n df_list.append(artist_data)\n print('done!')\n \n #sleep for 100 secs per 100 requests to avoid being blocked\n if (i % 100 == 0)& (i > 0):\n time.sleep(5) ", "tching artist data for Andrea Babierra... done!\n[253/606] Fetching artist data for Anna Kendrick... done!\n[254/606] Fetching artist data for 88rising... done!\n[255/606] Fetching artist data for Why Don&#39;t We... done!\n[256/606] Fetching artist data for Tyga... done!\n[257/606] Fetching artist data for Daddy Yankee... done!\n[258/606] Fetching artist data for gnash... done!\n[259/606] Fetching artist data for Panic! At The Disco... done!\n[260/606] Fetching artist data for The Carters... done!\n[261/606] Fetching artist data for Ella Mai... done!\n[262/606] Fetching artist data for Jeremy Zucker... done!\n[263/606] Fetching artist data for Boyce Avenue... done!\n[264/606] Fetching artist data for Joseph Vincent... done!\n[265/606] Fetching artist data for Amber Leigh Irish... done!\n[266/606] Fetching artist data for Matt Johnson... done!\n[267/606] Fetching artist data for Chelsea Cutler... done!\n[268/606] Fetching artist data for Loud Luxury... done!\n[269/606] Fetching artist data for Unique Salonga... done!\n[270/606] Fetching artist data for benny blanco... done!\n[271/606] Fetching artist data for Twenty One Pilots... done!\n[272/606] Fetching artist data for iKON... done!\n[273/606] Fetching artist data for MØ... done!\n[274/606] Fetching artist data for Triple H... done!\n[275/606] Fetching artist data for Angelina Cruz... done!\n[276/606] Fetching artist data for I Belong to the Zoo... done!\n[277/606] Fetching artist data for 6ix9ine... done!\n[278/606] Fetching artist data for Dynoro... done!\n[279/606] Fetching artist data for Kina Grannis... done!\n[280/606] Fetching artist data for Dean Lewis... done!\n[281/606] Fetching artist data for PRETTYMUCH... done!\n[282/606] Fetching artist data for MAX... done!\n[283/606] Fetching artist data for Travis Scott... done!\n[284/606] Fetching artist data for Marione... done!\n[285/606] Fetching artist data for Imago... done!\n[286/606] Fetching artist data for Sigala... done!\n[287/606] Fetching artist data for Nyoy Volante... done!\n[288/606] Fetching artist data for HONNE... done!\n[289/606] Fetching artist data for JoJo... done!\n[290/606] Fetching artist data for Paramore... done!\n[291/606] Fetching artist data for Kris Kross Amsterdam... done!\n[292/606] Fetching artist data for Jose Mari Chan... done!\n[293/606] Fetching artist data for Mariah Carey... done!\n[294/606] Fetching artist data for Nick Jonas... done!\n[295/606] Fetching artist data for No Rome... done!\n[296/606] Fetching artist data for the bird and the bee... done!\n[297/606] Fetching artist data for Girls&#39; Generation-Oh!GG... done!\n[298/606] Fetching artist data for Silk City... done!\n[299/606] Fetching artist data for Alesso... done!\n[300/606] Fetching artist data for Ciara... done!\n[301/606] Fetching artist data for Khel Pangilinan... done!\n[302/606] Fetching artist data for Lukas Graham... done!\n[303/606] Fetching artist data for Joji... done!\n[304/606] Fetching artist data for Gucci Mane... done!\n[305/606] Fetching artist data for Because... done!\n[306/606] Fetching artist data for Avril Lavigne... done!\n[307/606] Fetching artist data for Lil Peep... done!\n[308/606] Fetching artist data for Leanne &amp; Naara... 
done!\n[... per-artist fetch log for artists 309-605 elided ...]\n[606/606] Fetching artist data for Gagong Rapper... done!\n" ], [ "artist_data_df = pd.concat(df_list)\nartist_data_df ", "_____no_output_____" ], [ "artist_data_df.to_csv('data/spotify_daily_charts_artists.csv', index=False, encoding='utf-8')", "_____no_output_____" ] ], [ [ "## Resources\n- Spotify API reference manual https://developer.spotify.com/documentation/web-api/reference/search/search/", "_____no_output_____" ] ] ]
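The loop above issues one `sp.artist` call per artist, which is why the fetch takes 606 round-trips. Where throughput matters, the requests can be batched; a minimal sketch, assuming spotipy's bulk `sp.artists` endpoint (the 50-ids-per-call limit is an assumption to verify against the API docs) and a hypothetical `get_artist_data_batch` helper name:

```python
import time
import pandas as pd

def get_artist_data_batch(sp, artist_ids, batch_size=50):
    """Hypothetical batched variant of get_artist_data:
    one API call per batch of ids instead of one call per artist."""
    rows = []
    for start in range(0, len(artist_ids), batch_size):
        batch = list(artist_ids[start:start + batch_size])
        response = sp.artists(batch)  # bulk endpoint; assumed max of 50 ids
        for a in response['artists']:
            rows.append({'artist_id': a['id'],
                         'artist_name': a['name'],
                         'total_followers': a['followers']['total'],
                         'genres': a['genres'],
                         'popularity': a['popularity']})
        time.sleep(1)  # small pause between batches to stay under rate limits
    return pd.DataFrame(rows)
```

This keeps the same output columns as the per-artist version, so the rest of the notebook would not need to change.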
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ec5aef9768779ff05a842689f2b8d568b14058e8
19,757
ipynb
Jupyter Notebook
NAACP/Word2Vec/Word2Vec_Analysis_TFIDF_Neighborhood.ipynb
drash7/summer2021internship
da826ba6c1efae2d946f925c05ccdfadb65179e6
[ "MIT" ]
3
2021-06-09T15:21:25.000Z
2021-08-02T16:58:30.000Z
NAACP/Word2Vec/Word2Vec_Analysis_TFIDF_Neighborhood.ipynb
drash7/summer2021internship
da826ba6c1efae2d946f925c05ccdfadb65179e6
[ "MIT" ]
15
2021-06-02T19:24:36.000Z
2021-07-03T18:22:48.000Z
NAACP/Word2Vec/Word2Vec_Analysis_TFIDF_Neighborhood.ipynb
drash7/summer2021internship
da826ba6c1efae2d946f925c05ccdfadb65179e6
[ "MIT" ]
37
2021-05-31T21:22:42.000Z
2021-11-10T16:23:25.000Z
54.277473
1,642
0.62469
[ [ [ "import pandas as pd\nimport gensim\nfrom collections import defaultdict", "_____no_output_____" ], [ "#'south_boston_waterfront' not included\nsubs = ['fenway', 'beacon_hill', 'downtown', 'south_boston', 'east_boston', 'back_bay', 'jamaica_plain',\n 'south_end', 'charlestown', 'brighton', 'allston', 'west_end', 'roslindale', 'north_end',\n 'mission_hill', 'harbor_islands', 'longwood_medical_area', 'dorchester', 'roxbury', 'mattapan', 'hyde_park']", "_____no_output_____" ], [ "years = [2014, 2015, 2016, 2017, 2018]\ntext_corpus = pd.DataFrame()\nfor year in years:\n temp = pd.read_csv('globe_data/bostonglobe' + str(year) + '.csv')\n text_corpus = pd.concat([text_corpus, temp], axis=0)\n\ndef custom_standardization(data):\n\n spec_chars = [\"!\",'\"',\"#\",\"%\",\"&\",\"'\",\"(\",\")\", \"*\",\"+\",\",\",\n \"-\",\".\",\"/\",\":\",\";\",\"<\", \"=\",\">\",\"?\",\"@\",\"[\",\n \"\\\\\",\"]\",\"^\",\"_\", \"`\",\"{\",\"|\",\"}\",\"~\",\"–\", \n \"\\xc2\", \"\\xa0\", \"\\x80\", \"\\x9c\", \"\\x99\", \"\\x94\", \n \"\\xad\", \"\\xe2\", \"\\x9d\", \"\\n\", \"x9d\", \"xc2\", \n \"xa0\", \"x80\", \"x9c\", \"x99\", \"x94\", \"xad\", \"xe2\"]\n\n for char in spec_chars:\n data['text'] = data['text'].str.strip()\n #data['text'] = str(data['text']).lower()\n data['text'] = data['text'].str.replace(char, ' ')\n #data['text'] = stemmer.stem(str(data['text']))\n\n return data\n\ntext_corpus = custom_standardization(text_corpus)\nprint('corpus standardized')\nprint()\n \n# turn DataFrame into a list of lists of tokens\ndocuments = []\nfor row in text_corpus.values:\n [row] = row\n temp = row.lower().split()\n documents.append(temp)\n\n# create Word2Vec model\n# the skip-grams method is used here, with a window of 10\nmodel = gensim.models.Word2Vec(window=10, min_count=2, sg=1, workers=10)\nmodel.build_vocab(documents) # prepare the model vocabulary\n\n# train model on available data\n# I use 5 epochs since that's standard\nmodel.train(corpus_iterable=documents, total_examples=len(documents), epochs=5)\n\nfor sub in subs:\n print('starting work with ' + sub)\n print()\n sub_TFIDF = pd.DataFrame()\n for year in years:\n data = pd.read_csv('../TF-IDF/Yearly_TFIDF_Scores_by_Neighborhood/' + str(year) + '/' + 'TFIDF_' + sub + '.csv')\n data = data.drop(['Unnamed: 0'], axis=1)\n sub_TFIDF = pd.concat([sub_TFIDF, data], axis=0)\n\n sub_TFIDF = sub_TFIDF.sort_values('weight', ascending=False)\n print(sub + ' term weights sorted')\n \n keywords = []\n for row in sub_TFIDF.itertuples(index=False):\n if len(keywords) < 15 and row.term not in keywords:\n if row.term != 'hokule':\n keywords.append(row.term)\n\n # finding similar words and creating a csv file\n\n def compute_similar_words(model,source_word, topn=5):\n similar_words = [source_word]\n try:\n top_words = model.wv.most_similar(source_word, topn=topn)\n similar_words.extend([val[0] for val in top_words])\n except KeyError as err:\n print(err.args)\n return similar_words \n\n def compute_similar_words_for_all_tasks(model, topn=5):\n columns = ['word' + str(i - 1) for i in range(1, topn + 2)]\n df = pd.DataFrame(data=None, columns=columns)\n for source_word in keywords:\n similar_words = compute_similar_words(model, source_word, topn)\n df.loc[len(df)] = similar_words\n df.to_csv('similar_words_task/neighborhood_TFIDF/' + sub + '_similar_words.csv')\n \n words = compute_similar_words_for_all_tasks(model)\n print(sub + ' similar words to most important terms generated')\n print()", "corpus standardized\n\nstarting work with fenway\n\nfenway term weights 
sorted\nfenway similar words to most important terms generated\n\nstarting work with beacon_hill\n\nbeacon_hill term weights sorted\nbeacon_hill similar words to most important terms generated\n\nstarting work with downtown\n\ndowntown term weights sorted\ndowntown similar words to most important terms generated\n\nstarting work with south_boston\n\nsouth_boston term weights sorted\nsouth_boston similar words to most important terms generated\n\nstarting work with east_boston\n\neast_boston term weights sorted\neast_boston similar words to most important terms generated\n\nstarting work with back_bay\n\nback_bay term weights sorted\nback_bay similar words to most important terms generated\n\nstarting work with jamaica_plain\n\njamaica_plain term weights sorted\njamaica_plain similar words to most important terms generated\n\nstarting work with south_end\n\nsouth_end term weights sorted\nsouth_end similar words to most important terms generated\n\nstarting work with charlestown\n\ncharlestown term weights sorted\ncharlestown similar words to most important terms generated\n\nstarting work with brighton\n\nbrighton term weights sorted\nbrighton similar words to most important terms generated\n\nstarting work with allston\n\nallston term weights sorted\nallston similar words to most important terms generated\n\nstarting work with west_end\n\nwest_end term weights sorted\nwest_end similar words to most important terms generated\n\nstarting work with roslindale\n\nroslindale term weights sorted\nroslindale similar words to most important terms generated\n\nstarting work with north_end\n\nnorth_end term weights sorted\nnorth_end similar words to most important terms generated\n\nstarting work with mission_hill\n\nmission_hill term weights sorted\nmission_hill similar words to most important terms generated\n\nstarting work with harbor_islands\n\nharbor_islands term weights sorted\nharbor_islands similar words to most important terms generated\n\nstarting work with longwood_medical_area\n\nlongwood_medical_area term weights sorted\nlongwood_medical_area similar words to most important terms generated\n\n(\"Key 'hokule' not present\",)\n" ], [ "# allston: lots of names; a few different terms, but no clear pattern\n# back bay: a couple names; lots of words to do with biology/biotech\n# beacon hill: lots of names; one legal/law enforcement term\n# brighton: lots of names; one legal/law enforcement term\n# charlestown: no names; several terms to do with shapes; one legal/law enforcement term\n# downtown: lots of names; a couple legal/law enforcement terms\n# east boston: a couple names; a couple technical terms; no clear pattern\n# fenway: a few names; one political term; one legal/law enforcement term (debatable)\n# harbor islands: lots of names; several different terms, but no clear pattern\n# jamaica plain: no names; a lot of camping/sailing related terms\n# longwood medical center: almost all names\n# mission hill: almost all names\n# north end: several names; a couple legal/law enforcement terms\n# roslindale: several names; one legal/law enforcement related term; a couple sports related terms\n# south boston: a lot of names; a couple legal/law enforcement terms\n# south end: a lot of names; no clear pattern among the rest\n# west end: almost all names; one political term\n\n# dorchester: a couple names; one political term\n# hyde park: almost all names\n# mattapan: a couple names; a couple political terms; several sport (football) related terms\n# roxbury: several names; a couple political 
terms; a couple legal/law enforcement related terms", "_____no_output_____" ], [ "# trend across white neighborhoods: occurrence of terms to do with legal/law enforcement\n# trend across black neighborhoods: occurrence of terms to do with politics", "_____no_output_____" ], [ "# potential next step: remove names from the corpus and repeat the entire process\n# concern: doing the exact same thing for Derry; currently struggling with removing names", "_____no_output_____" ], [ "# potential further step: compare the same word w.r.t. different neighborhoods \n# for example, generate word embeddings for the articles for each neighborhood and compare the word embeddings for a given term\n\n# lemmatize articles, remove stopwords, remove names from corpus, repeat entire process", "_____no_output_____" ], [ "# can narrow down focus to work with individual articles\n# build word embedding on an article\n# look for certain keywords using some method and see how close those keywords are to their vectors from the entire corpus for that neighborhood (or perhaps, group of neighborhoods)\n\n# let's say you have an article talking about an election in dorchester\n# build a word embedding model on article\n# take keyword election\n# extract closest word to that keyword\n# take vector embedding for election and compare its vector with the\n # rest of the articles for the neighborhood\n# this is essentially an extra step in case top TF-IDF words don't give enough information\n\n# also separate articles by sub-neighborhood", "_____no_output_____" ], [ "#1 cleaning the data (remove duplicates) DONE\n#2 separate black and white neighborhoods and then by sub-neighborhood DONE\n#3 work with a subset of the data\n#4 get TF-IDF weights for each article in that subset\n #4.5 article:sub-neighborhood articles = neighborhood articles:text corpus\n#5 get word embeddings for the top words\n#6 get sub-neighborhood names from here: https://drive.google.com/file/d/1le8X9VQwO-cM4VVAAb4quAlgerr94T0x/view", "_____no_output_____" ] ] ]
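One caveat on the "compare the same word w.r.t. different neighborhoods" idea in the comments above: vectors from independently trained Word2Vec models live in different coordinate systems, so raw cosine similarity across models is meaningless without an alignment step (e.g. orthogonal Procrustes). A hedged sketch that sidesteps alignment by comparing a term's nearest-neighbour sets instead — `neighbor_overlap` is a hypothetical helper, not part of the notebook:

```python
def neighbor_overlap(model_a, model_b, term, topn=20):
    """Jaccard overlap of a term's nearest neighbours in two gensim
    Word2Vec models trained on different neighbourhood corpora."""
    try:
        near_a = {w for w, _ in model_a.wv.most_similar(term, topn=topn)}
        near_b = {w for w, _ in model_b.wv.most_similar(term, topn=topn)}
    except KeyError:
        return None  # term missing from one model's vocabulary
    return len(near_a & near_b) / len(near_a | near_b)
```

A low overlap for a term like "election" between a model trained on white-neighborhood articles and one trained on black-neighborhood articles would support the usage-difference trend sketched above.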
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5af19dd1118a1bc68da09afbeadb72deab5b71
19,884
ipynb
Jupyter Notebook
Python_CoderDojo19.ipynb
seldoncode/Python_CoderDojo
2b0e33ac517cae853af00122b14c4e5719d770c3
[ "MIT" ]
null
null
null
Python_CoderDojo19.ipynb
seldoncode/Python_CoderDojo
2b0e33ac517cae853af00122b14c4e5719d770c3
[ "MIT" ]
null
null
null
Python_CoderDojo19.ipynb
seldoncode/Python_CoderDojo
2b0e33ac517cae853af00122b14c4e5719d770c3
[ "MIT" ]
null
null
null
40.91358
242
0.301599
[ [ [ "<a href=\"https://colab.research.google.com/github/seldoncode/Python_CoderDojo/blob/main/Python_CoderDojo19.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Juegos con Inteligencia artificial", "_____no_output_____" ], [ "### Juego de los palillos\n* El [NIM](https://es.wikipedia.org/wiki/Nimrod_(videojuego)) es el precursor de los videojuegos, contruido en 1951.\n* El juego de los palillos similar al \"juego de la última piedra\"\n* Dados unos palillos iniciales, por ejemplo entre 16 y 23\n* Dos jugadores (persona y máquina) quitan por turnos 1 o 2 o 3 palillos\n* Comienza quitando el humano\n* El último en quitar gana\n\nFases del juego:\n 1. Presentación del juego\n 2. Elección por el usuario de nivel (fácil / difícil)\n 3. Elección aleatoria de palillos iniciales (entre 16 y 23)\n 4. Elección aleatoria de palillos quitados:\n - entre 1 y 3, o\n - entre 1 y 4, o\n - entre 1 y 5\n 5. El jugador humano comienza siempre el juego\n 6. Mostrar el área de juego con los palillos totales\n 7. Turno del jugador humano:\n - Elige el número de palillos a quitar\n - Se muestra el área de juego sin esos palillos\n - Se cambia el turno\n 8. Turno de la máquina\n - Si el nivel es fácil elige aleatoriamente\n - Si el nivel es difícil llama a la IA\n - Se muestra el área de juego sin los palillos retirados\n - Se cambia el turno\n 9. Después de cada movimiento se comprueba si no hay más palillos:\n - Si es así se comprueba quién cogió el último\n - Se muestra el ganador de la partida\n 10. Si aún hay palillos, se sigue con el turno siguiente", "_____no_output_____" ] ], [ [ "'''Juego de los palillos o de Nim'''\nimport random\nimport time\nimport os\n\ndef presentacion_1():\n '''Mostramos una pantalla de inicio\n Se indica el nivel fácil o dificil que elige el usuario'''\n print()\n print(\" ***************** JUEGO DE LOS PALILLOS *****************\")\n print()\n print()\n print(\" Gana quien toma el último palillo \")\n print()\n print()\n print(\" 1. FÁCIL 2. 
DIFÍCIL\")\n print()\n print()\n print(\" ************************************************************\")\n print()\n nivel = \"\"\n while nivel != \"1\" and nivel != \"2\":\n nivel = input(\" Elige nivel(1/2): \")\n return nivel\n\n\ndef presentacion_2(palillos, quitas):\n '''Mostramos una segunda pantalla con los datos del juego,\n y pedimos al usuario que e enter para empezar.'''\n print()\n print(\" ***************** JUEGO DE LOS PALILLOS *****************\")\n print()\n print()\n print(f\" Habrá {palillos} iniciales \")\n print()\n print(f\" Se pueden quitar entre 1 y {quitas}\")\n print()\n print(\" Empiezas a mover tu\")\n print()\n print(\" ************************************************************\")\n print()\n imput(\"Pulsa Enter para empezar ...\")\n\ndef sorteo_opciones():\n '''Devuelve dos valores aleatorios: Número de palillos iniciales (entre 16 y 23)\n y número máximo de palillos a quitar (entre 3 y 5)'''\n palillos = random.randint(16, 23)\n quitas = random.randint(3, 5)\n return palillos, quitas\n\n\ndef area_de_juego(palillos, quitas):\n '''Muestra los palillos que hay en cada jugada de la partida.'''\n print()\n print(\" ***************** JUEGO DE LOS PALILLOS *****************\")\n print()\n print()\n\n for fila in range(4):\n print(end = \" \")\n for p in range(1, palillos+1):\n print(\"|\", end = \" \")\n if p % quitas == 0: # haciendo grupos de 4 palillos, o el nº de quitas que sea\n print(end = \" \") # un espacio más\n print()\n \n print()\n print()\n print()\n\ndef movimientos_jugador():\n '''Devuelve el número de palillos que el usuario quita en cada jugada'''\n \n\n\n\n\n\ndef movimiento_ordenador_aleatorio():\n '''Devuelve el número aleatorio de palillos a quitar como jugada del ordenador'''\n pass\n\ndef movimiento_ordenador_con_ia():\n '''Devuelve el número de palillos a quitar como jugada del ordenador\n calculados para que el ordenador gane siempre que pueda'''\n pass\n\ndef mostrar_ganador():\n '''Muestra pantalla final con el ganador de la partida: jugador u ordenador'''\n pass\n\n########## Programa principal #############\n\n# Flujo del programa\n\n", "_____no_output_____" ], [ "import random\nimport time\nimport os\n\npalillos={16,17,18,19,20,2}\n\n#Crear funciones\n\ndef presentacion():\n print(\"**************************************\")\n print(\"** JUEGO DE LOS PALILLOS ****\")\n print(\"** **\")\n print(\"* GANA QUIEN SAQUE EL ÚLTIMO PALILLO *\")\n print(\"* *\")\n print(\"*** 1° FÁCIL / 2° DIFÍCI ***\")\n print(\"**************************************\")\n print()\n nivel=input(\"Elige nivel (1/2): \")\n return nivel\n\ndef presentacion_2(palillos,quita):\n print(\"**************************************\")\n print(\"** JUEGO DE LOS PALILLOS ****\")\n print(f\"** HABRA {palillos} PALILLOS EN TOTAL **\")\n print(\"** Y **\")\n print(f\"* SE PUEDE SACAR DE 1 A {quita} *\")\n print(\"*** ENPIEZAS TU ***\")\n print(\"**************************************\")\n print()\n input(\"Presione ENTRER para empezar: \")\n\ndef area_de_juego(p,q):\n print()\n print(\"** JUEGO DE LOS PALILLOS ****\".center(100))\n print()\n print()\n \n for fila in range(4):\n print(end=\" \")\n for p in range(1,p+1):\n print(\"|\",end=\" \")\n if p % q == 0:\n print(end=\" \")\n print()\n\n print()\n print()\n print()\n\ndef eleccion():\n palillos = random.randrange(16,24,1)\n quita = random.randrange(3,6,1) # Eleccion y quita de palillos aleatoria\n return palillos, quita\n\ndef movimiento_jugador(p,q):\n if q == 3:\n q = (\"1\",\"2\",\"3\")\n elif q == 4:\n q = 
(\"1\",\"2\",\"3\",\"4\")\n elif q == 5:\n q = (\"1\",\"2\",\"3\",\"4\",\"5\")\n quita = input(\" Palillos a quitar: \")\n while quita not in q or int(quita)>p:\n if quita not in q:\n quita = input(f\" Elige entre 1 y {q}\")\n elif int(quita)>p:\n quita = input(f\" Solo quedan {p} palillos: \")\n else:\n palillos_quita= int(quita)\n return palillos_quita\n\ndef movimiento_cpu(p,q):\n if p <= q:\n palillos_quita = p\n else:\n palillos_quita = random.randint(1,q)\n while palillos_quita > p:\n palillos_quita = random.randint(1,q)\n return palillos_quita\n\ndef movimientos_cpu_ia(p,q):\n palillos_quita = None\n while palillos_quita is None or palillos_quita > p:\n if p <= q:\n palillos_quita = p\n elif p % (q+1) == 5:\n palillos_quita = 5\n elif p % (q+1) == 4:\n palillos_quita = 4\n elif p % (q+1) == 3:\n palillos_quita = 3\n elif p % (q+1)== 2:\n palillos_quita = 2\n elif p %(q+1) == 1:\n palillos_quita = 1\n elif p % (q+1) == 0:\n palillos_quita = random.randint(1,2)\n return palillos_quita\n\ndef mostrar_ganador(turno):\n if turno == 2:\n mensaje1 = \" HAS AGARRADO EL ULTIMO PALILLO\"\n mensaje2 = \" ¡¡¡HAS GANADO!!!\"\n elif turno == 1:\n mensaje1 = \" EL CPU AGARRO EL ULTIMO PALILLO\"\n mensaje2 = \" ¡¡¡HAS PERDIDO!!!\"\n\n print()\n print(\"** JUEGO DE LOS PALILLOS ****\")\n print(f\"{mensaje1}\")\n print(f\"{mensaje2}\")\n print()\n\n# Bucle principa del juego\n\nturno=1\n\np,q = eleccion()\n\nos.system(\"cls\")\nnivel=presentacion()\n\nos.system(\"cls\")\npresentacion_2(p,q)\n\njugando=True\n\nwhile jugando:\n\n os.system(\"cls\")\n area_de_juego(p,q)\n \n if turno ==1:\n jugada=movimiento_jugador(p,q)\n turno=2\n elif turno ==2:\n print (\"CPU PENSANDO....\")\n time.sleep(2)\n if nivel == \"1\":\n jugada = movimiento_cpu(p,q)\n elif nivel == \"2\":\n jugada = movimientos_cpu_ia(p,q)\n turno=1\n\n p-=jugada\n \n if p == 0 :\n os.system(\"cls\")\n mostrar_ganador(turno)\n jugando = False", "**************************************\n** JUEGO DE LOS PALILLOS ****\n** **\n* GANA QUIEN SAQUE EL ÚLTIMO PALILLO *\n* *\n*** 1° FÁCIL / 2° DIFÍCI ***\n**************************************\n\nElige nivel (1/2): 2\n**************************************\n** JUEGO DE LOS PALILLOS ****\n** HABRA 22 PALILLOS EN TOTAL **\n** Y **\n* SE PUEDE SACAR DE 1 A 4 *\n*** ENPIEZAS TU ***\n**************************************\n\nPresione ENTRER para empezar: \n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | | | | | \n\n\n\n Palillos a quitar: 2\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | | | \n\n\n\nCPU PENSANDO....\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | | | | \n\n\n\n Palillos a quitar: 3\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | | \n\n\n\nCPU PENSANDO....\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | \n | | | | | | | | | | | | | | \n\n\n\n Palillos a quitar: 4\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | | \n | | | 
| | | | | | | \n | | | | | | | | | | \n | | | | | | | | | | \n\n\n\nCPU PENSANDO....\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n | | | | | | | | | \n\n\n\n Palillos a quitar: 4\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | | \n | | | | | \n | | | | | \n | | | | | \n\n\n\nCPU PENSANDO....\n\n ** JUEGO DE LOS PALILLOS **** \n\n\n | | | | \n | | | | \n | | | | \n | | | | \n\n\n\n Palillos a quitar: 4\n\n** JUEGO DE LOS PALILLOS ****\n HAS AGARRADO EL ULTIMO PALILLO\n ¡¡¡HAS GANADO!!!\n\n" ] ] ]
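The hard level (`movimientos_cpu_ia`) encodes the classical strategy for last-take-wins subtraction games: whoever moves to a position where the remaining sticks are a multiple of (q+1) leaves the opponent in a losing position. The chain of `elif p % (q+1) == ...` branches reduces to a single modulo; a compact sketch of the same idea (`optimal_move` is a hypothetical name, not part of the notebook):

```python
import random

def optimal_move(p, q):
    """Optimal removal for a last-take-wins game with p sticks left
    and 1..q sticks removable per turn."""
    r = p % (q + 1)
    if r == 0:
        # losing position: every move hands the opponent a win,
        # so stall with any legal random take
        return random.randint(1, min(q, p))
    # winning position: take r, leaving a multiple of (q+1)
    return r
```

With 22 sticks and q = 4, the transcript above also shows why the human could beat the AI: 22 % 5 = 2, so the first player (the human) held the winning position from the start and kept it by taking 2, 3, 4, 4 and finally 4.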
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
ec5af84f10a5d1c4f0fc4cd7c268860ca32231d3
6,349
ipynb
Jupyter Notebook
docs/source/NLP/solved/Planning refreshments (Solved).ipynb
ffraile/operations-research
3e143ac4ec7031f008505cc2695095ab8dfa2f28
[ "MIT" ]
null
null
null
docs/source/NLP/solved/Planning refreshments (Solved).ipynb
ffraile/operations-research
3e143ac4ec7031f008505cc2695095ab8dfa2f28
[ "MIT" ]
null
null
null
docs/source/NLP/solved/Planning refreshments (Solved).ipynb
ffraile/operations-research
3e143ac4ec7031f008505cc2695095ab8dfa2f28
[ "MIT" ]
null
null
null
37.791667
496
0.587809
[ [ [ "# Planning refreshment production\nA firm that packs refreshments employs the same syrup to produce its 1.5 l COLI and PEPSA products on its S1 production line. Once processed, each hectolitre of syrup produces 40 units of the 1.5 l COLI product or 20 units of the 1.5 l PEPSA product. If $x_1$ units of the 1.5 l COLI product and $x_2$ units of the 1.5 l PEPSA product are produced, the firm estimates that the daily income obtained in dollars would be given by the following function:\n\n$f(x) = 49000·x_1 -x_1^2 + 30·x_2 -2·x_2^2$\n\nIt costs 150 dollars to buy and process each hectolitre of syrup. The S1 packaging line has a net capacity of producing 56,800 1.5 l product units every day. The firm is committed to produce at least half the amount of PEPSA than COLI. Although priority orders tend to amend its production planning, the firm wishes to have a basic product planning that optimises its daily profits.\n\n**a)** Formulate a non-linear programming model that helps the firm create its basic daily product planning for its S1 packaging line.\nOur decision variables are: \n\n- $x_1$: Units of COLI\n- $x_2$: Units of PEPSA\n\nThe objective is to maximise the profits, so we need to calculate the difference between the revenues and the costs: \n\n$\\max z = f(x)-c(x)$\n\nLet us first consider the costs as a function of the number of hectolitres of syrup used ($h_l$): \n\n$c = 150*h_l$\n\n$h_l = x_1/40 + x_2/20$\n\nIf we define the cost as a function of $x=[x_1,x_2]$, then:\n\n$c(x) = 150·(x_1/40 + x_2/20)$\n\nNow, z becomes:\n\n$z = 49000·x_1 -x_1^2 + 30·x_2 -2·x_2^2 - 3.75·x_1 -7.5·x_2$\n\nAs for the constraints, the capacity cannot exceed the total capacity of the line:\n\n$x_1 + x_2 \\leq 56800$\n\nAnd the \n\n$z = 48996.25.25x_1 - x_1^2 + 22.5·x_2 - 2x_2^2$\n\n$s.t$\n\n$x_1 + x_2 \\leq 56800$\n\n$x_2 \\geq x_1/2$\n\nOr in canonical form: \n\n$z = 43000.25x_1 - x_1^2 - 2970x_2- 2x_2^2$\n\n$s.t.$ \n\n$x_1 + x_2 -56800 \\leq 0$ \n\n$x_1 - 2x_2 \\leq 0$\n\n$x_1, x_2 \\geq 0$\n\n\n**b)** Write down the Kuhn Tucker conditions that would help the company determine the basic daily production plan.\n\nThe Lagrangian is:\n\n$L = 43000.25x_1 - x_1^2 - 2970x_2- 2x_2^2 + \\lambda_1\\left(x_1 + x_2 -56800\\right) + \\lambda_2\\left(x_1/2 - x_2\\right)$\n\n**Gradient condition:**\n\n$\\nabla(L)=0$\n\n$\\dfrac{dL}{dq_1} = 48996.25-2x_1+\\lambda_1+\\lambda_2 = 0$\n\n$\\dfrac{dL}{dq_2} = 105-4x_2+\\lambda_1-\\lambda_2 = 0$\n\n**Feasibility Condition:**\n\n$x_1 + x_2 -56800 \\leq 0$\n\n$x_1 - 2x_2 \\leq 0$\n\n**Orthogonality Condition:**\n\n$\\lambda_1·(x_1 + x_2 -56800) = 0$\n\n$\\lambda_2·(x_1 - 2x_2) = 0$\n\n**Non-negativity condition:**\n\n$x_1, x_2, \\geq 0$\n\n$\\lambda_1, \\lambda_2 \\leq 0$\n\n\n**c)** The following critical point is found $x_1= 13838.33; x_2= 6919.16; \\lambda_1=0; \\lambda_2=-15323.34; z = 287249204.2$, where $\\lambda_1$ is the Lagrangian multiplier corresponding to the S1 packaging line capacity constraint. Can you conclude whether it is a local/global maximum? With this solution, What is the maximum quantity that the firm would be willing to pay for 1 h of overtime production? How much would the daily profit increase per additional COLI unit produced?\n\nThe Hessian matrix is: \n\n$H = \\begin{bmatrix}\n-2 & 0\\\\\n0 & -4\n\\end{bmatrix}$\n\nThe Hessian determinants are: \n\n$H_1 = -2$\n\n$H_2 = 8$\n\nIt is not possible to determine whether this Kuhn-Tucker point is a local maximum or minimum. 
However, we can do the test in the equivalent minimisation problem $min z^* = -z$. In this case, the Hessian matrix is:\n\n$H = \\begin{bmatrix}\n2 & 0\\\\\n0 & 4\n\\end{bmatrix}$\n\n$H_1 = 2$\n\n$H_2 = 8$\n\nWhich is definitive positive and therefore this Kuhn Tucker point is a global minimum in the equivalent minimisation problem and by extension a global maximum in the original maximimation problem.\n\nIn this Kuhn-Tucker point, the $\\lambda_1$ is the shadow price of the constraint representing the net capacity and is equal to zero, meaning that the capacity is not binding the profit and it is not profitable for the company to increase it with extra hours. \nOn the other hand, the shadow price of the second constraint is $\\lambda_2$ is -15323.34, meaning that a unitary change of the function $g_2$ increases the objective function in 15323.34. Since $x_1$ (COLI) is directly proportional to $g_2(x)$, a positive change in the right hand side would increase the profit in 15323.34. Since changes in $x_2$ are multiplied by a factor of -2 in the second constraint, a unitary change of $x_2$ (PEPSA) would decrease the profit in 7661.67.", "_____no_output_____" ] ] ]
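As a numerical cross-check (not part of the original solved exercise), the model can be handed to an off-the-shelf solver. A minimal sketch with SciPy's SLSQP, using the objective and canonical constraints derived above; the starting point is arbitrary:

```python
from scipy.optimize import minimize

def neg_z(x):
    # negative profit, since SciPy minimises
    x1, x2 = x
    return -(48996.25 * x1 - x1**2 + 22.5 * x2 - 2 * x2**2)

constraints = (
    {'type': 'ineq', 'fun': lambda x: 56800 - x[0] - x[1]},  # line capacity
    {'type': 'ineq', 'fun': lambda x: 2 * x[1] - x[0]},      # x2 >= x1/2
)

result = minimize(neg_z, x0=[1000.0, 1000.0],
                  bounds=[(0, None), (0, None)],
                  constraints=constraints, method='SLSQP')
print(result.x, -result.fun)  # candidate optimum and its daily profit
```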
[ "markdown" ]
[ [ "markdown" ] ]
ec5b0312df1eaab894f5ed44b71ad6e4b1b1b002
8,993
ipynb
Jupyter Notebook
ML_Sandbox/.ipynb_checkpoints/rolltec_features-checkpoint.ipynb
ChristopherGS/sensor_readings
1695349276292ba16206cbc9efb94c10b65d439e
[ "BSD-3-Clause" ]
2
2015-10-01T09:03:31.000Z
2019-07-23T19:42:00.000Z
ML_Sandbox/rolltec_features.ipynb
ChristopherGS/sensor_readings
1695349276292ba16206cbc9efb94c10b65d439e
[ "BSD-3-Clause" ]
null
null
null
ML_Sandbox/rolltec_features.ipynb
ChristopherGS/sensor_readings
1695349276292ba16206cbc9efb94c10b65d439e
[ "BSD-3-Clause" ]
3
2016-01-13T13:07:26.000Z
2019-04-12T05:43:21.000Z
47.582011
120
0.573446
[ [ [ "import math\nimport pandas as pd\nimport numpy as np\n\ndef root_sum_square(x, y, z):\n sum = ((x**2)+(y**2)+(z**2))\n rss = math.sqrt(sum)\n return rss\n\ndef root_mean_square(x, y, z):\n mean = ((x**2)+(y**2)+(z**2))/3\n rss = math.sqrt(mean)\n return rss\n\ndef tiltx(x, y, z):\n try:\n prep = (x/(math.sqrt((y**2)+(z**2))))\n tilt = math.atan(prep)\n except ZeroDivisionError:\n tilt = 0\n return tilt\n\ndef tilty(x, y, z):\n try:\n prep = (y/(math.sqrt((x**2)+(z**2))))\n tilt = math.atan(prep)\n except ZeroDivisionError:\n tilt = 0\n return tilt\n \ndef max_min_diff(max, min):\n diff = max - min\n return diff\n\ndef magnitude(x, y, z):\n magnitude = x + y + z\n return magnitude\n\ndef create_features(df, _window=50, test=False):\n \"\"\"builds the data features, then applies\n overlapping logic\n \"\"\"\n \n accel_x = df['ACCEL_X'].astype(float)\n accel_y = df['ACCEL_Y'].astype(float)\n accel_z = df['ACCEL_Z'].astype(float)\n gyro_x = df['GYRO_X'].astype(float)\n gyro_y = df['GYRO_Y'].astype(float)\n gyro_z = df['GYRO_Z'].astype(float)\n \n df2 = pd.DataFrame()\n \n # capture tilt here, then average later\n \n df2['tiltx'] = df.apply(lambda x: tiltx(x['ACCEL_X'], x['ACCEL_Y'], x['ACCEL_Z']), axis=1)\n df2['tilty'] = df.apply(lambda x: tilty(x['ACCEL_X'], x['ACCEL_Y'], x['ACCEL_Z']), axis=1)\n \n # Capture stand state here, then average later\n \n if (test==False):\n df2['stand'] = df['stand'].astype(float)\n \n TIME_SEQUENCE_LENGTH = _window\n \n # Basics\n \n df2['ACCEL_X'] = pd.rolling_mean(accel_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['ACCEL_Y'] = pd.rolling_mean(accel_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['ACCEL_Z'] = pd.rolling_mean(accel_z, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['GYRO_X'] = pd.rolling_mean(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['GYRO_Y'] = pd.rolling_mean(gyro_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['GYRO_Z'] = pd.rolling_mean(gyro_z, TIME_SEQUENCE_LENGTH-2, center=True)\n \n # rolling median\n\n df2['rolling_median_x'] = pd.rolling_median(accel_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_median_y'] = pd.rolling_median(accel_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_median_z'] = pd.rolling_median(accel_z, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_median_gx'] = pd.rolling_median(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_median_gy'] = pd.rolling_median(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_median_gz'] = pd.rolling_median(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n \n # rolling max\n \n df2['rolling_max_x'] = pd.rolling_max(accel_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_max_y'] = pd.rolling_max(accel_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_max_z'] = pd.rolling_max(accel_z, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_max_gx'] = pd.rolling_max(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_max_gy'] = pd.rolling_max(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_max_gz'] = pd.rolling_max(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n \n # rolling min\n \n df2['rolling_min_x'] = pd.rolling_min(accel_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_min_y'] = pd.rolling_min(accel_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_min_z'] = pd.rolling_min(accel_z, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_min_gx'] = pd.rolling_min(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_min_gy'] = pd.rolling_min(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n 
df2['rolling_min_gz'] = pd.rolling_min(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n \n # rolling sum\n \n df2['rolling_sum_x'] = pd.rolling_sum(accel_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_sum_y'] = pd.rolling_sum(accel_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_sum_z'] = pd.rolling_sum(accel_z, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_sum_gx'] = pd.rolling_sum(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_sum_gy'] = pd.rolling_sum(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_sum_gz'] = pd.rolling_sum(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n \n # standard deviation\n \n df2['rolling_std_x'] = pd.rolling_std(accel_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_std_y'] = pd.rolling_std(accel_y, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_std_z'] = pd.rolling_std(accel_z, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_std_gx'] = pd.rolling_std(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_std_gy'] = pd.rolling_std(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n df2['rolling_std_gz'] = pd.rolling_std(gyro_x, TIME_SEQUENCE_LENGTH-2, center=True)\n \n # Tilt\n df2['avg_tiltx'] = pd.rolling_mean(df2['tiltx'], TIME_SEQUENCE_LENGTH-2, center=True)\n df2['avg_tilty'] = pd.rolling_mean(df2['tilty'], TIME_SEQUENCE_LENGTH-2, center=True)\n \n \n if (test==False):\n # standing up detection\n df2['avg_stand'] = pd.rolling_mean(df2['stand'], TIME_SEQUENCE_LENGTH-2, center=True)\n print df2['avg_stand']\n\n # round standing up as we need it to be either '0' or '1' for training later\n df2['avg_stand'] = df2['avg_stand'].apply(lambda x: math.ceil(x))\n\n ol_upper = _window/2\n ol_lower = ol_upper-1\n \n new_df = df2[ol_lower::ol_upper] # 50% overlap with 30\n \n new_df['max_min_x'] = df2.apply(lambda x: max_min_diff(x['rolling_max_x'], x['rolling_min_x']), axis=1)\n new_df['max_min_y'] = df2.apply(lambda x: max_min_diff(x['rolling_max_y'], x['rolling_min_y']), axis=1)\n new_df['max_min_z'] = df2.apply(lambda x: max_min_diff(x['rolling_max_z'], x['rolling_min_z']), axis=1)\n new_df['max_min_gx'] = df2.apply(lambda x: max_min_diff(x['rolling_max_gx'], x['rolling_min_gx']), axis=1)\n new_df['max_min_gy'] = df2.apply(lambda x: max_min_diff(x['rolling_max_gy'], x['rolling_min_gy']), axis=1)\n new_df['max_min_gz'] = df2.apply(lambda x: max_min_diff(x['rolling_max_gz'], x['rolling_min_gz']), axis=1)\n \n new_df['acc_rss'] = df2.apply(lambda x: root_sum_square(x['ACCEL_X'], x['ACCEL_Y'], x['ACCEL_Z']), axis=1)\n new_df['gyro_rss'] = df2.apply(lambda x: root_sum_square(x['GYRO_X'], x['GYRO_Y'], x['GYRO_Z']), axis=1)\n \n new_df['acc_rms'] = df2.apply(lambda x: root_mean_square(x['ACCEL_X'], x['ACCEL_Y'], x['ACCEL_Z']), axis=1)\n new_df['gyro_rms'] = df2.apply(lambda x: root_mean_square(x['GYRO_X'], x['GYRO_Y'], x['GYRO_Z']), axis=1)\n \n new_df['acc_magnitude'] = df2.apply(lambda x: magnitude(x['ACCEL_X'], x['ACCEL_Y'], x['ACCEL_Z']), axis=1)\n new_df['gyro_magnitude'] = df2.apply(lambda x: magnitude(x['GYRO_X'], x['GYRO_Y'], x['GYRO_Z']), axis=1)\n \n return new_df", "_____no_output_____" ] ] ]
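A portability note: the top-level `pd.rolling_*` helpers used in `create_features` were deprecated in pandas 0.18 and later removed. Under a current pandas the same centred windows are written through the `.rolling()` accessor; a sketch of the equivalent calls, reusing `accel_x`, `df2` and `TIME_SEQUENCE_LENGTH` from the cell above:

```python
window = TIME_SEQUENCE_LENGTH - 2  # same centred window length as above

df2['ACCEL_X'] = accel_x.rolling(window, center=True).mean()
df2['rolling_median_x'] = accel_x.rolling(window, center=True).median()
df2['rolling_max_x'] = accel_x.rolling(window, center=True).max()
df2['rolling_min_x'] = accel_x.rolling(window, center=True).min()
df2['rolling_sum_x'] = accel_x.rolling(window, center=True).sum()
df2['rolling_std_x'] = accel_x.rolling(window, center=True).std()
# ...and likewise for the y/z accelerometer and x/y/z gyroscope series.
```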
[ "code" ]
[ [ "code" ] ]
ec5b0c04ee2916d31b9780374804b57ff1bab277
293,282
ipynb
Jupyter Notebook
models/HackingTogether.ipynb
sandias42/mlware
f365623c4efcf54c539c1dc95e569fab80c90286
[ "MIT" ]
4
2017-03-03T18:46:21.000Z
2019-01-22T19:19:00.000Z
models/HackingTogether.ipynb
sandias42/mlware
f365623c4efcf54c539c1dc95e569fab80c90286
[ "MIT" ]
null
null
null
models/HackingTogether.ipynb
sandias42/mlware
f365623c4efcf54c539c1dc95e569fab80c90286
[ "MIT" ]
null
null
null
49.971375
1,170
0.562101
[ [ [ "from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import SGDClassifier\n#from sklearn.model_selection import cross_val_score\nfrom scipy.io import mmread\nimport numpy as np", "_____no_output_____" ], [ "malware_classes = [\"Agent\", \"AutoRun\", \"FraudLoad\", \"FraudPack\", \"Hupigon\", \"Krap\",\n \"Lipler\", \"Magania\", \"None\", \"Poison\", \"Swizzor\", \"Tdss\",\n \"VB\", \"Virut\", \"Zbot\"]\n\n# a function for writing predictions in the required format\ndef write_predictions(predictions, ids, outfile):\n \"\"\"\n assumes len(predictions) == len(ids), and that predictions[i] is the\n index of the predicted class with the malware_classes list above for\n the executable corresponding to ids[i].\n outfile will be overwritten\n \"\"\"\n with open(outfile,\"w+\") as f:\n # write header\n f.write(\"Id,Prediction\\n\")\n for i, history_id in enumerate(ids):\n f.write(\"%s,%d\\n\" % (history_id, predictions[i]))\n\ndef classes_to_Y(classes):\n output = []\n for cls in classes:\n output.append(malware_classes.index(cls))\n return np.array(output)", "_____no_output_____" ], [ "# load training classes\nclasses = np.load(\"../data/features/train_classes.npy\")\n\n# load sparse matrix of training data\nsparse_mat_train_test = mmread(\"../data/features/naive_word_hashed_full_features.mtx\")", "_____no_output_____" ], [ "# classes and load BIG tfidf feature matrix\nclasses = np.load(\"../data/features/train_classes.npy\")\n\n# load sparse matrix of training data\nsparse_mat_train_test = mmread(\"../data/features/tfifd_4gram_hashed_full_features.mtx\")", "_____no_output_____" ], [ "# convert csr to a numpy array\nsparse = sparse_mat_train_test\n\n# pull out training examples\nX = sparse.tocsc()[:classes.shape[0],:]\n# X_CV = X[-300:]\n# X = X[:-300]\n\nX_test = sparse.tocsc()[classes.shape[0]:,:]\nprint X_test.shape\n\nY = classes_to_Y(classes)\n# Y_CV = Y[-300:]\n# Y = Y[:-300]", "(3724, 1048576)\n" ], [ "RF = RandomForestClassifier()\nRF.fit(X, Y)", "_____no_output_____" ], [ "# preds = RF.predict(X_CV)\n\n# mistakes = 0\n# for i in range(len(preds)):\n# if preds[i] != Y_CV[i]:\n# mistakes += 1\n# print malware_classes[Y_CV[i]]\n# print mistakes", "_____no_output_____" ], [ "test_pred = RF.predict(X_test)", "_____no_output_____" ], [ "print test_pred\ntest_ids = np.load(\"../data/features/test_ids.npy\")\nprint test_ids\nwrite_predictions(test_pred, test_ids, \"../predictions/rfc_10.csv\")", "_____no_output_____" ], [ "# Try an SGDClassifier learning directly from the sparse matrix\nSGD = SGDClassifier(n_jobs=1, n_iter=100, verbose=1, loss=\"modified_huber\")\nSGD.fit(X,Y)", "-- Epoch 1\nNorm: 77.67, NNZs: 1042090, Bias: -1.302051, T: 3086, Avg. loss: 1.456797\nTotal training time: 0.95 seconds.\n-- Epoch 2\nNorm: 59.59, NNZs: 1048073, Bias: -1.360660, T: 6172, Avg. loss: 0.908354\nTotal training time: 1.88 seconds.\n-- Epoch 3\nNorm: 48.76, NNZs: 1048417, Bias: -1.596870, T: 9258, Avg. loss: 0.657365\nTotal training time: 2.81 seconds.\n-- Epoch 4\nNorm: 41.35, NNZs: 1048540, Bias: -1.502075, T: 12344, Avg. loss: 0.513506\nTotal training time: 3.76 seconds.\n-- Epoch 5\nNorm: 36.60, NNZs: 1048561, Bias: -1.494563, T: 15430, Avg. loss: 0.423420\nTotal training time: 4.71 seconds.\n-- Epoch 6\nNorm: 33.26, NNZs: 1048569, Bias: -1.469440, T: 18516, Avg. loss: 0.361178\nTotal training time: 5.67 seconds.\n-- Epoch 7\nNorm: 30.88, NNZs: 1048575, Bias: -1.445343, T: 21602, Avg. 
loss: 0.315927\nTotal training time: 6.66 seconds.\n[... epochs 8-61 elided: Avg. loss fell steadily from 0.281520 (epoch 8) to 0.065997 (epoch 61) ...]\n-- Epoch 62\nNorm: 22.00, NNZs: 1048576, Bias: -1.180775, T: 191332, Avg. 
loss: 0.065453\nTotal training time: 67.32 seconds.\n-- Epoch 63\nNorm: 21.99, NNZs: 1048576, Bias: -1.180874, T: 194418, Avg. loss: 0.064920\nTotal training time: 68.45 seconds.\n-- Epoch 64\nNorm: 21.98, NNZs: 1048576, Bias: -1.177321, T: 197504, Avg. loss: 0.064407\nTotal training time: 69.58 seconds.\n-- Epoch 65\nNorm: 21.97, NNZs: 1048576, Bias: -1.177286, T: 200590, Avg. loss: 0.063905\nTotal training time: 70.72 seconds.\n-- Epoch 66\nNorm: 21.95, NNZs: 1048576, Bias: -1.179452, T: 203676, Avg. loss: 0.063414\nTotal training time: 71.88 seconds.\n-- Epoch 67\nNorm: 21.94, NNZs: 1048576, Bias: -1.177844, T: 206762, Avg. loss: 0.062946\nTotal training time: 73.04 seconds.\n-- Epoch 68\nNorm: 21.93, NNZs: 1048576, Bias: -1.177308, T: 209848, Avg. loss: 0.062490\nTotal training time: 74.17 seconds.\n-- Epoch 69\nNorm: 21.93, NNZs: 1048576, Bias: -1.171366, T: 212934, Avg. loss: 0.062047\nTotal training time: 75.31 seconds.\n-- Epoch 70\nNorm: 21.91, NNZs: 1048576, Bias: -1.170824, T: 216020, Avg. loss: 0.061619\nTotal training time: 76.47 seconds.\n-- Epoch 71\nNorm: 21.90, NNZs: 1048576, Bias: -1.171329, T: 219106, Avg. loss: 0.061197\nTotal training time: 77.62 seconds.\n-- Epoch 72\nNorm: 21.89, NNZs: 1048576, Bias: -1.170357, T: 222192, Avg. loss: 0.060791\nTotal training time: 78.78 seconds.\n-- Epoch 73\nNorm: 21.88, NNZs: 1048576, Bias: -1.169003, T: 225278, Avg. loss: 0.060394\nTotal training time: 79.94 seconds.\n-- Epoch 74\nNorm: 21.88, NNZs: 1048576, Bias: -1.166433, T: 228364, Avg. loss: 0.060003\nTotal training time: 81.09 seconds.\n-- Epoch 75\nNorm: 21.86, NNZs: 1048576, Bias: -1.168478, T: 231450, Avg. loss: 0.059626\nTotal training time: 82.26 seconds.\n-- Epoch 76\nNorm: 21.85, NNZs: 1048576, Bias: -1.168009, T: 234536, Avg. loss: 0.059260\nTotal training time: 83.39 seconds.\n-- Epoch 77\nNorm: 21.84, NNZs: 1048576, Bias: -1.166813, T: 237622, Avg. loss: 0.058897\nTotal training time: 84.53 seconds.\n-- Epoch 78\nNorm: 21.84, NNZs: 1048576, Bias: -1.165954, T: 240708, Avg. loss: 0.058549\nTotal training time: 85.68 seconds.\n-- Epoch 79\nNorm: 21.83, NNZs: 1048576, Bias: -1.164672, T: 243794, Avg. loss: 0.058208\nTotal training time: 86.83 seconds.\n-- Epoch 80\nNorm: 21.82, NNZs: 1048576, Bias: -1.164671, T: 246880, Avg. loss: 0.057877\nTotal training time: 87.96 seconds.\n-- Epoch 81\nNorm: 21.81, NNZs: 1048576, Bias: -1.163039, T: 249966, Avg. loss: 0.057553\nTotal training time: 89.11 seconds.\n-- Epoch 82\nNorm: 21.80, NNZs: 1048576, Bias: -1.162639, T: 253052, Avg. loss: 0.057236\nTotal training time: 90.26 seconds.\n-- Epoch 83\nNorm: 21.81, NNZs: 1048576, Bias: -1.160212, T: 256138, Avg. loss: 0.056926\nTotal training time: 91.42 seconds.\n-- Epoch 84\nNorm: 21.79, NNZs: 1048576, Bias: -1.161217, T: 259224, Avg. loss: 0.056628\nTotal training time: 92.59 seconds.\n-- Epoch 85\nNorm: 21.79, NNZs: 1048576, Bias: -1.159867, T: 262310, Avg. loss: 0.056336\nTotal training time: 93.75 seconds.\n-- Epoch 86\nNorm: 21.79, NNZs: 1048576, Bias: -1.157562, T: 265396, Avg. loss: 0.056050\nTotal training time: 94.89 seconds.\n-- Epoch 87\nNorm: 21.78, NNZs: 1048576, Bias: -1.158538, T: 268482, Avg. loss: 0.055770\nTotal training time: 96.04 seconds.\n-- Epoch 88\nNorm: 21.77, NNZs: 1048576, Bias: -1.158842, T: 271568, Avg. loss: 0.055492\nTotal training time: 97.19 seconds.\n-- Epoch 89\nNorm: 21.77, NNZs: 1048576, Bias: -1.155767, T: 274654, Avg. 
loss: 0.055223\nTotal training time: 98.36 seconds.\n-- Epoch 90\nNorm: 21.76, NNZs: 1048576, Bias: -1.155814, T: 277740, Avg. loss: 0.054962\nTotal training time: 99.54 seconds.\n-- Epoch 91\nNorm: 21.75, NNZs: 1048576, Bias: -1.154413, T: 280826, Avg. loss: 0.054706\nTotal training time: 100.70 seconds.\n-- Epoch 92\nNorm: 21.75, NNZs: 1048576, Bias: -1.154173, T: 283912, Avg. loss: 0.054454\nTotal training time: 101.87 seconds.\n-- Epoch 93\nNorm: 21.74, NNZs: 1048576, Bias: -1.154553, T: 286998, Avg. loss: 0.054209\nTotal training time: 103.04 seconds.\n-- Epoch 94\nNorm: 21.74, NNZs: 1048576, Bias: -1.152786, T: 290084, Avg. loss: 0.053967\nTotal training time: 104.19 seconds.\n-- Epoch 95\nNorm: 21.73, NNZs: 1048576, Bias: -1.152330, T: 293170, Avg. loss: 0.053734\nTotal training time: 105.52 seconds.\n-- Epoch 96\nNorm: 21.73, NNZs: 1048576, Bias: -1.149618, T: 296256, Avg. loss: 0.053503\nTotal training time: 106.66 seconds.\n-- Epoch 97\nNorm: 21.72, NNZs: 1048576, Bias: -1.151733, T: 299342, Avg. loss: 0.053276\nTotal training time: 107.86 seconds.\n-- Epoch 98\nNorm: 21.72, NNZs: 1048576, Bias: -1.150719, T: 302428, Avg. loss: 0.053054\nTotal training time: 108.99 seconds.\n-- Epoch 99\nNorm: 21.71, NNZs: 1048576, Bias: -1.150707, T: 305514, Avg. loss: 0.052835\nTotal training time: 110.18 seconds.\n-- Epoch 100\nNorm: 21.71, NNZs: 1048576, Bias: -1.149837, T: 308600, Avg. loss: 0.052620\nTotal training time: 111.32 seconds.\n-- Epoch 1\nNorm: 48.76, NNZs: 268410, Bias: -1.094205, T: 3086, Avg. loss: 0.362955\nTotal training time: 0.85 seconds.\n-- Epoch 2\nNorm: 33.05, NNZs: 660020, Bias: -1.167910, T: 6172, Avg. loss: 0.209077\nTotal training time: 1.69 seconds.\n-- Epoch 3\nNorm: 24.03, NNZs: 675065, Bias: -1.205391, T: 9258, Avg. loss: 0.146811\nTotal training time: 2.54 seconds.\n-- Epoch 4\nNorm: 19.77, NNZs: 679990, Bias: -1.204209, T: 12344, Avg. loss: 0.113609\nTotal training time: 3.38 seconds.\n-- Epoch 5\nNorm: 17.30, NNZs: 690045, Bias: -1.172828, T: 15430, Avg. loss: 0.092479\nTotal training time: 4.22 seconds.\n-- Epoch 6\nNorm: 15.84, NNZs: 761212, Bias: -1.156251, T: 18516, Avg. loss: 0.078776\nTotal training time: 5.07 seconds.\n-- Epoch 7\nNorm: 14.75, NNZs: 885135, Bias: -1.150141, T: 21602, Avg. loss: 0.068553\nTotal training time: 5.93 seconds.\n-- Epoch 8\nNorm: 13.91, NNZs: 906604, Bias: -1.151863, T: 24688, Avg. loss: 0.060850\nTotal training time: 6.78 seconds.\n-- Epoch 9\nNorm: 13.49, NNZs: 972284, Bias: -1.133839, T: 27774, Avg. loss: 0.054896\nTotal training time: 7.64 seconds.\n-- Epoch 10\nNorm: 13.16, NNZs: 975230, Bias: -1.132622, T: 30860, Avg. loss: 0.050122\nTotal training time: 8.51 seconds.\n-- Epoch 11\nNorm: 12.83, NNZs: 981227, Bias: -1.122867, T: 33946, Avg. loss: 0.046129\nTotal training time: 9.37 seconds.\n-- Epoch 12\nNorm: 12.59, NNZs: 989720, Bias: -1.118108, T: 37032, Avg. loss: 0.042729\nTotal training time: 10.24 seconds.\n-- Epoch 13\nNorm: 12.35, NNZs: 992333, Bias: -1.116211, T: 40118, Avg. loss: 0.039956\nTotal training time: 11.11 seconds.\n-- Epoch 14\nNorm: 12.09, NNZs: 1004857, Bias: -1.114919, T: 43204, Avg. loss: 0.037554\nTotal training time: 11.99 seconds.\n-- Epoch 15\nNorm: 11.98, NNZs: 1005807, Bias: -1.109148, T: 46290, Avg. loss: 0.035521\nTotal training time: 12.87 seconds.\n-- Epoch 16\nNorm: 11.84, NNZs: 1005916, Bias: -1.107373, T: 49376, Avg. loss: 0.033679\nTotal training time: 13.74 seconds.\n-- Epoch 17\nNorm: 11.84, NNZs: 1007510, Bias: -1.103772, T: 52462, Avg. 
loss: 0.032053\nTotal training time: 14.63 seconds.\n-- Epoch 18\nNorm: 11.74, NNZs: 1008107, Bias: -1.103243, T: 55548, Avg. loss: 0.030628\nTotal training time: 15.50 seconds.\n-- Epoch 19\nNorm: 11.66, NNZs: 1008139, Bias: -1.098769, T: 58634, Avg. loss: 0.029344\nTotal training time: 16.37 seconds.\n-- Epoch 20\nNorm: 11.56, NNZs: 1008275, Bias: -1.096136, T: 61720, Avg. loss: 0.028188\nTotal training time: 17.24 seconds.\n-- Epoch 21\nNorm: 11.54, NNZs: 1008559, Bias: -1.094597, T: 64806, Avg. loss: 0.027147\nTotal training time: 18.12 seconds.\n-- Epoch 22\nNorm: 11.46, NNZs: 1008668, Bias: -1.093778, T: 67892, Avg. loss: 0.026173\nTotal training time: 19.00 seconds.\n-- Epoch 23\nNorm: 11.45, NNZs: 1011582, Bias: -1.091468, T: 70978, Avg. loss: 0.025290\nTotal training time: 19.89 seconds.\n-- Epoch 24\nNorm: 11.43, NNZs: 1011626, Bias: -1.090222, T: 74064, Avg. loss: 0.024490\nTotal training time: 20.77 seconds.\n-- Epoch 25\nNorm: 11.36, NNZs: 1011682, Bias: -1.091281, T: 77150, Avg. loss: 0.023742\nTotal training time: 21.66 seconds.\n-- Epoch 26\nNorm: 11.38, NNZs: 1011705, Bias: -1.088688, T: 80236, Avg. loss: 0.023066\nTotal training time: 22.55 seconds.\n-- Epoch 27\nNorm: 11.35, NNZs: 1011719, Bias: -1.087146, T: 83322, Avg. loss: 0.022429\nTotal training time: 23.42 seconds.\n-- Epoch 28\nNorm: 11.30, NNZs: 1011729, Bias: -1.085678, T: 86408, Avg. loss: 0.021849\nTotal training time: 24.31 seconds.\n-- Epoch 29\nNorm: 11.27, NNZs: 1011787, Bias: -1.085264, T: 89494, Avg. loss: 0.021303\nTotal training time: 25.19 seconds.\n-- Epoch 30\nNorm: 11.22, NNZs: 1011806, Bias: -1.086529, T: 92580, Avg. loss: 0.020788\nTotal training time: 26.08 seconds.\n-- Epoch 31\nNorm: 11.23, NNZs: 1011835, Bias: -1.084689, T: 95666, Avg. loss: 0.020313\nTotal training time: 26.97 seconds.\n-- Epoch 32\nNorm: 11.20, NNZs: 1011888, Bias: -1.082472, T: 98752, Avg. loss: 0.019863\nTotal training time: 27.84 seconds.\n-- Epoch 33\nNorm: 11.18, NNZs: 1011918, Bias: -1.081300, T: 101838, Avg. loss: 0.019440\nTotal training time: 28.73 seconds.\n-- Epoch 34\nNorm: 11.20, NNZs: 1012018, Bias: -1.078283, T: 104924, Avg. loss: 0.019045\nTotal training time: 29.63 seconds.\n-- Epoch 35\nNorm: 11.14, NNZs: 1012065, Bias: -1.081106, T: 108010, Avg. loss: 0.018655\nTotal training time: 30.52 seconds.\n-- Epoch 36\nNorm: 11.16, NNZs: 1012068, Bias: -1.078816, T: 111096, Avg. loss: 0.018308\nTotal training time: 31.40 seconds.\n-- Epoch 37\nNorm: 11.17, NNZs: 1012082, Bias: -1.075873, T: 114182, Avg. loss: 0.017968\nTotal training time: 32.29 seconds.\n-- Epoch 38\nNorm: 11.13, NNZs: 1012090, Bias: -1.077820, T: 117268, Avg. loss: 0.017653\nTotal training time: 33.18 seconds.\n-- Epoch 39\nNorm: 11.15, NNZs: 1012094, Bias: -1.074639, T: 120354, Avg. loss: 0.017351\nTotal training time: 34.07 seconds.\n-- Epoch 40\nNorm: 11.13, NNZs: 1012094, Bias: -1.075721, T: 123440, Avg. loss: 0.017064\nTotal training time: 34.96 seconds.\n-- Epoch 41\nNorm: 11.09, NNZs: 1012144, Bias: -1.077111, T: 126526, Avg. loss: 0.016793\nTotal training time: 35.85 seconds.\n-- Epoch 42\nNorm: 11.08, NNZs: 1012145, Bias: -1.073466, T: 129612, Avg. loss: 0.016535\nTotal training time: 36.73 seconds.\n-- Epoch 43\nNorm: 11.07, NNZs: 1012153, Bias: -1.073860, T: 132698, Avg. loss: 0.016288\nTotal training time: 37.63 seconds.\n-- Epoch 44\nNorm: 11.05, NNZs: 1012182, Bias: -1.073622, T: 135784, Avg. loss: 0.016050\nTotal training time: 38.53 seconds.\n-- Epoch 45\nNorm: 11.06, NNZs: 1012200, Bias: -1.072512, T: 138870, Avg. 
loss: 0.015823\nTotal training time: 39.43 seconds.\n-- Epoch 46\nNorm: 11.07, NNZs: 1012202, Bias: -1.069087, T: 141956, Avg. loss: 0.015606\nTotal training time: 40.32 seconds.\n-- Epoch 47\nNorm: 11.02, NNZs: 1012202, Bias: -1.072562, T: 145042, Avg. loss: 0.015396\nTotal training time: 41.22 seconds.\n-- Epoch 48\nNorm: 11.05, NNZs: 1012202, Bias: -1.070236, T: 148128, Avg. loss: 0.015200\nTotal training time: 42.12 seconds.\n-- Epoch 49\nNorm: 11.02, NNZs: 1012202, Bias: -1.070622, T: 151214, Avg. loss: 0.015002\nTotal training time: 43.01 seconds.\n-- Epoch 50\nNorm: 11.02, NNZs: 1012256, Bias: -1.070181, T: 154300, Avg. loss: 0.014818\nTotal training time: 43.91 seconds.\n-- Epoch 51\nNorm: 11.02, NNZs: 1012256, Bias: -1.069047, T: 157386, Avg. loss: 0.014642\nTotal training time: 44.80 seconds.\n-- Epoch 52\nNorm: 11.02, NNZs: 1012257, Bias: -1.069103, T: 160472, Avg. loss: 0.014473\nTotal training time: 45.70 seconds.\n-- Epoch 53\nNorm: 11.00, NNZs: 1012257, Bias: -1.068669, T: 163558, Avg. loss: 0.014307\nTotal training time: 46.59 seconds.\n-- Epoch 54\nNorm: 11.02, NNZs: 1012257, Bias: -1.066491, T: 166644, Avg. loss: 0.014148\nTotal training time: 47.48 seconds.\n-- Epoch 55\nNorm: 10.99, NNZs: 1012257, Bias: -1.066976, T: 169730, Avg. loss: 0.013997\nTotal training time: 48.38 seconds.\n-- Epoch 56\nNorm: 10.99, NNZs: 1012257, Bias: -1.064913, T: 172816, Avg. loss: 0.013851\nTotal training time: 49.27 seconds.\n-- Epoch 57\nNorm: 10.98, NNZs: 1012259, Bias: -1.066820, T: 175902, Avg. loss: 0.013712\nTotal training time: 50.18 seconds.\n-- Epoch 58\nNorm: 10.99, NNZs: 1012259, Bias: -1.064707, T: 178988, Avg. loss: 0.013575\nTotal training time: 51.08 seconds.\n-- Epoch 59\nNorm: 11.02, NNZs: 1012276, Bias: -1.061418, T: 182074, Avg. loss: 0.013442\nTotal training time: 51.98 seconds.\n-- Epoch 60\nNorm: 11.01, NNZs: 1012286, Bias: -1.063053, T: 185160, Avg. loss: 0.013314\nTotal training time: 52.88 seconds.\n-- Epoch 61\nNorm: 11.00, NNZs: 1012286, Bias: -1.063182, T: 188246, Avg. loss: 0.013190\nTotal training time: 53.77 seconds.\n-- Epoch 62\nNorm: 10.98, NNZs: 1012286, Bias: -1.062385, T: 191332, Avg. loss: 0.013073\nTotal training time: 54.67 seconds.\n-- Epoch 63\nNorm: 11.00, NNZs: 1012290, Bias: -1.061551, T: 194418, Avg. loss: 0.012954\nTotal training time: 55.57 seconds.\n-- Epoch 64\nNorm: 10.97, NNZs: 1012296, Bias: -1.062428, T: 197504, Avg. loss: 0.012841\nTotal training time: 56.47 seconds.\n-- Epoch 65\nNorm: 10.98, NNZs: 1012296, Bias: -1.062294, T: 200590, Avg. loss: 0.012733\nTotal training time: 57.38 seconds.\n-- Epoch 66\nNorm: 10.99, NNZs: 1012311, Bias: -1.061549, T: 203676, Avg. loss: 0.012626\nTotal training time: 58.27 seconds.\n-- Epoch 67\nNorm: 10.97, NNZs: 1012320, Bias: -1.061085, T: 206762, Avg. loss: 0.012525\nTotal training time: 59.17 seconds.\n-- Epoch 68\nNorm: 10.97, NNZs: 1012320, Bias: -1.060694, T: 209848, Avg. loss: 0.012425\nTotal training time: 60.08 seconds.\n-- Epoch 69\nNorm: 10.98, NNZs: 1012324, Bias: -1.059666, T: 212934, Avg. loss: 0.012327\nTotal training time: 60.98 seconds.\n-- Epoch 70\nNorm: 10.97, NNZs: 1012324, Bias: -1.059192, T: 216020, Avg. loss: 0.012234\nTotal training time: 61.89 seconds.\n-- Epoch 71\nNorm: 10.96, NNZs: 1012324, Bias: -1.059816, T: 219106, Avg. loss: 0.012143\nTotal training time: 62.79 seconds.\n-- Epoch 72\nNorm: 10.96, NNZs: 1012325, Bias: -1.059340, T: 222192, Avg. 
loss: 0.012054\nTotal training time: 63.69 seconds.\n-- Epoch 73\nNorm: 10.96, NNZs: 1012330, Bias: -1.058963, T: 225278, Avg. loss: 0.011968\nTotal training time: 64.59 seconds.\n-- Epoch 74\nNorm: 10.97, NNZs: 1012351, Bias: -1.057695, T: 228364, Avg. loss: 0.011884\nTotal training time: 65.50 seconds.\n-- Epoch 75\nNorm: 10.96, NNZs: 1012351, Bias: -1.058323, T: 231450, Avg. loss: 0.011802\nTotal training time: 66.41 seconds.\n-- Epoch 76\nNorm: 10.95, NNZs: 1012351, Bias: -1.057632, T: 234536, Avg. loss: 0.011722\nTotal training time: 67.30 seconds.\n-- Epoch 77\nNorm: 10.94, NNZs: 1012351, Bias: -1.058062, T: 237622, Avg. loss: 0.011642\nTotal training time: 68.21 seconds.\n-- Epoch 78\nNorm: 10.95, NNZs: 1012351, Bias: -1.056597, T: 240708, Avg. loss: 0.011567\nTotal training time: 69.11 seconds.\n-- Epoch 79\nNorm: 10.93, NNZs: 1012351, Bias: -1.057815, T: 243794, Avg. loss: 0.011493\nTotal training time: 70.02 seconds.\n-- Epoch 80\nNorm: 10.95, NNZs: 1012352, Bias: -1.055877, T: 246880, Avg. loss: 0.011421\nTotal training time: 70.92 seconds.\n-- Epoch 81\nNorm: 10.93, NNZs: 1012358, Bias: -1.057903, T: 249966, Avg. loss: 0.011349\nTotal training time: 71.84 seconds.\n-- Epoch 82\nNorm: 10.96, NNZs: 1012358, Bias: -1.054306, T: 253052, Avg. loss: 0.011279\nTotal training time: 72.73 seconds.\n-- Epoch 83\nNorm: 10.93, NNZs: 1012358, Bias: -1.056532, T: 256138, Avg. loss: 0.011213\nTotal training time: 73.64 seconds.\n-- Epoch 84\nNorm: 10.94, NNZs: 1012358, Bias: -1.054781, T: 259224, Avg. loss: 0.011147\nTotal training time: 74.54 seconds.\n-- Epoch 85\nNorm: 10.93, NNZs: 1012358, Bias: -1.055801, T: 262310, Avg. loss: 0.011083\nTotal training time: 75.45 seconds.\n-- Epoch 86\nNorm: 10.93, NNZs: 1012358, Bias: -1.055910, T: 265396, Avg. loss: 0.011020\nTotal training time: 76.36 seconds.\n-- Epoch 87\nNorm: 10.93, NNZs: 1012358, Bias: -1.054639, T: 268482, Avg. loss: 0.010959\nTotal training time: 77.25 seconds.\n-- Epoch 88\nNorm: 10.93, NNZs: 1012358, Bias: -1.054624, T: 271568, Avg. loss: 0.010899\nTotal training time: 78.16 seconds.\n-- Epoch 89\nNorm: 10.93, NNZs: 1012431, Bias: -1.054827, T: 274654, Avg. loss: 0.010840\nTotal training time: 79.06 seconds.\n-- Epoch 90\nNorm: 10.92, NNZs: 1012431, Bias: -1.053167, T: 277740, Avg. loss: 0.010783\nTotal training time: 79.96 seconds.\n-- Epoch 91\nNorm: 10.92, NNZs: 1012432, Bias: -1.054808, T: 280826, Avg. loss: 0.010727\nTotal training time: 80.86 seconds.\n-- Epoch 92\nNorm: 10.92, NNZs: 1012432, Bias: -1.053843, T: 283912, Avg. loss: 0.010673\nTotal training time: 81.77 seconds.\n-- Epoch 93\nNorm: 10.90, NNZs: 1012432, Bias: -1.054278, T: 286998, Avg. loss: 0.010619\nTotal training time: 82.67 seconds.\n-- Epoch 94\nNorm: 10.92, NNZs: 1012432, Bias: -1.052728, T: 290084, Avg. loss: 0.010567\nTotal training time: 83.58 seconds.\n-- Epoch 95\nNorm: 10.92, NNZs: 1012432, Bias: -1.052724, T: 293170, Avg. loss: 0.010516\nTotal training time: 84.48 seconds.\n-- Epoch 96\nNorm: 10.92, NNZs: 1012432, Bias: -1.052559, T: 296256, Avg. loss: 0.010466\nTotal training time: 85.38 seconds.\n-- Epoch 97\nNorm: 10.91, NNZs: 1012432, Bias: -1.052324, T: 299342, Avg. loss: 0.010417\nTotal training time: 86.29 seconds.\n-- Epoch 98\nNorm: 10.91, NNZs: 1012432, Bias: -1.051564, T: 302428, Avg. loss: 0.010369\nTotal training time: 87.19 seconds.\n-- Epoch 99\nNorm: 10.92, NNZs: 1012434, Bias: -1.051132, T: 305514, Avg. 
loss: 0.010322\nTotal training time: 88.10 seconds.\n-- Epoch 100\nNorm: 10.92, NNZs: 1012438, Bias: -1.051177, T: 308600, Avg. loss: 0.010275\nTotal training time: 89.00 seconds.\n-- Epoch 1\nNorm: 43.84, NNZs: 919589, Bias: -1.297817, T: 3086, Avg. loss: 0.441649\nTotal training time: 0.88 seconds.\n-- Epoch 2\nNorm: 29.46, NNZs: 969207, Bias: -1.640113, T: 6172, Avg. loss: 0.296224\nTotal training time: 1.75 seconds.\n-- Epoch 3\nNorm: 24.28, NNZs: 1025457, Bias: -1.689444, T: 9258, Avg. loss: 0.220149\nTotal training time: 2.63 seconds.\n-- Epoch 4\nNorm: 20.41, NNZs: 1046898, Bias: -1.730356, T: 12344, Avg. loss: 0.175269\nTotal training time: 3.52 seconds.\n-- Epoch 5\nNorm: 18.64, NNZs: 1048046, Bias: -1.704498, T: 15430, Avg. loss: 0.145320\nTotal training time: 4.41 seconds.\n-- Epoch 6\nNorm: 16.94, NNZs: 1048096, Bias: -1.722691, T: 18516, Avg. loss: 0.124282\nTotal training time: 5.30 seconds.\n-- Epoch 7\nNorm: 16.01, NNZs: 1048454, Bias: -1.725472, T: 21602, Avg. loss: 0.109577\nTotal training time: 6.22 seconds.\n-- Epoch 8\nNorm: 15.21, NNZs: 1048496, Bias: -1.736268, T: 24688, Avg. loss: 0.098590\nTotal training time: 7.12 seconds.\n-- Epoch 9\nNorm: 14.61, NNZs: 1048546, Bias: -1.739190, T: 27774, Avg. loss: 0.089462\nTotal training time: 8.04 seconds.\n-- Epoch 10\nNorm: 14.00, NNZs: 1048554, Bias: -1.756302, T: 30860, Avg. loss: 0.082054\nTotal training time: 8.96 seconds.\n-- Epoch 11\nNorm: 13.69, NNZs: 1048556, Bias: -1.750957, T: 33946, Avg. loss: 0.075992\nTotal training time: 9.87 seconds.\n-- Epoch 12\nNorm: 13.31, NNZs: 1048559, Bias: -1.758947, T: 37032, Avg. loss: 0.070978\nTotal training time: 10.80 seconds.\n-- Epoch 13\nNorm: 13.24, NNZs: 1048562, Bias: -1.751330, T: 40118, Avg. loss: 0.066614\nTotal training time: 11.73 seconds.\n-- Epoch 14\nNorm: 12.96, NNZs: 1048564, Bias: -1.754893, T: 43204, Avg. loss: 0.062952\nTotal training time: 12.65 seconds.\n-- Epoch 15\nNorm: 12.62, NNZs: 1048568, Bias: -1.768599, T: 46290, Avg. loss: 0.059723\nTotal training time: 13.57 seconds.\n-- Epoch 16\nNorm: 12.54, NNZs: 1048568, Bias: -1.762690, T: 49376, Avg. loss: 0.056876\nTotal training time: 14.49 seconds.\n-- Epoch 17\nNorm: 12.49, NNZs: 1048571, Bias: -1.761090, T: 52462, Avg. loss: 0.054365\nTotal training time: 15.43 seconds.\n-- Epoch 18\nNorm: 12.42, NNZs: 1048571, Bias: -1.761539, T: 55548, Avg. loss: 0.052132\nTotal training time: 16.36 seconds.\n-- Epoch 19\nNorm: 12.21, NNZs: 1048571, Bias: -1.770177, T: 58634, Avg. loss: 0.050096\nTotal training time: 17.32 seconds.\n-- Epoch 20\nNorm: 12.27, NNZs: 1048573, Bias: -1.763100, T: 61720, Avg. loss: 0.048242\nTotal training time: 18.25 seconds.\n-- Epoch 21\nNorm: 12.17, NNZs: 1048574, Bias: -1.766244, T: 64806, Avg. loss: 0.046558\nTotal training time: 19.20 seconds.\n-- Epoch 22\nNorm: 12.05, NNZs: 1048574, Bias: -1.769099, T: 67892, Avg. loss: 0.045051\nTotal training time: 20.14 seconds.\n-- Epoch 23\nNorm: 12.00, NNZs: 1048574, Bias: -1.772379, T: 70978, Avg. loss: 0.043659\nTotal training time: 21.09 seconds.\n-- Epoch 24\nNorm: 11.98, NNZs: 1048574, Bias: -1.769875, T: 74064, Avg. loss: 0.042404\nTotal training time: 22.02 seconds.\n-- Epoch 25\nNorm: 11.86, NNZs: 1048575, Bias: -1.772895, T: 77150, Avg. loss: 0.041256\nTotal training time: 22.96 seconds.\n-- Epoch 26\nNorm: 11.82, NNZs: 1048575, Bias: -1.773614, T: 80236, Avg. loss: 0.040158\nTotal training time: 23.93 seconds.\n-- Epoch 27\nNorm: 11.75, NNZs: 1048575, Bias: -1.778131, T: 83322, Avg. 
loss: 0.039158\nTotal training time: 25.06 seconds.\n-- Epoch 28\nNorm: 11.79, NNZs: 1048575, Bias: -1.772220, T: 86408, Avg. loss: 0.038237\nTotal training time: 26.01 seconds.\n-- Epoch 29\nNorm: 11.69, NNZs: 1048575, Bias: -1.778704, T: 89494, Avg. loss: 0.037344\nTotal training time: 26.95 seconds.\n-- Epoch 30\nNorm: 11.71, NNZs: 1048575, Bias: -1.776435, T: 92580, Avg. loss: 0.036535\nTotal training time: 27.90 seconds.\n-- Epoch 31\nNorm: 11.65, NNZs: 1048575, Bias: -1.777427, T: 95666, Avg. loss: 0.035777\nTotal training time: 28.84 seconds.\n-- Epoch 32\nNorm: 11.66, NNZs: 1048576, Bias: -1.776208, T: 98752, Avg. loss: 0.035040\nTotal training time: 29.77 seconds.\n-- Epoch 33\nNorm: 11.61, NNZs: 1048576, Bias: -1.778061, T: 101838, Avg. loss: 0.034373\nTotal training time: 30.73 seconds.\n-- Epoch 34\nNorm: 11.61, NNZs: 1048576, Bias: -1.778642, T: 104924, Avg. loss: 0.033723\nTotal training time: 31.67 seconds.\n-- Epoch 35\nNorm: 11.51, NNZs: 1048576, Bias: -1.784494, T: 108010, Avg. loss: 0.033128\nTotal training time: 32.66 seconds.\n-- Epoch 36\nNorm: 11.50, NNZs: 1048576, Bias: -1.783897, T: 111096, Avg. loss: 0.032564\nTotal training time: 33.60 seconds.\n-- Epoch 37\nNorm: 11.50, NNZs: 1048576, Bias: -1.783137, T: 114182, Avg. loss: 0.032034\nTotal training time: 34.55 seconds.\n-- Epoch 38\nNorm: 11.46, NNZs: 1048576, Bias: -1.786509, T: 117268, Avg. loss: 0.031518\nTotal training time: 35.51 seconds.\n-- Epoch 39\nNorm: 11.48, NNZs: 1048576, Bias: -1.783387, T: 120354, Avg. loss: 0.031029\nTotal training time: 36.47 seconds.\n-- Epoch 40\nNorm: 11.49, NNZs: 1048576, Bias: -1.782077, T: 123440, Avg. loss: 0.030564\nTotal training time: 37.46 seconds.\n-- Epoch 41\nNorm: 11.46, NNZs: 1048576, Bias: -1.782465, T: 126526, Avg. loss: 0.030129\nTotal training time: 38.39 seconds.\n-- Epoch 42\nNorm: 11.39, NNZs: 1048576, Bias: -1.788345, T: 129612, Avg. loss: 0.029710\nTotal training time: 39.36 seconds.\n-- Epoch 43\nNorm: 11.41, NNZs: 1048576, Bias: -1.785744, T: 132698, Avg. loss: 0.029313\nTotal training time: 40.29 seconds.\n-- Epoch 44\nNorm: 11.44, NNZs: 1048576, Bias: -1.781543, T: 135784, Avg. loss: 0.028928\nTotal training time: 41.24 seconds.\n-- Epoch 45\nNorm: 11.34, NNZs: 1048576, Bias: -1.787866, T: 138870, Avg. loss: 0.028564\nTotal training time: 42.20 seconds.\n-- Epoch 46\nNorm: 11.35, NNZs: 1048576, Bias: -1.786659, T: 141956, Avg. loss: 0.028216\nTotal training time: 43.15 seconds.\n-- Epoch 47\nNorm: 11.31, NNZs: 1048576, Bias: -1.789866, T: 145042, Avg. loss: 0.027884\nTotal training time: 44.11 seconds.\n-- Epoch 48\nNorm: 11.32, NNZs: 1048576, Bias: -1.787866, T: 148128, Avg. loss: 0.027566\nTotal training time: 45.07 seconds.\n-- Epoch 49\nNorm: 11.31, NNZs: 1048576, Bias: -1.788340, T: 151214, Avg. loss: 0.027259\nTotal training time: 46.03 seconds.\n-- Epoch 50\nNorm: 11.34, NNZs: 1048576, Bias: -1.785057, T: 154300, Avg. loss: 0.026960\nTotal training time: 47.00 seconds.\n-- Epoch 51\nNorm: 11.26, NNZs: 1048576, Bias: -1.790213, T: 157386, Avg. loss: 0.026673\nTotal training time: 47.95 seconds.\n-- Epoch 52\nNorm: 11.26, NNZs: 1048576, Bias: -1.791161, T: 160472, Avg. loss: 0.026398\nTotal training time: 48.94 seconds.\n-- Epoch 53\nNorm: 11.24, NNZs: 1048576, Bias: -1.791119, T: 163558, Avg. loss: 0.026133\nTotal training time: 49.90 seconds.\n-- Epoch 54\nNorm: 11.23, NNZs: 1048576, Bias: -1.791721, T: 166644, Avg. loss: 0.025877\nTotal training time: 50.86 seconds.\n-- Epoch 55\nNorm: 11.24, NNZs: 1048576, Bias: -1.789849, T: 169730, Avg. 
loss: 0.025630\nTotal training time: 51.82 seconds.\n-- Epoch 56\nNorm: 11.22, NNZs: 1048576, Bias: -1.792224, T: 172816, Avg. loss: 0.025393\nTotal training time: 52.80 seconds.\n-- Epoch 57\nNorm: 11.23, NNZs: 1048576, Bias: -1.790593, T: 175902, Avg. loss: 0.025168\nTotal training time: 53.77 seconds.\n-- Epoch 58\nNorm: 11.24, NNZs: 1048576, Bias: -1.790185, T: 178988, Avg. loss: 0.024945\nTotal training time: 54.74 seconds.\n-- Epoch 59\nNorm: 11.21, NNZs: 1048576, Bias: -1.790586, T: 182074, Avg. loss: 0.024732\nTotal training time: 55.69 seconds.\n-- Epoch 60\nNorm: 11.20, NNZs: 1048576, Bias: -1.790925, T: 185160, Avg. loss: 0.024526\nTotal training time: 56.63 seconds.\n-- Epoch 61\nNorm: 11.21, NNZs: 1048576, Bias: -1.791219, T: 188246, Avg. loss: 0.024324\nTotal training time: 57.61 seconds.\n-- Epoch 62\nNorm: 11.20, NNZs: 1048576, Bias: -1.790609, T: 191332, Avg. loss: 0.024127\nTotal training time: 58.55 seconds.\n-- Epoch 63\nNorm: 11.19, NNZs: 1048576, Bias: -1.791681, T: 194418, Avg. loss: 0.023938\nTotal training time: 59.55 seconds.\n-- Epoch 64\nNorm: 11.17, NNZs: 1048576, Bias: -1.792692, T: 197504, Avg. loss: 0.023757\nTotal training time: 60.51 seconds.\n-- Epoch 65\nNorm: 11.18, NNZs: 1048576, Bias: -1.792899, T: 200590, Avg. loss: 0.023580\nTotal training time: 61.47 seconds.\n-- Epoch 66\nNorm: 11.14, NNZs: 1048576, Bias: -1.794442, T: 203676, Avg. loss: 0.023406\nTotal training time: 62.44 seconds.\n-- Epoch 67\nNorm: 11.13, NNZs: 1048576, Bias: -1.795308, T: 206762, Avg. loss: 0.023238\nTotal training time: 63.39 seconds.\n-- Epoch 68\nNorm: 11.14, NNZs: 1048576, Bias: -1.794319, T: 209848, Avg. loss: 0.023078\nTotal training time: 64.36 seconds.\n-- Epoch 69\nNorm: 11.13, NNZs: 1048576, Bias: -1.793882, T: 212934, Avg. loss: 0.022922\nTotal training time: 65.32 seconds.\n-- Epoch 70\nNorm: 11.15, NNZs: 1048576, Bias: -1.793390, T: 216020, Avg. loss: 0.022770\nTotal training time: 66.28 seconds.\n-- Epoch 71\nNorm: 11.13, NNZs: 1048576, Bias: -1.794557, T: 219106, Avg. loss: 0.022621\nTotal training time: 67.28 seconds.\n-- Epoch 72\nNorm: 11.12, NNZs: 1048576, Bias: -1.794278, T: 222192, Avg. loss: 0.022477\nTotal training time: 68.23 seconds.\n-- Epoch 73\nNorm: 11.10, NNZs: 1048576, Bias: -1.797008, T: 225278, Avg. loss: 0.022334\nTotal training time: 69.24 seconds.\n-- Epoch 74\nNorm: 11.14, NNZs: 1048576, Bias: -1.792744, T: 228364, Avg. loss: 0.022198\nTotal training time: 70.18 seconds.\n-- Epoch 75\nNorm: 11.12, NNZs: 1048576, Bias: -1.794690, T: 231450, Avg. loss: 0.022063\nTotal training time: 71.16 seconds.\n-- Epoch 76\nNorm: 11.12, NNZs: 1048576, Bias: -1.795159, T: 234536, Avg. loss: 0.021934\nTotal training time: 72.17 seconds.\n-- Epoch 77\nNorm: 11.13, NNZs: 1048576, Bias: -1.793626, T: 237622, Avg. loss: 0.021806\nTotal training time: 73.12 seconds.\n-- Epoch 78\nNorm: 11.09, NNZs: 1048576, Bias: -1.797215, T: 240708, Avg. loss: 0.021680\nTotal training time: 74.12 seconds.\n-- Epoch 79\nNorm: 11.09, NNZs: 1048576, Bias: -1.796140, T: 243794, Avg. loss: 0.021559\nTotal training time: 75.06 seconds.\n-- Epoch 80\nNorm: 11.11, NNZs: 1048576, Bias: -1.795799, T: 246880, Avg. loss: 0.021441\nTotal training time: 76.04 seconds.\n-- Epoch 81\nNorm: 11.11, NNZs: 1048576, Bias: -1.795470, T: 249966, Avg. loss: 0.021326\nTotal training time: 77.00 seconds.\n-- Epoch 82\nNorm: 11.08, NNZs: 1048576, Bias: -1.797073, T: 253052, Avg. 
loss: 0.021214\nTotal training time: 77.98 seconds.\n-- Epoch 83\nNorm: 11.08, NNZs: 1048576, Bias: -1.797214, T: 256138, Avg. loss: 0.021102\nTotal training time: 78.93 seconds.\n-- Epoch 84\nNorm: 11.08, NNZs: 1048576, Bias: -1.797635, T: 259224, Avg. loss: 0.020996\nTotal training time: 79.92 seconds.\n-- Epoch 85\nNorm: 11.06, NNZs: 1048576, Bias: -1.798996, T: 262310, Avg. loss: 0.020890\nTotal training time: 80.88 seconds.\n-- Epoch 86\nNorm: 11.07, NNZs: 1048576, Bias: -1.797014, T: 265396, Avg. loss: 0.020789\nTotal training time: 81.83 seconds.\n-- Epoch 87\nNorm: 11.08, NNZs: 1048576, Bias: -1.797395, T: 268482, Avg. loss: 0.020689\nTotal training time: 82.84 seconds.\n-- Epoch 88\nNorm: 11.09, NNZs: 1048576, Bias: -1.796144, T: 271568, Avg. loss: 0.020591\nTotal training time: 83.79 seconds.\n-- Epoch 89\nNorm: 11.06, NNZs: 1048576, Bias: -1.798225, T: 274654, Avg. loss: 0.020496\nTotal training time: 84.77 seconds.\n-- Epoch 90\nNorm: 11.06, NNZs: 1048576, Bias: -1.797923, T: 277740, Avg. loss: 0.020403\nTotal training time: 85.75 seconds.\n-- Epoch 91\nNorm: 11.06, NNZs: 1048576, Bias: -1.797942, T: 280826, Avg. loss: 0.020312\nTotal training time: 86.75 seconds.\n-- Epoch 92\nNorm: 11.06, NNZs: 1048576, Bias: -1.797976, T: 283912, Avg. loss: 0.020221\nTotal training time: 87.71 seconds.\n-- Epoch 93\nNorm: 11.06, NNZs: 1048576, Bias: -1.797813, T: 286998, Avg. loss: 0.020134\nTotal training time: 88.68 seconds.\n-- Epoch 94\nNorm: 11.05, NNZs: 1048576, Bias: -1.798804, T: 290084, Avg. loss: 0.020048\nTotal training time: 89.66 seconds.\n-- Epoch 95\nNorm: 11.04, NNZs: 1048576, Bias: -1.798956, T: 293170, Avg. loss: 0.019965\nTotal training time: 90.64 seconds.\n-- Epoch 96\nNorm: 11.04, NNZs: 1048576, Bias: -1.798607, T: 296256, Avg. loss: 0.019882\nTotal training time: 91.60 seconds.\n-- Epoch 97\nNorm: 11.03, NNZs: 1048576, Bias: -1.799461, T: 299342, Avg. loss: 0.019802\nTotal training time: 92.58 seconds.\n-- Epoch 98\nNorm: 11.02, NNZs: 1048576, Bias: -1.799976, T: 302428, Avg. loss: 0.019723\nTotal training time: 93.55 seconds.\n-- Epoch 99\nNorm: 11.02, NNZs: 1048576, Bias: -1.800288, T: 305514, Avg. loss: 0.019646\nTotal training time: 94.52 seconds.\n-- Epoch 100\nNorm: 11.04, NNZs: 1048576, Bias: -1.798479, T: 308600, Avg. loss: 0.019569\nTotal training time: 95.47 seconds.\n-- Epoch 1\nNorm: 42.77, NNZs: 969992, Bias: -1.237836, T: 3086, Avg. loss: 0.295260\nTotal training time: 0.87 seconds.\n-- Epoch 2\nNorm: 28.80, NNZs: 1019663, Bias: -1.193088, T: 6172, Avg. loss: 0.178148\nTotal training time: 1.74 seconds.\n-- Epoch 3\nNorm: 22.54, NNZs: 1035747, Bias: -1.152668, T: 9258, Avg. loss: 0.126980\nTotal training time: 2.61 seconds.\n-- Epoch 4\nNorm: 18.84, NNZs: 1037201, Bias: -1.116101, T: 12344, Avg. loss: 0.101564\nTotal training time: 3.47 seconds.\n-- Epoch 5\nNorm: 16.30, NNZs: 1038876, Bias: -1.107627, T: 15430, Avg. loss: 0.083704\nTotal training time: 4.34 seconds.\n-- Epoch 6\nNorm: 14.74, NNZs: 1040330, Bias: -1.065213, T: 18516, Avg. loss: 0.071320\nTotal training time: 5.21 seconds.\n-- Epoch 7\nNorm: 13.76, NNZs: 1045559, Bias: -1.046965, T: 21602, Avg. loss: 0.062910\nTotal training time: 6.11 seconds.\n-- Epoch 8\nNorm: 12.94, NNZs: 1046515, Bias: -1.041284, T: 24688, Avg. loss: 0.056206\nTotal training time: 7.01 seconds.\n-- Epoch 9\nNorm: 12.53, NNZs: 1048216, Bias: -1.031964, T: 27774, Avg. loss: 0.050895\nTotal training time: 7.92 seconds.\n-- Epoch 10\nNorm: 12.09, NNZs: 1048277, Bias: -1.017811, T: 30860, Avg. 
loss: 0.046488\nTotal training time: 8.81 seconds.\n-- Epoch 11\nNorm: 11.67, NNZs: 1048313, Bias: -1.026327, T: 33946, Avg. loss: 0.042953\nTotal training time: 9.72 seconds.\n-- Epoch 12\nNorm: 11.54, NNZs: 1048380, Bias: -1.012582, T: 37032, Avg. loss: 0.039995\nTotal training time: 10.64 seconds.\n-- Epoch 13\nNorm: 11.33, NNZs: 1048481, Bias: -1.009811, T: 40118, Avg. loss: 0.037462\nTotal training time: 11.57 seconds.\n-- Epoch 14\nNorm: 11.23, NNZs: 1048526, Bias: -1.005243, T: 43204, Avg. loss: 0.035319\nTotal training time: 12.51 seconds.\n-- Epoch 15\nNorm: 11.06, NNZs: 1048533, Bias: -1.000152, T: 46290, Avg. loss: 0.033379\nTotal training time: 13.44 seconds.\n-- Epoch 16\nNorm: 10.85, NNZs: 1048536, Bias: -1.009658, T: 49376, Avg. loss: 0.031639\nTotal training time: 14.35 seconds.\n-- Epoch 17\nNorm: 10.83, NNZs: 1048539, Bias: -0.997220, T: 52462, Avg. loss: 0.030165\nTotal training time: 15.26 seconds.\n-- Epoch 18\nNorm: 10.68, NNZs: 1048544, Bias: -1.000205, T: 55548, Avg. loss: 0.028870\nTotal training time: 16.19 seconds.\n-- Epoch 19\nNorm: 10.67, NNZs: 1048558, Bias: -0.991088, T: 58634, Avg. loss: 0.027706\nTotal training time: 17.10 seconds.\n-- Epoch 20\nNorm: 10.54, NNZs: 1048558, Bias: -0.990407, T: 61720, Avg. loss: 0.026670\nTotal training time: 18.03 seconds.\n-- Epoch 21\nNorm: 10.50, NNZs: 1048561, Bias: -0.984775, T: 64806, Avg. loss: 0.025717\nTotal training time: 18.95 seconds.\n-- Epoch 22\nNorm: 10.47, NNZs: 1048562, Bias: -0.982454, T: 67892, Avg. loss: 0.024852\nTotal training time: 19.86 seconds.\n-- Epoch 23\nNorm: 10.46, NNZs: 1048563, Bias: -0.977335, T: 70978, Avg. loss: 0.024057\nTotal training time: 20.79 seconds.\n-- Epoch 24\nNorm: 10.42, NNZs: 1048565, Bias: -0.973453, T: 74064, Avg. loss: 0.023336\nTotal training time: 21.71 seconds.\n-- Epoch 25\nNorm: 10.35, NNZs: 1048567, Bias: -0.972497, T: 77150, Avg. loss: 0.022666\nTotal training time: 22.64 seconds.\n-- Epoch 26\nNorm: 10.29, NNZs: 1048567, Bias: -0.972700, T: 80236, Avg. loss: 0.022043\nTotal training time: 23.58 seconds.\n-- Epoch 27\nNorm: 10.28, NNZs: 1048567, Bias: -0.969012, T: 83322, Avg. loss: 0.021476\nTotal training time: 24.50 seconds.\n-- Epoch 28\nNorm: 10.23, NNZs: 1048568, Bias: -0.969254, T: 86408, Avg. loss: 0.020950\nTotal training time: 25.45 seconds.\n-- Epoch 29\nNorm: 10.20, NNZs: 1048570, Bias: -0.968411, T: 89494, Avg. loss: 0.020451\nTotal training time: 26.38 seconds.\n-- Epoch 30\nNorm: 10.21, NNZs: 1048570, Bias: -0.964060, T: 92580, Avg. loss: 0.019988\nTotal training time: 27.31 seconds.\n-- Epoch 31\nNorm: 10.21, NNZs: 1048571, Bias: -0.963106, T: 95666, Avg. loss: 0.019540\nTotal training time: 28.26 seconds.\n-- Epoch 32\nNorm: 10.17, NNZs: 1048571, Bias: -0.959821, T: 98752, Avg. loss: 0.019138\nTotal training time: 29.19 seconds.\n-- Epoch 33\nNorm: 10.12, NNZs: 1048571, Bias: -0.961241, T: 101838, Avg. loss: 0.018750\nTotal training time: 30.13 seconds.\n-- Epoch 34\nNorm: 10.13, NNZs: 1048571, Bias: -0.953908, T: 104924, Avg. loss: 0.018388\nTotal training time: 31.06 seconds.\n-- Epoch 35\nNorm: 10.09, NNZs: 1048571, Bias: -0.956655, T: 108010, Avg. loss: 0.018038\nTotal training time: 32.00 seconds.\n-- Epoch 36\nNorm: 10.12, NNZs: 1048571, Bias: -0.951206, T: 111096, Avg. loss: 0.017702\nTotal training time: 32.93 seconds.\n-- Epoch 37\nNorm: 10.08, NNZs: 1048571, Bias: -0.952589, T: 114182, Avg. loss: 0.017395\nTotal training time: 33.87 seconds.\n-- Epoch 38\nNorm: 10.04, NNZs: 1048571, Bias: -0.951381, T: 117268, Avg. 
loss: 0.017108\nTotal training time: 34.79 seconds.\n-- Epoch 39\nNorm: 10.01, NNZs: 1048571, Bias: -0.953975, T: 120354, Avg. loss: 0.016828\nTotal training time: 35.75 seconds.\n-- Epoch 40\nNorm: 9.99, NNZs: 1048571, Bias: -0.950088, T: 123440, Avg. loss: 0.016572\nTotal training time: 36.67 seconds.\n-- Epoch 41\nNorm: 9.98, NNZs: 1048571, Bias: -0.954707, T: 126526, Avg. loss: 0.016308\nTotal training time: 37.64 seconds.\n-- Epoch 42\nNorm: 9.99, NNZs: 1048571, Bias: -0.947158, T: 129612, Avg. loss: 0.016079\nTotal training time: 38.57 seconds.\n-- Epoch 43\nNorm: 9.99, NNZs: 1048571, Bias: -0.946200, T: 132698, Avg. loss: 0.015852\nTotal training time: 39.50 seconds.\n-- Epoch 44\nNorm: 9.96, NNZs: 1048571, Bias: -0.947459, T: 135784, Avg. loss: 0.015636\nTotal training time: 40.47 seconds.\n-- Epoch 45\nNorm: 9.97, NNZs: 1048571, Bias: -0.943583, T: 138870, Avg. loss: 0.015429\nTotal training time: 41.39 seconds.\n-- Epoch 46\nNorm: 9.95, NNZs: 1048571, Bias: -0.943424, T: 141956, Avg. loss: 0.015231\nTotal training time: 42.33 seconds.\n-- Epoch 47\nNorm: 9.94, NNZs: 1048571, Bias: -0.942791, T: 145042, Avg. loss: 0.015039\nTotal training time: 43.29 seconds.\n-- Epoch 48\nNorm: 9.94, NNZs: 1048571, Bias: -0.940312, T: 148128, Avg. loss: 0.014859\nTotal training time: 44.21 seconds.\n-- Epoch 49\nNorm: 9.93, NNZs: 1048571, Bias: -0.940294, T: 151214, Avg. loss: 0.014687\nTotal training time: 45.16 seconds.\n-- Epoch 50\nNorm: 9.90, NNZs: 1048571, Bias: -0.940388, T: 154300, Avg. loss: 0.014519\nTotal training time: 46.11 seconds.\n-- Epoch 51\nNorm: 9.89, NNZs: 1048571, Bias: -0.939145, T: 157386, Avg. loss: 0.014359\nTotal training time: 47.05 seconds.\n-- Epoch 52\nNorm: 9.87, NNZs: 1048571, Bias: -0.940541, T: 160472, Avg. loss: 0.014200\nTotal training time: 48.02 seconds.\n-- Epoch 53\nNorm: 9.87, NNZs: 1048571, Bias: -0.937555, T: 163558, Avg. loss: 0.014050\nTotal training time: 49.15 seconds.\n-- Epoch 54\nNorm: 9.86, NNZs: 1048571, Bias: -0.936869, T: 166644, Avg. loss: 0.013904\nTotal training time: 50.09 seconds.\n-- Epoch 55\nNorm: 9.85, NNZs: 1048571, Bias: -0.936914, T: 169730, Avg. loss: 0.013764\nTotal training time: 51.05 seconds.\n-- Epoch 56\nNorm: 9.87, NNZs: 1048572, Bias: -0.933295, T: 172816, Avg. loss: 0.013629\nTotal training time: 51.97 seconds.\n-- Epoch 57\nNorm: 9.83, NNZs: 1048572, Bias: -0.934238, T: 175902, Avg. loss: 0.013502\nTotal training time: 52.91 seconds.\n-- Epoch 58\nNorm: 9.82, NNZs: 1048572, Bias: -0.933932, T: 178988, Avg. loss: 0.013373\nTotal training time: 53.87 seconds.\n-- Epoch 59\nNorm: 9.83, NNZs: 1048572, Bias: -0.933301, T: 182074, Avg. loss: 0.013252\nTotal training time: 54.81 seconds.\n-- Epoch 60\nNorm: 9.82, NNZs: 1048572, Bias: -0.931729, T: 185160, Avg. loss: 0.013137\nTotal training time: 55.74 seconds.\n-- Epoch 61\nNorm: 9.81, NNZs: 1048572, Bias: -0.932224, T: 188246, Avg. loss: 0.013024\nTotal training time: 56.70 seconds.\n-- Epoch 62\nNorm: 9.81, NNZs: 1048572, Bias: -0.930083, T: 191332, Avg. loss: 0.012916\nTotal training time: 57.64 seconds.\n-- Epoch 63\nNorm: 9.80, NNZs: 1048573, Bias: -0.932942, T: 194418, Avg. loss: 0.012802\nTotal training time: 58.63 seconds.\n-- Epoch 64\nNorm: 9.82, NNZs: 1048573, Bias: -0.927481, T: 197504, Avg. loss: 0.012702\nTotal training time: 59.54 seconds.\n-- Epoch 65\nNorm: 9.82, NNZs: 1048574, Bias: -0.926119, T: 200590, Avg. loss: 0.012601\nTotal training time: 60.47 seconds.\n-- Epoch 66\nNorm: 9.80, NNZs: 1048574, Bias: -0.928053, T: 203676, Avg. 
loss: 0.012504\nTotal training time: 61.44 seconds.\n-- Epoch 67\nNorm: 9.79, NNZs: 1048574, Bias: -0.928215, T: 206762, Avg. loss: 0.012407\nTotal training time: 62.40 seconds.\n-- Epoch 68\nNorm: 9.80, NNZs: 1048574, Bias: -0.923952, T: 209848, Avg. loss: 0.012317\nTotal training time: 63.32 seconds.\n-- Epoch 69\nNorm: 9.79, NNZs: 1048574, Bias: -0.925510, T: 212934, Avg. loss: 0.012229\nTotal training time: 64.28 seconds.\n-- Epoch 70\nNorm: 9.79, NNZs: 1048574, Bias: -0.922602, T: 216020, Avg. loss: 0.012143\nTotal training time: 65.21 seconds.\n-- Epoch 71\nNorm: 9.77, NNZs: 1048574, Bias: -0.924592, T: 219106, Avg. loss: 0.012060\nTotal training time: 66.17 seconds.\n-- Epoch 72\nNorm: 9.77, NNZs: 1048574, Bias: -0.923064, T: 222192, Avg. loss: 0.011977\nTotal training time: 67.12 seconds.\n-- Epoch 73\nNorm: 9.76, NNZs: 1048574, Bias: -0.922974, T: 225278, Avg. loss: 0.011900\nTotal training time: 68.06 seconds.\n-- Epoch 74\nNorm: 9.76, NNZs: 1048574, Bias: -0.920860, T: 228364, Avg. loss: 0.011824\nTotal training time: 69.00 seconds.\n-- Epoch 75\nNorm: 9.76, NNZs: 1048574, Bias: -0.921727, T: 231450, Avg. loss: 0.011749\nTotal training time: 69.97 seconds.\n-- Epoch 76\nNorm: 9.75, NNZs: 1048574, Bias: -0.921144, T: 234536, Avg. loss: 0.011677\nTotal training time: 70.92 seconds.\n-- Epoch 77\nNorm: 9.74, NNZs: 1048574, Bias: -0.920611, T: 237622, Avg. loss: 0.011605\nTotal training time: 71.88 seconds.\n-- Epoch 78\nNorm: 9.74, NNZs: 1048574, Bias: -0.919598, T: 240708, Avg. loss: 0.011537\nTotal training time: 72.82 seconds.\n-- Epoch 79\nNorm: 9.74, NNZs: 1048574, Bias: -0.918700, T: 243794, Avg. loss: 0.011470\nTotal training time: 73.76 seconds.\n-- Epoch 80\nNorm: 9.74, NNZs: 1048574, Bias: -0.918564, T: 246880, Avg. loss: 0.011405\nTotal training time: 74.71 seconds.\n-- Epoch 81\nNorm: 9.73, NNZs: 1048574, Bias: -0.918717, T: 249966, Avg. loss: 0.011340\nTotal training time: 75.68 seconds.\n-- Epoch 82\nNorm: 9.73, NNZs: 1048574, Bias: -0.917215, T: 253052, Avg. loss: 0.011278\nTotal training time: 76.63 seconds.\n-- Epoch 83\nNorm: 9.72, NNZs: 1048574, Bias: -0.918039, T: 256138, Avg. loss: 0.011214\nTotal training time: 77.60 seconds.\n-- Epoch 84\nNorm: 9.73, NNZs: 1048574, Bias: -0.916803, T: 259224, Avg. loss: 0.011155\nTotal training time: 78.55 seconds.\n-- Epoch 85\nNorm: 9.73, NNZs: 1048574, Bias: -0.915894, T: 262310, Avg. loss: 0.011097\nTotal training time: 79.50 seconds.\n-- Epoch 86\nNorm: 9.72, NNZs: 1048574, Bias: -0.916566, T: 265396, Avg. loss: 0.011039\nTotal training time: 80.47 seconds.\n-- Epoch 87\nNorm: 9.72, NNZs: 1048574, Bias: -0.915407, T: 268482, Avg. loss: 0.010984\nTotal training time: 81.41 seconds.\n-- Epoch 88\nNorm: 9.72, NNZs: 1048574, Bias: -0.914042, T: 271568, Avg. loss: 0.010929\nTotal training time: 82.38 seconds.\n-- Epoch 89\nNorm: 9.72, NNZs: 1048574, Bias: -0.912964, T: 274654, Avg. loss: 0.010877\nTotal training time: 83.32 seconds.\n-- Epoch 90\nNorm: 9.72, NNZs: 1048574, Bias: -0.913511, T: 277740, Avg. loss: 0.010824\nTotal training time: 84.30 seconds.\n-- Epoch 91\nNorm: 9.72, NNZs: 1048574, Bias: -0.912933, T: 280826, Avg. loss: 0.010773\nTotal training time: 85.24 seconds.\n-- Epoch 92\nNorm: 9.71, NNZs: 1048574, Bias: -0.913496, T: 283912, Avg. loss: 0.010722\nTotal training time: 86.23 seconds.\n-- Epoch 93\nNorm: 9.71, NNZs: 1048574, Bias: -0.912560, T: 286998, Avg. loss: 0.010674\nTotal training time: 87.18 seconds.\n-- Epoch 94\nNorm: 9.71, NNZs: 1048574, Bias: -0.910742, T: 290084, Avg. 
loss: 0.010626\nTotal training time: 88.14 seconds.\n-- Epoch 95\nNorm: 9.70, NNZs: 1048574, Bias: -0.913001, T: 293170, Avg. loss: 0.010578\nTotal training time: 89.13 seconds.\n-- Epoch 96\nNorm: 9.71, NNZs: 1048574, Bias: -0.910852, T: 296256, Avg. loss: 0.010533\nTotal training time: 90.07 seconds.\n-- Epoch 97\nNorm: 9.70, NNZs: 1048574, Bias: -0.910785, T: 299342, Avg. loss: 0.010487\nTotal training time: 91.03 seconds.\n-- Epoch 98\nNorm: 9.71, NNZs: 1048574, Bias: -0.908818, T: 302428, Avg. loss: 0.010443\nTotal training time: 91.96 seconds.\n-- Epoch 99\nNorm: 9.69, NNZs: 1048574, Bias: -0.911566, T: 305514, Avg. loss: 0.010395\nTotal training time: 92.96 seconds.\n-- Epoch 100\nNorm: 9.70, NNZs: 1048574, Bias: -0.908709, T: 308600, Avg. loss: 0.010353\nTotal training time: 93.89 seconds.\n-- Epoch 1\nNorm: 52.22, NNZs: 777557, Bias: -1.191743, T: 3086, Avg. loss: 0.345165\nTotal training time: 0.86 seconds.\n-- Epoch 2\nNorm: 34.55, NNZs: 788178, Bias: -1.273348, T: 6172, Avg. loss: 0.195040\nTotal training time: 1.70 seconds.\n-- Epoch 3\nNorm: 26.52, NNZs: 792338, Bias: -1.345562, T: 9258, Avg. loss: 0.137813\nTotal training time: 2.54 seconds.\n-- Epoch 4\nNorm: 21.46, NNZs: 794286, Bias: -1.329919, T: 12344, Avg. loss: 0.106099\nTotal training time: 3.37 seconds.\n-- Epoch 5\nNorm: 18.45, NNZs: 800140, Bias: -1.327466, T: 15430, Avg. loss: 0.086409\nTotal training time: 4.21 seconds.\n-- Epoch 6\nNorm: 16.66, NNZs: 809053, Bias: -1.320502, T: 18516, Avg. loss: 0.073209\nTotal training time: 5.06 seconds.\n-- Epoch 7\nNorm: 15.43, NNZs: 812181, Bias: -1.308318, T: 21602, Avg. loss: 0.063690\nTotal training time: 5.90 seconds.\n-- Epoch 8\nNorm: 14.48, NNZs: 815276, Bias: -1.301841, T: 24688, Avg. loss: 0.056529\nTotal training time: 6.74 seconds.\n-- Epoch 9\nNorm: 13.89, NNZs: 820439, Bias: -1.297429, T: 27774, Avg. loss: 0.050911\nTotal training time: 7.59 seconds.\n-- Epoch 10\nNorm: 13.52, NNZs: 823108, Bias: -1.286105, T: 30860, Avg. loss: 0.046457\nTotal training time: 8.43 seconds.\n-- Epoch 11\nNorm: 13.13, NNZs: 827149, Bias: -1.284209, T: 33946, Avg. loss: 0.042786\nTotal training time: 9.28 seconds.\n-- Epoch 12\nNorm: 12.83, NNZs: 871388, Bias: -1.281253, T: 37032, Avg. loss: 0.039715\nTotal training time: 10.14 seconds.\n-- Epoch 13\nNorm: 12.65, NNZs: 881909, Bias: -1.278617, T: 40118, Avg. loss: 0.037098\nTotal training time: 10.99 seconds.\n-- Epoch 14\nNorm: 12.47, NNZs: 883040, Bias: -1.276694, T: 43204, Avg. loss: 0.034863\nTotal training time: 11.84 seconds.\n-- Epoch 15\nNorm: 12.32, NNZs: 960053, Bias: -1.274308, T: 46290, Avg. loss: 0.032931\nTotal training time: 12.71 seconds.\n-- Epoch 16\nNorm: 12.22, NNZs: 960431, Bias: -1.276081, T: 49376, Avg. loss: 0.031229\nTotal training time: 13.56 seconds.\n-- Epoch 17\nNorm: 12.13, NNZs: 965420, Bias: -1.273172, T: 52462, Avg. loss: 0.029734\nTotal training time: 14.41 seconds.\n-- Epoch 18\nNorm: 12.08, NNZs: 968131, Bias: -1.268998, T: 55548, Avg. loss: 0.028398\nTotal training time: 15.27 seconds.\n-- Epoch 19\nNorm: 11.98, NNZs: 968999, Bias: -1.271257, T: 58634, Avg. loss: 0.027195\nTotal training time: 16.13 seconds.\n-- Epoch 20\nNorm: 11.93, NNZs: 989680, Bias: -1.268330, T: 61720, Avg. loss: 0.026111\nTotal training time: 17.00 seconds.\n-- Epoch 21\nNorm: 11.87, NNZs: 990015, Bias: -1.266936, T: 64806, Avg. loss: 0.025137\nTotal training time: 17.86 seconds.\n-- Epoch 22\nNorm: 11.84, NNZs: 990184, Bias: -1.263968, T: 67892, Avg. 
loss: 0.024247\nTotal training time: 18.72 seconds.\n-- Epoch 23\nNorm: 11.80, NNZs: 990734, Bias: -1.263005, T: 70978, Avg. loss: 0.023425\nTotal training time: 19.59 seconds.\n-- Epoch 24\nNorm: 11.74, NNZs: 991075, Bias: -1.264871, T: 74064, Avg. loss: 0.022673\nTotal training time: 20.46 seconds.\n-- Epoch 25\nNorm: 11.72, NNZs: 991303, Bias: -1.262589, T: 77150, Avg. loss: 0.021973\nTotal training time: 21.32 seconds.\n-- Epoch 26\nNorm: 11.69, NNZs: 991396, Bias: -1.259605, T: 80236, Avg. loss: 0.021340\nTotal training time: 22.19 seconds.\n-- Epoch 27\nNorm: 11.65, NNZs: 996256, Bias: -1.259557, T: 83322, Avg. loss: 0.020752\nTotal training time: 23.06 seconds.\n-- Epoch 28\nNorm: 11.62, NNZs: 996385, Bias: -1.260394, T: 86408, Avg. loss: 0.020204\nTotal training time: 23.93 seconds.\n-- Epoch 29\nNorm: 11.61, NNZs: 996659, Bias: -1.259086, T: 89494, Avg. loss: 0.019688\nTotal training time: 24.79 seconds.\n-- Epoch 30\nNorm: 11.59, NNZs: 996769, Bias: -1.257306, T: 92580, Avg. loss: 0.019211\nTotal training time: 25.66 seconds.\n-- Epoch 31\nNorm: 11.57, NNZs: 996879, Bias: -1.255814, T: 95666, Avg. loss: 0.018766\nTotal training time: 26.53 seconds.\n-- Epoch 32\nNorm: 11.55, NNZs: 997088, Bias: -1.255155, T: 98752, Avg. loss: 0.018346\nTotal training time: 27.39 seconds.\n-- Epoch 33\nNorm: 11.53, NNZs: 997214, Bias: -1.254989, T: 101838, Avg. loss: 0.017952\nTotal training time: 28.26 seconds.\n-- Epoch 34\nNorm: 11.52, NNZs: 997278, Bias: -1.252776, T: 104924, Avg. loss: 0.017583\nTotal training time: 29.13 seconds.\n-- Epoch 35\nNorm: 11.49, NNZs: 997473, Bias: -1.254527, T: 108010, Avg. loss: 0.017227\nTotal training time: 30.01 seconds.\n-- Epoch 36\nNorm: 11.48, NNZs: 997615, Bias: -1.252701, T: 111096, Avg. loss: 0.016895\nTotal training time: 30.88 seconds.\n-- Epoch 37\nNorm: 11.47, NNZs: 997727, Bias: -1.252116, T: 114182, Avg. loss: 0.016580\nTotal training time: 31.76 seconds.\n-- Epoch 38\nNorm: 11.44, NNZs: 997801, Bias: -1.251419, T: 117268, Avg. loss: 0.016283\nTotal training time: 32.63 seconds.\n-- Epoch 39\nNorm: 11.44, NNZs: 997920, Bias: -1.249631, T: 120354, Avg. loss: 0.016001\nTotal training time: 33.50 seconds.\n-- Epoch 40\nNorm: 11.43, NNZs: 1003043, Bias: -1.250274, T: 123440, Avg. loss: 0.015731\nTotal training time: 34.38 seconds.\n-- Epoch 41\nNorm: 11.43, NNZs: 1003067, Bias: -1.247221, T: 126526, Avg. loss: 0.015475\nTotal training time: 35.25 seconds.\n-- Epoch 42\nNorm: 11.41, NNZs: 1003126, Bias: -1.249140, T: 129612, Avg. loss: 0.015232\nTotal training time: 36.12 seconds.\n-- Epoch 43\nNorm: 11.40, NNZs: 1003200, Bias: -1.247755, T: 132698, Avg. loss: 0.014996\nTotal training time: 37.00 seconds.\n-- Epoch 44\nNorm: 11.39, NNZs: 1003226, Bias: -1.246927, T: 135784, Avg. loss: 0.014774\nTotal training time: 37.87 seconds.\n-- Epoch 45\nNorm: 11.38, NNZs: 1003283, Bias: -1.247513, T: 138870, Avg. loss: 0.014562\nTotal training time: 38.74 seconds.\n-- Epoch 46\nNorm: 11.37, NNZs: 1003329, Bias: -1.246524, T: 141956, Avg. loss: 0.014359\nTotal training time: 39.61 seconds.\n-- Epoch 47\nNorm: 11.37, NNZs: 1003351, Bias: -1.246003, T: 145042, Avg. loss: 0.014165\nTotal training time: 40.48 seconds.\n-- Epoch 48\nNorm: 11.35, NNZs: 1004595, Bias: -1.245946, T: 148128, Avg. loss: 0.013978\nTotal training time: 41.36 seconds.\n-- Epoch 49\nNorm: 11.34, NNZs: 1004641, Bias: -1.245978, T: 151214, Avg. loss: 0.013800\nTotal training time: 42.24 seconds.\n-- Epoch 50\nNorm: 11.34, NNZs: 1004690, Bias: -1.244780, T: 154300, Avg. 
loss: 0.013628 (epoch 50; total training time 43.12 seconds).
[Condensed verbose training log. The stream here consists of per-epoch records of the form "Norm: ..., NNZs: ..., Bias: ..., T: ..., Avg. loss: ..." followed by a running "Total training time: ... seconds.", matching the verbose output of a scikit-learn SGD linear classifier and repeated as a fresh 100-epoch block for each class. T advances by 3086 samples per epoch (so epoch 100 ends at T = 308600) and NNZs never exceeds 1048576 = 2**20 weights. The seven blocks in this span, first record -> last record:
- Block 1 (epochs 51-100 of the block begun above): Norm 11.34 -> 11.18, NNZs 1004740 -> 1006404, Bias -1.243806 -> -1.229049, Avg. loss 0.013462 -> 0.009369; total training time 87.30 s.
- Block 2 (epochs 1-100): Norm 43.53 -> 10.30, NNZs 824709 -> 1048575, Bias -1.131929 -> -1.183851, Avg. loss 0.362345 -> 0.010672; 92.91 s.
- Block 3 (epochs 1-100): Norm 40.78 -> 8.57, NNZs 1048108 -> 1048576, Bias -2.206101 -> -2.387840, Avg. loss 0.330234 -> 0.006667; 101.24 s.
- Block 4 (epochs 1-100): Norm 39.75 -> 10.56, NNZs 520039 -> 705655, Bias -1.172487 -> -1.466201, Avg. loss 0.328657 -> 0.009364; 85.41 s.
- Block 5 (epochs 1-100): Norm 126.99 -> 32.23, NNZs 1047643 -> 1048576, Bias 0.467605 -> 0.345527, Avg. loss 3.077330 -> 0.104994; 125.23 s.
- Block 6 (epochs 1-100): Norm 38.64 -> 9.00, NNZs 171107 -> 405294, Bias -1.125340 -> -1.121242, Avg. loss 0.255091 -> 0.008419; 94.43 s.
- Block 7 (begins here; epochs 1-34 summarized, its stream resumes after the sketch below): Norm 61.97 -> 12.57, NNZs 1045108 -> 1048576, Bias -1.679258 -> -1.669251, Avg. loss 0.429146 -> 0.018676; 33.70 s at epoch 34.
Within every block the average loss decreases monotonically from epoch to epoch; no block shows divergence or oscillation at this logging granularity.]
loss: 0.018264\nTotal training time: 34.72 seconds.\n-- Epoch 36\nNorm: 12.55, NNZs: 1048576, Bias: -1.669180, T: 111096, Avg. loss: 0.017877\nTotal training time: 35.82 seconds.\n-- Epoch 37\nNorm: 12.52, NNZs: 1048576, Bias: -1.670062, T: 114182, Avg. loss: 0.017514\nTotal training time: 36.86 seconds.\n-- Epoch 38\nNorm: 12.53, NNZs: 1048576, Bias: -1.668951, T: 117268, Avg. loss: 0.017168\nTotal training time: 37.93 seconds.\n-- Epoch 39\nNorm: 12.53, NNZs: 1048576, Bias: -1.668993, T: 120354, Avg. loss: 0.016840\nTotal training time: 39.00 seconds.\n-- Epoch 40\nNorm: 12.50, NNZs: 1048576, Bias: -1.669260, T: 123440, Avg. loss: 0.016528\nTotal training time: 40.02 seconds.\n-- Epoch 41\nNorm: 12.50, NNZs: 1048576, Bias: -1.669031, T: 126526, Avg. loss: 0.016230\nTotal training time: 41.10 seconds.\n-- Epoch 42\nNorm: 12.51, NNZs: 1048576, Bias: -1.668637, T: 129612, Avg. loss: 0.015947\nTotal training time: 42.17 seconds.\n-- Epoch 43\nNorm: 12.50, NNZs: 1048576, Bias: -1.668649, T: 132698, Avg. loss: 0.015678\nTotal training time: 43.22 seconds.\n-- Epoch 44\nNorm: 12.51, NNZs: 1048576, Bias: -1.668124, T: 135784, Avg. loss: 0.015419\nTotal training time: 44.29 seconds.\n-- Epoch 45\nNorm: 12.48, NNZs: 1048576, Bias: -1.668849, T: 138870, Avg. loss: 0.015173\nTotal training time: 45.33 seconds.\n-- Epoch 46\nNorm: 12.49, NNZs: 1048576, Bias: -1.668406, T: 141956, Avg. loss: 0.014938\nTotal training time: 46.40 seconds.\n-- Epoch 47\nNorm: 12.51, NNZs: 1048576, Bias: -1.666936, T: 145042, Avg. loss: 0.014711\nTotal training time: 47.49 seconds.\n-- Epoch 48\nNorm: 12.49, NNZs: 1048576, Bias: -1.668309, T: 148128, Avg. loss: 0.014494\nTotal training time: 48.53 seconds.\n-- Epoch 49\nNorm: 12.48, NNZs: 1048576, Bias: -1.667916, T: 151214, Avg. loss: 0.014287\nTotal training time: 49.59 seconds.\n-- Epoch 50\nNorm: 12.49, NNZs: 1048576, Bias: -1.667196, T: 154300, Avg. loss: 0.014087\nTotal training time: 50.65 seconds.\n-- Epoch 51\nNorm: 12.47, NNZs: 1048576, Bias: -1.667807, T: 157386, Avg. loss: 0.013897\nTotal training time: 51.69 seconds.\n-- Epoch 52\nNorm: 12.46, NNZs: 1048576, Bias: -1.669025, T: 160472, Avg. loss: 0.013711\nTotal training time: 52.76 seconds.\n-- Epoch 53\nNorm: 12.42, NNZs: 1048576, Bias: -1.670262, T: 163558, Avg. loss: 0.013533\nTotal training time: 53.80 seconds.\n-- Epoch 54\nNorm: 12.47, NNZs: 1048576, Bias: -1.667477, T: 166644, Avg. loss: 0.013362\nTotal training time: 54.89 seconds.\n-- Epoch 55\nNorm: 12.44, NNZs: 1048576, Bias: -1.668789, T: 169730, Avg. loss: 0.013197\nTotal training time: 55.94 seconds.\n-- Epoch 56\nNorm: 12.46, NNZs: 1048576, Bias: -1.667966, T: 172816, Avg. loss: 0.013038\nTotal training time: 57.01 seconds.\n-- Epoch 57\nNorm: 12.42, NNZs: 1048576, Bias: -1.669870, T: 175902, Avg. loss: 0.012884\nTotal training time: 58.07 seconds.\n-- Epoch 58\nNorm: 12.44, NNZs: 1048576, Bias: -1.667599, T: 178988, Avg. loss: 0.012736\nTotal training time: 59.12 seconds.\n-- Epoch 59\nNorm: 12.41, NNZs: 1048576, Bias: -1.669798, T: 182074, Avg. loss: 0.012592\nTotal training time: 60.16 seconds.\n-- Epoch 60\nNorm: 12.44, NNZs: 1048576, Bias: -1.668270, T: 185160, Avg. loss: 0.012452\nTotal training time: 61.24 seconds.\n-- Epoch 61\nNorm: 12.42, NNZs: 1048576, Bias: -1.668760, T: 188246, Avg. loss: 0.012318\nTotal training time: 62.30 seconds.\n-- Epoch 62\nNorm: 12.44, NNZs: 1048576, Bias: -1.667909, T: 191332, Avg. 
loss: 0.012188\nTotal training time: 63.37 seconds.\n-- Epoch 63\nNorm: 12.45, NNZs: 1048576, Bias: -1.666873, T: 194418, Avg. loss: 0.012062\nTotal training time: 64.45 seconds.\n-- Epoch 64\nNorm: 12.44, NNZs: 1048576, Bias: -1.667096, T: 197504, Avg. loss: 0.011941\nTotal training time: 65.51 seconds.\n-- Epoch 65\nNorm: 12.42, NNZs: 1048576, Bias: -1.668210, T: 200590, Avg. loss: 0.011823\nTotal training time: 66.56 seconds.\n-- Epoch 66\nNorm: 12.43, NNZs: 1048576, Bias: -1.667807, T: 203676, Avg. loss: 0.011709\nTotal training time: 67.63 seconds.\n-- Epoch 67\nNorm: 12.41, NNZs: 1048576, Bias: -1.668795, T: 206762, Avg. loss: 0.011597\nTotal training time: 68.67 seconds.\n-- Epoch 68\nNorm: 12.40, NNZs: 1048576, Bias: -1.668699, T: 209848, Avg. loss: 0.011488\nTotal training time: 69.73 seconds.\n-- Epoch 69\nNorm: 12.41, NNZs: 1048576, Bias: -1.667906, T: 212934, Avg. loss: 0.011384\nTotal training time: 70.81 seconds.\n-- Epoch 70\nNorm: 12.42, NNZs: 1048576, Bias: -1.667455, T: 216020, Avg. loss: 0.011282\nTotal training time: 71.89 seconds.\n-- Epoch 71\nNorm: 12.39, NNZs: 1048576, Bias: -1.668953, T: 219106, Avg. loss: 0.011182\nTotal training time: 72.91 seconds.\n-- Epoch 72\nNorm: 12.42, NNZs: 1048576, Bias: -1.667301, T: 222192, Avg. loss: 0.011085\nTotal training time: 74.02 seconds.\n-- Epoch 73\nNorm: 12.40, NNZs: 1048576, Bias: -1.668165, T: 225278, Avg. loss: 0.010992\nTotal training time: 75.05 seconds.\n-- Epoch 74\nNorm: 12.41, NNZs: 1048576, Bias: -1.667858, T: 228364, Avg. loss: 0.010901\nTotal training time: 76.13 seconds.\n-- Epoch 75\nNorm: 12.42, NNZs: 1048576, Bias: -1.667317, T: 231450, Avg. loss: 0.010812\nTotal training time: 77.19 seconds.\n-- Epoch 76\nNorm: 12.40, NNZs: 1048576, Bias: -1.668041, T: 234536, Avg. loss: 0.010725\nTotal training time: 78.25 seconds.\n-- Epoch 77\nNorm: 12.41, NNZs: 1048576, Bias: -1.667578, T: 237622, Avg. loss: 0.010640\nTotal training time: 79.31 seconds.\n-- Epoch 78\nNorm: 12.40, NNZs: 1048576, Bias: -1.668240, T: 240708, Avg. loss: 0.010558\nTotal training time: 80.39 seconds.\n-- Epoch 79\nNorm: 12.40, NNZs: 1048576, Bias: -1.667466, T: 243794, Avg. loss: 0.010477\nTotal training time: 81.44 seconds.\n-- Epoch 80\nNorm: 12.41, NNZs: 1048576, Bias: -1.667157, T: 246880, Avg. loss: 0.010399\nTotal training time: 82.53 seconds.\n-- Epoch 81\nNorm: 12.38, NNZs: 1048576, Bias: -1.668924, T: 249966, Avg. loss: 0.010322\nTotal training time: 83.57 seconds.\n-- Epoch 82\nNorm: 12.42, NNZs: 1048576, Bias: -1.666863, T: 253052, Avg. loss: 0.010247\nTotal training time: 84.68 seconds.\n-- Epoch 83\nNorm: 12.40, NNZs: 1048576, Bias: -1.667700, T: 256138, Avg. loss: 0.010175\nTotal training time: 85.74 seconds.\n-- Epoch 84\nNorm: 12.38, NNZs: 1048576, Bias: -1.668405, T: 259224, Avg. loss: 0.010104\nTotal training time: 86.79 seconds.\n-- Epoch 85\nNorm: 12.37, NNZs: 1048576, Bias: -1.669065, T: 262310, Avg. loss: 0.010034\nTotal training time: 87.88 seconds.\n-- Epoch 86\nNorm: 12.40, NNZs: 1048576, Bias: -1.667607, T: 265396, Avg. loss: 0.009967\nTotal training time: 89.16 seconds.\n-- Epoch 87\nNorm: 12.41, NNZs: 1048576, Bias: -1.666997, T: 268482, Avg. loss: 0.009901\nTotal training time: 90.24 seconds.\n-- Epoch 88\nNorm: 12.40, NNZs: 1048576, Bias: -1.667326, T: 271568, Avg. loss: 0.009837\nTotal training time: 91.29 seconds.\n-- Epoch 89\nNorm: 12.41, NNZs: 1048576, Bias: -1.666658, T: 274654, Avg. 
loss: 0.009773\nTotal training time: 92.37 seconds.\n-- Epoch 90\nNorm: 12.39, NNZs: 1048576, Bias: -1.667756, T: 277740, Avg. loss: 0.009712\nTotal training time: 93.43 seconds.\n-- Epoch 91\nNorm: 12.39, NNZs: 1048576, Bias: -1.667346, T: 280826, Avg. loss: 0.009651\nTotal training time: 94.48 seconds.\n-- Epoch 92\nNorm: 12.39, NNZs: 1048576, Bias: -1.667532, T: 283912, Avg. loss: 0.009593\nTotal training time: 95.55 seconds.\n-- Epoch 93\nNorm: 12.39, NNZs: 1048576, Bias: -1.666863, T: 286998, Avg. loss: 0.009535\nTotal training time: 96.62 seconds.\n-- Epoch 94\nNorm: 12.38, NNZs: 1048576, Bias: -1.667477, T: 290084, Avg. loss: 0.009478\nTotal training time: 97.68 seconds.\n-- Epoch 95\nNorm: 12.39, NNZs: 1048576, Bias: -1.666723, T: 293170, Avg. loss: 0.009422\nTotal training time: 98.77 seconds.\n-- Epoch 96\nNorm: 12.39, NNZs: 1048576, Bias: -1.666633, T: 296256, Avg. loss: 0.009368\nTotal training time: 99.82 seconds.\n-- Epoch 97\nNorm: 12.38, NNZs: 1048576, Bias: -1.667059, T: 299342, Avg. loss: 0.009315\nTotal training time: 100.90 seconds.\n-- Epoch 98\nNorm: 12.37, NNZs: 1048576, Bias: -1.667658, T: 302428, Avg. loss: 0.009263\nTotal training time: 101.95 seconds.\n-- Epoch 99\nNorm: 12.38, NNZs: 1048576, Bias: -1.667028, T: 305514, Avg. loss: 0.009212\nTotal training time: 103.04 seconds.\n-- Epoch 100\nNorm: 12.37, NNZs: 1048576, Bias: -1.667665, T: 308600, Avg. loss: 0.009161\nTotal training time: 104.11 seconds.\n-- Epoch 1\nNorm: 44.08, NNZs: 639227, Bias: -1.365984, T: 3086, Avg. loss: 0.305213\nTotal training time: 0.86 seconds.\n-- Epoch 2\nNorm: 29.88, NNZs: 964240, Bias: -1.380770, T: 6172, Avg. loss: 0.184517\nTotal training time: 1.71 seconds.\n-- Epoch 3\nNorm: 23.32, NNZs: 1005257, Bias: -1.417820, T: 9258, Avg. loss: 0.133109\nTotal training time: 2.55 seconds.\n-- Epoch 4\nNorm: 19.21, NNZs: 1036041, Bias: -1.441796, T: 12344, Avg. loss: 0.103885\nTotal training time: 3.41 seconds.\n-- Epoch 5\nNorm: 17.02, NNZs: 1042693, Bias: -1.409172, T: 15430, Avg. loss: 0.085543\nTotal training time: 4.26 seconds.\n-- Epoch 6\nNorm: 15.41, NNZs: 1047629, Bias: -1.415142, T: 18516, Avg. loss: 0.072828\nTotal training time: 5.13 seconds.\n-- Epoch 7\nNorm: 14.24, NNZs: 1047984, Bias: -1.413177, T: 21602, Avg. loss: 0.063406\nTotal training time: 6.00 seconds.\n-- Epoch 8\nNorm: 13.47, NNZs: 1048277, Bias: -1.408829, T: 24688, Avg. loss: 0.056401\nTotal training time: 6.88 seconds.\n-- Epoch 9\nNorm: 12.85, NNZs: 1048313, Bias: -1.416355, T: 27774, Avg. loss: 0.050887\nTotal training time: 7.75 seconds.\n-- Epoch 10\nNorm: 12.57, NNZs: 1048456, Bias: -1.409159, T: 30860, Avg. loss: 0.046511\nTotal training time: 8.63 seconds.\n-- Epoch 11\nNorm: 12.23, NNZs: 1048493, Bias: -1.408113, T: 33946, Avg. loss: 0.042907\nTotal training time: 9.53 seconds.\n-- Epoch 12\nNorm: 11.98, NNZs: 1048513, Bias: -1.409290, T: 37032, Avg. loss: 0.039875\nTotal training time: 10.41 seconds.\n-- Epoch 13\nNorm: 11.82, NNZs: 1048518, Bias: -1.411048, T: 40118, Avg. loss: 0.037310\nTotal training time: 11.30 seconds.\n-- Epoch 14\nNorm: 11.70, NNZs: 1048529, Bias: -1.409885, T: 43204, Avg. loss: 0.035096\nTotal training time: 12.18 seconds.\n-- Epoch 15\nNorm: 11.61, NNZs: 1048535, Bias: -1.408248, T: 46290, Avg. loss: 0.033166\nTotal training time: 13.07 seconds.\n-- Epoch 16\nNorm: 11.48, NNZs: 1048543, Bias: -1.409240, T: 49376, Avg. loss: 0.031463\nTotal training time: 13.98 seconds.\n-- Epoch 17\nNorm: 11.39, NNZs: 1048556, Bias: -1.410181, T: 52462, Avg. 
loss: 0.029979\nTotal training time: 14.89 seconds.\n-- Epoch 18\nNorm: 11.30, NNZs: 1048557, Bias: -1.410177, T: 55548, Avg. loss: 0.028664\nTotal training time: 15.78 seconds.\n-- Epoch 19\nNorm: 11.23, NNZs: 1048560, Bias: -1.410130, T: 58634, Avg. loss: 0.027475\nTotal training time: 16.67 seconds.\n-- Epoch 20\nNorm: 11.18, NNZs: 1048563, Bias: -1.406741, T: 61720, Avg. loss: 0.026395\nTotal training time: 17.57 seconds.\n-- Epoch 21\nNorm: 11.13, NNZs: 1048567, Bias: -1.411218, T: 64806, Avg. loss: 0.025435\nTotal training time: 18.50 seconds.\n-- Epoch 22\nNorm: 11.08, NNZs: 1048571, Bias: -1.410641, T: 67892, Avg. loss: 0.024549\nTotal training time: 19.39 seconds.\n-- Epoch 23\nNorm: 11.02, NNZs: 1048571, Bias: -1.412824, T: 70978, Avg. loss: 0.023723\nTotal training time: 20.30 seconds.\n-- Epoch 24\nNorm: 10.98, NNZs: 1048572, Bias: -1.413107, T: 74064, Avg. loss: 0.022981\nTotal training time: 21.23 seconds.\n-- Epoch 25\nNorm: 10.97, NNZs: 1048572, Bias: -1.411672, T: 77150, Avg. loss: 0.022303\nTotal training time: 22.13 seconds.\n-- Epoch 26\nNorm: 10.94, NNZs: 1048573, Bias: -1.413230, T: 80236, Avg. loss: 0.021659\nTotal training time: 23.07 seconds.\n-- Epoch 27\nNorm: 10.93, NNZs: 1048573, Bias: -1.412257, T: 83322, Avg. loss: 0.021076\nTotal training time: 23.99 seconds.\n-- Epoch 28\nNorm: 10.92, NNZs: 1048573, Bias: -1.410361, T: 86408, Avg. loss: 0.020530\nTotal training time: 24.91 seconds.\n-- Epoch 29\nNorm: 10.88, NNZs: 1048573, Bias: -1.411003, T: 89494, Avg. loss: 0.020019\nTotal training time: 25.82 seconds.\n-- Epoch 30\nNorm: 10.85, NNZs: 1048573, Bias: -1.411581, T: 92580, Avg. loss: 0.019539\nTotal training time: 26.73 seconds.\n-- Epoch 31\nNorm: 10.85, NNZs: 1048573, Bias: -1.410680, T: 95666, Avg. loss: 0.019096\nTotal training time: 27.65 seconds.\n-- Epoch 32\nNorm: 10.83, NNZs: 1048573, Bias: -1.410644, T: 98752, Avg. loss: 0.018668\nTotal training time: 28.57 seconds.\n-- Epoch 33\nNorm: 10.82, NNZs: 1048574, Bias: -1.410491, T: 101838, Avg. loss: 0.018273\nTotal training time: 29.50 seconds.\n-- Epoch 34\nNorm: 10.77, NNZs: 1048575, Bias: -1.413558, T: 104924, Avg. loss: 0.017906\nTotal training time: 30.44 seconds.\n-- Epoch 35\nNorm: 10.80, NNZs: 1048575, Bias: -1.408885, T: 108010, Avg. loss: 0.017557\nTotal training time: 31.35 seconds.\n-- Epoch 36\nNorm: 10.74, NNZs: 1048576, Bias: -1.414119, T: 111096, Avg. loss: 0.017228\nTotal training time: 32.27 seconds.\n-- Epoch 37\nNorm: 10.75, NNZs: 1048576, Bias: -1.412098, T: 114182, Avg. loss: 0.016913\nTotal training time: 33.19 seconds.\n-- Epoch 38\nNorm: 10.73, NNZs: 1048576, Bias: -1.411228, T: 117268, Avg. loss: 0.016616\nTotal training time: 34.08 seconds.\n-- Epoch 39\nNorm: 10.73, NNZs: 1048576, Bias: -1.410648, T: 120354, Avg. loss: 0.016341\nTotal training time: 35.02 seconds.\n-- Epoch 40\nNorm: 10.71, NNZs: 1048576, Bias: -1.411497, T: 123440, Avg. loss: 0.016074\nTotal training time: 35.93 seconds.\n-- Epoch 41\nNorm: 10.70, NNZs: 1048576, Bias: -1.410790, T: 126526, Avg. loss: 0.015821\nTotal training time: 36.87 seconds.\n-- Epoch 42\nNorm: 10.71, NNZs: 1048576, Bias: -1.408888, T: 129612, Avg. loss: 0.015579\nTotal training time: 37.79 seconds.\n-- Epoch 43\nNorm: 10.69, NNZs: 1048576, Bias: -1.411479, T: 132698, Avg. loss: 0.015349\nTotal training time: 38.72 seconds.\n-- Epoch 44\nNorm: 10.69, NNZs: 1048576, Bias: -1.409842, T: 135784, Avg. loss: 0.015130\nTotal training time: 39.65 seconds.\n-- Epoch 45\nNorm: 10.68, NNZs: 1048576, Bias: -1.410782, T: 138870, Avg. 
loss: 0.014920\nTotal training time: 40.58 seconds.\n-- Epoch 46\nNorm: 10.66, NNZs: 1048576, Bias: -1.411550, T: 141956, Avg. loss: 0.014719\nTotal training time: 41.52 seconds.\n-- Epoch 47\nNorm: 10.65, NNZs: 1048576, Bias: -1.412997, T: 145042, Avg. loss: 0.014525\nTotal training time: 42.44 seconds.\n-- Epoch 48\nNorm: 10.65, NNZs: 1048576, Bias: -1.411801, T: 148128, Avg. loss: 0.014338\nTotal training time: 43.37 seconds.\n-- Epoch 49\nNorm: 10.66, NNZs: 1048576, Bias: -1.409439, T: 151214, Avg. loss: 0.014158\nTotal training time: 44.31 seconds.\n-- Epoch 50\nNorm: 10.63, NNZs: 1048576, Bias: -1.413028, T: 154300, Avg. loss: 0.013988\nTotal training time: 45.25 seconds.\n-- Epoch 51\nNorm: 10.64, NNZs: 1048576, Bias: -1.411857, T: 157386, Avg. loss: 0.013823\nTotal training time: 46.18 seconds.\n-- Epoch 52\nNorm: 10.62, NNZs: 1048576, Bias: -1.413654, T: 160472, Avg. loss: 0.013661\nTotal training time: 47.12 seconds.\n-- Epoch 53\nNorm: 10.63, NNZs: 1048576, Bias: -1.412381, T: 163558, Avg. loss: 0.013508\nTotal training time: 48.04 seconds.\n-- Epoch 54\nNorm: 10.61, NNZs: 1048576, Bias: -1.413892, T: 166644, Avg. loss: 0.013360\nTotal training time: 48.99 seconds.\n-- Epoch 55\nNorm: 10.62, NNZs: 1048576, Bias: -1.411867, T: 169730, Avg. loss: 0.013218\nTotal training time: 49.89 seconds.\n-- Epoch 56\nNorm: 10.61, NNZs: 1048576, Bias: -1.412045, T: 172816, Avg. loss: 0.013082\nTotal training time: 50.83 seconds.\n-- Epoch 57\nNorm: 10.59, NNZs: 1048576, Bias: -1.415366, T: 175902, Avg. loss: 0.012949\nTotal training time: 51.80 seconds.\n-- Epoch 58\nNorm: 10.61, NNZs: 1048576, Bias: -1.410465, T: 178988, Avg. loss: 0.012821\nTotal training time: 52.70 seconds.\n-- Epoch 59\nNorm: 10.60, NNZs: 1048576, Bias: -1.412818, T: 182074, Avg. loss: 0.012700\nTotal training time: 53.67 seconds.\n-- Epoch 60\nNorm: 10.59, NNZs: 1048576, Bias: -1.413175, T: 185160, Avg. loss: 0.012581\nTotal training time: 54.59 seconds.\n-- Epoch 61\nNorm: 10.58, NNZs: 1048576, Bias: -1.414188, T: 188246, Avg. loss: 0.012467\nTotal training time: 55.54 seconds.\n-- Epoch 62\nNorm: 10.59, NNZs: 1048576, Bias: -1.412889, T: 191332, Avg. loss: 0.012355\nTotal training time: 56.47 seconds.\n-- Epoch 63\nNorm: 10.57, NNZs: 1048576, Bias: -1.414065, T: 194418, Avg. loss: 0.012248\nTotal training time: 57.42 seconds.\n-- Epoch 64\nNorm: 10.57, NNZs: 1048576, Bias: -1.414523, T: 197504, Avg. loss: 0.012143\nTotal training time: 58.34 seconds.\n-- Epoch 65\nNorm: 10.57, NNZs: 1048576, Bias: -1.413630, T: 200590, Avg. loss: 0.012042\nTotal training time: 59.31 seconds.\n-- Epoch 66\nNorm: 10.56, NNZs: 1048576, Bias: -1.414746, T: 203676, Avg. loss: 0.011944\nTotal training time: 60.25 seconds.\n-- Epoch 67\nNorm: 10.57, NNZs: 1048576, Bias: -1.413022, T: 206762, Avg. loss: 0.011849\nTotal training time: 61.20 seconds.\n-- Epoch 68\nNorm: 10.57, NNZs: 1048576, Bias: -1.411876, T: 209848, Avg. loss: 0.011756\nTotal training time: 62.14 seconds.\n-- Epoch 69\nNorm: 10.56, NNZs: 1048576, Bias: -1.412941, T: 212934, Avg. loss: 0.011667\nTotal training time: 63.08 seconds.\n-- Epoch 70\nNorm: 10.56, NNZs: 1048576, Bias: -1.412479, T: 216020, Avg. loss: 0.011579\nTotal training time: 64.04 seconds.\n-- Epoch 71\nNorm: 10.56, NNZs: 1048576, Bias: -1.412452, T: 219106, Avg. loss: 0.011495\nTotal training time: 64.99 seconds.\n-- Epoch 72\nNorm: 10.54, NNZs: 1048576, Bias: -1.414202, T: 222192, Avg. 
loss: 0.011411\nTotal training time: 65.93 seconds.\n-- Epoch 73\nNorm: 10.55, NNZs: 1048576, Bias: -1.413034, T: 225278, Avg. loss: 0.011330\nTotal training time: 66.89 seconds.\n-- Epoch 74\nNorm: 10.55, NNZs: 1048576, Bias: -1.411996, T: 228364, Avg. loss: 0.011251\nTotal training time: 67.86 seconds.\n-- Epoch 75\nNorm: 10.54, NNZs: 1048576, Bias: -1.412888, T: 231450, Avg. loss: 0.011175\nTotal training time: 68.82 seconds.\n-- Epoch 76\nNorm: 10.53, NNZs: 1048576, Bias: -1.414063, T: 234536, Avg. loss: 0.011101\nTotal training time: 69.76 seconds.\n-- Epoch 77\nNorm: 10.54, NNZs: 1048576, Bias: -1.412448, T: 237622, Avg. loss: 0.011028\nTotal training time: 70.73 seconds.\n-- Epoch 78\nNorm: 10.53, NNZs: 1048576, Bias: -1.414316, T: 240708, Avg. loss: 0.010957\nTotal training time: 71.68 seconds.\n-- Epoch 79\nNorm: 10.53, NNZs: 1048576, Bias: -1.413542, T: 243794, Avg. loss: 0.010889\nTotal training time: 72.61 seconds.\n-- Epoch 80\nNorm: 10.52, NNZs: 1048576, Bias: -1.414051, T: 246880, Avg. loss: 0.010822\nTotal training time: 73.55 seconds.\n-- Epoch 81\nNorm: 10.53, NNZs: 1048576, Bias: -1.413453, T: 249966, Avg. loss: 0.010756\nTotal training time: 74.51 seconds.\n-- Epoch 82\nNorm: 10.52, NNZs: 1048576, Bias: -1.414522, T: 253052, Avg. loss: 0.010692\nTotal training time: 75.43 seconds.\n-- Epoch 83\nNorm: 10.52, NNZs: 1048576, Bias: -1.413285, T: 256138, Avg. loss: 0.010630\nTotal training time: 76.40 seconds.\n-- Epoch 84\nNorm: 10.51, NNZs: 1048576, Bias: -1.414640, T: 259224, Avg. loss: 0.010569\nTotal training time: 77.35 seconds.\n-- Epoch 85\nNorm: 10.51, NNZs: 1048576, Bias: -1.414794, T: 262310, Avg. loss: 0.010510\nTotal training time: 78.30 seconds.\n-- Epoch 86\nNorm: 10.51, NNZs: 1048576, Bias: -1.414295, T: 265396, Avg. loss: 0.010452\nTotal training time: 79.24 seconds.\n-- Epoch 87\nNorm: 10.51, NNZs: 1048576, Bias: -1.413308, T: 268482, Avg. loss: 0.010396\nTotal training time: 80.16 seconds.\n-- Epoch 88\nNorm: 10.51, NNZs: 1048576, Bias: -1.413087, T: 271568, Avg. loss: 0.010340\nTotal training time: 81.11 seconds.\n-- Epoch 89\nNorm: 10.50, NNZs: 1048576, Bias: -1.414496, T: 274654, Avg. loss: 0.010285\nTotal training time: 82.09 seconds.\n-- Epoch 90\nNorm: 10.50, NNZs: 1048576, Bias: -1.414267, T: 277740, Avg. loss: 0.010232\nTotal training time: 83.03 seconds.\n-- Epoch 91\nNorm: 10.50, NNZs: 1048576, Bias: -1.414941, T: 280826, Avg. loss: 0.010180\nTotal training time: 83.97 seconds.\n-- Epoch 92\nNorm: 10.51, NNZs: 1048576, Bias: -1.412614, T: 283912, Avg. loss: 0.010128\nTotal training time: 84.92 seconds.\n-- Epoch 93\nNorm: 10.50, NNZs: 1048576, Bias: -1.413815, T: 286998, Avg. loss: 0.010079\nTotal training time: 85.89 seconds.\n-- Epoch 94\nNorm: 10.49, NNZs: 1048576, Bias: -1.415025, T: 290084, Avg. loss: 0.010029\nTotal training time: 86.81 seconds.\n-- Epoch 95\nNorm: 10.50, NNZs: 1048576, Bias: -1.414457, T: 293170, Avg. loss: 0.009981\nTotal training time: 87.77 seconds.\n-- Epoch 96\nNorm: 10.49, NNZs: 1048576, Bias: -1.415301, T: 296256, Avg. loss: 0.009933\nTotal training time: 88.75 seconds.\n-- Epoch 97\nNorm: 10.50, NNZs: 1048576, Bias: -1.413412, T: 299342, Avg. loss: 0.009887\nTotal training time: 89.66 seconds.\n-- Epoch 98\nNorm: 10.49, NNZs: 1048576, Bias: -1.414696, T: 302428, Avg. loss: 0.009842\nTotal training time: 90.62 seconds.\n-- Epoch 99\nNorm: 10.49, NNZs: 1048576, Bias: -1.413966, T: 305514, Avg. 
loss: 0.009799\nTotal training time: 91.56 seconds.\n-- Epoch 100\nNorm: 10.49, NNZs: 1048576, Bias: -1.414600, T: 308600, Avg. loss: 0.009756\nTotal training time: 92.54 seconds.\n-- Epoch 1\nNorm: 78.13, NNZs: 613708, Bias: -2.315019, T: 3086, Avg. loss: 1.099688\nTotal training time: 0.88 seconds.\n-- Epoch 2\nNorm: 54.46, NNZs: 702767, Bias: -2.320828, T: 6172, Avg. loss: 0.645699\nTotal training time: 1.73 seconds.\n-- Epoch 3\nNorm: 42.44, NNZs: 841390, Bias: -2.304876, T: 9258, Avg. loss: 0.463739\nTotal training time: 2.59 seconds.\n-- Epoch 4\nNorm: 35.83, NNZs: 867036, Bias: -2.210931, T: 12344, Avg. loss: 0.362179\nTotal training time: 3.45 seconds.\n-- Epoch 5\nNorm: 31.16, NNZs: 877850, Bias: -2.158198, T: 15430, Avg. loss: 0.297284\nTotal training time: 4.31 seconds.\n-- Epoch 6\nNorm: 27.92, NNZs: 935577, Bias: -2.127622, T: 18516, Avg. loss: 0.252614\nTotal training time: 5.17 seconds.\n-- Epoch 7\nNorm: 25.88, NNZs: 950971, Bias: -2.080989, T: 21602, Avg. loss: 0.220578\nTotal training time: 6.03 seconds.\n-- Epoch 8\nNorm: 24.49, NNZs: 973070, Bias: -2.034443, T: 24688, Avg. loss: 0.196121\nTotal training time: 6.91 seconds.\n-- Epoch 9\nNorm: 23.48, NNZs: 991633, Bias: -1.992127, T: 27774, Avg. loss: 0.177157\nTotal training time: 7.80 seconds.\n-- Epoch 10\nNorm: 22.61, NNZs: 993941, Bias: -1.978776, T: 30860, Avg. loss: 0.161962\nTotal training time: 8.67 seconds.\n-- Epoch 11\nNorm: 22.07, NNZs: 1005559, Bias: -1.948164, T: 33946, Avg. loss: 0.149420\nTotal training time: 9.57 seconds.\n-- Epoch 12\nNorm: 21.55, NNZs: 1005820, Bias: -1.925280, T: 37032, Avg. loss: 0.138871\nTotal training time: 10.45 seconds.\n-- Epoch 13\nNorm: 21.20, NNZs: 1014599, Bias: -1.905124, T: 40118, Avg. loss: 0.129986\nTotal training time: 11.49 seconds.\n-- Epoch 14\nNorm: 20.87, NNZs: 1020978, Bias: -1.886442, T: 43204, Avg. loss: 0.122413\nTotal training time: 12.48 seconds.\n-- Epoch 15\nNorm: 20.55, NNZs: 1021335, Bias: -1.872827, T: 46290, Avg. loss: 0.115737\nTotal training time: 13.37 seconds.\n-- Epoch 16\nNorm: 20.33, NNZs: 1024905, Bias: -1.851326, T: 49376, Avg. loss: 0.109970\nTotal training time: 14.28 seconds.\n-- Epoch 17\nNorm: 20.13, NNZs: 1025764, Bias: -1.837556, T: 52462, Avg. loss: 0.104832\nTotal training time: 15.17 seconds.\n-- Epoch 18\nNorm: 19.94, NNZs: 1028686, Bias: -1.824226, T: 55548, Avg. loss: 0.100260\nTotal training time: 16.08 seconds.\n-- Epoch 19\nNorm: 19.85, NNZs: 1031281, Bias: -1.809478, T: 58634, Avg. loss: 0.096161\nTotal training time: 16.98 seconds.\n-- Epoch 20\nNorm: 19.71, NNZs: 1032249, Bias: -1.797133, T: 61720, Avg. loss: 0.092462\nTotal training time: 17.88 seconds.\n-- Epoch 21\nNorm: 19.61, NNZs: 1033559, Bias: -1.786545, T: 64806, Avg. loss: 0.089116\nTotal training time: 18.80 seconds.\n-- Epoch 22\nNorm: 19.51, NNZs: 1033639, Bias: -1.772482, T: 67892, Avg. loss: 0.086059\nTotal training time: 19.70 seconds.\n-- Epoch 23\nNorm: 19.39, NNZs: 1035427, Bias: -1.765420, T: 70978, Avg. loss: 0.083244\nTotal training time: 20.62 seconds.\n-- Epoch 24\nNorm: 19.33, NNZs: 1035557, Bias: -1.754205, T: 74064, Avg. loss: 0.080657\nTotal training time: 21.52 seconds.\n-- Epoch 25\nNorm: 19.26, NNZs: 1037305, Bias: -1.747249, T: 77150, Avg. loss: 0.078313\nTotal training time: 22.45 seconds.\n-- Epoch 26\nNorm: 19.19, NNZs: 1039572, Bias: -1.737716, T: 80236, Avg. loss: 0.076123\nTotal training time: 23.38 seconds.\n-- Epoch 27\nNorm: 19.08, NNZs: 1039621, Bias: -1.732640, T: 83322, Avg. 
loss: 0.074074\nTotal training time: 24.28 seconds.\n-- Epoch 28\nNorm: 19.05, NNZs: 1039976, Bias: -1.716959, T: 86408, Avg. loss: 0.072209\nTotal training time: 25.20 seconds.\n-- Epoch 29\nNorm: 18.99, NNZs: 1040706, Bias: -1.712625, T: 89494, Avg. loss: 0.070468\nTotal training time: 26.12 seconds.\n-- Epoch 30\nNorm: 18.85, NNZs: 1041381, Bias: -1.710878, T: 92580, Avg. loss: 0.068826\nTotal training time: 27.04 seconds.\n-- Epoch 31\nNorm: 18.87, NNZs: 1041389, Bias: -1.697501, T: 95666, Avg. loss: 0.067292\nTotal training time: 27.94 seconds.\n-- Epoch 32\nNorm: 18.83, NNZs: 1042401, Bias: -1.691523, T: 98752, Avg. loss: 0.065855\nTotal training time: 28.88 seconds.\n-- Epoch 33\nNorm: 18.76, NNZs: 1042428, Bias: -1.686029, T: 101838, Avg. loss: 0.064515\nTotal training time: 29.81 seconds.\n-- Epoch 34\nNorm: 18.74, NNZs: 1042428, Bias: -1.676337, T: 104924, Avg. loss: 0.063249\nTotal training time: 30.73 seconds.\n-- Epoch 35\nNorm: 18.66, NNZs: 1042443, Bias: -1.673713, T: 108010, Avg. loss: 0.062057\nTotal training time: 31.64 seconds.\n-- Epoch 36\nNorm: 18.66, NNZs: 1042717, Bias: -1.666177, T: 111096, Avg. loss: 0.060925\nTotal training time: 32.57 seconds.\n-- Epoch 37\nNorm: 18.65, NNZs: 1042908, Bias: -1.658844, T: 114182, Avg. loss: 0.059861\nTotal training time: 33.50 seconds.\n-- Epoch 38\nNorm: 18.61, NNZs: 1042911, Bias: -1.655881, T: 117268, Avg. loss: 0.058838\nTotal training time: 34.42 seconds.\n-- Epoch 39\nNorm: 18.61, NNZs: 1042921, Bias: -1.649657, T: 120354, Avg. loss: 0.057850\nTotal training time: 35.34 seconds.\n-- Epoch 40\nNorm: 18.57, NNZs: 1042925, Bias: -1.644094, T: 123440, Avg. loss: 0.056928\nTotal training time: 36.27 seconds.\n-- Epoch 41\nNorm: 18.54, NNZs: 1042930, Bias: -1.638912, T: 126526, Avg. loss: 0.056048\nTotal training time: 37.18 seconds.\n-- Epoch 42\nNorm: 18.51, NNZs: 1043291, Bias: -1.634332, T: 129612, Avg. loss: 0.055213\nTotal training time: 38.09 seconds.\n-- Epoch 43\nNorm: 18.50, NNZs: 1043572, Bias: -1.629894, T: 132698, Avg. loss: 0.054415\nTotal training time: 39.04 seconds.\n-- Epoch 44\nNorm: 18.47, NNZs: 1043572, Bias: -1.623556, T: 135784, Avg. loss: 0.053646\nTotal training time: 39.94 seconds.\n-- Epoch 45\nNorm: 18.48, NNZs: 1043878, Bias: -1.616780, T: 138870, Avg. loss: 0.052920\nTotal training time: 40.88 seconds.\n-- Epoch 46\nNorm: 18.44, NNZs: 1043885, Bias: -1.613374, T: 141956, Avg. loss: 0.052221\nTotal training time: 41.84 seconds.\n-- Epoch 47\nNorm: 18.42, NNZs: 1043889, Bias: -1.609046, T: 145042, Avg. loss: 0.051559\nTotal training time: 42.76 seconds.\n-- Epoch 48\nNorm: 18.39, NNZs: 1043889, Bias: -1.606558, T: 148128, Avg. loss: 0.050925\nTotal training time: 43.69 seconds.\n-- Epoch 49\nNorm: 18.39, NNZs: 1043890, Bias: -1.601563, T: 151214, Avg. loss: 0.050309\nTotal training time: 44.62 seconds.\n-- Epoch 50\nNorm: 18.35, NNZs: 1043892, Bias: -1.600511, T: 154300, Avg. loss: 0.049719\nTotal training time: 45.54 seconds.\n-- Epoch 51\nNorm: 18.37, NNZs: 1043892, Bias: -1.592085, T: 157386, Avg. loss: 0.049155\nTotal training time: 46.46 seconds.\n-- Epoch 52\nNorm: 18.33, NNZs: 1043892, Bias: -1.590524, T: 160472, Avg. loss: 0.048605\nTotal training time: 47.40 seconds.\n-- Epoch 53\nNorm: 18.31, NNZs: 1043892, Bias: -1.587625, T: 163558, Avg. loss: 0.048081\nTotal training time: 48.33 seconds.\n-- Epoch 54\nNorm: 18.31, NNZs: 1043900, Bias: -1.583158, T: 166644, Avg. loss: 0.047571\nTotal training time: 49.27 seconds.\n-- Epoch 55\nNorm: 18.30, NNZs: 1043900, Bias: -1.577855, T: 169730, Avg. 
loss: 0.047088\nTotal training time: 50.21 seconds.\n-- Epoch 56\nNorm: 18.28, NNZs: 1043900, Bias: -1.576267, T: 172816, Avg. loss: 0.046615\nTotal training time: 51.15 seconds.\n-- Epoch 57\nNorm: 18.27, NNZs: 1043900, Bias: -1.572263, T: 175902, Avg. loss: 0.046161\nTotal training time: 52.08 seconds.\n-- Epoch 58\nNorm: 18.24, NNZs: 1043900, Bias: -1.568974, T: 178988, Avg. loss: 0.045725\nTotal training time: 53.01 seconds.\n-- Epoch 59\nNorm: 18.23, NNZs: 1043900, Bias: -1.565315, T: 182074, Avg. loss: 0.045301\nTotal training time: 53.94 seconds.\n-- Epoch 60\nNorm: 18.21, NNZs: 1043900, Bias: -1.563851, T: 185160, Avg. loss: 0.044893\nTotal training time: 54.90 seconds.\n-- Epoch 61\nNorm: 18.21, NNZs: 1043900, Bias: -1.558607, T: 188246, Avg. loss: 0.044499\nTotal training time: 55.81 seconds.\n-- Epoch 62\nNorm: 18.20, NNZs: 1043901, Bias: -1.556321, T: 191332, Avg. loss: 0.044118\nTotal training time: 56.75 seconds.\n-- Epoch 63\nNorm: 18.18, NNZs: 1044129, Bias: -1.554002, T: 194418, Avg. loss: 0.043747\nTotal training time: 57.70 seconds.\n-- Epoch 64\nNorm: 18.18, NNZs: 1044131, Bias: -1.550122, T: 197504, Avg. loss: 0.043383\nTotal training time: 58.62 seconds.\n-- Epoch 65\nNorm: 18.18, NNZs: 1044158, Bias: -1.546870, T: 200590, Avg. loss: 0.043031\nTotal training time: 59.56 seconds.\n-- Epoch 66\nNorm: 18.17, NNZs: 1044163, Bias: -1.543917, T: 203676, Avg. loss: 0.042690\nTotal training time: 60.50 seconds.\n-- Epoch 67\nNorm: 18.15, NNZs: 1044340, Bias: -1.543646, T: 206762, Avg. loss: 0.042355\nTotal training time: 61.45 seconds.\n-- Epoch 68\nNorm: 18.14, NNZs: 1044342, Bias: -1.539852, T: 209848, Avg. loss: 0.042036\nTotal training time: 62.38 seconds.\n-- Epoch 69\nNorm: 18.12, NNZs: 1044342, Bias: -1.537510, T: 212934, Avg. loss: 0.041722\nTotal training time: 63.32 seconds.\n-- Epoch 70\nNorm: 18.12, NNZs: 1044345, Bias: -1.533514, T: 216020, Avg. loss: 0.041416\nTotal training time: 64.25 seconds.\n-- Epoch 71\nNorm: 18.10, NNZs: 1044345, Bias: -1.533967, T: 219106, Avg. loss: 0.041121\nTotal training time: 65.21 seconds.\n-- Epoch 72\nNorm: 18.13, NNZs: 1044345, Bias: -1.527220, T: 222192, Avg. loss: 0.040833\nTotal training time: 66.13 seconds.\n-- Epoch 73\nNorm: 18.09, NNZs: 1044731, Bias: -1.526411, T: 225278, Avg. loss: 0.040552\nTotal training time: 67.09 seconds.\n-- Epoch 74\nNorm: 18.11, NNZs: 1044731, Bias: -1.522046, T: 228364, Avg. loss: 0.040282\nTotal training time: 68.01 seconds.\n-- Epoch 75\nNorm: 18.09, NNZs: 1044732, Bias: -1.521896, T: 231450, Avg. loss: 0.040016\nTotal training time: 68.97 seconds.\n-- Epoch 76\nNorm: 18.08, NNZs: 1044732, Bias: -1.519394, T: 234536, Avg. loss: 0.039758\nTotal training time: 69.90 seconds.\n-- Epoch 77\nNorm: 18.07, NNZs: 1044736, Bias: -1.518561, T: 237622, Avg. loss: 0.039504\nTotal training time: 70.84 seconds.\n-- Epoch 78\nNorm: 18.07, NNZs: 1044736, Bias: -1.514841, T: 240708, Avg. loss: 0.039259\nTotal training time: 71.78 seconds.\n-- Epoch 79\nNorm: 18.06, NNZs: 1044740, Bias: -1.513580, T: 243794, Avg. loss: 0.039020\nTotal training time: 72.72 seconds.\n-- Epoch 80\nNorm: 18.05, NNZs: 1044740, Bias: -1.510524, T: 246880, Avg. loss: 0.038789\nTotal training time: 73.66 seconds.\n-- Epoch 81\nNorm: 18.04, NNZs: 1044740, Bias: -1.509298, T: 249966, Avg. loss: 0.038563\nTotal training time: 74.60 seconds.\n-- Epoch 82\nNorm: 18.03, NNZs: 1044740, Bias: -1.506275, T: 253052, Avg. 
loss: 0.038341\nTotal training time: 75.54 seconds.\n-- Epoch 83\nNorm: 18.04, NNZs: 1044742, Bias: -1.501767, T: 256138, Avg. loss: 0.038123\nTotal training time: 76.47 seconds.\n-- Epoch 84\nNorm: 18.02, NNZs: 1044756, Bias: -1.501372, T: 259224, Avg. loss: 0.037914\nTotal training time: 77.41 seconds.\n-- Epoch 85\nNorm: 18.01, NNZs: 1044757, Bias: -1.501027, T: 262310, Avg. loss: 0.037705\nTotal training time: 78.36 seconds.\n-- Epoch 86\nNorm: 18.02, NNZs: 1044757, Bias: -1.497025, T: 265396, Avg. loss: 0.037504\nTotal training time: 79.30 seconds.\n-- Epoch 87\nNorm: 18.01, NNZs: 1044757, Bias: -1.495250, T: 268482, Avg. loss: 0.037307\nTotal training time: 80.24 seconds.\n-- Epoch 88\nNorm: 18.01, NNZs: 1044764, Bias: -1.493154, T: 271568, Avg. loss: 0.037112\nTotal training time: 81.19 seconds.\n-- Epoch 89\nNorm: 18.00, NNZs: 1044764, Bias: -1.491409, T: 274654, Avg. loss: 0.036925\nTotal training time: 82.13 seconds.\n-- Epoch 90\nNorm: 18.00, NNZs: 1044764, Bias: -1.488794, T: 277740, Avg. loss: 0.036740\nTotal training time: 83.07 seconds.\n-- Epoch 91\nNorm: 18.00, NNZs: 1044764, Bias: -1.486056, T: 280826, Avg. loss: 0.036559\nTotal training time: 84.02 seconds.\n-- Epoch 92\nNorm: 18.00, NNZs: 1044764, Bias: -1.483858, T: 283912, Avg. loss: 0.036384\nTotal training time: 84.95 seconds.\n-- Epoch 93\nNorm: 17.99, NNZs: 1044764, Bias: -1.482195, T: 286998, Avg. loss: 0.036210\nTotal training time: 85.91 seconds.\n-- Epoch 94\nNorm: 17.96, NNZs: 1044764, Bias: -1.483267, T: 290084, Avg. loss: 0.036040\nTotal training time: 86.85 seconds.\n-- Epoch 95\nNorm: 17.97, NNZs: 1044764, Bias: -1.480666, T: 293170, Avg. loss: 0.035873\nTotal training time: 87.79 seconds.\n-- Epoch 96\nNorm: 17.96, NNZs: 1044764, Bias: -1.479862, T: 296256, Avg. loss: 0.035712\nTotal training time: 88.74 seconds.\n-- Epoch 97\nNorm: 17.96, NNZs: 1044765, Bias: -1.476211, T: 299342, Avg. loss: 0.035549\nTotal training time: 89.68 seconds.\n-- Epoch 98\nNorm: 17.95, NNZs: 1044767, Bias: -1.474277, T: 302428, Avg. loss: 0.035394\nTotal training time: 90.63 seconds.\n-- Epoch 99\nNorm: 17.95, NNZs: 1044767, Bias: -1.473787, T: 305514, Avg. loss: 0.035241\nTotal training time: 91.59 seconds.\n-- Epoch 100\nNorm: 17.97, NNZs: 1044767, Bias: -1.469052, T: 308600, Avg. loss: 0.035090\nTotal training time: 92.52 seconds.\n-- Epoch 1\nNorm: 63.57, NNZs: 395248, Bias: -0.776395, T: 3086, Avg. loss: 0.638981\nTotal training time: 0.85 seconds.\n-- Epoch 2\nNorm: 45.96, NNZs: 784434, Bias: -0.778416, T: 6172, Avg. loss: 0.368070\nTotal training time: 1.69 seconds.\n-- Epoch 3\nNorm: 35.18, NNZs: 794445, Bias: -0.844667, T: 9258, Avg. loss: 0.256250\nTotal training time: 2.53 seconds.\n-- Epoch 4\nNorm: 28.61, NNZs: 808485, Bias: -0.829537, T: 12344, Avg. loss: 0.196031\nTotal training time: 3.37 seconds.\n-- Epoch 5\nNorm: 24.51, NNZs: 812184, Bias: -0.816307, T: 15430, Avg. loss: 0.159101\nTotal training time: 4.21 seconds.\n-- Epoch 6\nNorm: 21.87, NNZs: 818614, Bias: -0.814460, T: 18516, Avg. loss: 0.134097\nTotal training time: 5.05 seconds.\n-- Epoch 7\nNorm: 20.16, NNZs: 823616, Bias: -0.808006, T: 21602, Avg. loss: 0.116260\nTotal training time: 5.89 seconds.\n-- Epoch 8\nNorm: 19.10, NNZs: 827316, Bias: -0.793359, T: 24688, Avg. loss: 0.102802\nTotal training time: 6.73 seconds.\n-- Epoch 9\nNorm: 18.41, NNZs: 829751, Bias: -0.791027, T: 27774, Avg. loss: 0.092364\nTotal training time: 7.58 seconds.\n-- Epoch 10\nNorm: 17.90, NNZs: 832835, Bias: -0.788420, T: 30860, Avg. 
loss: 0.084044\nTotal training time: 8.42 seconds.\n-- Epoch 11\nNorm: 17.48, NNZs: 833481, Bias: -0.776385, T: 33946, Avg. loss: 0.077169\nTotal training time: 9.27 seconds.\n-- Epoch 12\nNorm: 17.22, NNZs: 836799, Bias: -0.779732, T: 37032, Avg. loss: 0.071465\nTotal training time: 10.12 seconds.\n-- Epoch 13\nNorm: 16.94, NNZs: 855966, Bias: -0.778083, T: 40118, Avg. loss: 0.066569\nTotal training time: 10.97 seconds.\n-- Epoch 14\nNorm: 16.76, NNZs: 903481, Bias: -0.777494, T: 43204, Avg. loss: 0.062396\nTotal training time: 11.82 seconds.\n-- Epoch 15\nNorm: 16.63, NNZs: 920310, Bias: -0.775015, T: 46290, Avg. loss: 0.058831\nTotal training time: 12.68 seconds.\n-- Epoch 16\nNorm: 16.50, NNZs: 921408, Bias: -0.768577, T: 49376, Avg. loss: 0.055662\nTotal training time: 13.54 seconds.\n-- Epoch 17\nNorm: 16.40, NNZs: 931189, Bias: -0.773960, T: 52462, Avg. loss: 0.052890\nTotal training time: 14.40 seconds.\n-- Epoch 18\nNorm: 16.30, NNZs: 931992, Bias: -0.767719, T: 55548, Avg. loss: 0.050418\nTotal training time: 15.26 seconds.\n-- Epoch 19\nNorm: 16.22, NNZs: 932060, Bias: -0.769375, T: 58634, Avg. loss: 0.048202\nTotal training time: 16.11 seconds.\n-- Epoch 20\nNorm: 16.16, NNZs: 932395, Bias: -0.765379, T: 61720, Avg. loss: 0.046198\nTotal training time: 16.97 seconds.\n-- Epoch 21\nNorm: 16.10, NNZs: 932703, Bias: -0.768804, T: 64806, Avg. loss: 0.044394\nTotal training time: 17.83 seconds.\n-- Epoch 22\nNorm: 16.05, NNZs: 932892, Bias: -0.767373, T: 67892, Avg. loss: 0.042752\nTotal training time: 18.69 seconds.\n-- Epoch 23\nNorm: 16.00, NNZs: 933095, Bias: -0.764462, T: 70978, Avg. loss: 0.041241\nTotal training time: 19.54 seconds.\n-- Epoch 24\nNorm: 15.95, NNZs: 946498, Bias: -0.764921, T: 74064, Avg. loss: 0.039854\nTotal training time: 20.40 seconds.\n-- Epoch 25\nNorm: 15.92, NNZs: 949401, Bias: -0.766935, T: 77150, Avg. loss: 0.038589\nTotal training time: 21.27 seconds.\n-- Epoch 26\nNorm: 15.88, NNZs: 949444, Bias: -0.766333, T: 80236, Avg. loss: 0.037425\nTotal training time: 22.12 seconds.\n-- Epoch 27\nNorm: 15.86, NNZs: 949653, Bias: -0.764734, T: 83322, Avg. loss: 0.036339\nTotal training time: 22.98 seconds.\n-- Epoch 28\nNorm: 15.83, NNZs: 949663, Bias: -0.762676, T: 86408, Avg. loss: 0.035332\nTotal training time: 23.84 seconds.\n-- Epoch 29\nNorm: 15.81, NNZs: 949733, Bias: -0.763269, T: 89494, Avg. loss: 0.034394\nTotal training time: 24.70 seconds.\n-- Epoch 30\nNorm: 15.78, NNZs: 949810, Bias: -0.764355, T: 92580, Avg. loss: 0.033518\nTotal training time: 25.56 seconds.\n-- Epoch 31\nNorm: 15.75, NNZs: 949885, Bias: -0.759970, T: 95666, Avg. loss: 0.032691\nTotal training time: 26.42 seconds.\n-- Epoch 32\nNorm: 15.73, NNZs: 950035, Bias: -0.763623, T: 98752, Avg. loss: 0.031920\nTotal training time: 27.28 seconds.\n-- Epoch 33\nNorm: 15.71, NNZs: 950053, Bias: -0.762051, T: 101838, Avg. loss: 0.031196\nTotal training time: 28.14 seconds.\n-- Epoch 34\nNorm: 15.69, NNZs: 950110, Bias: -0.761077, T: 104924, Avg. loss: 0.030517\nTotal training time: 29.00 seconds.\n-- Epoch 35\nNorm: 15.68, NNZs: 959169, Bias: -0.759743, T: 108010, Avg. loss: 0.029873\nTotal training time: 29.86 seconds.\n-- Epoch 36\nNorm: 15.66, NNZs: 1005386, Bias: -0.760078, T: 111096, Avg. loss: 0.029262\nTotal training time: 30.73 seconds.\n-- Epoch 37\nNorm: 15.64, NNZs: 1005651, Bias: -0.757991, T: 114182, Avg. loss: 0.028684\nTotal training time: 31.59 seconds.\n-- Epoch 38\nNorm: 15.63, NNZs: 1005651, Bias: -0.758120, T: 117268, Avg. 
loss: 0.028140\nTotal training time: 32.45 seconds.\n-- Epoch 39\nNorm: 15.62, NNZs: 1005666, Bias: -0.757327, T: 120354, Avg. loss: 0.027625\nTotal training time: 33.30 seconds.\n-- Epoch 40\nNorm: 15.61, NNZs: 1006835, Bias: -0.755822, T: 123440, Avg. loss: 0.027133\nTotal training time: 34.17 seconds.\n-- Epoch 41\nNorm: 15.60, NNZs: 1006886, Bias: -0.755314, T: 126526, Avg. loss: 0.026662\nTotal training time: 35.04 seconds.\n-- Epoch 42\nNorm: 15.59, NNZs: 1006908, Bias: -0.758849, T: 129612, Avg. loss: 0.026216\nTotal training time: 35.90 seconds.\n-- Epoch 43\nNorm: 15.58, NNZs: 1006909, Bias: -0.758931, T: 132698, Avg. loss: 0.025786\nTotal training time: 36.76 seconds.\n-- Epoch 44\nNorm: 15.57, NNZs: 1006909, Bias: -0.755203, T: 135784, Avg. loss: 0.025381\nTotal training time: 37.62 seconds.\n-- Epoch 45\nNorm: 15.56, NNZs: 1006909, Bias: -0.755827, T: 138870, Avg. loss: 0.024993\nTotal training time: 38.48 seconds.\n-- Epoch 46\nNorm: 15.55, NNZs: 1006950, Bias: -0.756522, T: 141956, Avg. loss: 0.024620\nTotal training time: 39.60 seconds.\n-- Epoch 47\nNorm: 15.54, NNZs: 1006950, Bias: -0.755859, T: 145042, Avg. loss: 0.024263\nTotal training time: 40.46 seconds.\n-- Epoch 48\nNorm: 15.53, NNZs: 1006958, Bias: -0.757915, T: 148128, Avg. loss: 0.023923\nTotal training time: 41.32 seconds.\n-- Epoch 49\nNorm: 15.52, NNZs: 1006958, Bias: -0.755363, T: 151214, Avg. loss: 0.023595\nTotal training time: 42.19 seconds.\n-- Epoch 50\nNorm: 15.52, NNZs: 1006958, Bias: -0.756787, T: 154300, Avg. loss: 0.023282\nTotal training time: 43.06 seconds.\n-- Epoch 51\nNorm: 15.51, NNZs: 1007034, Bias: -0.752983, T: 157386, Avg. loss: 0.022976\nTotal training time: 43.92 seconds.\n-- Epoch 52\nNorm: 15.50, NNZs: 1007044, Bias: -0.754439, T: 160472, Avg. loss: 0.022684\nTotal training time: 44.78 seconds.\n-- Epoch 53\nNorm: 15.49, NNZs: 1007049, Bias: -0.756421, T: 163558, Avg. loss: 0.022403\nTotal training time: 45.65 seconds.\n-- Epoch 54\nNorm: 15.49, NNZs: 1007054, Bias: -0.755766, T: 166644, Avg. loss: 0.022135\nTotal training time: 46.51 seconds.\n-- Epoch 55\nNorm: 15.48, NNZs: 1007057, Bias: -0.754606, T: 169730, Avg. loss: 0.021873\nTotal training time: 47.38 seconds.\n-- Epoch 56\nNorm: 15.48, NNZs: 1007057, Bias: -0.753732, T: 172816, Avg. loss: 0.021623\nTotal training time: 48.24 seconds.\n-- Epoch 57\nNorm: 15.48, NNZs: 1007057, Bias: -0.755653, T: 175902, Avg. loss: 0.021382\nTotal training time: 49.11 seconds.\n-- Epoch 58\nNorm: 15.47, NNZs: 1007057, Bias: -0.755519, T: 178988, Avg. loss: 0.021149\nTotal training time: 49.98 seconds.\n-- Epoch 59\nNorm: 15.46, NNZs: 1007063, Bias: -0.756032, T: 182074, Avg. loss: 0.020920\nTotal training time: 50.86 seconds.\n-- Epoch 60\nNorm: 15.46, NNZs: 1007063, Bias: -0.755887, T: 185160, Avg. loss: 0.020702\nTotal training time: 51.72 seconds.\n-- Epoch 61\nNorm: 15.46, NNZs: 1007068, Bias: -0.754467, T: 188246, Avg. loss: 0.020492\nTotal training time: 52.58 seconds.\n-- Epoch 62\nNorm: 15.45, NNZs: 1007144, Bias: -0.755110, T: 191332, Avg. loss: 0.020285\nTotal training time: 53.45 seconds.\n-- Epoch 63\nNorm: 15.45, NNZs: 1007147, Bias: -0.754974, T: 194418, Avg. loss: 0.020086\nTotal training time: 54.32 seconds.\n-- Epoch 64\nNorm: 15.45, NNZs: 1007151, Bias: -0.754349, T: 197504, Avg. loss: 0.019894\nTotal training time: 55.20 seconds.\n-- Epoch 65\nNorm: 15.44, NNZs: 1007151, Bias: -0.754107, T: 200590, Avg. 
loss: 0.019709\nTotal training time: 56.07 seconds.\n-- Epoch 66\nNorm: 15.44, NNZs: 1007163, Bias: -0.754650, T: 203676, Avg. loss: 0.019529\nTotal training time: 56.94 seconds.\n-- Epoch 67\nNorm: 15.44, NNZs: 1007163, Bias: -0.753802, T: 206762, Avg. loss: 0.019353\nTotal training time: 57.80 seconds.\n-- Epoch 68\nNorm: 15.43, NNZs: 1008007, Bias: -0.753997, T: 209848, Avg. loss: 0.019183\nTotal training time: 58.68 seconds.\n-- Epoch 69\nNorm: 15.43, NNZs: 1008022, Bias: -0.754760, T: 212934, Avg. loss: 0.019016\nTotal training time: 59.55 seconds.\n-- Epoch 70\nNorm: 15.42, NNZs: 1008024, Bias: -0.755771, T: 216020, Avg. loss: 0.018854\nTotal training time: 60.41 seconds.\n-- Epoch 71\nNorm: 15.42, NNZs: 1008033, Bias: -0.752346, T: 219106, Avg. loss: 0.018698\nTotal training time: 61.28 seconds.\n-- Epoch 72\nNorm: 15.42, NNZs: 1008039, Bias: -0.754019, T: 222192, Avg. loss: 0.018547\nTotal training time: 62.15 seconds.\n-- Epoch 73\nNorm: 15.42, NNZs: 1008039, Bias: -0.752735, T: 225278, Avg. loss: 0.018400\nTotal training time: 63.01 seconds.\n-- Epoch 74\nNorm: 15.41, NNZs: 1008039, Bias: -0.753769, T: 228364, Avg. loss: 0.018257\nTotal training time: 63.88 seconds.\n-- Epoch 75\nNorm: 15.41, NNZs: 1008039, Bias: -0.752808, T: 231450, Avg. loss: 0.018117\nTotal training time: 64.75 seconds.\n-- Epoch 76\nNorm: 15.41, NNZs: 1008083, Bias: -0.753752, T: 234536, Avg. loss: 0.017980\nTotal training time: 65.62 seconds.\n-- Epoch 77\nNorm: 15.40, NNZs: 1008083, Bias: -0.754202, T: 237622, Avg. loss: 0.017847\nTotal training time: 66.49 seconds.\n-- Epoch 78\nNorm: 15.40, NNZs: 1008083, Bias: -0.754319, T: 240708, Avg. loss: 0.017717\nTotal training time: 67.36 seconds.\n-- Epoch 79\nNorm: 15.40, NNZs: 1008087, Bias: -0.752903, T: 243794, Avg. loss: 0.017591\nTotal training time: 68.23 seconds.\n-- Epoch 80\nNorm: 15.40, NNZs: 1008091, Bias: -0.752774, T: 246880, Avg. loss: 0.017468\nTotal training time: 69.10 seconds.\n-- Epoch 81\nNorm: 15.39, NNZs: 1008105, Bias: -0.753050, T: 249966, Avg. loss: 0.017348\nTotal training time: 69.97 seconds.\n-- Epoch 82\nNorm: 15.39, NNZs: 1008105, Bias: -0.751515, T: 253052, Avg. loss: 0.017230\nTotal training time: 70.84 seconds.\n-- Epoch 83\nNorm: 15.39, NNZs: 1008105, Bias: -0.753302, T: 256138, Avg. loss: 0.017116\nTotal training time: 71.71 seconds.\n-- Epoch 84\nNorm: 15.39, NNZs: 1008107, Bias: -0.753282, T: 259224, Avg. loss: 0.017004\nTotal training time: 72.59 seconds.\n-- Epoch 85\nNorm: 15.39, NNZs: 1008107, Bias: -0.752609, T: 262310, Avg. loss: 0.016895\nTotal training time: 73.47 seconds.\n-- Epoch 86\nNorm: 15.38, NNZs: 1008107, Bias: -0.752838, T: 265396, Avg. loss: 0.016789\nTotal training time: 74.34 seconds.\n-- Epoch 87\nNorm: 15.38, NNZs: 1008107, Bias: -0.753020, T: 268482, Avg. loss: 0.016684\nTotal training time: 75.21 seconds.\n-- Epoch 88\nNorm: 15.37, NNZs: 1008833, Bias: -0.753572, T: 271568, Avg. loss: 0.016581\nTotal training time: 76.09 seconds.\n-- Epoch 89\nNorm: 15.38, NNZs: 1008833, Bias: -0.752248, T: 274654, Avg. loss: 0.016481\nTotal training time: 76.96 seconds.\n-- Epoch 90\nNorm: 15.37, NNZs: 1008833, Bias: -0.752837, T: 277740, Avg. loss: 0.016385\nTotal training time: 77.83 seconds.\n-- Epoch 91\nNorm: 15.37, NNZs: 1008876, Bias: -0.753409, T: 280826, Avg. loss: 0.016289\nTotal training time: 78.70 seconds.\n-- Epoch 92\nNorm: 15.37, NNZs: 1008898, Bias: -0.753082, T: 283912, Avg. 
loss: 0.016194\nTotal training time: 79.57 seconds.\n-- Epoch 93\nNorm: 15.36, NNZs: 1008898, Bias: -0.752565, T: 286998, Avg. loss: 0.016103\nTotal training time: 80.45 seconds.\n-- Epoch 94\nNorm: 15.36, NNZs: 1008898, Bias: -0.751389, T: 290084, Avg. loss: 0.016014\nTotal training time: 81.32 seconds.\n-- Epoch 95\nNorm: 15.37, NNZs: 1008898, Bias: -0.751699, T: 293170, Avg. loss: 0.015926\nTotal training time: 82.19 seconds.\n-- Epoch 96\nNorm: 15.36, NNZs: 1008898, Bias: -0.751833, T: 296256, Avg. loss: 0.015841\nTotal training time: 83.06 seconds.\n-- Epoch 97\nNorm: 15.36, NNZs: 1008898, Bias: -0.751286, T: 299342, Avg. loss: 0.015757\nTotal training time: 83.94 seconds.\n-- Epoch 98\nNorm: 15.36, NNZs: 1008898, Bias: -0.751715, T: 302428, Avg. loss: 0.015676\nTotal training time: 84.81 seconds.\n-- Epoch 99\nNorm: 15.36, NNZs: 1008898, Bias: -0.751920, T: 305514, Avg. loss: 0.015595\nTotal training time: 85.68 seconds.\n-- Epoch 100\nNorm: 15.36, NNZs: 1008898, Bias: -0.751872, T: 308600, Avg. loss: 0.015517\nTotal training time: 86.55 seconds.\n-- Epoch 1\nNorm: 40.10, NNZs: 253763, Bias: -0.835905, T: 3086, Avg. loss: 0.225260\nTotal training time: 0.85 seconds.\n-- Epoch 2\nNorm: 26.44, NNZs: 364895, Bias: -0.813172, T: 6172, Avg. loss: 0.132161\nTotal training time: 1.70 seconds.\n-- Epoch 3\nNorm: 21.75, NNZs: 387910, Bias: -0.826581, T: 9258, Avg. loss: 0.097308\nTotal training time: 2.55 seconds.\n-- Epoch 4\nNorm: 18.06, NNZs: 392025, Bias: -0.822172, T: 12344, Avg. loss: 0.078356\nTotal training time: 3.40 seconds.\n-- Epoch 5\nNorm: 16.13, NNZs: 408847, Bias: -0.808067, T: 15430, Avg. loss: 0.065448\nTotal training time: 4.26 seconds.\n-- Epoch 6\nNorm: 14.45, NNZs: 420055, Bias: -0.824321, T: 18516, Avg. loss: 0.055781\nTotal training time: 5.11 seconds.\n-- Epoch 7\nNorm: 13.37, NNZs: 424487, Bias: -0.820839, T: 21602, Avg. loss: 0.048929\nTotal training time: 5.96 seconds.\n-- Epoch 8\nNorm: 12.59, NNZs: 428112, Bias: -0.818330, T: 24688, Avg. loss: 0.043673\nTotal training time: 6.82 seconds.\n-- Epoch 9\nNorm: 11.87, NNZs: 432880, Bias: -0.821291, T: 27774, Avg. loss: 0.039448\nTotal training time: 7.68 seconds.\n-- Epoch 10\nNorm: 11.53, NNZs: 441707, Bias: -0.821978, T: 30860, Avg. loss: 0.036118\nTotal training time: 8.54 seconds.\n-- Epoch 11\nNorm: 11.19, NNZs: 442421, Bias: -0.817799, T: 33946, Avg. loss: 0.033405\nTotal training time: 9.40 seconds.\n-- Epoch 12\nNorm: 10.86, NNZs: 443389, Bias: -0.824530, T: 37032, Avg. loss: 0.031131\nTotal training time: 10.27 seconds.\n-- Epoch 13\nNorm: 10.65, NNZs: 446551, Bias: -0.824829, T: 40118, Avg. loss: 0.029202\nTotal training time: 11.14 seconds.\n-- Epoch 14\nNorm: 10.54, NNZs: 448493, Bias: -0.823376, T: 43204, Avg. loss: 0.027526\nTotal training time: 12.01 seconds.\n-- Epoch 15\nNorm: 10.43, NNZs: 451076, Bias: -0.824225, T: 46290, Avg. loss: 0.026068\nTotal training time: 12.88 seconds.\n-- Epoch 16\nNorm: 10.30, NNZs: 451744, Bias: -0.825191, T: 49376, Avg. loss: 0.024813\nTotal training time: 13.75 seconds.\n-- Epoch 17\nNorm: 10.25, NNZs: 452668, Bias: -0.823042, T: 52462, Avg. loss: 0.023690\nTotal training time: 14.62 seconds.\n-- Epoch 18\nNorm: 10.19, NNZs: 456407, Bias: -0.826030, T: 55548, Avg. loss: 0.022672\nTotal training time: 15.49 seconds.\n-- Epoch 19\nNorm: 10.05, NNZs: 457537, Bias: -0.829505, T: 58634, Avg. loss: 0.021773\nTotal training time: 16.36 seconds.\n-- Epoch 20\nNorm: 9.99, NNZs: 457841, Bias: -0.828603, T: 61720, Avg. 
loss: 0.020959\nTotal training time: 17.23 seconds.\n-- Epoch 21\nNorm: 9.95, NNZs: 475715, Bias: -0.826494, T: 64806, Avg. loss: 0.020199\nTotal training time: 18.11 seconds.\n-- Epoch 22\nNorm: 9.89, NNZs: 475983, Bias: -0.828695, T: 67892, Avg. loss: 0.019534\nTotal training time: 18.98 seconds.\n-- Epoch 23\nNorm: 9.87, NNZs: 476850, Bias: -0.827320, T: 70978, Avg. loss: 0.018918\nTotal training time: 19.85 seconds.\n-- Epoch 24\nNorm: 9.83, NNZs: 477454, Bias: -0.830433, T: 74064, Avg. loss: 0.018358\nTotal training time: 20.72 seconds.\n-- Epoch 25\nNorm: 9.79, NNZs: 477699, Bias: -0.831531, T: 77150, Avg. loss: 0.017843\nTotal training time: 21.60 seconds.\n-- Epoch 26\nNorm: 9.82, NNZs: 478073, Bias: -0.829856, T: 80236, Avg. loss: 0.017365\nTotal training time: 22.47 seconds.\n-- Epoch 27\nNorm: 9.75, NNZs: 483454, Bias: -0.832732, T: 83322, Avg. loss: 0.016919\nTotal training time: 23.35 seconds.\n-- Epoch 28\nNorm: 9.79, NNZs: 483821, Bias: -0.828461, T: 86408, Avg. loss: 0.016503\nTotal training time: 24.22 seconds.\n-- Epoch 29\nNorm: 9.74, NNZs: 484181, Bias: -0.829501, T: 89494, Avg. loss: 0.016117\nTotal training time: 25.10 seconds.\n-- Epoch 30\nNorm: 9.72, NNZs: 484243, Bias: -0.831333, T: 92580, Avg. loss: 0.015759\nTotal training time: 25.97 seconds.\n-- Epoch 31\nNorm: 9.68, NNZs: 484243, Bias: -0.832424, T: 95666, Avg. loss: 0.015425\nTotal training time: 26.85 seconds.\n-- Epoch 32\nNorm: 9.68, NNZs: 489314, Bias: -0.831564, T: 98752, Avg. loss: 0.015110\nTotal training time: 27.72 seconds.\n-- Epoch 33\nNorm: 9.64, NNZs: 489379, Bias: -0.835034, T: 101838, Avg. loss: 0.014810\nTotal training time: 28.60 seconds.\n-- Epoch 34\nNorm: 9.63, NNZs: 489379, Bias: -0.833958, T: 104924, Avg. loss: 0.014529\nTotal training time: 29.47 seconds.\n-- Epoch 35\nNorm: 9.62, NNZs: 489981, Bias: -0.831326, T: 108010, Avg. loss: 0.014264\nTotal training time: 30.35 seconds.\n-- Epoch 36\nNorm: 9.61, NNZs: 492166, Bias: -0.832735, T: 111096, Avg. loss: 0.014015\nTotal training time: 31.23 seconds.\n-- Epoch 37\nNorm: 9.60, NNZs: 495259, Bias: -0.834880, T: 114182, Avg. loss: 0.013777\nTotal training time: 32.10 seconds.\n-- Epoch 38\nNorm: 9.61, NNZs: 495785, Bias: -0.832244, T: 117268, Avg. loss: 0.013551\nTotal training time: 32.98 seconds.\n-- Epoch 39\nNorm: 9.62, NNZs: 495785, Bias: -0.832558, T: 120354, Avg. loss: 0.013338\nTotal training time: 33.86 seconds.\n-- Epoch 40\nNorm: 9.59, NNZs: 496460, Bias: -0.832942, T: 123440, Avg. loss: 0.013135\nTotal training time: 34.74 seconds.\n-- Epoch 41\nNorm: 9.58, NNZs: 496460, Bias: -0.833895, T: 126526, Avg. loss: 0.012945\nTotal training time: 35.61 seconds.\n-- Epoch 42\nNorm: 9.52, NNZs: 497758, Bias: -0.837518, T: 129612, Avg. loss: 0.012759\nTotal training time: 36.49 seconds.\n-- Epoch 43\nNorm: 9.56, NNZs: 497968, Bias: -0.835748, T: 132698, Avg. loss: 0.012583\nTotal training time: 37.37 seconds.\n-- Epoch 44\nNorm: 9.54, NNZs: 498093, Bias: -0.834062, T: 135784, Avg. loss: 0.012417\nTotal training time: 38.24 seconds.\n-- Epoch 45\nNorm: 9.54, NNZs: 498971, Bias: -0.835798, T: 138870, Avg. loss: 0.012257\nTotal training time: 39.12 seconds.\n-- Epoch 46\nNorm: 9.53, NNZs: 499026, Bias: -0.836409, T: 141956, Avg. loss: 0.012103\nTotal training time: 40.00 seconds.\n-- Epoch 47\nNorm: 9.53, NNZs: 499452, Bias: -0.837006, T: 145042, Avg. loss: 0.011956\nTotal training time: 40.88 seconds.\n-- Epoch 48\nNorm: 9.50, NNZs: 499452, Bias: -0.838292, T: 148128, Avg. 
loss: 0.011814\nTotal training time: 41.76 seconds.\n-- Epoch 49\nNorm: 9.53, NNZs: 500843, Bias: -0.835409, T: 151214, Avg. loss: 0.011680\nTotal training time: 42.63 seconds.\n-- Epoch 50\nNorm: 9.51, NNZs: 502672, Bias: -0.836441, T: 154300, Avg. loss: 0.011546\nTotal training time: 43.51 seconds.\n-- Epoch 51\nNorm: 9.51, NNZs: 502672, Bias: -0.835973, T: 157386, Avg. loss: 0.011420\nTotal training time: 44.39 seconds.\n-- Epoch 52\nNorm: 9.48, NNZs: 502672, Bias: -0.836389, T: 160472, Avg. loss: 0.011299\nTotal training time: 45.26 seconds.\n-- Epoch 53\nNorm: 9.48, NNZs: 503926, Bias: -0.838026, T: 163558, Avg. loss: 0.011182\nTotal training time: 46.14 seconds.\n-- Epoch 54\nNorm: 9.51, NNZs: 503964, Bias: -0.835943, T: 166644, Avg. loss: 0.011070\nTotal training time: 47.02 seconds.\n-- Epoch 55\nNorm: 9.48, NNZs: 504623, Bias: -0.838228, T: 169730, Avg. loss: 0.010963\nTotal training time: 47.90 seconds.\n-- Epoch 56\nNorm: 9.46, NNZs: 504957, Bias: -0.837424, T: 172816, Avg. loss: 0.010860\nTotal training time: 48.78 seconds.\n-- Epoch 57\nNorm: 9.47, NNZs: 505059, Bias: -0.838279, T: 175902, Avg. loss: 0.010760\nTotal training time: 49.66 seconds.\n-- Epoch 58\nNorm: 9.48, NNZs: 505059, Bias: -0.837495, T: 178988, Avg. loss: 0.010663\nTotal training time: 50.54 seconds.\n-- Epoch 59\nNorm: 9.49, NNZs: 505059, Bias: -0.835158, T: 182074, Avg. loss: 0.010568\nTotal training time: 51.42 seconds.\n-- Epoch 60\nNorm: 9.46, NNZs: 505447, Bias: -0.839021, T: 185160, Avg. loss: 0.010475\nTotal training time: 52.30 seconds.\n-- Epoch 61\nNorm: 9.45, NNZs: 505661, Bias: -0.839263, T: 188246, Avg. loss: 0.010387\nTotal training time: 53.18 seconds.\n-- Epoch 62\nNorm: 9.45, NNZs: 505661, Bias: -0.839133, T: 191332, Avg. loss: 0.010303\nTotal training time: 54.06 seconds.\n-- Epoch 63\nNorm: 9.45, NNZs: 505661, Bias: -0.838781, T: 194418, Avg. loss: 0.010220\nTotal training time: 54.93 seconds.\n-- Epoch 64\nNorm: 9.43, NNZs: 505720, Bias: -0.840754, T: 197504, Avg. loss: 0.010139\nTotal training time: 55.81 seconds.\n-- Epoch 65\nNorm: 9.44, NNZs: 505776, Bias: -0.840571, T: 200590, Avg. loss: 0.010061\nTotal training time: 56.69 seconds.\n-- Epoch 66\nNorm: 9.44, NNZs: 505776, Bias: -0.837919, T: 203676, Avg. loss: 0.009986\nTotal training time: 57.56 seconds.\n-- Epoch 67\nNorm: 9.45, NNZs: 505776, Bias: -0.838456, T: 206762, Avg. loss: 0.009913\nTotal training time: 58.44 seconds.\n-- Epoch 68\nNorm: 9.43, NNZs: 505776, Bias: -0.840169, T: 209848, Avg. loss: 0.009842\nTotal training time: 59.32 seconds.\n-- Epoch 69\nNorm: 9.44, NNZs: 796718, Bias: -0.839952, T: 212934, Avg. loss: 0.009772\nTotal training time: 60.20 seconds.\n-- Epoch 70\nNorm: 9.43, NNZs: 796718, Bias: -0.839341, T: 216020, Avg. loss: 0.009705\nTotal training time: 61.07 seconds.\n-- Epoch 71\nNorm: 9.43, NNZs: 796718, Bias: -0.838209, T: 219106, Avg. loss: 0.009638\nTotal training time: 61.94 seconds.\n-- Epoch 72\nNorm: 9.44, NNZs: 796718, Bias: -0.839381, T: 222192, Avg. loss: 0.009576\nTotal training time: 62.82 seconds.\n-- Epoch 73\nNorm: 9.44, NNZs: 796718, Bias: -0.839684, T: 225278, Avg. loss: 0.009514\nTotal training time: 63.69 seconds.\n-- Epoch 74\nNorm: 9.41, NNZs: 796718, Bias: -0.841347, T: 228364, Avg. loss: 0.009454\nTotal training time: 64.57 seconds.\n-- Epoch 75\nNorm: 9.42, NNZs: 796718, Bias: -0.839861, T: 231450, Avg. loss: 0.009397\nTotal training time: 65.45 seconds.\n-- Epoch 76\nNorm: 9.42, NNZs: 796718, Bias: -0.840211, T: 234536, Avg. 
loss: 0.009341\nTotal training time: 66.32 seconds.\n-- Epoch 77\nNorm: 9.41, NNZs: 796718, Bias: -0.838824, T: 237622, Avg. loss: 0.009284\nTotal training time: 67.20 seconds.\n-- Epoch 78\nNorm: 9.41, NNZs: 796718, Bias: -0.839978, T: 240708, Avg. loss: 0.009230\nTotal training time: 68.08 seconds.\n-- Epoch 79\nNorm: 9.42, NNZs: 796718, Bias: -0.840776, T: 243794, Avg. loss: 0.009177\nTotal training time: 68.96 seconds.\n-- Epoch 80\nNorm: 9.42, NNZs: 796743, Bias: -0.839732, T: 246880, Avg. loss: 0.009125\nTotal training time: 69.84 seconds.\n-- Epoch 81\nNorm: 9.40, NNZs: 796743, Bias: -0.841977, T: 249966, Avg. loss: 0.009075\nTotal training time: 70.71 seconds.\n-- Epoch 82\nNorm: 9.40, NNZs: 796757, Bias: -0.841106, T: 253052, Avg. loss: 0.009026\nTotal training time: 71.58 seconds.\n-- Epoch 83\nNorm: 9.39, NNZs: 796757, Bias: -0.842521, T: 256138, Avg. loss: 0.008978\nTotal training time: 72.71 seconds.\n-- Epoch 84\nNorm: 9.40, NNZs: 796757, Bias: -0.840811, T: 259224, Avg. loss: 0.008932\nTotal training time: 73.59 seconds.\n-- Epoch 85\nNorm: 9.41, NNZs: 796757, Bias: -0.840719, T: 262310, Avg. loss: 0.008886\nTotal training time: 74.46 seconds.\n-- Epoch 86\nNorm: 9.40, NNZs: 796757, Bias: -0.841646, T: 265396, Avg. loss: 0.008841\nTotal training time: 75.34 seconds.\n-- Epoch 87\nNorm: 9.41, NNZs: 796757, Bias: -0.841524, T: 268482, Avg. loss: 0.008798\nTotal training time: 76.22 seconds.\n-- Epoch 88\nNorm: 9.40, NNZs: 797935, Bias: -0.840280, T: 271568, Avg. loss: 0.008754\nTotal training time: 77.09 seconds.\n-- Epoch 89\nNorm: 9.39, NNZs: 797935, Bias: -0.842498, T: 274654, Avg. loss: 0.008712\nTotal training time: 77.97 seconds.\n-- Epoch 90\nNorm: 9.39, NNZs: 797935, Bias: -0.842963, T: 277740, Avg. loss: 0.008672\nTotal training time: 78.85 seconds.\n-- Epoch 91\nNorm: 9.39, NNZs: 797935, Bias: -0.842045, T: 280826, Avg. loss: 0.008632\nTotal training time: 79.72 seconds.\n-- Epoch 92\nNorm: 9.39, NNZs: 797935, Bias: -0.842855, T: 283912, Avg. loss: 0.008593\nTotal training time: 80.60 seconds.\n-- Epoch 93\nNorm: 9.39, NNZs: 797935, Bias: -0.842017, T: 286998, Avg. loss: 0.008555\nTotal training time: 81.48 seconds.\n-- Epoch 94\nNorm: 9.39, NNZs: 797935, Bias: -0.842685, T: 290084, Avg. loss: 0.008518\nTotal training time: 82.35 seconds.\n-- Epoch 95\nNorm: 9.38, NNZs: 797935, Bias: -0.843959, T: 293170, Avg. loss: 0.008480\nTotal training time: 83.23 seconds.\n-- Epoch 96\nNorm: 9.39, NNZs: 797935, Bias: -0.843056, T: 296256, Avg. loss: 0.008444\nTotal training time: 84.11 seconds.\n-- Epoch 97\nNorm: 9.38, NNZs: 797935, Bias: -0.843356, T: 299342, Avg. loss: 0.008408\nTotal training time: 84.99 seconds.\n-- Epoch 98\nNorm: 9.38, NNZs: 797935, Bias: -0.842078, T: 302428, Avg. loss: 0.008374\nTotal training time: 85.86 seconds.\n-- Epoch 99\nNorm: 9.39, NNZs: 797935, Bias: -0.842275, T: 305514, Avg. loss: 0.008340\nTotal training time: 86.74 seconds.\n-- Epoch 100\nNorm: 9.38, NNZs: 797935, Bias: -0.842079, T: 308600, Avg. 
loss: 0.008307\nTotal training time: 87.61 seconds.\n" ], [ "test_pred = SGD.predict(X_test)", "_____no_output_____" ], [ "print test_pred\ntest_ids = np.load(\"../data/features/test_ids.npy\")\nprint test_ids\nwrite_predictions(test_pred, test_ids, \"../predictions/sgd_huber_100_big_tfidf.csv\")", "[10 5 8 ..., 10 8 8]\n['e5b875f7e584b29fd9e85c1f232956849aabcb311'\n '18abefbfb74285D709bcf665d594df11bf56e1984'\n '47cd5265b1fc52021c025452e084c405a0a03df1e' ...,\n '6abb75b149d8e39e30c8df2c19bfd96986f0e35b3'\n 'f0e968070037717da88665ab091ff2B4973528f30'\n '7b2459e11cac9341a00fa7bDcd5b17618a0b97dc8']\n" ], [ "from sklearn.linear_model import PassiveAggressiveClassifier\n# Try an SGDClassifier learning directly from the sparse matrix\nmodel = PassiveAggressiveClassifier(n_jobs=4, verbose=42, n_iter=20)\nmodel.fit(X,Y)", "-- Epoch 1\n-- Epoch 1\n-- Epoch 1\n-- Epoch 1\nNorm: 7.55, NNZs: 125613, Bias: -0.138381, T: 3086, Avg. loss: 0.046595\nTotal training time: 0.03 seconds.\nNorm: 5.78, NNZs: 164319, Bias: -0.112992, T: 3086, Avg. loss: 0.120186\nTotal training time: 0.04 seconds.\n-- Epoch 2\n-- Epoch 2\nNorm: 5.25, NNZs: 75659, Bias: -0.112442, T: 3086, Avg. loss: 0.035022\nTotal training time: 0.04 seconds.\n-- Epoch 2\nNorm: 9.22, NNZs: 132801, Bias: -0.179926, T: 6172, Avg. loss: 0.041148\nTotal training time: 0.06 seconds.\nNorm: 5.13, NNZs: 144209, Bias: -0.137939, T: 3086, Avg. loss: 0.042037\nTotal training time: 0.06 seconds.\n-- Epoch 3\n-- Epoch 2\nNorm: 8.61, NNZs: 182575, Bias: -0.142782, T: 6172, Avg. loss: 0.116928\nTotal training time: 0.07 seconds.\n-- Epoch 3\nNorm: 11.13, NNZs: 138897, Bias: -0.200858, T: 9258, Avg. loss: 0.038695Norm: 10.86, NNZs: 194803, Bias: -0.189226, T: 9258, Avg. loss: 0.114809\nTotal training time: 0.12 seconds.\n\nTotal training time: 0.12 seconds.\n-- Epoch 4\nNorm: 7.00, NNZs: 151584, Bias: -0.180608, T: 6172, Avg. loss: 0.037232\nTotal training time: 0.11 seconds.\n-- Epoch 4\nNorm: 7.32, NNZs: 155748, Bias: -0.132659, T: 6172, Avg. loss: 0.031583\nTotal training time: 0.12 seconds.\n-- Epoch 3\n-- Epoch 3\nNorm: 13.00, NNZs: 201283, Bias: -0.198497, T: 12344, Avg. loss: 0.112699\nTotal training time: 0.16 seconds.\n-- Epoch 5\nNorm: 12.44, NNZs: 140909, Bias: -0.209754, T: 12344, Avg. loss: 0.036497\nTotal training time: 0.17 seconds.\nNorm: 9.13, NNZs: 159582, Bias: -0.143230, T: 9258, Avg. loss: 0.029215\nTotal training time: 0.16 seconds.\n-- Epoch 5\n-- Epoch 4\nNorm: 7.98, NNZs: 159699, Bias: -0.207358, T: 9258, Avg. loss: 0.035670\nTotal training time: 0.17 seconds.\nNorm: 15.22, NNZs: 204819, Bias: -0.259705, T: 15430, Avg. loss: 0.111519\nTotal training time: 0.20 seconds.\nNorm: 11.08, NNZs: 169450, Bias: -0.181081, T: 12344, Avg. loss: 0.027951-- Epoch 4\n-- Epoch 6\n\nTotal training time: 0.21 seconds.\n-- Epoch 5\nNorm: 13.97, NNZs: 141832, Bias: -0.235442, T: 15430, Avg. loss: 0.034727\nTotal training time: 0.23 seconds.\n-- Epoch 6\nNorm: 12.23, NNZs: 170328, Bias: -0.191514, T: 15430, Avg. loss: 0.026484\nTotal training time: 0.25 seconds.\nNorm: 16.87, NNZs: 206813, Bias: -0.288808, T: 18516, Avg. loss: 0.110610\nTotal training time: 0.26 seconds.\nNorm: 9.22, NNZs: 172804, Bias: -0.264905, T: 12344, Avg. loss: 0.034566\nTotal training time: 0.25 seconds.\n-- Epoch 6\n-- Epoch 7\nNorm: 15.17, NNZs: 143938, Bias: -0.257578, T: 18516, Avg. loss: 0.033563\nTotal training time: 0.26 seconds.\n-- Epoch 7\n-- Epoch 5\nNorm: 13.37, NNZs: 172175, Bias: -0.217118, T: 18516, Avg. 
loss: 0.025669\nTotal training time: 0.28 seconds.\n-- Epoch 7\nNorm: 16.31, NNZs: 146074, Bias: -0.270363, T: 21602, Avg. loss: 0.032659\nTotal training time: 0.30 seconds.\n-- Epoch 8\nNorm: 18.79, NNZs: 210952, Bias: -0.344830, T: 21602, Avg. loss: 0.109655\nTotal training time: 0.33 seconds.\nNorm: 14.36, NNZs: 173116, Bias: -0.225061, T: 21602, Avg. loss: 0.024868-- Epoch 8\nNorm: 9.80, NNZs: 175408, Bias: -0.303223, T: 15430, Avg. loss: 0.033608\nTotal training time: 0.33 seconds.\nNorm: 17.19, NNZs: 146259, Bias: -0.288096, T: 24688, Avg. loss: 0.031612\nTotal training time: 0.34 seconds.\n-- Epoch 6\n\nTotal training time: 0.33 seconds.\n-- Epoch 9\n-- Epoch 8\nNorm: 20.46, NNZs: 213597, Bias: -0.360011, T: 24688, Avg. loss: 0.108777\nTotal training time: 0.36 seconds.\nNorm: 10.64, NNZs: 177735, Bias: -0.312725, T: 18516, Avg. loss: 0.033153\nTotal training time: 0.36 seconds.\n-- Epoch 9\n-- Epoch 7\nNorm: 18.31, NNZs: 146783, Bias: -0.300341, T: 27774, Avg. loss: 0.030977\nTotal training time: 0.38 seconds.\n-- Epoch 10\nNorm: 15.55, NNZs: 174001, Bias: -0.231881, T: 24688, Avg. loss: 0.024131\nTotal training time: 0.37 seconds.\n-- Epoch 9\nNorm: 22.15, NNZs: 214774, Bias: -0.381537, T: 27774, Avg. loss: 0.107818\nTotal training time: 0.40 seconds.\n-- Epoch 10\nNorm: 11.52, NNZs: 182346, Bias: -0.343389, T: 21602, Avg. loss: 0.032644\nTotal training time: 0.39 seconds.\n-- Epoch 8\nNorm: 19.36, NNZs: 147399, Bias: -0.302230, T: 30860, Avg. loss: 0.030288\nTotal training time: 0.42 seconds.\nNorm: 16.49, NNZs: 175029, Bias: -0.261609, T: 27774, Avg. loss: 0.023527\nTotal training time: 0.41 seconds.\n-- Epoch 11\nNorm: 23.96, NNZs: 215679, Bias: -0.404656, T: 30860, Avg. loss: 0.106963\nTotal training time: 0.43 seconds.\n-- Epoch 10\n-- Epoch 11\nNorm: 12.34, NNZs: 184478, Bias: -0.368994, T: 24688, Avg. loss: 0.032283\nTotal training time: 0.42 seconds.\n-- Epoch 9\nNorm: 20.51, NNZs: 147614, Bias: -0.328488, T: 33946, Avg. loss: 0.029753\nTotal training time: 0.46 seconds.\nNorm: 25.61, NNZs: 216396, Bias: -0.424627, T: 33946, Avg. loss: 0.106017\nTotal training time: 0.46 seconds.\n-- Epoch 12\nNorm: 17.28, NNZs: 176292, Bias: -0.269128, T: 30860, Avg. loss: 0.022978\nTotal training time: 0.45 seconds.\n-- Epoch 12\nNorm: 13.21, NNZs: 188577, Bias: -0.396467, T: 27774, Avg. loss: 0.031936\nTotal training time: 0.45 seconds.\n-- Epoch 11\n-- Epoch 10\nNorm: 27.13, NNZs: 217088, Bias: -0.433000, T: 37032, Avg. loss: 0.105353\nTotal training time: 0.49 seconds.\nNorm: 13.85, NNZs: 191080, Bias: -0.428029, T: 30860, Avg. loss: 0.031633\nTotal training time: 0.48 seconds.\nNorm: 21.27, NNZs: 147700, Bias: -0.331995, T: 37032, Avg. loss: 0.029054\nTotal training time: 0.49 seconds.\n-- Epoch 11\n-- Epoch 13\n-- Epoch 13\nNorm: 18.11, NNZs: 176759, Bias: -0.276906, T: 33946, Avg. loss: 0.022376\nTotal training time: 0.49 seconds.\n-- Epoch 12\nNorm: 14.42, NNZs: 191998, Bias: -0.460404, T: 33946, Avg. loss: 0.031251\nTotal training time: 0.51 seconds.\n-- Epoch 12\nNorm: 28.63, NNZs: 217402, Bias: -0.460138, T: 40118, Avg. loss: 0.104656\nTotal training time: 0.52 seconds.\n-- Epoch 14\nNorm: 22.05, NNZs: 147800, Bias: -0.346405, T: 40118, Avg. loss: 0.028445\nTotal training time: 0.53 seconds.\n-- Epoch 14\nNorm: 18.85, NNZs: 176852, Bias: -0.281412, T: 37032, Avg. loss: 0.021928\nTotal training time: 0.53 seconds.\n-- Epoch 13\nNorm: 15.01, NNZs: 193141, Bias: -0.466874, T: 37032, Avg. 
loss: 0.030943\nTotal training time: 0.54 seconds.\n-- Epoch 13\nNorm: 30.13, NNZs: 217759, Bias: -0.483775, T: 43204, Avg. loss: 0.103850\nTotal training time: 0.55 seconds.\n-- Epoch 15\nNorm: 22.92, NNZs: 147857, Bias: -0.361740, T: 43204, Avg. loss: 0.028006\nTotal training time: 0.57 seconds.\nNorm: 19.65, NNZs: 177130, Bias: -0.286754, T: 40118, Avg. loss: 0.021555\nTotal training time: 0.56 seconds.\n-- Epoch 15\n-- Epoch 14\nNorm: 15.80, NNZs: 194048, Bias: -0.488759, T: 40118, Avg. loss: 0.030693\nTotal training time: 0.57 seconds.\n-- Epoch 14\nNorm: 31.42, NNZs: 217935, Bias: -0.515990, T: 46290, Avg. loss: 0.103128\nTotal training time: 0.58 seconds.\n-- Epoch 16\nNorm: 16.43, NNZs: 194806, Bias: -0.516277, T: 43204, Avg. loss: 0.030533\nTotal training time: 0.60 seconds.\nNorm: 23.74, NNZs: 147997, Bias: -0.364666, T: 46290, Avg. loss: 0.027344\nTotal training time: 0.61 seconds.\n-- Epoch 15\n-- Epoch 16\nNorm: 20.28, NNZs: 177212, Bias: -0.280141, T: 43204, Avg. loss: 0.021002\nTotal training time: 0.61 seconds.\nNorm: 32.87, NNZs: 218037, Bias: -0.531316, T: 49376, Avg. loss: 0.102451\nTotal training time: 0.62 seconds.\n-- Epoch 15\n-- Epoch 17\nNorm: 17.05, NNZs: 195213, Bias: -0.538795, T: 46290, Avg. loss: 0.030362\nTotal training time: 0.64 seconds.\nNorm: 34.22, NNZs: 218445, Bias: -0.562803, T: 52462, Avg. loss: 0.101749\nTotal training time: 0.65 seconds.\n-- Epoch 16\nNorm: 24.41, NNZs: 148109, Bias: -0.376257, T: 49376, Avg. loss: 0.026785\nTotal training time: 0.65 seconds.\n-- Epoch 18\n-- Epoch 17\nNorm: 20.74, NNZs: 177619, Bias: -0.293214, T: 46290, Avg. loss: 0.020484\nTotal training time: 0.65 seconds.\n-- Epoch 16\nNorm: 35.50, NNZs: 218778, Bias: -0.571215, T: 55548, Avg. loss: 0.101158\nTotal training time: 0.67 seconds.\nNorm: 17.70, NNZs: 197336, Bias: -0.552810, T: 49376, Avg. loss: 0.030125\nTotal training time: 0.67 seconds.\n-- Epoch 19\n-- Epoch 17\nNorm: 25.19, NNZs: 148164, Bias: -0.393347, T: 52462, Avg. loss: 0.026394\nTotal training time: 0.69 seconds.\n-- Epoch 18\nNorm: 21.30, NNZs: 178484, Bias: -0.319611, T: 49376, Avg. loss: 0.020198\nTotal training time: 0.68 seconds.\n-- Epoch 17\nNorm: 36.84, NNZs: 219295, Bias: -0.626140, T: 58634, Avg. loss: 0.100501\nTotal training time: 0.71 seconds.\n-- Epoch 20\nNorm: 18.18, NNZs: 198148, Bias: -0.581376, T: 52462, Avg. loss: 0.030019\nTotal training time: 0.70 seconds.\n-- Epoch 18\nNorm: 25.88, NNZs: 148223, Bias: -0.407307, T: 55548, Avg. loss: 0.025934\nTotal training time: 0.72 seconds.\n-- Epoch 19\nNorm: 21.97, NNZs: 178569, Bias: -0.304032, T: 52462, Avg. loss: 0.019885\nTotal training time: 0.72 seconds.\n-- Epoch 18\nNorm: 37.98, NNZs: 219635, Bias: -0.651349, T: 61720, Avg. loss: 0.099810\nTotal training time: 0.73 seconds.\n-- Epoch 1\nNorm: 18.54, NNZs: 198513, Bias: -0.610919, T: 55548, Avg. loss: 0.029785\nTotal training time: 0.73 seconds.\n-- Epoch 19\nNorm: 26.50, NNZs: 148291, Bias: -0.408269, T: 58634, Avg. loss: 0.025503\nTotal training time: 0.76 seconds.\nNorm: 4.77, NNZs: 57103, Bias: -0.117284, T: 3086, Avg. loss: 0.047042\nTotal training time: 0.02 seconds.\n-- Epoch 2\nNorm: 22.58, NNZs: 178623, Bias: -0.314288, T: 55548, Avg. loss: 0.019605\nTotal training time: 0.76 seconds.\n-- Epoch 20\nNorm: 19.20, NNZs: 198869, Bias: -0.630483, T: 58634, Avg. loss: 0.029658\nTotal training time: 0.76 seconds.\n-- Epoch 19\n-- Epoch 20\nNorm: 7.52, NNZs: 62731, Bias: -0.160376, T: 6172, Avg. 
loss: 0.043775\nTotal training time: 0.05 seconds.\n-- Epoch 3\nNorm: 19.97, NNZs: 198990, Bias: -0.649799, T: 61720, Avg. loss: 0.029454\nTotal training time: 0.79 seconds.\nNorm: 27.22, NNZs: 148393, Bias: -0.416649, T: 61720, Avg. loss: 0.025107\nTotal training time: 0.80 seconds.\n-- Epoch 1\n-- Epoch 1\nNorm: 23.14, NNZs: 178688, Bias: -0.327487, T: 58634, Avg. loss: 0.019361\nTotal training time: 0.80 seconds.\n-- Epoch 20\nNorm: 9.66, NNZs: 72087, Bias: -0.207689, T: 9258, Avg. loss: 0.041709\nTotal training time: 0.08 seconds.\n" ], [ "test_pred = model.predict(X_test)", "_____no_output_____" ], [ "print test_pred\ntest_ids = np.load(\"../data/features/test_ids.npy\")\nprint test_ids\nwrite_predictions(test_pred, test_ids, \"../predictions/passive_basic.csv\")", "[10 8 8 ..., 10 8 8]\n['298379101ad17b816ffeb88e8ba9ce915b7a46fc9'\n 'cb56d8fd4accec76a5608a000d65711d1cfa9cc3e'\n '21ead3c92f31d801b1a887c1b0dc12d503f7204d6' ...,\n 'E96d65723d24aa91a9fa54f92754444f99491d2e1'\n '107e0ba3d61f2650b09ef02a30f0bab0b82c1933d'\n '16a64eeb54Ca49adf9608e2206db5aedc22038a09']\n" ], [ "", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5b0d1ebba55a88d6ab33ce903b4c175aea6e41
21,038
ipynb
Jupyter Notebook
notebooks/01-3_news-data-exploration.ipynb
stungkit/stock_trend_analysis
e9d3f2db19a9af93cc8dc55c2394ae88c1b3ee6e
[ "MIT" ]
7
2020-04-16T18:25:15.000Z
2022-02-20T03:57:31.000Z
notebooks/01-3_news-data-exploration.ipynb
stungkit/stock_trend_analysis
e9d3f2db19a9af93cc8dc55c2394ae88c1b3ee6e
[ "MIT" ]
4
2020-04-10T05:40:48.000Z
2022-01-13T01:40:24.000Z
notebooks/01-3_news-data-exploration.ipynb
stungkit/stock_trend_analysis
e9d3f2db19a9af93cc8dc55c2394ae88c1b3ee6e
[ "MIT" ]
4
2020-11-30T06:43:42.000Z
2021-03-12T05:42:13.000Z
44.383966
147
0.450423
[ [ [ "# Data Exploration News Data\n\nThis notebook will explore data sources for news data (especially short news ticker) for information mining using NLP and sentiment analysis.", "_____no_output_____" ] ], [ [ "import feedparser\nimport pandas as pd", "_____no_output_____" ], [ "d = feedparser.parse('https://www.ft.com/business-education?format=rss')\nfeed = pd.DataFrame(d['entries'])\nmeta = d['feed']", "_____no_output_____" ], [ "feed.head()", "_____no_output_____" ], [ "d = feedparser.parse('http://rss.cnn.com/rss/money_news_economy.rss')\nfeed = pd.DataFrame(d['entries'])\nmeta = d['feed']", "_____no_output_____" ], [ "feed.head()", "_____no_output_____" ] ], [ [ "## Data Parsing\n\nThis part outlines the ETL pipeline for the data to use for sentiment analysis", "_____no_output_____" ] ], [ [ "# TODO\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec5b12ad569bea46e1859214f4314bf2bfb31c8e
38,539
ipynb
Jupyter Notebook
step-functions-data-science-sdk/automate_model_retraining_workflow/automate_model_retraining_workflow.ipynb
phiamazon/amazon-sagemaker-examples
abf3d06d3ea21c5ec425344d517700338a620f8c
[ "Apache-2.0" ]
2,327
2020-03-01T09:47:34.000Z
2021-11-25T12:38:42.000Z
step-functions-data-science-sdk/automate_model_retraining_workflow/automate_model_retraining_workflow.ipynb
phiamazon/amazon-sagemaker-examples
abf3d06d3ea21c5ec425344d517700338a620f8c
[ "Apache-2.0" ]
209
2020-03-01T17:14:12.000Z
2021-11-08T20:35:42.000Z
step-functions-data-science-sdk/automate_model_retraining_workflow/automate_model_retraining_workflow.ipynb
phiamazon/amazon-sagemaker-examples
abf3d06d3ea21c5ec425344d517700338a620f8c
[ "Apache-2.0" ]
686
2020-03-03T17:24:51.000Z
2021-11-25T23:39:12.000Z
39.086207
646
0.602532
[ [ [ "# Automate Model Retraining & Deployment Using the AWS Step Functions Data Science SDK\n\n1. [Introduction](#Introduction)\n1. [Setup](#Setup)\n1. [Create Resources](#Create-Resources)\n1. [Build a Machine Learning Workflow](#Build-a-Machine-Learning-Workflow)\n1. [Run the Workflow](#Run-the-Workflow)\n1. [Clean Up](#Clean-Up)", "_____no_output_____" ], [ "## Introduction\n\nThis notebook describes how to use the AWS Step Functions Data Science SDK to create a machine learning model retraining workflow. The Step Functions SDK is an open source library that allows data scientists to easily create and execute machine learning workflows using AWS Step Functions and Amazon SageMaker. For more information, please see the following resources:\n* [AWS Step Functions](https://aws.amazon.com/step-functions/)\n* [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html)\n* [AWS Step Functions Data Science SDK](https://aws-step-functions-data-science-sdk.readthedocs.io)\n\nIn this notebook, we will use the SDK to create steps that capture and transform data using AWS Glue, encorporate this data into the training of a machine learning model, deploy the model to a SageMaker endpoint, link these steps together to create a workflow, and then execute the workflow in AWS Step Functions.", "_____no_output_____" ], [ "## Setup\n\nFirst, we'll need to install and load all the required modules. Then we'll create fine-grained IAM roles for the Lambda, Glue, and Step Functions resources that we will create. The IAM roles grant the services permissions within your AWS environment.", "_____no_output_____" ] ], [ [ "import sys\n!{sys.executable} -m pip install --upgrade stepfunctions", "_____no_output_____" ] ], [ [ "### Import the Required Modules", "_____no_output_____" ] ], [ [ "import uuid\nimport logging\nimport stepfunctions\nimport boto3\nimport sagemaker\n\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\nfrom sagemaker import s3_input\nfrom sagemaker.s3 import S3Uploader\nfrom stepfunctions import steps\nfrom stepfunctions.steps import TrainingStep, ModelStep\nfrom stepfunctions.inputs import ExecutionInput\nfrom stepfunctions.workflow import Workflow\n\nsession = sagemaker.Session()\nstepfunctions.set_stream_logger(level=logging.INFO)\n\nregion = boto3.Session().region_name\nbucket = session.default_bucket()\nid = uuid.uuid4().hex\n\n#Create a unique name for the AWS Glue job to be created. If you change the \n#default name, you may need to change the Step Functions execution role.\njob_name = 'glue-customer-churn-etl-{}'.format(id)\n\n#Create a unique name for the AWS Lambda function to be created. If you change\n#the default name, you may need to change the Step Functions execution role.\nfunction_name = 'query-training-status-{}'.format(id)", "_____no_output_____" ] ], [ [ "Next, we'll create fine-grained IAM roles for the Lambda, Glue, and Step Functions resources. The IAM roles grant the services permissions within your AWS environment.\n\n### Add permissions to your notebook role in IAM\n\nThe IAM role assumed by your notebook requires permission to create and run workflows in AWS Step Functions. If this notebook is running on a SageMaker notebook instance, do the following to provide IAM permissions to the notebook:\n\n1. Open the Amazon [SageMaker console](https://console.aws.amazon.com/sagemaker/). \n2. Select **Notebook instances** and choose the name of your notebook instance.\n3. 
Under **Permissions and encryption** select the role ARN to view the role on the IAM console.\n4. Copy and save the IAM role ARN for later use. \n5. Choose **Attach policies** and search for `AWSStepFunctionsFullAccess`.\n6. Select the check box next to `AWSStepFunctionsFullAccess` and choose **Attach policy**.\n\nWe also need to provide permissions that allow the notebook instance the ability to create an AWS Lambda function and AWS Glue job. We will edit the managed policy attached to our role directly to encorporate these specific permissions:\n\n1. Under **Permisions policies** expand the AmazonSageMaker-ExecutionPolicy-******** policy and choose **Edit policy**.\n2. Select **Add additional permissions**. Choose **IAM** for Service and **PassRole** for Actions.\n3. Under Resources, choose **Specific**. Select **Add ARN** and enter `query_training_status-role` for **Role name with path*** and choose **Add**. You will create this role later on in this notebook.\n4. Select **Add additional permissions** a second time. Choose **Lambda** for Service, **Write** for Access level, and **All resources** for Resources.\n5. Select **Add additional permissions** a final time. Choose **Glue** for Service, **Write** for Access level, and **All resources** for Resources.\n6. Choose **Review policy** and then **Save changes**.\n\nIf you are running this notebook outside of SageMaker, the SDK will use your configured AWS CLI configuration. For more information, see [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html).", "_____no_output_____" ], [ "Next, let's create an execution role in IAM for Step Functions. \n\n### Create an Execution Role for Step Functions\n\nYour Step Functions workflow requires an IAM role to interact with other services in your AWS environment. \n\n1. Go to the [IAM console](https://console.aws.amazon.com/iam/).\n2. Select **Roles** and then **Create role**.\n3. Under **Choose the service that will use this role** select **Step Functions**.\n4. Choose **Next** until you can enter a **Role name**.\n5. Enter a name such as `StepFunctionsWorkflowExecutionRole` and then select **Create role**.\n\nNext, create and attach a policy to the role you created. As a best practice, the following steps will attach a policy that only provides access to the specific resources and actions needed for this solution.\n\n1. Under the **Permissions** tab, click **Attach policies** and then **Create policy**.\n2. 
Enter the following in the **JSON** tab:\n\n```json\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": \"iam:PassRole\",\n \"Resource\": \"NOTEBOOK_ROLE_ARN\",\n \"Condition\": {\n \"StringEquals\": {\n \"iam:PassedToService\": \"sagemaker.amazonaws.com\"\n }\n }\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"sagemaker:CreateModel\",\n \"sagemaker:DeleteEndpointConfig\",\n \"sagemaker:DescribeTrainingJob\",\n \"sagemaker:CreateEndpoint\",\n \"sagemaker:StopTrainingJob\",\n \"sagemaker:CreateTrainingJob\",\n \"sagemaker:UpdateEndpoint\",\n \"sagemaker:CreateEndpointConfig\",\n \"sagemaker:DeleteEndpoint\"\n ],\n \"Resource\": [\n \"arn:aws:sagemaker:*:*:*\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"events:DescribeRule\",\n \"events:PutRule\",\n \"events:PutTargets\"\n ],\n \"Resource\": [\n \"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"lambda:InvokeFunction\"\n ],\n \"Resource\": [\n \"arn:aws:lambda:*:*:function:query-training-status*\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"glue:StartJobRun\",\n \"glue:GetJobRun\",\n \"glue:BatchStopJobRun\",\n \"glue:GetJobRuns\"\n ],\n \"Resource\": \"arn:aws:glue:*:*:job/glue-customer-churn-etl*\"\n }\n ]\n}\n```\n\n3. Replace **NOTEBOOK_ROLE_ARN** with the ARN for your notebook that you created in the previous step.\n4. Choose **Review policy** and give the policy a name such as `StepFunctionsWorkflowExecutionPolicy`.\n5. Choose **Create policy**.\n6. Select **Roles** and search for your `StepFunctionsWorkflowExecutionRole` role.\n7. Under the **Permissions** tab, click **Attach policies**.\n8. Search for your newly created `StepFunctionsWorkflowExecutionPolicy` policy and select the check box next to it.\n9. Choose **Attach policy**. You will then be redirected to the details page for the role.\n10. Copy the StepFunctionsWorkflowExecutionRole **Role ARN** at the top of the Summary.", "_____no_output_____" ], [ "### Configure Execution Roles", "_____no_output_____" ] ], [ [ "# paste the StepFunctionsWorkflowExecutionRole ARN from above\nworkflow_execution_role = ''\n\n# SageMaker Execution Role\n# You can use sagemaker.get_execution_role() if running inside sagemaker's notebook instance\nsagemaker_execution_role = sagemaker.get_execution_role() #Replace with ARN if not in an AWS SageMaker notebook", "_____no_output_____" ] ], [ [ "#### Create a Glue IAM Role\nYou need to create an IAM role so that you can create and execute an AWS Glue Job on your data in Amazon S3.\n\n1. Go to the [IAM console](https://console.aws.amazon.com/iam/).\n2. Select **Roles** and then **Create role**.\n3. Under **Choose the service that will use this role** select **Glue**.\n4. Choose **Next** until you can enter a **Role name**.\n5. Enter a name such as `AWS-Glue-S3-Bucket-Access` and then select **Create role**.\n\nNext, create and attach a policy to the role you created. The following steps attach a managed policy that provides Glue access to the specific S3 bucket holding your data.\n\n1. Under the **Permissions** tab, click **Attach policies** and then **Create policy**.\n2. 
Enter the following in the **JSON** tab:\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"ListObjectsInBucket\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\"s3:ListBucket\"],\n            \"Resource\": [\"arn:aws:s3:::BUCKET-NAME\"]\n        },\n        {\n            \"Sid\": \"AllObjectActions\",\n            \"Effect\": \"Allow\",\n            \"Action\": \"s3:*Object\",\n            \"Resource\": [\"arn:aws:s3:::BUCKET-NAME/*\"]\n        }\n    ]\n}\n```\n\n3. Run the next cell (below) to retrieve the specific **S3 bucket name** that we will grant permissions to.", "_____no_output_____" ] ], [ [ "session = sagemaker.Session()\nbucket = session.default_bucket()\nprint(bucket)", "_____no_output_____" ] ], [ [ "4. Copy the output of the above cell and replace the **two occurrences** of **BUCKET-NAME** in the JSON text that you entered.\n5. Choose **Review policy** and give the policy a name such as `S3BucketAccessPolicy`.\n6. Choose **Create policy**.\n7. Select **Roles**, then search for and select your `AWS-Glue-S3-Bucket-Access` role.\n8. Under the **Permissions** tab, click **Attach policies**.\n9. Search for your newly created `S3BucketAccessPolicy` policy and select the check box next to it.\n10. Choose **Attach policy**. You will then be redirected to the details page for the role.\n11. Copy the **Role ARN** at the top of the Summary tab.", "_____no_output_____" ] ], [ [ "# paste the AWS-Glue-S3-Bucket-Access role ARN from above\nglue_role = ''", "_____no_output_____" ] ], [ [ "#### Create a Lambda IAM Role\nYou also need to create an IAM role so that you can create and execute an AWS Lambda function stored in Amazon S3.\n\n1. Go to the [IAM console](https://console.aws.amazon.com/iam/).\n2. Select **Roles** and then **Create role**.\n3. Under **Choose the service that will use this role** select **Lambda**.\n4. Choose **Next** until you can enter a **Role name**.\n5. Enter a name such as `query_training_status-role` and then select **Create role**.\n\nNext, attach policies to the role you created. The following steps attach policies that provide Lambda access to S3 and read-only access to SageMaker.\n\n1. Under the **Permissions** tab, click **Attach Policies**.\n2. In the search box, type **SageMaker** and select **AmazonSageMakerReadOnly** from the populated list.\n3. In the search box type **AWSLambda** and select **AWSLambdaBasicExecutionRole** from the populated list.\n4. Choose **Attach policy**. You will then be redirected to the details page for the role.\n5. Copy the **Role ARN** at the top of the **Summary**.\n", "_____no_output_____" ] ], [ [ "# paste the query_training_status-role role ARN from above\nlambda_role = ''", "_____no_output_____" ] ], [ [ "### Prepare the Dataset\nThis notebook uses the XGBoost algorithm to automate the classification of unhappy customers for telecommunication service providers. The goal is to identify customers who may cancel their service soon so that you can entice them to stay. This is known as customer churn prediction.\n\nThe dataset we use is publicly available and was mentioned in the book [Discovering Knowledge in Data](https://www.amazon.com/dp/0470908742/) by Daniel T. Larose. 
It is attributed by the author to the University of California Irvine Repository of Machine Learning Datasets.", "_____no_output_____" ] ], [ [ "project_name = 'ml_deploy'\n\ndata_source = S3Uploader.upload(local_path='./data/customer-churn.csv',\n desired_s3_uri='s3://{}/{}'.format(bucket, project_name),\n session=session)\n\ntrain_prefix = 'train'\nval_prefix = 'validation'\n\ntrain_data = 's3://{}/{}/{}/'.format(bucket, project_name, train_prefix)\nvalidation_data = 's3://{}/{}/{}/'.format(bucket, project_name, val_prefix)", "_____no_output_____" ] ], [ [ "## Create Resources\nIn the following steps we'll create the Glue job and Lambda function that are called from the Step Functions workflow.", "_____no_output_____" ], [ "### Create the AWS Glue Job", "_____no_output_____" ] ], [ [ "glue_script_location = S3Uploader.upload(local_path='./code/glue_etl.py',\n desired_s3_uri='s3://{}/{}'.format(bucket, project_name),\n session=session)\nglue_client = boto3.client('glue')\n\nresponse = glue_client.create_job(\n Name=job_name,\n Description='PySpark job to extract the data and split in to training and validation data sets',\n Role=glue_role, # you can pass your existing AWS Glue role here if you have used Glue before\n ExecutionProperty={\n 'MaxConcurrentRuns': 2\n },\n Command={\n 'Name': 'glueetl',\n 'ScriptLocation': glue_script_location,\n 'PythonVersion': '3'\n },\n DefaultArguments={\n '--job-language': 'python'\n },\n GlueVersion='1.0',\n WorkerType='Standard',\n NumberOfWorkers=2,\n Timeout=60\n)", "_____no_output_____" ] ], [ [ "### Create the AWS Lambda Function", "_____no_output_____" ] ], [ [ "import zipfile\nzip_name = 'query_training_status.zip'\nlambda_source_code = './code/query_training_status.py'\n\nzf = zipfile.ZipFile(zip_name, mode='w')\nzf.write(lambda_source_code, arcname=lambda_source_code.split('/')[-1])\nzf.close()\n\n\nS3Uploader.upload(local_path=zip_name, \n desired_s3_uri='s3://{}/{}'.format(bucket, project_name),\n session=session)", "_____no_output_____" ], [ "lambda_client = boto3.client('lambda')\n\nresponse = lambda_client.create_function(\n FunctionName=function_name,\n Runtime='python3.7',\n Role=lambda_role,\n Handler='query_training_status.lambda_handler',\n Code={\n 'S3Bucket': bucket,\n 'S3Key': '{}/{}'.format(project_name, zip_name)\n },\n Description='Queries a SageMaker training job and return the results.',\n Timeout=15,\n MemorySize=128\n)", "_____no_output_____" ] ], [ [ "### Configure the AWS SageMaker Estimator", "_____no_output_____" ] ], [ [ "container = get_image_uri(region, 'xgboost')\n\nxgb = sagemaker.estimator.Estimator(container,\n sagemaker_execution_role, \n train_instance_count=1, \n train_instance_type='ml.m4.xlarge',\n output_path='s3://{}/{}/output'.format(bucket, project_name))\n\nxgb.set_hyperparameters(max_depth=5,\n eta=0.2,\n gamma=4,\n min_child_weight=6,\n subsample=0.8,\n silent=0,\n objective='binary:logistic',\n eval_metric='error',\n num_round=100)", "_____no_output_____" ] ], [ [ "\n## Build a Machine Learning Workflow", "_____no_output_____" ], [ "You can use a state machine workflow to create a model retraining pipeline. The AWS Data Science Workflows SDK provides several AWS SageMaker workflow steps that you can use to construct an ML pipeline. 
In this tutorial you will create the following steps:\n\n* [**ETLStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.GlueStartJobRunStep) - Starts an AWS Glue job to extract the latest data from our source database and prepare our data.\n* [**TrainingStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) - Creates the training step and passes the defined estimator.\n* [**ModelStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) - Creates a model in SageMaker using the artifacts created during the TrainingStep.\n* [**LambdaStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.LambdaStep) - Creates the task state step within our workflow that calls a Lambda function.\n* [**ChoiceStateStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/states.html#stepfunctions.steps.states.Choice) - Creates the choice state step within our workflow.\n* [**EndpointConfigStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) - Creates the endpoint config step to define the new configuration for our endpoint.\n* [**EndpointStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointStep) - Creates the endpoint step to update our model endpoint.\n* [**FailStateStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/states.html#stepfunctions.steps.states.Fail) - Creates the fail state step within our workflow.", "_____no_output_____" ] ], [ [ "# SageMaker expects unique names for each job, model and endpoint. \n# If these names are not unique the execution will fail.\nexecution_input = ExecutionInput(schema={\n    'TrainingJobName': str,\n    'GlueJobName': str,\n    'ModelName': str,\n    'EndpointName': str,\n    'LambdaFunctionName': str\n})", "_____no_output_____" ] ], [ [ "### Create an ETL step with AWS Glue\nIn the following cell, we create a Glue step that runs an AWS Glue job. The Glue job extracts the latest data from our source database, removes unnecessary columns, splits the data into training and validation sets, and saves the data to CSV format in S3. Glue is performing this extraction, transformation, and load (ETL) in a serverless fashion, so there are no compute resources to configure and manage. See the [GlueStartJobRunStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.GlueStartJobRunStep) Compute step in the AWS Step Functions Data Science SDK documentation.", "_____no_output_____" ] ], [ [ "etl_step = steps.GlueStartJobRunStep(\n    'Extract, Transform, Load',\n    parameters={\"JobName\": execution_input['GlueJobName'],\n                \"Arguments\":{\n                    '--S3_SOURCE': data_source,\n                    '--S3_DEST': 's3a://{}/{}/'.format(bucket, project_name),\n                    '--TRAIN_KEY': train_prefix + '/',\n                    '--VAL_KEY': val_prefix +'/'}\n               }\n)", "_____no_output_____" ] ], [ [ "### Create a SageMaker Training Step \n\nIn the following cell, we create the training step and pass the estimator we defined above. 
See [TrainingStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) in the AWS Step Functions Data Science SDK documentation to learn more.", "_____no_output_____" ] ], [ [ "training_step = steps.TrainingStep(\n    'Model Training', \n    estimator=xgb,\n    data={\n        'train': s3_input(train_data, content_type='csv'),\n        'validation': s3_input(validation_data, content_type='csv')\n    },\n    job_name=execution_input['TrainingJobName'],\n    wait_for_completion=True\n)", "_____no_output_____" ] ], [ [ "### Create a Model Step \n\nIn the following cell, we define a model step that will create a model in Amazon SageMaker using the artifacts created during the TrainingStep. See [ModelStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) in the AWS Step Functions Data Science SDK documentation to learn more.\n\nThe model creation step typically follows the training step. The Step Functions SDK provides the [get_expected_model](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep.get_expected_model) method in the TrainingStep class to provide a reference for the trained model artifacts. Please note that this method is only useful when the ModelStep directly follows the TrainingStep.", "_____no_output_____" ] ], [ [ "model_step = steps.ModelStep(\n    'Save Model',\n    model=training_step.get_expected_model(),\n    model_name=execution_input['ModelName'],\n    result_path='$.ModelStepResults'\n)", "_____no_output_____" ] ], [ [ "### Create a Lambda Step\nIn the following cell, we define a lambda step that will invoke the previously created lambda function as part of our Step Function workflow. See [LambdaStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.LambdaStep) in the AWS Step Functions Data Science SDK documentation to learn more.", "_____no_output_____" ] ], [ [ "lambda_step = steps.compute.LambdaStep(\n    'Query Training Results',\n    parameters={  \n        \"FunctionName\": execution_input['LambdaFunctionName'],\n        'Payload':{\n            \"TrainingJobName.$\": '$.TrainingJobName'\n        }\n    }\n)", "_____no_output_____" ] ], [ [ "### Create a Choice State Step \nIn the following cell, we create a choice step in order to build a dynamic workflow. This choice step branches based on the results of our SageMaker training step: did the training job fail or should the model be saved and the endpoint be updated? We will add specific rules to this choice step later on in section 8 of this notebook.", "_____no_output_____" ] ], [ [ "check_accuracy_step = steps.states.Choice(\n    'Accuracy > 90%'\n)", "_____no_output_____" ] ], [ [ "### Create an Endpoint Configuration Step\nIn the following cell we create an endpoint configuration step. 
See [EndpointConfigStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) in the AWS Step Functions Data Science SDK documentation to learn more.", "_____no_output_____" ] ], [ [ "endpoint_config_step = steps.EndpointConfigStep(\n    \"Create Model Endpoint Config\",\n    endpoint_config_name=execution_input['ModelName'],\n    model_name=execution_input['ModelName'],\n    initial_instance_count=1,\n    instance_type='ml.m4.xlarge'\n)", "_____no_output_____" ] ], [ [ "### Update the Model Endpoint Step\nIn the following cell, we create the Endpoint step to deploy the new model as a managed API endpoint, updating an existing SageMaker endpoint if our choice state is successful.", "_____no_output_____" ] ], [ [ "endpoint_step = steps.EndpointStep(\n    'Update Model Endpoint',\n    endpoint_name=execution_input['EndpointName'],\n    endpoint_config_name=execution_input['ModelName'],\n    update=False\n)", "_____no_output_____" ] ], [ [ "### Create the Fail State Step\nIn addition, we create a Fail step which proceeds from our choice state if the validation accuracy of our model is lower than the threshold we define. See [FailStateStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/states.html#stepfunctions.steps.states.Fail) in the AWS Step Functions Data Science SDK documentation to learn more.", "_____no_output_____" ] ], [ [ "fail_step = steps.states.Fail(\n    'Model Accuracy Too Low',\n    comment='Validation accuracy lower than threshold'\n)", "_____no_output_____" ] ], [ [ "### Add Rules to Choice State\nIn the cells below, we add a threshold rule to our choice state. Therefore, if the validation accuracy of our model is below 0.90, we move to the Fail State. If the validation accuracy of our model is above 0.90, we proceed to the endpoint configuration step and the subsequent endpoint update. See [here](https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst) for more information on how XGBoost calculates classification error.\n\nFor binary classification problems the XGBoost algorithm defines the model error as: \n\n\\begin{equation*}\n\\frac{incorrect\\:predictions}{total\\:number\\:of\\:predictions}\n\\end{equation*}\n\nTo achieve an accuracy of 90%, we need error < 0.10.", "_____no_output_____" ] ], [ [ "threshold_rule = steps.choice_rule.ChoiceRule.NumericLessThan(variable=lambda_step.output()['Payload']['trainingMetrics'][0]['Value'], value=.1)\n\ncheck_accuracy_step.add_choice(rule=threshold_rule, next_step=endpoint_config_step)\ncheck_accuracy_step.default_choice(next_step=fail_step)", "_____no_output_____" ] ], [ [ "### Link all the Steps Together\nFinally, create your workflow definition by chaining all of the steps together that we've created. 
See [Chain](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.states.Chain) in the AWS Step Functions Data Science SDK documentation to learn more.", "_____no_output_____" ] ], [ [ "endpoint_config_step.next(endpoint_step)", "_____no_output_____" ], [ "workflow_definition = steps.Chain([\n etl_step,\n training_step,\n model_step,\n lambda_step,\n check_accuracy_step\n])", "_____no_output_____" ] ], [ [ "## Run the Workflow\nCreate your workflow using the workflow definition above, and render the graph with [render_graph](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.render_graph):", "_____no_output_____" ] ], [ [ "workflow = Workflow(\n name='MyInferenceRoutine_{}'.format(id),\n definition=workflow_definition,\n role=workflow_execution_role,\n execution_input=execution_input\n)", "_____no_output_____" ], [ "workflow.render_graph()", "_____no_output_____" ] ], [ [ "Create the workflow in AWS Step Functions with [create](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.create):", "_____no_output_____" ] ], [ [ "workflow.create()", "_____no_output_____" ] ], [ [ "Run the workflow with [execute](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.execute):", "_____no_output_____" ] ], [ [ "execution = workflow.execute(\n inputs={\n 'TrainingJobName': 'regression-{}'.format(id), # Each Sagemaker Job requires a unique name,\n 'GlueJobName': job_name,\n 'ModelName': 'CustomerChurn-{}'.format(id), # Each Model requires a unique name,\n 'EndpointName': 'CustomerChurn', # Each Endpoint requires a unique name\n 'LambdaFunctionName': function_name\n }\n)", "_____no_output_____" ] ], [ [ "Render workflow progress with the [render_progress](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.render_progress). This generates a snapshot of the current state of your workflow as it executes. This is a static image therefore you must run the cell again to check progress:", "_____no_output_____" ] ], [ [ "execution.render_progress()", "_____no_output_____" ] ], [ [ "Use [list_events](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.list_events) to list all events in the workflow execution:", "_____no_output_____" ] ], [ [ "execution.list_events(html=True)", "_____no_output_____" ] ], [ [ "Use [list_executions](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_executions) to list all executions for a specific workflow:", "_____no_output_____" ] ], [ [ "workflow.list_executions(html=True)", "_____no_output_____" ] ], [ [ "Use [list_workflows](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_workflows) to list all workflows in your AWS account:", "_____no_output_____" ] ], [ [ "Workflow.list_workflows(html=True)", "_____no_output_____" ] ], [ [ "## Clean Up\nWhen you are done, make sure to clean up your AWS account by deleting resources you won't be reusing. 
Uncomment the code below and run the cell to delete the Glue job, Lambda function, and Step Function.", "_____no_output_____" ] ], [ [ "#lambda_client.delete_function(FunctionName=function_name)\n#glue_client.delete_job(JobName=job_name)\n#workflow.delete()", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5b13a2cedce64242291b0b9fb0c8e111ac56c3
679,920
ipynb
Jupyter Notebook
code/Chapter 02 - Network Size and Effective Information.ipynb
Liyubov/einet
4adc3f7114609e56b0ee17c515c3292915e5fa1b
[ "MIT" ]
1
2019-07-24T07:58:36.000Z
2019-07-24T07:58:36.000Z
code/Chapter 02 - Network Size and Effective Information.ipynb
Gaurangit/einet
dd24df9fe0335904f5c12a360ae5f258a02b9d9c
[ "MIT" ]
null
null
null
code/Chapter 02 - Network Size and Effective Information.ipynb
Gaurangit/einet
dd24df9fe0335904f5c12a360ae5f258a02b9d9c
[ "MIT" ]
null
null
null
667.242395
193,628
0.942943
[ [ [ "import networkx as nx\nimport numpy as np\nfrom ei_net import *\nimport matplotlib.pyplot as plt\n\n%matplotlib inline", "_____no_output_____" ], [ "##########################################\n############ PLOTTING SETUP ##############\nEI_cmap = \"Greys\"\nwhere_to_save_pngs = \"../figs/pngs/\"\nwhere_to_save_pdfs = \"../figs/pdfs/\"\nsave = True\nplt.rc('axes', axisbelow=True)\n##########################################\n##########################################", "_____no_output_____" ] ], [ [ "# Uncertainty and causal emergence in complex networks", "_____no_output_____" ], [ "# Chapter 02: Network Size and Effective Information\n\n## Random Networks\nWe first examined the effective information of networks with different sizes and structures, asking basic questions about the relationship between a network's $EI$ to its size, number of edges, and network structure. These inquiries eventually allow for the exhaustive classification and quantification of different causal structures. For simplicity and generality, we start by examining the $EI$ in undirected networks.\n\nIt is perhaps intuitive that the $EI$ of a network will increase as the network grows in size. The upper bound of $EI$ in a network of $N$ nodes is log$_2(N)$, and adding more nodes should also increase the available repertoire of counterfactuals or states, which should in turn increase the informativeness of the causal structure. While we describe multiple classes of networks where $EI$ grows as a function of network size, this is not always so.\n\nHere, we show a comparison between the number of nodes in a network and that network's $EI$, using two canonical network structures---Erdös-Rényi (ER) random graphs (Erdös & Rényi, 1959; Bollobas, 1984) and Barabási-Albert (BA) networks (Barabasi & Albert, 1999). As both classes of networks grow in size, the BA networks continue increasing in $EI$. Interestingly, the $EI$ in ER networks converges to a value of $-\\text{log}_2(p)$, where $p$ is the probability that any two nodes in the network will be connected\n____________________________________________________________________________\n\n## Upper and Lower Limits to Effective Information in Networks\nThe fixed information in random causal structures is fundamental to the notion of causal structures as networks. After a certain point, a causal structure that is grown randomly does not gain information as its size increase. Its $EI$ is dominated solely by the probability that any two nodes will be connected, $p$, such that the $EI$ of a random network will be $-\\text{log}_2(p)$. This is not the case in Barabási-Albert (BA) networks. Such a key difference suggests there is something intrinsic to the topology of an organized network where each new node contributes to minimizing uncertainty. That is, $EI$ increases as a function of $N$ if it was created under rules which evince structure (like a BA network), whereas for a network with a random topology it does not.\n\nHere, we will see an important relationship between the $EI$ and a network's density. Sparse, connected networks offer the most certainty about cause and effects and therefore will likely have higher $EI$. As the network becomes more dense, however, we see that the $EI$ drops off. This shift occurs in ER networks at approximately $\\langle k \\rangle = \\text{log}_2(N)$, which is also the point at which we can expect all nodes to be in a giant component (Barabási, 2016). Importantly, if the network is maximally dense (i.e. 
a fully connected network, with self-loops), $EI=0.00$. However, we expect such dense low-$EI$ structures to not be common, since network structures found in nature and society tend to be sparse (Del Genio, Gross, & Bassler, 2011).\n\nTo see the clear relationship between network connectivity and $EI$, compare the $EI$ of $d$-dimensional ring-lattices to that of hub-and-spoke star networks. The two form striking bounds, between which we expect most real networks to fall. That is, for undirected networks, the 1-dimensional ring lattice represents the upper bound where $\\langle k \\rangle = 2.00$, and the $EI$ scales with $\\text{log}_{2}(N) - \\text{log}_{2}(2d)$. $N$ is the number of nodes and $d$ is the dimension of the ring-lattice (i.e., $d=1$ is a cycle and is shown in this notebook, $d=2$ is a taurus, etc.). On the opposite end of the spectrum, an all-to-all undirected network have zero $EI$. Comparatively, as the size of star networks increase, their $EI$ approaches 0.00. \n\nThe picture that begins to emerge is that $EI$ is inextricably linked to the pattern of connectivity and therefore the motifs present in the network. In the following section, we explore the $EI$ of three-node directed network motifs in order to understand further why certain structures offer different amounts of information about causes and effects in a system. This also allows us to understand exactly why $EI$ is an appropriate metric of causation.", "_____no_output_____" ], [ "## 2.1 Comparing Erdős-Rényi and Barabási-Albert networks", "_____no_output_____" ] ], [ [ "import datetime as dt", "_____no_output_____" ], [ "Nmax = 2000\nNmin = 10\nNval = 16\nn_sim = 4\n\nms = [2, 7]\nps = [0.01, 0.04] \nnetwork_sizes = np.linspace(Nmin, Nmax, Nval, dtype=int)", "_____no_output_____" ], [ "ei_er = []\nei_ba = []\n\nstartT = dt.datetime.now()\nfor i in range(len(ms)):\n nownow = dt.datetime.now().strftime('%H:%M:%S.%f')[:-5]\n print(\"Starting m=%i at %s\"%(ms[i],nownow))\n ei_er_temp = []\n ei_ba_temp = []\n for Ni in range(len(network_sizes)):\n N = network_sizes[Ni]\n if Ni % 5 == 0:\n nownow = dt.datetime.now().strftime('%H:%M:%S.%f')[:-5]\n print(\"\\tN=%04i at %s\"%(N,nownow))\n \n temp_ba = 0\n temp_er = 0\n for num_sim in range(n_sim):\n Gba = nx.barabasi_albert_graph(N, ms[i])\n Ger = nx.erdos_renyi_graph(N, ps[i])\n temp_ba += effective_information(Gba)\n temp_er += effective_information(Ger)\n \n ei_er_temp.append(temp_er/n_sim)\n ei_ba_temp.append(temp_ba/n_sim)\n\n ei_er.append(ei_er_temp)\n ei_ba.append(ei_ba_temp)\n\nfinisH = dt.datetime.now()\ndiff = finisH-startT\ndiff = diff.total_seconds()\n\nprint(\"\\nTotal simulation time: %.2f seconds\"%diff)", "Starting m=2 at 18:46:33.3\n\tN=0010 at 18:46:33.3\n\tN=0673 at 18:46:34.2\n\tN=1336 at 18:46:38.7\n\tN=2000 at 18:46:49.0\nStarting m=7 at 18:46:52.0\n\tN=0010 at 18:46:52.0\n\tN=0673 at 18:46:53.8\n\tN=1336 at 18:47:03.6\n\tN=2000 at 18:47:31.2\n\nTotal simulation time: 65.84 seconds\n" ], [ "maxy_ba = max([max(ei_ba[0]),max(ei_ba[1])])\nmaxy_er = max([max(ei_er[0]),max(ei_er[1])])\nmaxy = max(maxy_ba,maxy_er)\nm_size = 8\nlw = 2.5\nmew = 2\nba_col = [\"#ff003a\",\"#fd6526\"]\ner_col = [\"#00d46a\",\"#0096ff\"]\n\n\nfig, ax0 = plt.subplots(1,1,figsize=(8,5))\n\nfor i in range(len(ms)):\n ax0.plot(\n network_sizes, ei_ba[i], marker='o', markersize=m_size, \n color=ba_col[i], markerfacecolor='w', markeredgewidth=mew, \n linewidth=lw, label=\"Barabási-Albert Network: m=%s\"%ms[i])\n ax0.plot(\n network_sizes, ei_er[i], marker='^', markersize=m_size, \n 
color=er_col[i], markerfacecolor='w', markeredgewidth=mew, \n linewidth=lw, label=\"Erdős-Rényi Network: p=%.3f\"%ps[i])\n ax0.hlines(\n -np.log2(ps[i]), -100, max(network_sizes)+100, \n linestyles=':', color=er_col[i], alpha=0.9,\n linewidth=lw*1.5, label=r\"-log$_2$(%.3f)\"%ps[i])\n\nax0.set_xlabel(\"$N$\", size=20)\nax0.set_ylabel(\"$EI$\", size=20)\n\nax0.set_xticks(np.linspace(0,Nmax,9))\nax0.set_xticklabels(\n [\"%i\"%i for i in np.linspace(0,Nmax,9)], fontsize=14)\n\nax0.set_yticks(np.linspace(0,int(maxy),9))\nax0.set_yticklabels(\n [\"%.1f\"%i for i in np.linspace(0,int(maxy),9)], fontsize=14)\n\nax0.set_ylim(-0.015*maxy, maxy*1.035)\nax0.set_xlim(-0.02*Nmax, Nmax*1.02)\nax0.grid(linestyle='-', color='#999999', linewidth=lw, alpha=0.3)\nax0.legend(fontsize=12)\n\nif save:\n plt.savefig(\"../figs/pngs/EI_compareERBA.png\", dpi=425, bbox_inches='tight')\n plt.savefig(\"../figs/pdfs/EI_compareERBA.pdf\", bbox_inches='tight')\nplt.show()", "_____no_output_____" ] ], [ [ "## 2.2 Comparing different Erdős-Rényi networks", "_____no_output_____" ] ], [ [ "Nval = 40\npmin = 1e-8\npmax = 1e0\n\nNmax = 2000\nNmin = 10\n\np_values = np.logspace(np.log10(pmin), np.log10(pmax), Nval)\nnetwork_sizes = np.linspace(Nmin, Nmax, Nval, dtype=int)\n\nn_sim = 2\nei_dict = {}\n\nstartT = dt.datetime.now()\nfor Ni, N in enumerate(network_sizes):\n if Ni % 2 == 0:\n nownow = dt.datetime.now().strftime('%H:%M:%S.%f')[:-5]\n print(\"Starting N=%04i at %s\"%(N,nownow))\n ei_er_p = []\n for pi, p in enumerate(p_values):\n \n temp = 0\n for num_sim in range(n_sim):\n Ger = nx.erdos_renyi_graph(N, p)\n temp += effective_information(Ger)\n\n ei_er_p.append(temp/n_sim)\n ei_dict[N] = ei_er_p\n \nfinisH = dt.datetime.now()\ndiff = finisH-startT\ndiff = diff.total_seconds()\n\nprint(\"\\nTotal simulation time: %.2f seconds\"%diff)", "Starting N=0010 at 18:47:42.5\nStarting N=0112 at 18:47:42.8\nStarting N=0214 at 18:47:45.2\nStarting N=0316 at 18:47:51.6\nStarting N=0418 at 18:48:03.5\nStarting N=0520 at 18:48:22.7\nStarting N=0622 at 18:48:52.2\nStarting N=0724 at 18:49:32.4\nStarting N=0826 at 18:50:25.9\nStarting N=0928 at 18:51:34.1\nStarting N=1030 at 18:52:59.8\nStarting N=1132 at 18:54:48.7\nStarting N=1234 at 18:57:02.1\nStarting N=1336 at 18:59:36.0\nStarting N=1438 at 19:02:35.7\nStarting N=1540 at 19:06:03.1\nStarting N=1642 at 19:10:00.6\nStarting N=1744 at 19:14:30.1\nStarting N=1846 at 19:19:35.6\nStarting N=1948 at 19:25:15.7\n\nTotal simulation time: 2637.87 seconds\n" ], [ "max_ei = max([max(p) for p in list(ei_dict.values())])-0.05\nmin_ei = 0\nei_range = np.linspace(min_ei, max_ei, len(p_values))", "_____no_output_____" ], [ "# import cmocean as cmo\n# col_map = cmo.cm.ice\ncol_map = plt.cm.viridis\nsave = True\nEIs = np.array([ei_n for ei_n in ei_dict.values()])\nxtixlab = [r'$10^{-%i}$'%i for i in np.linspace(8, 0, 9, dtype=int)]\nxtix = np.linspace(0.25,EIs.shape[1]-0.25,9)\nytixlab = [r\"$%i$\"%i for i in np.linspace(0, Nmax, 6)]\nytix = np.linspace(0.25,EIs.shape[0]-0.25,6)\n\nfig,ax0 = plt.subplots(1,1,figsize=(8,6))\n\nax0.pcolor(EIs, cmap=col_map)\nax0.set_ylabel(r\"$N$\", size=22)\nax0.set_xlabel(r\"$p$\", size=22)\nax0.set_yticks(ytix)\nax0.set_yticklabels(ytixlab, fontsize=12)\nax0.set_xticks(xtix)\nax0.set_xticklabels(xtixlab, fontsize=12)\n\nm = plt.cm.ScalarMappable(cmap=col_map)\nm.set_array(ei_range)\ncbar = plt.colorbar(m, pad=0.017)\ncbar.ax.tick_params(labelsize=14) \ncbar.set_label(\"$EI$\", size=22)\n\nif save:\n plt.savefig(where_to_save_pngs+\"EI_ER_NvsP.png\", 
bbox_inches='tight', dpi=425)\n plt.savefig(where_to_save_pdfs+\"EI_ER_NvsP.pdf\", bbox_inches='tight')\n\nplt.show()", "_____no_output_____" ], [ "ei_dict_k = {\"N\":[],\"k\":[],\"EI\":[]}\nfor ni,N in enumerate(network_sizes):\n for pi,p in enumerate(p_values):\n ei_dict_k['k'].append(p*N)\n ei_dict_k['N'].append(N)\n ei_dict_k['EI'].append(ei_dict[N][pi])\n\nxtix = np.logspace(-7,3,11)\nxtixlab = [r'$10^{%i}$'%i for i in np.linspace(-7,3,11,dtype=int)]\nytix = np.linspace(5, Nmax-5, 6)\nytixlab = [r\"$%i$\"%i for i in np.linspace(0, Nmax, 6)]\n\nfig, ax = plt.subplots(1,1,figsize=(8,6))\n\nm = plt.cm.ScalarMappable(cmap=col_map)\n\ncolors = col_map(np.array(ei_dict_k['EI'])/max(ei_dict_k['EI']))\nax.scatter(ei_dict_k['k'], ei_dict_k['N'], s=200, alpha=0.9, marker=\"s\", c=colors)\nax.set_xscale('log')\nax.set_xlim(1e-5,1e4)\n\n# plt.plot(np.log2(network_sizes), network_sizes, linewidth=5.5, linestyle='--', color='#ff723b', \n# label=r\"$\\langle k \\rangle$ = $\\log_2(N)$\", zorder=5)\nplt.plot([1]*len(network_sizes), network_sizes, linewidth=4.5, linestyle=':', color='#f422b8', \n label=r\"$\\langle k \\rangle$ = $1$\")#, zorder=5)\nplt.plot(np.log(network_sizes), network_sizes, linewidth=4.5, linestyle='--', color='#f422b8', \n label=r\"$\\langle k \\rangle$ = $\\log(N)$\")#, zorder=5)\n\nax.set_ylabel(r\"$N$\", size=24)\nax.set_xlabel(r\"$\\langle k \\rangle$\", size=22)\nax.set_yticks(ytix)\nax.set_yticklabels(ytixlab, fontsize=12)\nax.set_xticks(xtix)\nax.set_xticklabels(xtixlab, fontsize=12)\n\nm = plt.cm.ScalarMappable(cmap=col_map)\nm.set_array(ei_range)\ncbar = plt.colorbar(m, pad=0.017)\ncbar.ax.tick_params(labelsize=14) \ncbar.set_label(\"$EI$\", size=22)\n\nax.grid(True, linestyle='-', linewidth=2.3, alpha=0.35, color='#999999')\n\nplt.xlim(min(p_values)*min(network_sizes),max(p_values)*max(network_sizes))\nplt.ylim(-5, max(network_sizes)+5)\nplt.legend(fontsize=16, loc=2, framealpha=0.98)\n\nif save:\n plt.savefig(where_to_save_pngs+\"EI_ER_NvsK.png\", bbox_inches='tight', dpi=425)\n plt.savefig(where_to_save_pdfs+\"EI_ER_NvsK.pdf\", bbox_inches='tight')\n\nplt.show()", "_____no_output_____" ] ], [ [ "_______________________", "_____no_output_____" ], [ "## 2.3 Comparing star networks to ring lattice networks", "_____no_output_____" ] ], [ [ "ei_star = []\nei_line = []\nnet_size = []\nnetwork_sizes = np.array(sorted(list(set(np.logspace(0,3.5,25,dtype=int)))))\nline_dict = {}\nstar_dict = {}\n\nfor N in network_sizes:\n G_star = nx.star_graph(N+1) \n G_line = nx.watts_strogatz_graph(N+2, 2, 0)\n ei_star.append(effective_information(G_star))\n ei_line.append(effective_information(G_line))\n net_size.append(N+2)\n star_dict[N+2] = effective_information(G_star)\n line_dict[N+2] = effective_information(G_line)", "_____no_output_____" ], [ "star_x = list(star_dict.keys())\nstar_y = list(star_dict.values())\nline_x = list(line_dict.keys())\nline_y = list(line_dict.values())", "_____no_output_____" ], [ "import matplotlib.patches as mpatches", "_____no_output_____" ], [ "plt.rc('axes', linewidth=2)\nplt.rc('axes', axisbelow=True)\ncol1 = np.array([0.06581203, 0.49116525, 0.47591044, 1.0])\ncol2 = np.array([0.68306918, 0.25012518, 0.37634705, 1.0])\n\nmult=0.9\nfig, ax = plt.subplots(figsize=(20*mult,12*mult))\n\nplt.loglog(star_x, star_y, marker='o', markersize=20*mult, markerfacecolor='w', \n color=col1, markeredgewidth=5*mult, linewidth=7.0*mult, label=\"Star Network\")\nplt.loglog(line_x, line_y, marker='o', markersize=20*mult, markerfacecolor='w', \n color=col2, 
markeredgewidth=5*mult, linewidth=7.0*mult, label=\"Ring Lattice\")\n\nplt.grid(linestyle='-', linewidth=5.0, color='#999999', alpha=0.35)\nplt.xlabel(\"$N$\", size=36)\nplt.ylabel(\"$EI$\", size=38)\nplt.xticks(size=24)\nplt.yticks(size=24)\nplt.legend(fontsize=24, bbox_to_anchor=[0.2, 0.525])\nplt.xlim((2.5, 3750))\nplt.ylim((1.5e-4, 5e2))\n\n###############\nplt.axes([0.05, 0.75, 0.14, 0.22])\nG = nx.watts_strogatz_graph(4, 2, 0.0)\nplt.axis('off')\npos11 = nx.circular_layout(G)\nnx.draw_networkx_nodes(G, pos11, node_color='white', node_size=400*mult, linewidths=6*mult).set_edgecolor(col2)\nnx.draw_networkx_edges(G, pos11, edge_color=\"#666666\", width=4.55, alpha=0.98)\n\nplt.axes([0.42, 0.75, 0.14, 0.22])\nG = nx.watts_strogatz_graph(10, 2, 0.0)\nplt.axis('off')\npos12 = nx.circular_layout(G)\nnx.draw_networkx_nodes(G, pos12, node_color='white', node_size=400*mult, linewidths=6*mult).set_edgecolor(col2)\nnx.draw_networkx_edges(G, pos12, edge_color=\"#666666\", width=4.55, alpha=0.98)\n\nplt.axes([0.8, 0.75, 0.14, 0.22])\nG = nx.watts_strogatz_graph(50, 2, 0.0)\nplt.axis('off')\npos13 = nx.circular_layout(G)\nnx.draw_networkx_nodes(G, pos13, node_color='white', node_size=350*mult, linewidths=5*mult).set_edgecolor(col2)\nnx.draw_networkx_edges(G, pos13, edge_color=\"#666666\", width=4.55, alpha=0.98)\n\n###############\nplt.axes([-0.06, 0.81, 0.5, 0.22])\nplt.axis('off')\nstyle=\"Simple,head_length=35,head_width=48,tail_width=22\"\narrow11 = mpatches.FancyArrowPatch((0.63,0.2), (0.870,0.2), arrowstyle=style, color=col2)\nplt.gca().add_patch(arrow11)\nstyle=\"Simple,head_length=16,head_width=27,tail_width=10\"\narrow12 = mpatches.FancyArrowPatch((0.637,0.2), (0.850,0.2), arrowstyle=style, color ='w')\nplt.gca().add_patch(arrow12)\n\nplt.axes([0.26, 0.81, 0.5, 0.22])\nplt.axis('off')\nstyle=\"Simple,head_length=35,head_width=48,tail_width=22\"\narrow21 = mpatches.FancyArrowPatch((0.75,0.2), (1.0,0.2), arrowstyle=style, color=col2)\nplt.gca().add_patch(arrow21)\nstyle=\"Simple,head_length=16,head_width=27,tail_width=10\"\narrow22 = mpatches.FancyArrowPatch((0.757,0.2), (0.980,0.2), arrowstyle=style, color ='w')\nplt.gca().add_patch(arrow22)\n###############\n\nplt.axes([0.05, 0.025, 0.14, 0.22])\nG = nx.star_graph(3)\nplt.axis('off')\npos21 = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, pos21, node_color='white', node_size=400*mult, linewidths=6*mult).set_edgecolor(col1)\nnx.draw_networkx_edges(G, pos21, edge_color=\"#666666\", width=4.55, alpha=0.98)\n\nplt.axes([0.42, 0.025, 0.14, 0.22])\nG = nx.star_graph(9)\nplt.axis('off')\npos22 = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, pos22, node_color='white', node_size=400*mult, linewidths=6*mult).set_edgecolor(col1)\nnx.draw_networkx_edges(G, pos22, edge_color=\"#666666\", width=4.55, alpha=0.98)\n\nplt.axes([0.8, 0.025, 0.14, 0.22])\nG = nx.star_graph(49)\nplt.axis('off')\npos23 = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, pos23, node_color='white', node_size=350*mult, linewidths=5*mult).set_edgecolor(col1)\nnx.draw_networkx_edges(G, pos23, edge_color=\"#666666\", width=4.55, alpha=0.98)\n\n###############\nplt.axes([-0.06, 0.09, 0.5, 0.22])\nplt.axis('off')\nstyle=\"Simple,head_length=35,head_width=48,tail_width=22\"\narrow11 = mpatches.FancyArrowPatch((0.63,0.2), (0.870,0.2), arrowstyle=style, color=col1)\nplt.gca().add_patch(arrow11)\nstyle=\"Simple,head_length=16,head_width=27,tail_width=10\"\narrow12 = mpatches.FancyArrowPatch((0.637,0.2), (0.850,0.2), 
arrowstyle=style, color ='w')\nplt.gca().add_patch(arrow12)\n\nplt.axes([0.26, 0.09, 0.5, 0.22])\nplt.axis('off')\nstyle=\"Simple,head_length=35,head_width=48,tail_width=22\"\narrow21 = mpatches.FancyArrowPatch((0.75,0.2), (1.0,0.2), arrowstyle=style, color=col1)\nplt.gca().add_patch(arrow21)\nstyle=\"Simple,head_length=16,head_width=27,tail_width=10\"\narrow22 = mpatches.FancyArrowPatch((0.757,0.2), (0.980,0.2), arrowstyle=style, color ='w')\nplt.gca().add_patch(arrow22)\n\nplt.subplots_adjust(top=0.99, bottom=0.01, right=1, left=0, hspace=0, wspace=0)\n\nif save:\n    plt.savefig(where_to_save_pngs+\"EffectiveInformation_Compare_StarLine.png\", bbox_inches='tight', dpi=425)\n    plt.savefig(where_to_save_pdfs+\"EffectiveInformation_Compare_StarLine.pdf\", bbox_inches='tight')\n\nplt.show()\nplt.rc('axes', linewidth=1)", "_____no_output_____" ] ], [ [ "As the number of nodes in star networks increases, we observe an $EI$ that approaches zero, while the $EI$ of ring lattice networks grows logarithmically as the number of nodes increases.\n\n________________", "_____no_output_____" ], [ "## 2.4 Comparing preferential attachment networks by varying their $\\alpha$ values", "_____no_output_____" ] ], [ [ "def preferential_attachment_network(N, alpha=1.0, m=1):\n    \"\"\"\n    Generates a network based on a preferential attachment \n    growth rule. Under this growth rule, new nodes place their \n    $m$ edges to nodes already present in the graph, G, with \n    a probability proportional to $k^\\alpha$.\n    \n    Params\n    ------\n    N (int): the desired number of nodes in the final network\n    alpha (float): the exponent of preferential attachment. \n                   When alpha is less than 1.0, we describe it\n                   as sublinear preferential attachment. At\n                   alpha > 1.0, it is superlinear preferential\n                   attachment. 
And at alpha=1.0, the network \n                   is grown under linear preferential attachment,\n                   as in the case of Barabasi-Albert networks.\n    m (int): the number of new links that each new node joins\n             the network with.\n    \n    Returns\n    -------\n    G (nx.Graph): a graph grown under preferential attachment.\n    \n    \"\"\"\n    G = nx.complete_graph(m+1)\n\n    for node_i in range(m+1,N):\n        degrees = np.array(list(dict(G.degree()).values()))\n        probs = (degrees**alpha) / sum(degrees**alpha)\n        eijs = np.random.choice(\n            G.number_of_nodes(), size=(m,), replace=False, p=probs)\n        for node_j in eijs:\n            G.add_edge(node_i, node_j)\n\n    return G", "_____no_output_____" ], [ "Nmax = 2500\nNmin = 10\nNval = 20\n\nnetwork_sizes = np.logspace(np.log10(Nmin), np.log10(Nmax), Nval, dtype=int)\nalphas = np.array([0.0, 0.5, 1.0, 1.2, 1.3, 1.4, 1.5, 2.0, 2.5])\n\nn_sim = 80", "_____no_output_____" ], [ "import datetime as dt", "_____no_output_____" ], [ "N = 2500\nnsamps = 20\nn_sim = 80\nnetwork_sizes = np.logspace(1, np.log10(N), nsamps, dtype=int)\nalphas = np.array([0.0, 0.5, 1.0, 1.2, 1.3, 1.4, 1.5, 2.0, 2.5])", "_____no_output_____" ], [ "ms = [1]\nout_dict = {}\n\nfor alpha in alphas:\n    print(\"Starting iteration alpha = %.1f at %s\"%(alpha,dt.datetime.now()))\n    alpha_temp = {}\n    for m in ms:\n        m_temp = []\n        for N in network_sizes:\n            N_temp = []\n            for num_sim in range(n_sim):\n                G = preferential_attachment_network(N, alpha, m)\n\n                N_temp.append(effective_information(G))\n            m_temp.append(N_temp)\n\n        alpha_temp[m] = m_temp\n    \n    out_dict[alpha] = alpha_temp", "Starting iteration alpha = 0.0 at 2019-05-20 13:37:48.235958\nStarting iteration alpha = 0.5 at 2019-05-20 13:44:00.374389\nStarting iteration alpha = 1.0 at 2019-05-20 13:50:16.400084\nStarting iteration alpha = 1.2 at 2019-05-20 13:56:43.478900\nStarting iteration alpha = 1.3 at 2019-05-20 14:02:58.968516\nStarting iteration alpha = 1.4 at 2019-05-20 14:08:47.275276\nStarting iteration alpha = 1.5 at 2019-05-20 14:14:57.554698\nStarting iteration alpha = 2.0 at 2019-05-20 14:20:58.575863\nStarting iteration alpha = 2.5 at 2019-05-20 14:27:05.173995\n" ], [ "mult=0.9\nplt.figure(figsize = (12*mult,9.5*mult))\nm_shapes = dict(zip(ms,['o','^','s','+']))\n\n# alpha_colors = cmo.cm.amp(np.linspace(0.15,0.95,len(alphas)))\nalpha_colors = plt.cm.viridis(np.linspace(0.15,0.95,len(alphas)))\nalpha_colors = dict(zip(alphas, alpha_colors))\n\nlabs = ['- Random Tree', '', '- Barabási-Albert', '', '', '', '', '', '- Star-like Tree']\n\nmaxEI=0\nminEI=1\ncounter = 0\nfor alpha in alphas:\n    for m in ms:\n        data = out_dict[alpha]\n        EI_N_list = np.mean(out_dict[alpha][m],axis=1)\n        if max(EI_N_list) > maxEI:\n            maxEI = max(EI_N_list)\n        if min(EI_N_list) < minEI:\n            minEI = min(EI_N_list)\n        m_temp_stdv = np.std(out_dict[alpha][m],axis=1)\n        fillb = EI_N_list-m_temp_stdv\n        fillb[fillb<0] = 0\n\n        sizes = np.array([(network_sizes[i]+network_sizes[i+1])/2 for i in range(0,len(network_sizes)-1,2)])\n        means = np.array([(EI_N_list[i]+EI_N_list[i+1])/2 for i in range(0,len(EI_N_list)-1,2)])\n        stdvs = np.array([(m_temp_stdv[i]+m_temp_stdv[i+1])/2 for i in range(0,len(m_temp_stdv)-1,2)]) \n        \n        plt.fill_between(sizes, means-stdvs, means+stdvs, \n                         facecolor=alpha_colors[alpha], alpha=0.2, edgecolor='w', linewidth=2.0)\n        \n        plt.semilogx(sizes, means, alpha=0.99, marker=m_shapes[1], \n                 markersize=10.0, color=alpha_colors[alpha], markerfacecolor='w', \n                 markeredgewidth=2.9, linewidth=3.5, \n                 label=r\"$\\alpha = %.1f$\"%(alpha))\n\n        counter += 1\n\nplt.xlabel(r\"$N$\", 
size=26)\nplt.ylabel(r\"$EI$\", size=28)\nxtix0 = np.logspace(np.log10(min(network_sizes)), np.log10((max(network_sizes))+1.2), 9)\nxtix = np.logspace(np.log10(min(sizes)), np.log10((max(sizes))), 9)\nytix = np.linspace(0,10,11,dtype=int)\nplt.xticks(xtix, [\"$%i$\"%i for i in np.array(xtix0,dtype=int)], size=18)\nplt.yticks(ytix, [\"$%i$\"%i for i in np.round(ytix,2)], size=18)\nplt.xlim(max(xtix)*0.00475, max(xtix)*1.065)\nplt.ylim(-0.2, 10.25)\nplt.grid(True, linestyle='-', linewidth=3.0, color='#999999', alpha=0.35)\nplt.legend(fontsize=18)\n\nif save:\n    plt.savefig(where_to_save_pngs+\"EI_compare_prefattach_alphas_m.png\", bbox_inches='tight', dpi=425)\n    plt.savefig(where_to_save_pdfs+\"EI_compare_prefattach_alphas_m.pdf\", bbox_inches='tight')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## 2.5 Comparing random networks by varying their density, $p$", "_____no_output_____" ] ], [ [ "ps = np.logspace(-3.2,-1,9)\nout_dictp = {}\nn_sim = 40\n\nfor p in ps:\n    print(\"Starting iteration p = %.4f at %s\"%(p,dt.datetime.now()))\n    p_temp = []\n    for N in network_sizes:\n        N_temp = []\n        for num_sim in range(n_sim):\n            G = nx.erdos_renyi_graph(N, p)\n            N_temp.append(effective_information(G))\n        \n        p_temp.append(N_temp)\n    \n    out_dictp[p] = p_temp", "Starting iteration p = 0.0006 at 2019-05-20 14:36:22.087410\nStarting iteration p = 0.0012 at 2019-05-20 14:37:21.833532\nStarting iteration p = 0.0022 at 2019-05-20 14:38:24.060868\nStarting iteration p = 0.0042 at 2019-05-20 14:39:28.467629\nStarting iteration p = 0.0079 at 2019-05-20 14:40:39.867810\nStarting iteration p = 0.0150 at 2019-05-20 14:42:14.724322\nStarting iteration p = 0.0282 at 2019-05-20 14:44:15.699836\nStarting iteration p = 0.0531 at 2019-05-20 14:47:15.183679\nStarting iteration p = 0.1000 at 2019-05-20 14:51:46.286310\n" ], [ "mult=0.9\nplt.figure(figsize = (12*mult,9.5*mult))\nm_shapes = dict(zip(ms,['o','^','s','+']))\n\n# p_colors = cmo.cm.ice_r(np.linspace(0.2,0.85,len(ps)))\np_colors = plt.cm.viridis(np.linspace(0.2,0.85,len(ps)))\np_colors = dict(zip(ps, p_colors))\n\nmaxEI=0\nminEI=1\ncounter = 0\nfor p in ps:\n    data = out_dictp[p]\n    EI_N_list = np.mean(out_dictp[p],axis=1)\n    if max(EI_N_list) > maxEI:\n        maxEI = max(EI_N_list)\n    if min(EI_N_list) < minEI:\n        minEI = min(EI_N_list)\n    m_temp_stdv = np.std(out_dictp[p],axis=1)\n    fillb = EI_N_list-m_temp_stdv\n    fillb[fillb<0] = 0\n    \n    sizes = np.array([(network_sizes[i]+network_sizes[i+1])/2 for i in range(0,len(network_sizes)-1,2)])\n    means = np.array([(EI_N_list[i]+EI_N_list[i+1])/2 for i in range(0,len(EI_N_list)-1,2)])\n    stdvs = np.array([(m_temp_stdv[i]+m_temp_stdv[i+1])/2 for i in range(0,len(m_temp_stdv)-1,2)]) \n    sizes = np.array([size for size in sizes for i in range(2)])\n    means = np.array([mean for mean in means for i in range(2)])\n    stdvs = np.array([stdv for stdv in stdvs for i in range(2)])\n    \n    plt.fill_between(sizes, means-stdvs, means+stdvs, \n                     facecolor=p_colors[p], alpha=0.2, edgecolor='w', linewidth=2.0)\n    plt.semilogx(sizes, means, alpha=0.99, marker=m_shapes[1], \n             markersize=10.0, color=p_colors[p], markerfacecolor='w', \n             markeredgewidth=2.9, linewidth=3.5, \n             label=r\"$p = %.4f$\"%(p))\n    counter += 1\n    \nplt.xlabel(r\"$N$\", size=26)\nplt.ylabel(r\"$EI$\", size=28)\nxtix0 = np.logspace(np.log10(min(network_sizes)), np.log10((max(network_sizes))+1.2), 9)\nxtix = np.logspace(np.log10(min(sizes)), np.log10((max(sizes))), 9)\nytix = np.linspace(0,10,11,dtype=int)\nplt.xticks(xtix, [\"$%i$\"%i for i in np.array(xtix0,dtype=int)], size=18)\nplt.yticks(ytix, 
[\"$%i$\"%i for i in np.round(ytix,2)], size=18)\nplt.xlim(max(xtix)*0.00475, max(xtix)*1.065)\nplt.ylim(-0.2, 10.25)\nplt.grid(True, linestyle='-', linewidth=3.0, color='#999999', alpha=0.35)\nplt.legend(fontsize=18)\n\nif save:\n plt.savefig(where_to_save_pngs+\"EI_compare_ER_ps.png\", bbox_inches='tight', dpi=425)\n plt.savefig(where_to_save_pdfs+\"EI_compare_ER_ps.pdf\", bbox_inches='tight')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## End of Chapter 02. In [Chapter 03](https://nbviewer.jupyter.org/github/jkbren/einet/blob/master/code/Chapter%2003%20-%20Determinism%20and%20Degeneracy.ipynb) we'll see that $EI$ is determinism minus degeneracy\n_______________", "_____no_output_____" ], [ "### References:\n- __[Barabási, A.-L. & Albert, R. (1999). Emergence of scaling in random networks. Science, 286 (October), 509–512. doi: 10.1126/science.286.5439.509](http://barabasi.com/f/67.pdf)__\n- __[Barabási, A.-L. (2016). Network Science. Cambridge University Press. doi: ISBN:9781107076266](http://networksciencebook.com/)__\n- __[Del Genio, C. I., Gross, T., & Bassler, K. E. (2011). All scale-free networks are sparse. Physical Review Letters, 107(17), 1–4. doi: 10.1103/PhysRevLett.107.178701](https://physics.aps.org/featured-article-pdf/10.1103/PhysRevLett.107.178701)__\n- __[Erdös, P., & Rènyi, A. (1959). On random graphs. Publicationes Mathematicae, 6, 290–297. doi: 10.2307/1999405](http://snap.stanford.edu/class/cs224w-readings/erdos59random.pdf)__", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ec5b153e775105284d12a19ed59a291063b90054
33,079
ipynb
Jupyter Notebook
examples/tutorial_simulating_and_fitting_using_a_simple_model.ipynb
tomdelahaije/dmipy
dbbca4066a6c162dcb05865df5ff666af0e4020a
[ "MIT" ]
59
2018-02-22T19:14:19.000Z
2022-02-22T05:40:27.000Z
examples/tutorial_simulating_and_fitting_using_a_simple_model.ipynb
tomdelahaije/dmipy
dbbca4066a6c162dcb05865df5ff666af0e4020a
[ "MIT" ]
95
2018-02-03T11:55:30.000Z
2022-03-31T15:10:39.000Z
examples/tutorial_simulating_and_fitting_using_a_simple_model.ipynb
tomdelahaije/dmipy
dbbca4066a6c162dcb05865df5ff666af0e4020a
[ "MIT" ]
23
2018-02-13T07:21:01.000Z
2022-02-22T20:12:08.000Z
115.25784
25,188
0.873001
[ [ [ "# Simulating and fitting data using a simple Stick model", "_____no_output_____" ], [ "As in the previous tutorial we will use the wu-minn acquisition scheme to do our experiments. Instead of loading it from scratch, we load it from dmipy.data.saved_acquisition_schemes, which contains some saved dmipy acquisition schemes.", "_____no_output_____" ] ], [ [ "# load the necessary modules\nfrom dmipy.signal_models import cylinder_models\nfrom dmipy.core import modeling_framework\nfrom dmipy.data import saved_acquisition_schemes\nimport numpy as np\n\nacq_scheme = saved_acquisition_schemes.wu_minn_hcp_acquisition_scheme()", "_____no_output_____" ] ], [ [ "Using this acquisition scheme, we will simulate data using a simple Stick model, and then use the same Stick model to fit the signal again. First, we instantiate the model(s) we need.\n\nNOTE: this example the same for any other dmipy model. One only needs to change the model and appropriate input parameters.", "_____no_output_____" ] ], [ [ "stick = cylinder_models.C1Stick()", "_____no_output_____" ] ], [ [ "In dmipy, all the simulation and fitting functionality is contained in the MultiCompartmentModel module. To simulate some data, we therefore make a MultiCompartmentModel that just contains the Stick model.", "_____no_output_____" ] ], [ [ "from dmipy.core.modeling_framework import MultiCompartmentModel\nstick_model = MultiCompartmentModel(models=[stick])", "_____no_output_____" ] ], [ [ "We can visualize the flow diagram of the model using the `visualize_model_setup` command:", "_____no_output_____" ] ], [ [ "from IPython.display import Image\nstick_model.visualize_model_setup(view=False, cleanup=False, with_parameters=True)\nImage('Model Setup.png')", "_____no_output_____" ] ], [ [ "To find more information on the model parameters check the function documentation of the Stick model. It is also possible to print the parameter cardinality to figure out the parameter names, and their input format.", "_____no_output_____" ] ], [ [ "stick_model.parameter_cardinality", "_____no_output_____" ] ], [ [ "First thing to notice is name of the model \"C1Stick\" and the parameter \"mu\" is separated by a number \"\\_1\\_\". If multiple sticks are given to the MultiCompartmentModel, then this number will increase as more Sticks are added.\n\n\nThe number after the parameter name indicates the cardinality of the parameter, meaning that the orientation of the stick \"mu\" takes two angles on the sphere as [theta, phi], and one value as parallel diffusivity lambda_par.\n\nFor the example we align the Stick with some angle and give it a diffusivity of 1.7e-9 m^2/s. We obtain the right ordering for the input of the function by using the model's parameters_to_parameter_vector() function:", "_____no_output_____" ] ], [ [ "mu = (np.pi / 2., np.pi / 2.) 
# in radians\nlambda_par = 1.7e-9 # in m^2/s\nparameter_vector = stick_model.parameters_to_parameter_vector(\n    C1Stick_1_lambda_par=lambda_par, C1Stick_1_mu=mu)", "_____no_output_____" ] ], [ [ "As you can see, this produces a parameter vector with the 'correct' order for the model to understand it.", "_____no_output_____" ] ], [ [ "parameter_vector", "_____no_output_____" ] ], [ [ "We can generate the diffusion-weighted signals for these model parameters and the wu-minn acquisition scheme as follows:", "_____no_output_____" ] ], [ [ "E = stick_model.simulate_signal(acq_scheme, parameter_vector)\nlen(E) # See that this produces the signal attenuation for the entire acquisition scheme", "_____no_output_____" ] ], [ [ "Let's assume this signal is now unknown, and we want to fit the Stick model to this signal to find the best-fitting model parameters. The model.fit(scheme, data) method is the easiest way to fit some data. By default, dmipy uses a global optimizer that we call brute2fine, which does exactly what the name implies: it first does a global brute-force optimization and then refines the solution to a local minimum.", "_____no_output_____" ] ], [ [ "res = stick_model.fit(acq_scheme, E)", "Using parallel processing with 8 workers.\nSetup brute2fine optimizer in 0.0109260082245 seconds\nFitting of 1 voxels complete in 0.267587184906 seconds.\nAverage of 0.267587184906 seconds per voxel.\n" ] ], [ [ "Using the data and the acquisition scheme, we fitted the stick_model with the one-liner above. Printing the result shows that the correct model parameters are recovered.", "_____no_output_____" ] ], [ [ "print('Optimized result:', res.fitted_parameters_vector)\nprint('Ground truth:     ', parameter_vector)", "Optimized result: [[ 1.57080726e+00 -1.57079593e+00  1.70010076e-09]]\nGround truth:     [1.57079633e+00 1.57079633e+00 1.70000000e-09]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec5b1d0a2d4d7e3cd6e66f0c6b8b9c8f335d292d
10,278
ipynb
Jupyter Notebook
python/simple minst convnet.ipynb
WhitePhosphorus4/xh-learning-code
025e31500d9f46d97ea634d7fd311c65052fd78e
[ "Apache-2.0" ]
null
null
null
python/simple minst convnet.ipynb
WhitePhosphorus4/xh-learning-code
025e31500d9f46d97ea634d7fd311c65052fd78e
[ "Apache-2.0" ]
null
null
null
python/simple minst convnet.ipynb
WhitePhosphorus4/xh-learning-code
025e31500d9f46d97ea634d7fd311c65052fd78e
[ "Apache-2.0" ]
null
null
null
28.00545
141
0.485892
[ [ [ "import numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import layers", "_____no_output_____" ], [ "num_classes = 10\ninput_shape = (28,28,1)\n(x_train,y_train), (x_test,y_test) = keras.datasets.mnist.load_data()", "_____no_output_____" ], [ "x_test.shape", "_____no_output_____" ], [ "x_train = x_train.astype(\"float32\")/255\nx_test = x_test.astype(\"float32\")/255", "_____no_output_____" ], [ "x_train.shape", "_____no_output_____" ], [ "x_train = np.expand_dims(x_train,-1)\nx_test = np.expand_dims(x_test,-1)", "_____no_output_____" ], [ "x_train.shape", "_____no_output_____" ], [ "y_train = keras.utils.to_categorical(y_train,num_classes)", "_____no_output_____" ], [ "y_train", "_____no_output_____" ], [ "y_test = keras.utils.to_categorical(y_test,num_classes)", "_____no_output_____" ] ], [ [ "### model", "_____no_output_____" ] ], [ [ "# 动态分配内存\nimport tensorflow as tf\n\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n", "_____no_output_____" ], [ "model = keras.Sequential(\n [\n keras.Input(shape=input_shape),\n layers.Conv2D(32,kernel_size=(3,3),activation = \"relu\"),\n layers.MaxPooling2D(pool_size=(2,2)),\n layers.Conv2D(64,kernel_size=(3,3),activation = \"relu\"),\n layers.MaxPooling2D(pool_size=(2,2)),\n layers.Flatten(),\n layers.Dropout(0.5),\n layers.Dense(num_classes,activation = \"softmax\"),\n ]\n)", "_____no_output_____" ], [ "model.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 5, 5, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 1600) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 1600) 0 \n_________________________________________________________________\ndense (Dense) (None, 10) 16010 \n=================================================================\nTotal params: 34,826\nTrainable params: 34,826\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "batch_size = 128\nepoch = 15\nmodel.compile(loss=\"categorical_crossentropy\",optimizer=\"adam\",metrics=[\"accuracy\"])", "_____no_output_____" ], [ "model.fit(x_train,y_train,batch_size=batch_size,epochs=epoch,validation_split=0.1)", "Epoch 1/15\n422/422 [==============================] - 7s 9ms/step - loss: 0.7805 - accuracy: 0.7627 - val_loss: 0.0838 - val_accuracy: 0.9770\nEpoch 2/15\n422/422 [==============================] - 3s 8ms/step - loss: 0.1219 - accuracy: 0.9630 - val_loss: 0.0571 - val_accuracy: 0.9822\nEpoch 3/15\n422/422 [==============================] - 3s 8ms/step - loss: 0.0887 - accuracy: 0.9722 - val_loss: 0.0488 - val_accuracy: 0.9870\nEpoch 4/15\n422/422 [==============================] - 3s 8ms/step - loss: 0.0705 - accuracy: 0.9778 - val_loss: 0.0408 - val_accuracy: 0.9892\nEpoch 5/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0642 - accuracy: 0.9796 - val_loss: 0.0390 - 
val_accuracy: 0.9887\nEpoch 6/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0565 - accuracy: 0.9828 - val_loss: 0.0366 - val_accuracy: 0.9903\nEpoch 7/15\n422/422 [==============================] - 4s 9ms/step - loss: 0.0520 - accuracy: 0.9837 - val_loss: 0.0360 - val_accuracy: 0.9897\nEpoch 8/15\n422/422 [==============================] - 4s 9ms/step - loss: 0.0476 - accuracy: 0.9851 - val_loss: 0.0313 - val_accuracy: 0.9905\nEpoch 9/15\n422/422 [==============================] - 4s 9ms/step - loss: 0.0447 - accuracy: 0.9855 - val_loss: 0.0347 - val_accuracy: 0.9908\nEpoch 10/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0404 - accuracy: 0.9874 - val_loss: 0.0322 - val_accuracy: 0.9910\nEpoch 11/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0381 - accuracy: 0.9884 - val_loss: 0.0309 - val_accuracy: 0.9928\nEpoch 12/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0370 - accuracy: 0.9880 - val_loss: 0.0320 - val_accuracy: 0.9905\nEpoch 13/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0357 - accuracy: 0.9880 - val_loss: 0.0289 - val_accuracy: 0.9927\nEpoch 14/15\n422/422 [==============================] - 4s 8ms/step - loss: 0.0348 - accuracy: 0.9888 - val_loss: 0.0285 - val_accuracy: 0.9920\nEpoch 15/15\n422/422 [==============================] - 4s 9ms/step - loss: 0.0323 - accuracy: 0.9900 - val_loss: 0.0275 - val_accuracy: 0.9927\n" ], [ "score = model.evaluate(x_test,y_test,verbose = 0)\nscore", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec5b2e561fd80be331e4684b266dce2ff4195ef1
13,689
ipynb
Jupyter Notebook
01_core.baseline.ipynb
ezrider/cheeky_monkey
4b902e9805d09a91f1ee6e4108855ab911af3fbf
[ "Apache-2.0" ]
null
null
null
01_core.baseline.ipynb
ezrider/cheeky_monkey
4b902e9805d09a91f1ee6e4108855ab911af3fbf
[ "Apache-2.0" ]
null
null
null
01_core.baseline.ipynb
ezrider/cheeky_monkey
4b902e9805d09a91f1ee6e4108855ab911af3fbf
[ "Apache-2.0" ]
null
null
null
35.741514
307
0.555044
[ [ [ "# default_exp core.baseline", "_____no_output_____" ] ], [ [ "## Baseline Analysis\n\n> We are using the adjudidicated cases from 2018 to differentiate the cases by their Origin. Our first step is to determine if the they have different status counts. The conclusion of this analysis will driver futher analysis of years where there were significant policy changes made due to COVID.\n", "_____no_output_____" ], [ "In particular ...", "_____no_output_____" ] ], [ [ "#hide\nfrom nbdev.showdoc import *\nfrom fastcore.test import *\n\nfile_path = r'C:\\Users\\mbeaulieu\\rsi_project_book\\Data\\\\'\n\n", "_____no_output_____" ], [ "#export\nimport pandas as pd\nimport altair as alt\n", "_____no_output_____" ], [ "#export\nORIGIN_MAPPINGS = {\n 'DMER - DRIVER MEDICAL EXAMINATION REPORT': 'DMER', \n 'OTHER':'Other', \n 'PHYSICIAN':'Physician',\n 'PUBLIC/FAMILY': 'Public/Family', \n 'EVF - EXAM OF VISUAL FUNCTIONS': 'EVF',\n 'ICBC TRIGGER':'ICBC Trigger',\n 'ICBC':'ICBC',\n 'OTHER HEALTH PROFESSIONAL':'Other Health', \n 'POLICE':'Police', \n 'REHAB':'Rehab',\n 'DRIVER IMPROVEMENT PROGRAM':'Driver Improvement',\n 'DRIVER APPEAL REQUEST':'Appeal',\n 'SEAT BELT EXEMPTION APPLICATION':'Seat Belt',\n 'SPECIALIST REPORT':'Specialist Report',\n 'MLA ENQUIRY':'MLA Enquiry' \n}\n", "_____no_output_____" ], [ "#export\n\ndef get_fte_days_2018():\n# We created three lists, containing names, ages, and locations, holding our ordered data\n# We then created a Python zip() object, which contained the zips of names, ages, and locations. We then applied the list() function to turn this zip object into a list of tuples\n# We then passed this zipped object into our DataFrame() class, along with a list of our column names to create our dataframe.\n\n months = ['Jan-2018', 'Feb-2018', 'Mar-2018', 'Apr-2018','May-2018', 'Jun-2018', 'Jul-2018', 'Aug-2018','Sep-2018', 'Oct-2018', 'Nov-2018', 'Dec-2018']\n ftes = [376\n ,311\n ,361\n ,332\n ,394\n ,360\n ,335\n ,366\n ,323\n ,380\n ,335\n ,281]\n\n\n zipped = list(zip(months, ftes))\n ftedays_df = pd.DataFrame(zipped, columns=['Month-Year', 'FTE (Days)'])\n ftedays_df\n\n d = ftedays_df.copy()\n d.columns = ['Month', 'FTE (Days)']\n \n return d\n", "_____no_output_____" ], [ "#export\ndef derive_statuscount_percase(df_in, ftedays_df):\n FTE_DAYS_YEAR = ftedays_df['FTE (Days)'].sum()\n print('FTE_DAYS_YEAR', FTE_DAYS_YEAR)\n aggregation = {\n 'Case Count': ('STATUS_COUNT','size'),\n 'Total Status Change Count': ('STATUS_COUNT','sum')\n } \n\n # df = df_in.groupby(['ORIGIN_DSC']) \\\n # .agg(** aggregation).reset_index()\n\n # aggregation = {\n # 'Case Count': ('DRIVERS_LICENSE_NO','size'),\n # 'Status Count': ('STATUS_COUNT','sum'),\n # }\n\n df = df_in.groupby([pd.Grouper(freq='M', key='CASE_OPENED_DT') , \n pd.Grouper(key='ORIGIN_DSC'), \n ]).agg(** aggregation)\n\n df = pd.DataFrame(df).reset_index()\n\n df['Opened Month'] = df.apply(lambda x: x['CASE_OPENED_DT'].strftime('%b') + '-' + x['CASE_OPENED_DT'].strftime('%Y'), axis=1)\n \n #case_summary_data['Total Cases in Group'] = case_summary_data.groupby(['Origin Report'])['Cases'].transform(lambda x: sum(x) )\n df['Total Cases In Month'] = df.groupby(['CASE_OPENED_DT'])['Case Count'].transform( lambda x: sum(x))\n df['Total Status Changes In Month'] = df.groupby(['CASE_OPENED_DT'])['Total Status Change Count'].transform( lambda x: sum(x))\n df['Monthly Status Changes/Case'] = df.apply(lambda x: x['Total Status Changes In Month']/x['Total Cases In Month'] , axis=1)\n\n\n df['Group Status Changes/Case'] = 
df.apply(lambda x: x['Total Status Change Count']/x['Case Count'] , axis=1)\n #Month-Year\tFTE (Days)\n df = pd.merge(df, ftedays_df, how='left', left_on='Opened Month', right_on='Month')\n #df = df[df['Is Adjudicated'] == 'Adjudicated']\n print('df shape ', df.shape)\n case_count = df['Case Count'].sum()\n status_change_count = df['Total Status Change Count'].sum()\n\n df['Group Case Count/FTE'] = df.apply(lambda x: x['Case Count']/x['FTE (Days)'] , axis=1)\n df['Group Status Change/FTE'] = df.apply(lambda x: x['Total Status Change Count']/x['FTE (Days)'] , axis=1)\n\n\n print(f\"Adjudicated Case Count for 2018: {case_count:,}\")\n print(f\"Adjudicated Status Change Count for 2018: {status_change_count:,}\" )\n print(f\"Average Status Change Count/Case: {status_change_count/case_count : .2f}\" )\n\n print(f\"Adjudicated Cases/FTE Day: {case_count/FTE_DAYS_YEAR : .2f}\") \n print(f\"Status Changes/FTE Day: { status_change_count/FTE_DAYS_YEAR: .2f}\")\n# print(f\"Monthly Team Capacity (Status Changes) { (status_change_count/FTE_DAYS_YEAR) * FTE_DAYS_MONTH: ,.2f}\" )\n\n\n ftedays_case_count = FTE_DAYS_YEAR/case_count\n ftedays_status_change_count = FTE_DAYS_YEAR/status_change_count\n print(f\"FTE Days/Adjudicated Case: {ftedays_case_count : .3f}\")\n print(f\"FTE Days/Status Change {ftedays_status_change_count: .3f}\") \n\n return (df, ftedays_case_count, ftedays_status_change_count )", "_____no_output_____" ], [ "#exports\ndef example_exports_func():\n pass\n\n", "_____no_output_____" ], [ "#exports\ndef imgs_save(image):\n pass\n\n", "_____no_output_____" ], [ "#export\ndef get_statuses(f_path):\n file_path = f_path + 'DFC_STATUSES.csv'\n loadedstatus_df = pd.read_csv(file_path)\n loadedstatus_df = loadedstatus_df.sort_values(by = 'STATUS_CD')\n status_df = loadedstatus_df.drop(columns = ['ACTIVE_YN','UPDATE_RULE','DECISION_CODE','GROUP_ASSIGNMENT'])\n return status_df", "_____no_output_____" ], [ "\n#export\ndef get_processed_case_data(f_path):\n \n file_path = f_path + 'cases_processed.csv'\n cases_df = pd.read_csv(file_path,parse_dates=['BIRTHDATE','CASE_OPENED_DT','PREV_CASE_END_DT','LAST_STATUS_DATE'], dtype={'DRIVERS_LICENSE_NO': str})\n cases_df = cases_df[(cases_df['Ignore Case'] == 0) ]\n \n cases_df = cases_df[(cases_df['Is Adjudicated'] == 'Adjudicated') & (cases_df['age_bucket'] >= 80)].reset_index()\n\n cases_df['Origin Report'] = cases_df.apply( lambda x: ORIGIN_MAPPINGS[x['ORIGIN_DSC']] , axis=1)\n\n cases_df['Origin & Decision'] = cases_df.apply( lambda x: str(x['Origin Report']) + ' & ' + str(x['DECISION_DSC']), axis=1)\n\n cases_df['First & Second Status'] = cases_df.apply( lambda x: str(x['FIRSTSTATUSCD']) + '_' + str(x['SECOND_CASE_STATUS_CD']), axis=1)\n\n cases_df['DOCUMENT_COUNT'] = cases_df['DOCUMENT_COUNT'].fillna(0)\n\n cases_df['Age Category'] = cases_df.apply( lambda x: 'Over 80 ' if x.age_bucket >= 80 else 'Under 80', axis=1)\n cases_df['Type Origin'] = cases_df.apply( lambda x: str(x['CASE_CD']) + '_' + str(x['ORIGIN_CD']), axis=1)\n cases_df['Type & Origin Desc'] = cases_df.apply( lambda x: str(x['CASE_DSC']) + ' & ' + str(x['ORIGIN_DSC']), axis=1)\n cases_df['Case Length Over 30 Days'] = cases_df.apply( lambda x: True if x['case_length_days'] >= 30 else False, axis=1)\n cases_df['Case Length Over 60 Days'] = cases_df.apply( lambda x: True if x['case_length_days'] >= 60 else False, axis=1)\n\n\n return cases_df\n\n \n\n", "_____no_output_____" ], [ "\n#export\ndef get_2018processed_case_data(f_path):\n \n# file_path = f_path + 'cases_processed.csv'\n# 
cases_df = pd.read_csv(file_path,parse_dates=['BIRTHDATE','CASE_OPENED_DT','PREV_CASE_END_DT','LAST_STATUS_DATE'], dtype={'DRIVERS_LICENSE_NO': str})\n#     cases_df = cases_df[(cases_df['Ignore Case'] == 0) ]\n    cases_df = get_processed_case_data(f_path)\n    cases2018_df = cases_df[cases_df.opened_year == 2018].reset_index()\n\n#     cases2018_df = cases2018_df[(cases2018_df['Is Adjudicated'] == 'Adjudicated') & (cases2018_df['age_bucket'] >= 80)].reset_index()\n\n#     cases2018_df['Origin Report'] = cases2018_df.apply( lambda x: ORIGIN_MAPPINGS[x['ORIGIN_DSC']] , axis=1)\n\n#     cases2018_df['Origin & Decision'] = cases2018_df.apply( lambda x: str(x['Origin Report']) + ' & ' + str(x['DECISION_DSC']), axis=1)\n\n#     cases2018_df['First & Second Status'] = cases2018_df.apply( lambda x: str(x['FIRSTSTATUSCD']) + '_' + str(x['SECOND_CASE_STATUS_CD']), axis=1)\n\n#     cases2018_df['DOCUMENT_COUNT'] = cases2018_df['DOCUMENT_COUNT'].fillna(0)\n\n    status_df = get_statuses(f_path)\n\n    status_df.columns = ['SECOND_STATUS_CD', 'Second Status Desc']\n    cases2018_df = pd.merge(cases2018_df, status_df, left_on='SECOND_CASE_STATUS_CD', right_on='SECOND_STATUS_CD' )\n\n    status_df.columns = ['INITIALSTATUSCD', 'Initial Status Desc']\n    cases2018_df = pd.merge(cases2018_df, status_df, left_on='FIRSTSTATUSCD', right_on='INITIALSTATUSCD' )\n\n    cases2018_df['First & Second Status Descriptions'] = cases2018_df.apply( lambda x: str(x['Initial Status Desc']) + ' ==> ' + str(x['Second Status Desc']), axis=1)\n\n\n    cases2018_df = cases2018_df.drop(columns = ['INITIALSTATUSCD', 'SECOND_STATUS_CD'])\n\n\n    \n    return cases2018_df\n\n", "_____no_output_____" ], [ "#hide\ndf = get_fte_days_2018()\ntest_eq( df.shape, (12,2))\n\n# can't use this test because it depends on an external file\n# df = get_2018processed_case_data(file_path)\n# test_ne( df.shape[0], 0)", "_____no_output_____" ], [ "#hide\n#s = get_statuses(file_path)\n", "_____no_output_____" ], [ "#columns = ['CASE_OPENED_DT','LAST_STATUS_DATE','DRIVERS_LICENSE_NO','STATUS_COUNT','Year Span','Status','Age Category']\n\n# columns = ['CASE_OPENED_DT','LAST_STATUS_DATE','DRIVERS_LICENSE_NO','STATUS_COUNT', 'Age Category', 'GENERAL_STATUS']\n# c = get_2018processed_case_data(file_path)\n# #c[columns].head()\n# c.head()\n", "_____no_output_____" ], [ "#hide\n\nfrom nbdev.export import notebook2script; notebook2script()", "Converted 00_core.ipynb.\nConverted 01_core.baseline.ipynb.\nConverted 02_core.caseload.ipynb.\nConverted index.ipynb.\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5b52eae61a7cf2f847529af1e3df98e9af5614
158,969
ipynb
Jupyter Notebook
CS246_Colab_0_(Spark_Tutorial).ipynb
masvgp/math_3280
a47aff822e12d7eb097097273eac59169a0b82b4
[ "MIT" ]
null
null
null
CS246_Colab_0_(Spark_Tutorial).ipynb
masvgp/math_3280
a47aff822e12d7eb097097273eac59169a0b82b4
[ "MIT" ]
null
null
null
CS246_Colab_0_(Spark_Tutorial).ipynb
masvgp/math_3280
a47aff822e12d7eb097097273eac59169a0b82b4
[ "MIT" ]
null
null
null
81.397337
63,546
0.744831
[ [ [ "<a href=\"https://colab.research.google.com/github/masvgp/math_3280/blob/main/CS246_Colab_0_(Spark_Tutorial).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# CS246 - Colab 0\n## Spark Tutorial\n\nIn this tutorial you will learn how to use [Apache Spark](https://spark.apache.org) in local mode on a Colab enviroment.\n\nCredits to [Tiziano Piccardi](http://piccardi.me/) for his Spark Tutorial used in the Applied Data Analysis class at EPFL.", "_____no_output_____" ], [ "### Setup", "_____no_output_____" ], [ "Let's setup Spark on your Colab environment. Run the cell below!", "_____no_output_____" ] ], [ [ "!pip install pyspark\n!pip install -U -q PyDrive\n!apt install openjdk-8-jdk-headless -qq\nimport os\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"", "Collecting pyspark\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/45/b0/9d6860891ab14a39d4bddf80ba26ce51c2f9dc4805e5c6978ac0472c120a/pyspark-3.1.1.tar.gz (212.3MB)\n\u001b[K |████████████████████████████████| 212.3MB 52kB/s \n\u001b[?25hCollecting py4j==0.10.9\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9e/b6/6a4fb90cd235dc8e265a6a2067f2a2c99f0d91787f06aca4bcf7c23f3f80/py4j-0.10.9-py2.py3-none-any.whl (198kB)\n\u001b[K |████████████████████████████████| 204kB 46.2MB/s \n\u001b[?25hBuilding wheels for collected packages: pyspark\n Building wheel for pyspark (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for pyspark: filename=pyspark-3.1.1-py2.py3-none-any.whl size=212767604 sha256=07c91c3c3a7a936d8a4f24810af9243446cad989d793188502016766266766b5\n Stored in directory: /root/.cache/pip/wheels/0b/90/c0/01de724414ef122bd05f056541fb6a0ecf47c7ca655f8b3c0f\nSuccessfully built pyspark\nInstalling collected packages: py4j, pyspark\nSuccessfully installed py4j-0.10.9 pyspark-3.1.1\nThe following additional packages will be installed:\n openjdk-8-jre-headless\nSuggested packages:\n openjdk-8-demo openjdk-8-source libnss-mdns fonts-dejavu-extra\n fonts-ipafont-gothic fonts-ipafont-mincho fonts-wqy-microhei\n fonts-wqy-zenhei fonts-indic\nThe following NEW packages will be installed:\n openjdk-8-jdk-headless openjdk-8-jre-headless\n0 upgraded, 2 newly installed, 0 to remove and 30 not upgraded.\nNeed to get 36.5 MB of archives.\nAfter this operation, 143 MB of additional disk space will be used.\nSelecting previously unselected package openjdk-8-jre-headless:amd64.\n(Reading database ... 
160980 files and directories currently installed.)\nPreparing to unpack .../openjdk-8-jre-headless_8u282-b08-0ubuntu1~18.04_amd64.deb ...\nUnpacking openjdk-8-jre-headless:amd64 (8u282-b08-0ubuntu1~18.04) ...\nSelecting previously unselected package openjdk-8-jdk-headless:amd64.\nPreparing to unpack .../openjdk-8-jdk-headless_8u282-b08-0ubuntu1~18.04_amd64.deb ...\nUnpacking openjdk-8-jdk-headless:amd64 (8u282-b08-0ubuntu1~18.04) ...\nSetting up openjdk-8-jre-headless:amd64 (8u282-b08-0ubuntu1~18.04) ...\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/orbd to provide /usr/bin/orbd (orbd) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/servertool to provide /usr/bin/servertool (servertool) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/tnameserv to provide /usr/bin/tnameserv (tnameserv) in auto mode\nSetting up openjdk-8-jdk-headless:amd64 (8u282-b08-0ubuntu1~18.04) ...\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/idlj to provide /usr/bin/idlj (idlj) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/wsimport to provide /usr/bin/wsimport (wsimport) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/jsadebugd to provide /usr/bin/jsadebugd (jsadebugd) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/native2ascii to provide /usr/bin/native2ascii (native2ascii) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/javah to provide /usr/bin/javah (javah) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/hsdb to provide /usr/bin/hsdb (hsdb) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/clhsdb to provide /usr/bin/clhsdb (clhsdb) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/xjc to provide /usr/bin/xjc (xjc) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/schemagen to provide /usr/bin/schemagen (schemagen) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/extcheck to provide /usr/bin/extcheck (extcheck) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/jhat to provide /usr/bin/jhat (jhat) in auto mode\nupdate-alternatives: using /usr/lib/jvm/java-8-openjdk-amd64/bin/wsgen to provide /usr/bin/wsgen (wsgen) in auto mode\n" ] ], [ [ "Now we authenticate a Google Drive client to download the file we will be processing in our Spark job.\n\n**Make sure to follow the interactive instructions.**", "_____no_output_____" ] ], [ [ "from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\n\n# Authenticate and create the PyDrive client\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)", "_____no_output_____" ], [ "id='1L6pCQkldvdBoaEhRFzL0VnrggEFvqON4'\ndownloaded = drive.CreateFile({'id': id}) \ndownloaded.GetContentFile('Bombing_Operations.json.gz')\n\nid='14dyBmcTBA32uXPxDbqr0bFDIzGxMTWwl'\ndownloaded = drive.CreateFile({'id': id}) \ndownloaded.GetContentFile('Aircraft_Glossary.json.gz') ", "_____no_output_____" ] ], [ [ "If you executed the cells above, you should be able to see the files *Bombing_Operations.json.gz* and *Aircraft_Glossary.json.gz* under the \"Files\" tab on the left panel.", "_____no_output_____" ] 
], [ [ "# Let's import the libraries we will need\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport pyspark\nfrom pyspark.sql import *\nfrom pyspark.sql.functions import *\nfrom pyspark import SparkContext, SparkConf", "_____no_output_____" ] ], [ [ "Let's initialize the Spark context.\n", "_____no_output_____" ] ], [ [ "# create the session\nconf = SparkConf().set(\"spark.ui.port\", \"4050\")\n\n# create the context\nsc = pyspark.SparkContext(conf=conf)\nspark = SparkSession.builder.getOrCreate()", "_____no_output_____" ] ], [ [ "You can easily check the current version and get the link of the web interface. In the Spark UI, you can monitor the progress of your job and debug the performance bottlenecks (if your Colab is running with a **local runtime**).", "_____no_output_____" ] ], [ [ "spark", "_____no_output_____" ] ], [ [ "If you are running this Colab on the Google hosted runtime, the cell below will create a *ngrok* tunnel which will allow you to still check the Spark UI.", "_____no_output_____" ] ], [ [ "!wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip\n!unzip ngrok-stable-linux-amd64.zip\nget_ipython().system_raw('./ngrok http 4050 &')\n!curl -s http://localhost:4040/api/tunnels | python3 -c \\\n \"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])\"", "--2021-04-03 19:44:26-- https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip\nResolving bin.equinox.io (bin.equinox.io)... 52.5.36.149, 54.235.108.207, 54.159.163.191, ...\nConnecting to bin.equinox.io (bin.equinox.io)|52.5.36.149|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 14746350 (14M) [application/octet-stream]\nSaving to: ‘ngrok-stable-linux-amd64.zip.1’\n\nngrok-stable-linux- 100%[===================>] 14.06M 37.6MB/s in 0.4s \n\n2021-04-03 19:44:27 (37.6 MB/s) - ‘ngrok-stable-linux-amd64.zip.1’ saved [14746350/14746350]\n\nArchive: ngrok-stable-linux-amd64.zip\nreplace ngrok? [y]es, [n]o, [A]ll, [N]one, [r]ename: y\n inflating: ngrok \nhttp://05abbf7770ee.ngrok.io\n" ] ], [ [ "# Vietnam War\n\n**Pres. Johnson**: _What do you think about this Vietnam thing? I’d like to hear you talk a little bit._\n\n**Sen. Russell**: _Well, frankly, Mr. President, it’s the damn worse mess that I ever saw, and I don’t like to brag and I never have been right many times in my life, but I knew that we were going to get into this sort of mess when we went in there._\n\nMay 27, 1964\n\n![banner](https://raw.githubusercontent.com/epfl-ada/2019/c17af0d3c73f11cb083717b7408fedd86245dc4d/Tutorials/04%20-%20Scaling%20Up/img/banner.jpg)", "_____no_output_____" ], [ "----\n\nThe Vietnam War, also known as the Second Indochina War, and in Vietnam as the Resistance War Against America or simply the American War, was a conflict that occurred in Vietnam, Laos, and Cambodia from 1 November 1955 to the fall of Saigon on 30 April 1975. 
It was the second of the Indochina Wars and was officially fought between North Vietnam and the government of South Vietnam.\n\n**The dataset describes all the air force operations during the Vietnam War.**\n\n**Bombing_Operations** [Get the dataset here](https://drive.google.com/a/epfl.ch/file/d/1L6pCQkldvdBoaEhRFzL0VnrggEFvqON4/view?usp=sharing)\n\n- AirCraft: _Aircraft model (example: EC-47)_\n- ContryFlyingMission: _Country_\n- MissionDate: _Date of the mission_\n- OperationSupported: _Supported War operation_ (example: [Operation Rolling Thunder](https://en.wikipedia.org/wiki/Operation_Rolling_Thunder))\n- PeriodOfDay: _Day or night_\n- TakeoffLocation: _Take-off airport_\n- TimeOnTarget\n- WeaponType\n- WeaponsLoadedWeight\n\n**Aircraft_Glossary** [Get the dataset here](https://drive.google.com/a/epfl.ch/file/d/14dyBmcTBA32uXPxDbqr0bFDIzGxMTWwl/view?usp=sharing)\n\n- AirCraft: _Aircraft model (example: EC-47)_\n- AirCraftName\n- AirCraftType\n\n**Dataset Information:**\n\nTHOR is a painstakingly cultivated database of historic aerial bombings from World War I through Vietnam. THOR has already proven useful in finding unexploded ordnance in Southeast Asia and improving Air Force combat tactics:\nhttps://www.kaggle.com/usaf/vietnam-war-bombing-operations", "_____no_output_____" ], [ "Load the datasets:", "_____no_output_____" ] ], [ [ "Bombing_Operations = spark.read.json(\"Bombing_Operations.json.gz\")\nAircraft_Glossary = spark.read.json(\"Aircraft_Glossary.json.gz\")", "_____no_output_____" ] ], [ [ "Check the schema:", "_____no_output_____" ] ], [ [ "Bombing_Operations.printSchema()", "root\n |-- AirCraft: string (nullable = true)\n |-- ContryFlyingMission: string (nullable = true)\n |-- MissionDate: string (nullable = true)\n |-- OperationSupported: string (nullable = true)\n |-- PeriodOfDay: string (nullable = true)\n |-- TakeoffLocation: string (nullable = true)\n |-- TargetCountry: string (nullable = true)\n |-- TimeOnTarget: double (nullable = true)\n |-- WeaponType: string (nullable = true)\n |-- WeaponsLoadedWeight: long (nullable = true)\n\n" ], [ "Aircraft_Glossary.printSchema()", "root\n |-- AirCraft: string (nullable = true)\n |-- AirCraftName: string (nullable = true)\n |-- AirCraftType: string (nullable = true)\n\n" ] ], [ [ "Get a sample with `take()`:", "_____no_output_____" ] ], [ [ "Bombing_Operations.take(3)", "_____no_output_____" ] ], [ [ "Get a formatted sample with `show()`:", "_____no_output_____" ] ], [ [ "Aircraft_Glossary.show()", "+--------+--------------------+--------------------+\n|AirCraft|        AirCraftName|        AirCraftType|\n+--------+--------------------+--------------------+\n|     A-1|Douglas A-1 Skyra...|         Fighter Jet|\n|    A-26|Douglas A-26 Invader|        Light Bomber|\n|    A-37|Cessna A-37 Drago...|Light ground-atta...|\n|     A-4|McDonnell Douglas...|         Fighter Jet|\n|     A-5|North American A-...|          Bomber Jet|\n|     A-6|Grumman A-6 Intruder|     Attack Aircraft|\n|     A-7|  LTV A-7 Corsair II|     Attack Aircraft|\n|  AC-119|Fairchild AC-119 ...|Military Transpor...|\n|  AC-123|Fairchild C-123 P...|Military Transpor...|\n|  AC-130|Lockheed AC-130 S...|Fixed wing ground...|\n|   AC-47|Douglas AC-47 Spooky|Ground attack air...|\n|    AH-1| Bell AH-1 HueyCobra|          Helicopter|\n|     B-1| Rockwell B-1 Lancer|Heavy strategic b...|\n|    B-52| B-52 Stratofortress|    Strategic bomber|\n|    B-57|Martin B-57 Canberra|     Tactical Bomber|\n|    B-66|Douglas B-66 Dest...|        Light Bomber|\n|     C-1| Grumman C-1A Trader|           Transport|\n|   C-117|     C-117D Skytrain|           Transport|\n|   C-119|Fairchild C-119 F...|Military Transpor...|\n|   
C-123|Fairchild C-123 P...|Military Transpor...|\n+--------+--------------------+--------------------+\nonly showing top 20 rows\n\n" ], [ "print(\"In total there are {0} operations\".format(Bombing_Operations.count()))", "In total there are 4400775 operations\n" ] ], [ [ "## Question 1: Which countries are involved and in how many missions? \n\nKeywords: `DataFrame API`, `SQL`, `group by`, `sort`", "_____no_output_____" ], [ "Let's group the missions by `ContryFlyingMission` and count how many records exist:", "_____no_output_____" ] ], [ [ "missions_counts = Bombing_Operations.groupBy(\"ContryFlyingMission\")\\\n                                    .agg(count(\"*\").alias(\"MissionsCount\"))\\\n                                    .sort(desc(\"MissionsCount\"))\nmissions_counts.show()", "+--------------------+-------------+\n| ContryFlyingMission|MissionsCount|\n+--------------------+-------------+\n|UNITED STATES OF ...|      3708997|\n|     VIETNAM (SOUTH)|       622013|\n|                LAOS|        32777|\n|       KOREA (SOUTH)|        24469|\n|           AUSTRALIA|        12519|\n+--------------------+-------------+\n\n" ] ], [ [ "In this case we used the DataFrame API, but we could rewrite the `groupBy` using pure SQL:", "_____no_output_____" ] ], [ [ "Bombing_Operations.registerTempTable(\"Bombing_Operations\")\n\nquery = \"\"\"\nSELECT ContryFlyingMission, count(*) as MissionsCount\nFROM Bombing_Operations\nGROUP BY ContryFlyingMission\nORDER BY MissionsCount DESC\n\"\"\"\n\nmissions_counts = spark.sql(query)\nmissions_counts.show()", "+--------------------+-------------+\n| ContryFlyingMission|MissionsCount|\n+--------------------+-------------+\n|UNITED STATES OF ...|      3708997|\n|     VIETNAM (SOUTH)|       622013|\n|                LAOS|        32777|\n|       KOREA (SOUTH)|        24469|\n|           AUSTRALIA|        12519|\n+--------------------+-------------+\n\n" ] ], [ [ "The DataFrame is small enough to be moved to Pandas:", "_____no_output_____" ] ], [ [ "missions_count_pd = missions_counts.toPandas()\nmissions_count_pd.head()", "_____no_output_____" ] ], [ [ "Let's plot a bar chart with the number of missions by country:", "_____no_output_____" ] ], [ [ "pl = missions_count_pd.plot(kind=\"bar\", \n                            x=\"ContryFlyingMission\", y=\"MissionsCount\", \n                            figsize=(10, 7), log=True, alpha=0.5, color=\"olive\")\npl.set_xlabel(\"Country\")\npl.set_ylabel(\"Number of Missions (Log scale)\")\npl.set_title(\"Number of missions by Country\")", "_____no_output_____" ] ], [ [ "----", "_____no_output_____" ], [ "## Question 2: Show the number of missions over time for each of the countries involved.\n\nKeywords: `group by`, `parse date`, `plot`\n\nLet's select the relevant columns:", "_____no_output_____" ] ], [ [ "missions_countries = Bombing_Operations.selectExpr([\"to_date(MissionDate) as MissionDate\", \"ContryFlyingMission\"])\nmissions_countries", "_____no_output_____" ] ], [ [ "The field MissionDate is converted to a Python `date` object.\n\nNow we can group by `MissionDate` and `ContryFlyingMission` to get the count:", "_____no_output_____" ] ], [ [ "missions_by_date = missions_countries\\\n                    .groupBy([\"MissionDate\", \"ContryFlyingMission\"])\\\n                    .agg(count(\"*\").alias(\"MissionsCount\"))\\\n                    .sort(asc(\"MissionDate\")).toPandas()\nmissions_by_date.head()", "_____no_output_____" ] ], [ [ "Now we can plot the content with a different series for each country:", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(10, 6))\n\n# iterate the different groups to create a different series\nfor country, missions in missions_by_date.groupby(\"ContryFlyingMission\"): \n    plt.plot(missions[\"MissionDate\"], missions[\"MissionsCount\"], label=country)\n\nplt.legend(loc='best')", 
"_____no_output_____" ] ], [ [ "We can observe how South Vietnam increased its missions starting from 1970. The drop in 1973 is motivated by the [Paris Peace Accords](https://en.wikipedia.org/wiki/Paris_Peace_Accords) that took place on January 27th, 1973, to establish peace in Vietnam and end the war.", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "## Question 3: Who bombed this location?\n\nKeywords: `RDD map reduce` `cache` `save results`\n\n<img style=\"float: right;\" src=\"https://raw.githubusercontent.com/epfl-ada/2019/c17af0d3c73f11cb083717b7408fedd86245dc4d/Tutorials/04%20-%20Scaling%20Up/img/Hanoi_POL1966.jpg\">\n\nThis picture is the Hanoi POL facility (North Vietnam) burning after it was attacked by the U.S. Air Force on 29 June 1966 in the context of the Rolling Thunder operation. \n\nWe are interested in discovering what was the most common take-off location during that day.", "_____no_output_____" ] ], [ [ "jun_29_operations = Bombing_Operations.where(\"MissionDate = '1966-06-29' AND TargetCountry='NORTH VIETNAM'\")", "_____no_output_____" ] ], [ [ "Which coutries scheduled missions that day?", "_____no_output_____" ] ], [ [ "jun_29_operations.groupBy(\"ContryFlyingMission\").agg(count(\"*\").alias(\"MissionsCount\")).toPandas()", "_____no_output_____" ] ], [ [ "Most of the operation that day were performed by USA airplanes.", "_____no_output_____" ] ], [ [ "jun_29_operations.take(1)", "_____no_output_____" ] ], [ [ "You can specify to cache the content in memory:", "_____no_output_____" ] ], [ [ "jun_29_operations.cache()", "_____no_output_____" ] ], [ [ "Now you can count the number of rows and move the content to the cache:", "_____no_output_____" ] ], [ [ "%time jun_29_operations.count()", "_____no_output_____" ] ], [ [ "The second time the content is cached and the operation is much faster:", "_____no_output_____" ] ], [ [ "%time jun_29_operations.count()", "CPU times: user 1.84 ms, sys: 17 µs, total: 1.86 ms\nWall time: 76 ms\n" ] ], [ [ "You can also save the results on a file...", "_____no_output_____" ] ], [ [ "jun_29_operations.write.mode('overwrite').json(\"jun_29_operations.json\")", "_____no_output_____" ] ], [ [ "... and read from the file:", "_____no_output_____" ] ], [ [ "jun_29_operations = spark.read.json(\"jun_29_operations.json\")", "_____no_output_____" ] ], [ [ "We can use the simple DataFrame API...", "_____no_output_____" ] ], [ [ "TakeoffLocationCounts = jun_29_operations\\\n .groupBy(\"TakeoffLocation\").agg(count(\"*\").alias(\"MissionsCount\"))\\\n .sort(desc(\"MissionsCount\"))\nTakeoffLocationCounts.show()", "+----------------+-------------+\n| TakeoffLocation|MissionsCount|\n+----------------+-------------+\n| CONSTELLATION| 87|\n| TAKHLI| 56|\n| KORAT| 55|\n| UDORN AB| 44|\n| UBON AB| 44|\n| DANANG| 35|\n| RANGER| 35|\n| TAN SON NHUT| 26|\n|HANCOCK (CVA-19)| 10|\n| CAM RANH BAY| 2|\n| CUBI PT| 1|\n+----------------+-------------+\n\n" ] ], [ [ "... 
or the explicit Map/Reduce format with RDDs.\n\nFirst we emit a pair in the format (Location, 1):", "_____no_output_____" ] ], [ [ "all_locations = jun_29_operations.rdd.map(lambda row: (row.TakeoffLocation, 1))\nall_locations.take(3)", "_____no_output_____" ] ], [ [ "Then, we sum counters in the reduce step, and we sort by count:", "_____no_output_____" ] ], [ [ "locations_counts_rdd = all_locations.reduceByKey(lambda a, b: a+b).sortBy(lambda r: -r[1])\nlocations_counts_rdd.take(3)", "_____no_output_____" ] ], [ [ "Now we can convert the RDD into a DataFrame by mapping the pairs to objects of type `Row`", "_____no_output_____" ] ], [ [ "locations_counts_with_schema = locations_counts_rdd.map(lambda r: Row(TakeoffLocation=r[0], MissionsCount=r[1]))\nlocations_counts = spark.createDataFrame(locations_counts_with_schema)\nlocations_counts.show()", "+----------------+-------------+\n| TakeoffLocation|MissionsCount|\n+----------------+-------------+\n| CONSTELLATION| 87|\n| TAKHLI| 56|\n| KORAT| 55|\n| UBON AB| 44|\n| UDORN AB| 44|\n| DANANG| 35|\n| RANGER| 35|\n| TAN SON NHUT| 26|\n|HANCOCK (CVA-19)| 10|\n| CAM RANH BAY| 2|\n| CUBI PT| 1|\n+----------------+-------------+\n\n" ] ], [ [ "<img style=\"float: right;\" src=\"https://raw.githubusercontent.com/epfl-ada/2019/c17af0d3c73f11cb083717b7408fedd86245dc4d/Tutorials/04%20-%20Scaling%20Up/img/USS_Constellation.jpg\">\n\n\nThat day the most common take-off location was the ship USS Constellation (CV-64). We cannot unambiguously identify a single take-off location, but we can narrow down the possible candidates. Next steps: explore the TimeOnTarget feature.\n\n_USS Constellation (CV-64), a Kitty Hawk-class supercarrier, was the third ship of the United States Navy to be named in honor of the \"new constellation of stars\" on the flag of the United States. 
One of the fastest ships in the Navy, as proven by her victory during a battlegroup race held in 1985, she was nicknamed \"Connie\" by her crew and officially known as \"America's Flagship\"._", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "## Question 4: What is the most used aircraft type during the Vietnam War (number of missions)?\n\nKeywords: `join` `group by`", "_____no_output_____" ], [ "Let's check the content of `Aircraft_Glossary`:", "_____no_output_____" ] ], [ [ "Aircraft_Glossary.show(5)", "+--------+--------------------+--------------------+\n|AirCraft| AirCraftName| AirCraftType|\n+--------+--------------------+--------------------+\n| A-1|Douglas A-1 Skyra...| Fighter Jet|\n| A-26|Douglas A-26 Invader| Light Bomber|\n| A-37|Cessna A-37 Drago...|Light ground-atta...|\n| A-4|McDonnell Douglas...| Fighter Jet|\n| A-5|North American A-...| Bomber Jet|\n+--------+--------------------+--------------------+\nonly showing top 5 rows\n\n" ] ], [ [ "We are interested in the field `AirCraftType`.", "_____no_output_____" ] ], [ [ "Bombing_Operations.select(\"AirCraft\").show(5)", "+--------+\n|AirCraft|\n+--------+\n| EC-47|\n| EC-47|\n| RF-4|\n| A-1|\n| A-37|\n+--------+\nonly showing top 5 rows\n\n" ] ], [ [ "We can join on the column `AirCraft` of both dataframes.", "_____no_output_____" ], [ "With the DataFrame API:", "_____no_output_____" ] ], [ [ "missions_joined = Bombing_Operations.join(Aircraft_Glossary, \n Bombing_Operations.AirCraft == Aircraft_Glossary.AirCraft)\nmissions_joined", "_____no_output_____" ] ], [ [ "We can select only the field we are interested in:", "_____no_output_____" ] ], [ [ "missions_aircrafts = missions_joined.select(\"AirCraftType\")\nmissions_aircrafts.show(5)", "+--------------------+\n| AirCraftType|\n+--------------------+\n|Military Transpor...|\n|Military Transpor...|\n| Fighter bomber jet|\n| Fighter Jet|\n|Light ground-atta...|\n+--------------------+\nonly showing top 5 rows\n\n" ] ], [ [ "And finally we can group by `AirCraftType` and count:", "_____no_output_____" ] ], [ [ "missions_aircrafts.groupBy(\"AirCraftType\").agg(count(\"*\").alias(\"MissionsCount\"))\\\n .sort(desc(\"MissionsCount\"))\\\n .show()", "+--------------------+-------------+\n| AirCraftType|MissionsCount|\n+--------------------+-------------+\n| Fighter Jet Bomber| 1073126|\n| Fighter Jet| 882594|\n| Jet Fighter Bomber| 451385|\n| Attack Aircraft| 315246|\n|Light ground-atta...| 267457|\n| Fighter bomber jet| 242231|\n|Military Transpor...| 228426|\n| Utility Helicopter| 146653|\n| Strategic bomber| 99100|\n| Tactical Bomber| 82219|\n|Observation Aircraft| 81820|\n|Fixed wing ground...| 75058|\n|Ground attack air...| 73843|\n|Carrier-based Fig...| 58691|\n| Training Aircraft| 48435|\n| Light fighter| 39999|\n| Light Bomber| 39262|\n|Light Tactical Bo...| 34738|\n| Light Utility Plane| 28582|\n|Observation/ Ligh...| 24491|\n+--------------------+-------------+\nonly showing top 20 rows\n\n" ] ], [ [ "Alternatively, we can rewrite this in pure SQL:", "_____no_output_____" ] ], [ [ "Bombing_Operations.registerTempTable(\"Bombing_Operations\")\nAircraft_Glossary.registerTempTable(\"Aircraft_Glossary\")\n\nquery = \"\"\"\nSELECT AirCraftType, count(*) MissionsCount\nFROM Bombing_Operations bo\nJOIN Aircraft_Glossary ag\nON bo.AirCraft = ag.AirCraft\nGROUP BY AirCraftType\nORDER BY MissionsCount DESC\n\"\"\"\n\nspark.sql(query).show()", "+--------------------+-------------+\n| AirCraftType|MissionsCount|\n+--------------------+-------------+\n| Fighter Jet Bomber| 
1073126|\n| Fighter Jet| 882594|\n| Jet Fighter Bomber| 451385|\n| Attack Aircraft| 315246|\n|Light ground-atta...| 267457|\n| Fighter bomber jet| 242231|\n|Military Transpor...| 228426|\n| Utility Helicopter| 146653|\n| Strategic bomber| 99100|\n| Tactical Bomber| 82219|\n|Observation Aircraft| 81820|\n|Fixed wing ground...| 75058|\n|Ground attack air...| 73843|\n|Carrier-based Fig...| 58691|\n| Training Aircraft| 48435|\n| Light fighter| 39999|\n| Light Bomber| 39262|\n|Light Tactical Bo...| 34738|\n| Light Utility Plane| 28582|\n|Observation/ Ligh...| 24491|\n+--------------------+-------------+\nonly showing top 20 rows\n\n" ] ], [ [ "Aircraft of type `Fighter Jet Bomber` participated in the most missions in the Vietnam War.\n\nNote: This dataset would require further cleaning and normalization. See `Fighter Jet Bomber`, `Jet Fighter Bomber`, `Fighter bomber jet`", "_____no_output_____" ] ] ]
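The closing note above flags the label duplication, and the cleaning step it suggests can be sketched directly in Spark. The cell below is our illustrative addition, not part of the original tutorial: the canonical label "Fighter Bomber" and the reuse of the `missions_joined` DataFrame from Question 4 are our own assumptions.

```python
# Hedged sketch: fold the near-duplicate AirCraftType spellings together,
# then re-run the aggregation. Assumes `missions_joined` from above.
from pyspark.sql import functions as F

synonyms = ["Fighter Jet Bomber", "Jet Fighter Bomber", "Fighter bomber jet"]

normalized = missions_joined.withColumn(
    "AirCraftTypeNorm",
    F.when(F.col("AirCraftType").isin(synonyms), F.lit("Fighter Bomber"))
     .otherwise(F.col("AirCraftType")))

normalized.groupBy("AirCraftTypeNorm") \
          .agg(F.count("*").alias("MissionsCount")) \
          .sort(F.desc("MissionsCount")) \
          .show(5)
```

With the three spellings merged, the combined fighter-bomber category would cover roughly 1.77 million missions (1073126 + 451385 + 242231), making its dominance in the ranking even clearer.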
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5b5e1982f2441fbd424d62982e365d6db5c196
42,444
ipynb
Jupyter Notebook
Scales_and_Transformations_Practice.ipynb
alexmpaz/data-visualization
d7541965ee9152485a9a8664656d4f199d214611
[ "Unlicense" ]
null
null
null
Scales_and_Transformations_Practice.ipynb
alexmpaz/data-visualization
d7541965ee9152485a9a8664656d4f199d214611
[ "Unlicense" ]
null
null
null
Scales_and_Transformations_Practice.ipynb
alexmpaz/data-visualization
d7541965ee9152485a9a8664656d4f199d214611
[ "Unlicense" ]
null
null
null
118.228412
12,888
0.837881
[ [ [ "# prerequisite package imports\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\n%matplotlib inline\n\nfrom solutions_univ import scales_solution_1, scales_solution_2", "_____no_output_____" ] ], [ [ "Once again, we make use of the Pokémon data for this exercise.", "_____no_output_____" ] ], [ [ "pokemon = pd.read_csv('./data/pokemon.csv')\npokemon.head()", "_____no_output_____" ] ], [ [ "**Task 1**: There are also variables in the dataset that don't have anything to do with the game mechanics, and are just there for flavor. Try plotting the distribution of Pokémon heights (given in meters). For this exercise, experiment with different axis limits as well as bin widths to see what gives the clearest view of the data.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\nplt.figure(figsize=[10,10])\n\nplt.subplot(2,1,1)\nplt.hist(pokemon.height)\n\nplt.subplot(2,1,2)\nbin_edges = np.arange(0,6.2,0.2)\nplt.hist(pokemon.height, bins=bin_edges) #, rwidth=0.8)\nplt.xticks(np.arange(0,6,1))\nplt.xlim(0,6)", "_____no_output_____" ], [ "# run this cell to check your work against ours\nscales_solution_1()", "There's a very long tail of Pokemon heights. Here, I've focused in on Pokemon of height 6 meters or less, so that I can use a smaller bin size to get a more detailed look at the main data distribution.\n" ] ], [ [ "**Task 2**: In this task, you should plot the distribution of Pokémon weights (given in kilograms). Due to the very large range of values taken, you will probably want to perform an _axis transformation_ as part of your visualization workflow.", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE\nbin_edges = 10 ** np.arange(-1, np.log10(pokemon.weight.max()+0.5), 0.1)\nplt.hist(pokemon.weight, bins=bin_edges)\nplt.xscale('log')\ntick_locs = [0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000]\nplt.xticks(tick_locs, tick_locs)\nplt.xlabel('log (Weight in kg)');", "_____no_output_____" ], [ "# run this cell to check your work against ours\nscales_solution_2()", "Since Pokemon weights are so skewed, I used a log transformation on the x-axis. Bin edges are in increments of 0.1 powers of ten, with custom tick marks to demonstrate the log scaling.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ec5b68f37d6f51e41e64b093d7e400625d28ab59
85,102
ipynb
Jupyter Notebook
binance/.ipynb_checkpoints/Untitled-checkpoint.ipynb
dolidoli92/Binance-Outlier-Detection-1hour-BB72
0b63c818ca8dbfdcab62d22f154572696acda94c
[ "MIT" ]
null
null
null
binance/.ipynb_checkpoints/Untitled-checkpoint.ipynb
dolidoli92/Binance-Outlier-Detection-1hour-BB72
0b63c818ca8dbfdcab62d22f154572696acda94c
[ "MIT" ]
null
null
null
binance/.ipynb_checkpoints/Untitled-checkpoint.ipynb
dolidoli92/Binance-Outlier-Detection-1hour-BB72
0b63c818ca8dbfdcab62d22f154572696acda94c
[ "MIT" ]
null
null
null
60.18529
1,779
0.569329
[ [ [ "from datetime import datetime\nimport secondaryindex\nfrom client import Client\nimport exceptions\nimport requests\nimport sqlite3\nimport pandas as pd\nimport time\n\n# API Key & Secret number\nmy_binance = Client('05WV0wxZV0OdWuSPurpFmH3fiSk1XEukIF6em0HXAHmE4qlCVWdvuM5PeghYExzw',\n 'J3xvURK7z5APzMaDB6jjelRvmh9oJ6jnZXIdqntoUkhfxu6YK8r15hKEnUjM48iF')\n\n#################################################################################################\n# 낚시 준비물 챙기기 #\n#################################################################################################\n### Part1. 지속적인 알고리즘을 수행하기 전, pre-processing 작업\n\n# 접속시간, 잔고 출력 추가하기\nprint (\"\\n접 속 시 간 : \",datetime.now().replace(microsecond=0))\n\nall_currencies_count = len(my_binance.get_products()['data']) # 전체 상장 갯수\nprint (\"\\nBinance에 거래가능한 종목 : \",all_currencies_count)\n\n# step1. Get list of currencies\ncurrencies_btc_market = [] # The list of BTC market currencies\ncurrencies_eth_market = [] # The list of ETH market currencies\ncurrencies_bnb_market = [] # The list of BNB market currencies\ncurrencies_usdt_market = [] # The list of USDT market currencies\nget_products_data = my_binance.get_products()['data']\n\n# 최대한 call function 을 줄일 수 있도록 하자.(Server 에 접근을 많이 할수록 속도는 느려짐)\nfor i in range(0,all_currencies_count):\n if get_products_data[i]['symbol'][-3:] == 'BTC':\n currencies_btc_market.append(get_products_data[i]['symbol'])\n #print (get_products_data[i]['symbol'][-3:],\"1\")\n elif get_products_data[i]['symbol'][-3:] == 'ETH':\n currencies_eth_market.append(get_products_data[i]['symbol'])\n #print (get_products_data[i]['symbol'][-3:],\"2\")\n elif get_products_data[i]['symbol'][-3:] == 'BNB':\n currencies_bnb_market.append(get_products_data[i]['symbol'])\n #print (get_products_data[i]['symbol'][-3:],\"3\")\n elif get_products_data[i]['symbol'][-3:] == 'SDT':\n currencies_usdt_market.append(get_products_data[i]['symbol'])\n #print (get_products_data[i]['symbol'][-3:],\"4\")\n else:\n print (\"Cant't classify\")\n\nprint (\"BTC 시장 상장 종목 갯수 : \",len(currencies_btc_market))\nprint (\"ETH 시장 상장 종목 갯수 : \",len(currencies_eth_market))\nprint (\"BNB 시장 상장 종목 갯수 : \",len(currencies_bnb_market))\nprint (\"USDT 시장 상장 종목 갯수 : \",len(currencies_usdt_market))\n\n# step2. Declare the variables often used\ncurrencies_btc_market_count = len(currencies_btc_market)\ncurrencies_eth_market_count = len(currencies_eth_market)\ncurrencies_bnb_market_count = len(currencies_bnb_market)\ncurrencies_usdt_market_count = len(currencies_usdt_market)\n\n# outlier percent 지정\noutlier_percent = -0.0\n\n# the rate of profit 지정\nrate_of_profit = 1.02\n\nprint (\"\\n이탈률이 \", \"{0:.3}\".format(outlier_percent*100),\"% 로 지정되었습니다.\")\nprint (\"수익률이 \", \"{0:.3}\".format((rate_of_profit-1)*100),\"% 로 지정되었습니다.\")\n\n\ndefault_historical_start_str = '1400000000000'\n\n", "\n접 속 시 간 : 2018-04-28 02:53:50\n\nBinance에 거래가능한 종목 : 305\nBTC 시장 상장 종목 갯수 : 122\nETH 시장 상장 종목 갯수 : 120\nBNB 시장 상장 종목 갯수 : 55\nUSDT 시장 상장 종목 갯수 : 8\n\n이탈률이 -0.0 % 로 지정되었습니다.\n수익률이 2.0 % 로 지정되었습니다.\n" ], [ "# 5분 데이터 저장\nprint (\"\\n현재로부터 모든 종목의 과거 데이터(5분봉)를 로드하고 있습니다. 
It takes about an hour.\")\ndatalist_past_min5 = {'_time': [], '_symbol': [], '_open': [], '_high': [], '_low': [], '_close': [],\n '_base_buy': [], '_base_sell': [], '_quote_buy': [], '_quote_sell': []}\nmin5_last_data = {}\nfor i in range(0, currencies_btc_market_count):\n    try:\n        tmp_min5_data = my_binance.get_historical_klines(symbol=currencies_btc_market[i],\n interval=my_binance.KLINE_INTERVAL_5MINUTE,\n start_str=default_historical_start_str)\n        \n        for j in range(0, len(tmp_min5_data)):\n            # build datalist_past_min5 and save it to the database\n            datalist_past_min5['_time'].append(tmp_min5_data[j][0]/1000)\n            datalist_past_min5['_symbol'].append(currencies_btc_market[i])\n            datalist_past_min5['_open'].append(tmp_min5_data[j][1])\n            datalist_past_min5['_high'].append(tmp_min5_data[j][2])\n            datalist_past_min5['_low'].append(tmp_min5_data[j][3])\n            datalist_past_min5['_close'].append(tmp_min5_data[j][4])\n            datalist_past_min5['_base_buy'].append(tmp_min5_data[j][9])\n            datalist_past_min5['_base_sell'].append(round(float(tmp_min5_data[j][5])-float(tmp_min5_data[j][9]),2))\n            datalist_past_min5['_quote_buy'].append(tmp_min5_data[j][10])\n            datalist_past_min5['_quote_sell'].append(round(float(tmp_min5_data[j][7])-float(tmp_min5_data[j][10]),2))\n        \n        df = pd.DataFrame(datalist_past_min5, columns=['_time', '_symbol', '_open', '_high',\n '_low', '_close', '_base_buy', '_base_sell',\n '_quote_buy','_quote_sell'], index=datalist_past_min5['_time'])\n\n        con = sqlite3.connect(\"C:\\\\Users\\\\dolid\\\\database_binance\\\\stock_min5.db\")\n        df.to_sql(currencies_btc_market[i], con, if_exists='append')\n        con.close()\n        \n        print (currencies_btc_market[i],\" symbol data saved.\")\n        \n    except IndexError as e:\n        print(e)\n        print(\"min5_last_data error\")\n\nprint (\"Finished loading historical data (5-minute candles) for every symbol.\")\n", "\nLoading historical data for every symbol. It takes about 5 minutes.\n" ], [ "# Save 1-hour data\nprint (\"\nLoading historical data (1-hour candles) for every symbol. 
It takes about an hour.\")\nprint (datetime.now())\nfor i in range(0, currencies_btc_market_count):\n    try:\n        tmp_hour1_data = my_binance.get_historical_klines(symbol=currencies_btc_market[i],\n interval=my_binance.KLINE_INTERVAL_1HOUR,\n start_str=default_historical_start_str)\n        datalist_past_hour1 = {'_time': [], '_symbol': [], '_open': [], '_high': [], '_low': [], '_close': [],\n '_base_buy': [], '_base_sell': [], '_quote_buy': [], '_quote_sell': []}\n        \n        for j in range(0, len(tmp_hour1_data)):\n            # build datalist_past_hour1 and save it to the database\n            datalist_past_hour1['_time'].append(tmp_hour1_data[j][0]/1000)\n            datalist_past_hour1['_symbol'].append(currencies_btc_market[i])\n            datalist_past_hour1['_open'].append(tmp_hour1_data[j][1])\n            datalist_past_hour1['_high'].append(tmp_hour1_data[j][2])\n            datalist_past_hour1['_low'].append(tmp_hour1_data[j][3])\n            datalist_past_hour1['_close'].append(tmp_hour1_data[j][4])\n            datalist_past_hour1['_base_buy'].append(tmp_hour1_data[j][9])\n            datalist_past_hour1['_base_sell'].append(round(float(tmp_hour1_data[j][5])-float(tmp_hour1_data[j][9]),2))\n            datalist_past_hour1['_quote_buy'].append(tmp_hour1_data[j][10])\n            datalist_past_hour1['_quote_sell'].append(round(float(tmp_hour1_data[j][7])-float(tmp_hour1_data[j][10]),2))\n        \n        df = pd.DataFrame(datalist_past_hour1, columns=['_time', '_symbol', '_open', '_high',\n '_low', '_close', '_base_buy', '_base_sell',\n '_quote_buy','_quote_sell'], index=datalist_past_hour1['_time'])\n\n        con = sqlite3.connect(\"C:\\\\Users\\\\dolid\\\\database_binance\\\\stock_hour1.db\")\n        df.to_sql(currencies_btc_market[i], con, if_exists='replace')\n        con.close()\n        \n        print (currencies_btc_market[i],\" symbol data saved.\")\n        \n    except IndexError as e:\n        print(e)\n        print(\"hour1_last_data error\")\n\nprint (\"Finished loading historical data (1-hour candles) for every symbol.\")\nprint (datetime.now())", "\nLoading historical data (1-hour candles) for every symbol. It takes about an hour.\n2018-04-28 03:48:16.685996\nBNBBTC symbol data saved.\nNULSBTC symbol data saved.\n" ], [ "tmp_hour1_data = my_binance.get_klines(symbol=currencies_btc_market[i],\n interval=my_binance.KLINE_INTERVAL_1HOUR,\n limit=limit_get_klines_hour1)", "_____no_output_____" ], [ "tmp_hour1_data", "_____no_output_____" ], [ "res = my_binance.get_historical_klines(symbol='LTCBTC',\n interval = my_binance.KLINE_INTERVAL_1HOUR,\n start_str = '1420000000000',\n end_str = '1524564000000')", "_____no_output_____" ], [ "print (datetime.fromtimestamp(res[0][0]/1000).strftime(\"%Y-%m-%d %H:%M:%S\"))", "2017-07-14 13:00:00\n" ], [ "res[0][0]/1000", "_____no_output_____" ], [ "for i in range(0,currencies_btc_market_count):\n    tmp_hour1_data = my_binance.get_historical_klines(symbol=currencies_btc_market[i],\n interval=my_binance.KLINE_INTERVAL_1HOUR,\n start_str='1400000000000')\n    print (currencies_btc_market[i],\"start date : \",datetime.fromtimestamp(tmp_hour1_data[0][0]/1000).strftime(\"%Y-%m-%d %H:%M:%S\"))", "_____no_output_____" ], [ "tmp_hour1_data = my_binance.get_historical_klines(symbol=currencies_btc_market[i],\n interval=my_binance.KLINE_INTERVAL_1HOUR,\n start_str='1400000000000')", "_____no_output_____" ], [ "len(tmp_hour1_data)", "_____no_output_____" ], [ "con.close()", "_____no_output_____" ] ] ]
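As a hypothetical companion to the two saving loops above (not a cell from the original notebook), the stored candles can be read back out of SQLite; the path and the table-per-symbol layout simply mirror what `df.to_sql` wrote.

```python
# Hedged sketch: load one symbol's stored 1-hour candles back from the SQLite file.
import sqlite3
import pandas as pd

def load_hour1(symbol, db_path='C:\\Users\\dolid\\database_binance\\stock_hour1.db'):
    con = sqlite3.connect(db_path)
    try:
        # each symbol was written to its own table by df.to_sql above
        return pd.read_sql_query("SELECT * FROM '{}' ORDER BY _time".format(symbol), con)
    finally:
        con.close()

# e.g. load_hour1('LTCBTC').tail()
```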
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5b705a8a2a7ab08e7348f4d9adbb20d2bbe083
35,760
ipynb
Jupyter Notebook
docs/tutorial/understanding-vanish-gradient.ipynb
Jopyth/tensorboard
b1889902608af8adf7227bd14c8a96915d49ba21
[ "Apache-2.0" ]
402
2016-12-11T00:19:59.000Z
2022-03-20T04:03:11.000Z
docs/tutorial/understanding-vanish-gradient.ipynb
Jopyth/tensorboard
b1889902608af8adf7227bd14c8a96915d49ba21
[ "Apache-2.0" ]
55
2016-12-11T19:53:23.000Z
2020-03-24T15:07:51.000Z
docs/tutorial/understanding-vanish-gradient.ipynb
Jopyth/tensorboard
b1889902608af8adf7227bd14c8a96915d49ba21
[ "Apache-2.0" ]
74
2016-12-11T03:39:05.000Z
2022-03-31T02:04:16.000Z
59.6
412
0.60797
[ [ [ "# Understanding the vanishing gradient problem through visualization", "_____no_output_____" ], [ "There're reasons why deep neural network could work very well, while few people get a promising result or make it possible by simply make their neural network *deep*.\n\n* Computational power and data grow tremendously. People need more complex model and faster computer to make it feasible.\n* Realize and understand the difficulties associated with training a deep model.\n\nIn this tutorial, we would like to show you some insights of the techniques that researchers find useful in training a deep model, using MXNet and its visualizing tool -- TensorBoard.\n\nLet’s recap some of the relevant issues on training a deep model:\n\n* Weight initialization. If you initialize the network with random and small weights, when you look at the gradients down the top layer, you would find they’re getting smaller and smaller, then the first layer almost doesn’t change as the gradients are too small to make a significant update. Without a chance to learn the first layer effectively, it's impossible to update and learn a good deep model.\n* Nonlinearity activation. When people use `sigmoid` or `tanh` as activation function, the gradient, same as the above, is getting smaller and smaller. Just remind the formula of the parameter updates and the gradient.", "_____no_output_____" ], [ "## Experiment Setting\n\nHere we create a simple MLP for cifar10 dataset and visualize the learning processing through loss/accuracy, and its gradient distributions, by changing its initialization and activation setting.\n\n## General Setting \n\nWe adopt MLP as our model and run our experiment in MNIST dataset. Then we'll visualize the weight and gradient of a layer using `Monitor` in MXNet and `Histogram` in TensorBoard.\n\n### Network Structure\n\nHere's the network structure:\n\n```python\ndef get_mlp(acti=\"relu\"):\n \"\"\"\n multi-layer perceptron\n \"\"\"\n data = mx.symbol.Variable('data')\n fc = mx.symbol.FullyConnected(data = data, name='fc', num_hidden=512)\n act = mx.symbol.Activation(data = fc, name='act', act_type=acti)\n fc0 = mx.symbol.FullyConnected(data = act, name='fc0', num_hidden=256)\n act0 = mx.symbol.Activation(data = fc0, name='act0', act_type=acti)\n fc1 = mx.symbol.FullyConnected(data = act0, name='fc1', num_hidden=128)\n act1 = mx.symbol.Activation(data = fc1, name='act1', act_type=acti)\n fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)\n act2 = mx.symbol.Activation(data = fc2, name='act2', act_type=acti)\n fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=32)\n act3 = mx.symbol.Activation(data = fc3, name='act3', act_type=acti)\n fc4 = mx.symbol.FullyConnected(data = act3, name='fc4', num_hidden=16)\n act4 = mx.symbol.Activation(data = fc4, name='act4', act_type=acti)\n fc5 = mx.symbol.FullyConnected(data = act4, name='fc5', num_hidden=10)\n mlp = mx.symbol.SoftmaxOutput(data = fc5, name = 'softmax')\n return mlp\n```\n\nAs you might already notice, we intentionally add more layers than usual, as the vanished gradient problem becomes severer as the network goes deeper.\n\n\n### Weight Initialization\n\nThe weight initialization also has `uniform` and `xavier`. \n\n```python\nif args.init == 'uniform':\n init = mx.init.Uniform(0.1)\nif args.init == 'xavier':\n init = mx.init.Xavier(factor_type=\"in\", magnitude=2.34)\n```\n\nNote that we intentionally choose a near zero setting in `uniform`. 
\n\n### Activation Function\n\nWe compare two different activations, `sigmoid` and `relu`. \n\n```python\n# acti = sigmoid or relu.\nact = mx.symbol.Activation(data = fc, name='act', act_type=acti)\n```", "_____no_output_____" ] ], [ [ "## Logging with TensorBoard and Monitor\n\nIn order to monitor the weight and gradient of this network in different settings, we can use MXNet's `monitor` for logging and `TensorBoard` for visualization.\n\n### Usage\n\nHere's a code snippet from `train_model.py`:\n\n```python\nimport mxnet as mx\nfrom tensorboard import summary\nfrom tensorboard import FileWriter\n\n# where to keep your TensorBoard logging file\nlogdir = './logs/'\nsummary_writer = FileWriter(logdir)\n\n# mx.mon.Monitor's callback \ndef get_gradient(g):\n    # get flatten list\n    grad = g.asnumpy().flatten()\n    # logging using tensorboard, use histogram type.\n    s = summary.histogram('fc_backward_weight', grad)\n    summary_writer.add_summary(s)\n    return mx.nd.norm(g)/np.sqrt(g.size)\n\nmon = mx.mon.Monitor(int(args.num_examples/args.batch_size), get_gradient, pattern='fc_backward_weight')  # get the gradient passed to the first fully-connected layer.\n\n# training\nmodel.fit(\n    X = train,\n    eval_data = val,\n    eval_metric = eval_metrics,\n    kvstore = kv,\n    monitor = mon,\n    epoch_end_callback = checkpoint)\n\n# close summary_writer\nsummary_writer.close()\n```", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append('./mnist/')\nfrom train_mnist import *", "_____no_output_____" ] ], [ [ "## What to expect?\n\nIf a setting suffers from the vanishing gradient problem, the gradients passed down from the top should be very close to zero, and the weights of the network should barely change/update. ", "_____no_output_____" ], [ "### Uniform and Sigmoid", "_____no_output_____" ] ], [ [ "# Uniform and sigmoid\nargs = parse_args('uniform', 'uniform_sigmoid')\ndata_shape = (784, )\nnet = get_mlp(\"sigmoid\")\n\n# train\ntrain_model.fit(args, net, get_iterator(data_shape))", "2017-01-07 15:51:59,029 Node[0] start with arguments Namespace(batch_size=128, data_dir='mnist/', gpus=None, init='uniform', kv_store='local', load_epoch=None, lr=0.1, lr_factor=1, lr_factor_epoch=1, model_prefix=None, name='uniform_sigmoid', network='mlp', num_epochs=10, num_examples=60000, save_model_prefix=None)\n2017-01-07 15:52:02,173 Node[0] \u001b[91m[Deprecation Warning] mxnet.model.FeedForward has been deprecated. 
Please use mxnet.mod.Module instead.\u001b[0m\n2017-01-07 15:52:02,182 Node[0] Start training with [cpu(0), cpu(1), cpu(2), cpu(3)]\n2017-01-07 15:52:04,802 Node[0] Batch: 1 fc_backward_weight 5.1907e-07\t\n2017-01-07 15:52:04,802 Node[0] Batch: 1 fc_backward_weight 4.2085e-07\t\n2017-01-07 15:52:04,803 Node[0] Batch: 1 fc_backward_weight 4.31894e-07\t\n2017-01-07 15:52:04,804 Node[0] Batch: 1 fc_backward_weight 5.80652e-07\t\n2017-01-07 15:52:09,599 Node[0] Epoch[0] Resetting Data Iterator\n2017-01-07 15:52:09,600 Node[0] Epoch[0] Time cost=7.379\n2017-01-07 15:52:09,902 Node[0] Epoch[0] Validation-accuracy=0.105769\n2017-01-07 15:52:09,903 Node[0] Epoch[0] Validation-top_k_accuracy_5=0.509115\n2017-01-07 15:52:12,474 Node[0] Batch: 469 fc_backward_weight 5.15008e-07\t\n2017-01-07 15:52:12,475 Node[0] Batch: 469 fc_backward_weight 5.52044e-07\t\n2017-01-07 15:52:12,476 Node[0] Batch: 469 fc_backward_weight 4.48535e-07\t\n2017-01-07 15:52:12,477 Node[0] Batch: 469 fc_backward_weight 5.8659e-07\t\n2017-01-07 15:52:17,161 Node[0] Epoch[1] Resetting Data Iterator\n2017-01-07 15:52:17,162 Node[0] Epoch[1] Time cost=7.258\n2017-01-07 15:52:17,465 Node[0] Epoch[1] Validation-accuracy=0.105769\n2017-01-07 15:52:17,466 Node[0] Epoch[1] Validation-top_k_accuracy_5=0.504507\n2017-01-07 15:52:20,018 Node[0] Batch: 937 fc_backward_weight 5.96259e-07\t\n2017-01-07 15:52:20,019 Node[0] Batch: 937 fc_backward_weight 5.97974e-07\t\n2017-01-07 15:52:20,020 Node[0] Batch: 937 fc_backward_weight 4.51892e-07\t\n2017-01-07 15:52:20,021 Node[0] Batch: 937 fc_backward_weight 6.5213e-07\t\n2017-01-07 15:52:24,892 Node[0] Epoch[2] Resetting Data Iterator\n2017-01-07 15:52:24,893 Node[0] Epoch[2] Time cost=7.426\n2017-01-07 15:52:25,198 Node[0] Epoch[2] Validation-accuracy=0.105769\n2017-01-07 15:52:25,198 Node[0] Epoch[2] Validation-top_k_accuracy_5=0.510216\n2017-01-07 15:52:27,719 Node[0] Batch: 1405 fc_backward_weight 6.52871e-07\t\n2017-01-07 15:52:27,720 Node[0] Batch: 1405 fc_backward_weight 6.20821e-07\t\n2017-01-07 15:52:27,720 Node[0] Batch: 1405 fc_backward_weight 4.46476e-07\t\n2017-01-07 15:52:27,721 Node[0] Batch: 1405 fc_backward_weight 7.53641e-07\t\n2017-01-07 15:52:34,255 Node[0] Epoch[3] Resetting Data Iterator\n2017-01-07 15:52:34,256 Node[0] Epoch[3] Time cost=9.057\n2017-01-07 15:52:34,556 Node[0] Epoch[3] Validation-accuracy=0.105769\n2017-01-07 15:52:34,558 Node[0] Epoch[3] Validation-top_k_accuracy_5=0.510216\n2017-01-07 15:52:37,634 Node[0] Batch: 1873 fc_backward_weight 6.63064e-07\t\n2017-01-07 15:52:37,635 Node[0] Batch: 1873 fc_backward_weight 6.33577e-07\t\n2017-01-07 15:52:37,636 Node[0] Batch: 1873 fc_backward_weight 4.2922e-07\t\n2017-01-07 15:52:37,636 Node[0] Batch: 1873 fc_backward_weight 8.31741e-07\t\n2017-01-07 15:52:43,013 Node[0] Epoch[4] Resetting Data Iterator\n2017-01-07 15:52:43,014 Node[0] Epoch[4] Time cost=8.456\n2017-01-07 15:52:43,321 Node[0] Epoch[4] Validation-accuracy=0.103666\n2017-01-07 15:52:43,322 Node[0] Epoch[4] Validation-top_k_accuracy_5=0.509014\n2017-01-07 15:52:46,048 Node[0] Batch: 2341 fc_backward_weight 6.47525e-07\t\n2017-01-07 15:52:46,050 Node[0] Batch: 2341 fc_backward_weight 6.37593e-07\t\n2017-01-07 15:52:46,051 Node[0] Batch: 2341 fc_backward_weight 4.12299e-07\t\n2017-01-07 15:52:46,052 Node[0] Batch: 2341 fc_backward_weight 8.71203e-07\t\n2017-01-07 15:52:52,142 Node[0] Epoch[5] Resetting Data Iterator\n2017-01-07 15:52:52,143 Node[0] Epoch[5] Time cost=8.819\n2017-01-07 15:52:52,465 Node[0] Epoch[5] 
Validation-accuracy=0.103666\n2017-01-07 15:52:52,468 Node[0] Epoch[5] Validation-top_k_accuracy_5=0.509014\n2017-01-07 15:52:55,378 Node[0] Batch: 2809 fc_backward_weight 6.23424e-07\t\n2017-01-07 15:52:55,379 Node[0] Batch: 2809 fc_backward_weight 6.33117e-07\t\n2017-01-07 15:52:55,380 Node[0] Batch: 2809 fc_backward_weight 3.99334e-07\t\n2017-01-07 15:52:55,381 Node[0] Batch: 2809 fc_backward_weight 8.78155e-07\t\n2017-01-07 15:53:01,768 Node[0] Epoch[6] Resetting Data Iterator\n2017-01-07 15:53:01,769 Node[0] Epoch[6] Time cost=9.299\n2017-01-07 15:53:02,132 Node[0] Epoch[6] Validation-accuracy=0.107472\n2017-01-07 15:53:02,133 Node[0] Epoch[6] Validation-top_k_accuracy_5=0.509014\n2017-01-07 15:53:04,694 Node[0] Batch: 3277 fc_backward_weight 5.97921e-07\t\n2017-01-07 15:53:04,695 Node[0] Batch: 3277 fc_backward_weight 6.22105e-07\t\n2017-01-07 15:53:04,696 Node[0] Batch: 3277 fc_backward_weight 3.89208e-07\t\n2017-01-07 15:53:04,697 Node[0] Batch: 3277 fc_backward_weight 8.6379e-07\t\n2017-01-07 15:53:10,700 Node[0] Epoch[7] Resetting Data Iterator\n2017-01-07 15:53:10,702 Node[0] Epoch[7] Time cost=8.568\n2017-01-07 15:53:11,146 Node[0] Epoch[7] Validation-accuracy=0.109776\n2017-01-07 15:53:11,147 Node[0] Epoch[7] Validation-top_k_accuracy_5=0.509014\n2017-01-07 15:53:14,031 Node[0] Batch: 3745 fc_backward_weight 5.73259e-07\t\n2017-01-07 15:53:14,032 Node[0] Batch: 3745 fc_backward_weight 6.06878e-07\t\n2017-01-07 15:53:14,033 Node[0] Batch: 3745 fc_backward_weight 3.80379e-07\t\n2017-01-07 15:53:14,035 Node[0] Batch: 3745 fc_backward_weight 8.37382e-07\t\n2017-01-07 15:53:19,583 Node[0] Epoch[8] Resetting Data Iterator\n2017-01-07 15:53:19,587 Node[0] Epoch[8] Time cost=8.437\n2017-01-07 15:53:19,993 Node[0] Epoch[8] Validation-accuracy=0.105970\n2017-01-07 15:53:19,994 Node[0] Epoch[8] Validation-top_k_accuracy_5=0.512620\n2017-01-07 15:53:23,390 Node[0] Batch: 4213 fc_backward_weight 5.49988e-07\t\n2017-01-07 15:53:23,391 Node[0] Batch: 4213 fc_backward_weight 5.89305e-07\t\n2017-01-07 15:53:23,392 Node[0] Batch: 4213 fc_backward_weight 3.71941e-07\t\n2017-01-07 15:53:23,393 Node[0] Batch: 4213 fc_backward_weight 8.05085e-07\t\n2017-01-07 15:53:30,738 Node[0] Epoch[9] Resetting Data Iterator\n2017-01-07 15:53:30,739 Node[0] Epoch[9] Time cost=10.743\n2017-01-07 15:53:31,142 Node[0] Epoch[9] Validation-accuracy=0.105970\n2017-01-07 15:53:31,143 Node[0] Epoch[9] Validation-top_k_accuracy_5=0.512620\n" ] ], [ [ "As you've seen, the values of `fc_backward_weight` are very close to zero, and they didn't change much across batches.\n\n```\n2017-01-07 15:44:38,845 Node[0] Batch: 1 fc_backward_weight 5.1907e-07\t\n2017-01-07 15:44:38,846 Node[0] Batch: 1 fc_backward_weight 4.2085e-07\t\n2017-01-07 15:44:38,847 Node[0] Batch: 1 fc_backward_weight 4.31894e-07\t\n2017-01-07 15:44:38,848 Node[0] Batch: 1 fc_backward_weight 5.80652e-07\n\n2017-01-07 15:45:50,199 Node[0] Batch: 4213 fc_backward_weight 5.49988e-07\t\n2017-01-07 15:45:50,200 Node[0] Batch: 4213 fc_backward_weight 5.89305e-07\t\n2017-01-07 15:45:50,201 Node[0] Batch: 4213 fc_backward_weight 3.71941e-07\t\n2017-01-07 15:45:50,202 Node[0] Batch: 4213 fc_backward_weight 8.05085e-07\n```\n\nYou might wonder why we have 4 different `fc_backward_weight` values; that's because we use 4 CPUs.", "_____no_output_____" ], [ "### Uniform and ReLU", "_____no_output_____" ] ], [ [ "# Uniform and relu\nargs = parse_args('uniform', 'uniform_relu')\ndata_shape = (784, )\nnet = get_mlp(\"relu\")\n\n# train\ntrain_model.fit(args, net, get_iterator(data_shape))", 
"2017-01-07 15:54:06,129 Node[0] start with arguments Namespace(batch_size=128, data_dir='mnist/', gpus=None, init='uniform', kv_store='local', load_epoch=None, lr=0.1, lr_factor=1, lr_factor_epoch=1, model_prefix=None, name='uniform_relu', network='mlp', num_epochs=10, num_examples=60000, save_model_prefix=None)\n2017-01-07 15:54:09,560 Node[0] \u001b[91m[Deprecation Warning] mxnet.model.FeedForward has been deprecated. Please use mxnet.mod.Module instead.\u001b[0m\n2017-01-07 15:54:09,568 Node[0] Start training with [cpu(0), cpu(1), cpu(2), cpu(3)]\n2017-01-07 15:54:12,286 Node[0] Batch: 1 fc_backward_weight 0.000267409\t\n2017-01-07 15:54:12,287 Node[0] Batch: 1 fc_backward_weight 0.00031988\t\n2017-01-07 15:54:12,288 Node[0] Batch: 1 fc_backward_weight 0.000306785\t\n2017-01-07 15:54:12,289 Node[0] Batch: 1 fc_backward_weight 0.000347533\t\n2017-01-07 15:54:18,112 Node[0] Epoch[0] Resetting Data Iterator\n2017-01-07 15:54:18,115 Node[0] Epoch[0] Time cost=8.501\n2017-01-07 15:54:18,490 Node[0] Epoch[0] Validation-accuracy=0.694912\n2017-01-07 15:54:18,491 Node[0] Epoch[0] Validation-top_k_accuracy_5=0.976362\n2017-01-07 15:54:21,076 Node[0] Batch: 469 fc_backward_weight 0.0527437\t\n2017-01-07 15:54:21,077 Node[0] Batch: 469 fc_backward_weight 0.0421219\t\n2017-01-07 15:54:21,078 Node[0] Batch: 469 fc_backward_weight 0.0495309\t\n2017-01-07 15:54:21,079 Node[0] Batch: 469 fc_backward_weight 0.0421051\t\n2017-01-07 15:54:25,876 Node[0] Epoch[1] Resetting Data Iterator\n2017-01-07 15:54:25,877 Node[0] Epoch[1] Time cost=7.385\n2017-01-07 15:54:26,182 Node[0] Epoch[1] Validation-accuracy=0.907652\n2017-01-07 15:54:26,183 Node[0] Epoch[1] Validation-top_k_accuracy_5=0.986679\n2017-01-07 15:54:28,738 Node[0] Batch: 937 fc_backward_weight 0.0285753\t\n2017-01-07 15:54:28,739 Node[0] Batch: 937 fc_backward_weight 0.0520748\t\n2017-01-07 15:54:28,740 Node[0] Batch: 937 fc_backward_weight 0.0807526\t\n2017-01-07 15:54:28,742 Node[0] Batch: 937 fc_backward_weight 0.0502396\t\n2017-01-07 15:54:34,071 Node[0] Epoch[2] Resetting Data Iterator\n2017-01-07 15:54:34,073 Node[0] Epoch[2] Time cost=7.890\n2017-01-07 15:54:34,404 Node[0] Epoch[2] Validation-accuracy=0.921675\n2017-01-07 15:54:34,405 Node[0] Epoch[2] Validation-top_k_accuracy_5=0.987380\n2017-01-07 15:54:36,924 Node[0] Batch: 1405 fc_backward_weight 0.0596137\t\n2017-01-07 15:54:36,925 Node[0] Batch: 1405 fc_backward_weight 0.145902\t\n2017-01-07 15:54:36,926 Node[0] Batch: 1405 fc_backward_weight 0.0783883\t\n2017-01-07 15:54:36,927 Node[0] Batch: 1405 fc_backward_weight 0.0810687\t\n2017-01-07 15:54:41,885 Node[0] Epoch[3] Resetting Data Iterator\n2017-01-07 15:54:41,886 Node[0] Epoch[3] Time cost=7.480\n2017-01-07 15:54:42,210 Node[0] Epoch[3] Validation-accuracy=0.947516\n2017-01-07 15:54:42,211 Node[0] Epoch[3] Validation-top_k_accuracy_5=0.990084\n2017-01-07 15:54:44,714 Node[0] Batch: 1873 fc_backward_weight 0.113804\t\n2017-01-07 15:54:44,715 Node[0] Batch: 1873 fc_backward_weight 0.0355092\t\n2017-01-07 15:54:44,716 Node[0] Batch: 1873 fc_backward_weight 0.0510211\t\n2017-01-07 15:54:44,716 Node[0] Batch: 1873 fc_backward_weight 0.0461469\t\n2017-01-07 15:54:49,711 Node[0] Epoch[4] Resetting Data Iterator\n2017-01-07 15:54:49,712 Node[0] Epoch[4] Time cost=7.498\n2017-01-07 15:54:50,021 Node[0] Epoch[4] Validation-accuracy=0.949319\n2017-01-07 15:54:50,023 Node[0] Epoch[4] Validation-top_k_accuracy_5=0.991587\n2017-01-07 15:54:52,664 Node[0] Batch: 2341 fc_backward_weight 0.0304884\t\n2017-01-07 15:54:52,665 Node[0] Batch: 2341 
fc_backward_weight 0.0153732\t\n2017-01-07 15:54:52,666 Node[0] Batch: 2341 fc_backward_weight 0.0638052\t\n2017-01-07 15:54:52,667 Node[0] Batch: 2341 fc_backward_weight 0.0358958\t\n2017-01-07 15:54:58,352 Node[0] Epoch[5] Resetting Data Iterator\n2017-01-07 15:54:58,353 Node[0] Epoch[5] Time cost=8.330\n2017-01-07 15:54:58,663 Node[0] Epoch[5] Validation-accuracy=0.952224\n2017-01-07 15:54:58,664 Node[0] Epoch[5] Validation-top_k_accuracy_5=0.991687\n2017-01-07 15:55:01,141 Node[0] Batch: 2809 fc_backward_weight 0.180743\t\n2017-01-07 15:55:01,142 Node[0] Batch: 2809 fc_backward_weight 0.0453026\t\n2017-01-07 15:55:01,143 Node[0] Batch: 2809 fc_backward_weight 0.0212601\t\n2017-01-07 15:55:01,144 Node[0] Batch: 2809 fc_backward_weight 0.0950233\t\n2017-01-07 15:55:06,363 Node[0] Epoch[6] Resetting Data Iterator\n2017-01-07 15:55:06,364 Node[0] Epoch[6] Time cost=7.700\n2017-01-07 15:55:06,646 Node[0] Epoch[6] Validation-accuracy=0.949219\n2017-01-07 15:55:06,647 Node[0] Epoch[6] Validation-top_k_accuracy_5=0.992889\n2017-01-07 15:55:09,153 Node[0] Batch: 3277 fc_backward_weight 0.0977342\t\n2017-01-07 15:55:09,154 Node[0] Batch: 3277 fc_backward_weight 0.0354421\t\n2017-01-07 15:55:09,155 Node[0] Batch: 3277 fc_backward_weight 0.00394049\t\n2017-01-07 15:55:09,156 Node[0] Batch: 3277 fc_backward_weight 0.0402826\t\n2017-01-07 15:55:15,473 Node[0] Epoch[7] Resetting Data Iterator\n2017-01-07 15:55:15,475 Node[0] Epoch[7] Time cost=8.827\n2017-01-07 15:55:15,867 Node[0] Epoch[7] Validation-accuracy=0.956130\n2017-01-07 15:55:15,868 Node[0] Epoch[7] Validation-top_k_accuracy_5=0.993389\n2017-01-07 15:55:18,700 Node[0] Batch: 3745 fc_backward_weight 0.012503\t\n2017-01-07 15:55:18,701 Node[0] Batch: 3745 fc_backward_weight 0.064014\t\n2017-01-07 15:55:18,702 Node[0] Batch: 3745 fc_backward_weight 0.0158367\t\n2017-01-07 15:55:18,703 Node[0] Batch: 3745 fc_backward_weight 0.00945755\t\n2017-01-07 15:55:23,187 Node[0] Epoch[8] Resetting Data Iterator\n2017-01-07 15:55:23,188 Node[0] Epoch[8] Time cost=7.314\n2017-01-07 15:55:23,488 Node[0] Epoch[8] Validation-accuracy=0.957031\n2017-01-07 15:55:23,489 Node[0] Epoch[8] Validation-top_k_accuracy_5=0.992788\n2017-01-07 15:55:25,936 Node[0] Batch: 4213 fc_backward_weight 0.0226081\t\n2017-01-07 15:55:25,937 Node[0] Batch: 4213 fc_backward_weight 0.0039793\t\n2017-01-07 15:55:25,937 Node[0] Batch: 4213 fc_backward_weight 0.0306151\t\n2017-01-07 15:55:25,938 Node[0] Batch: 4213 fc_backward_weight 0.00818676\t\n2017-01-07 15:55:30,435 Node[0] Epoch[9] Resetting Data Iterator\n2017-01-07 15:55:30,436 Node[0] Epoch[9] Time cost=6.945\n2017-01-07 15:55:30,731 Node[0] Epoch[9] Validation-accuracy=0.959736\n2017-01-07 15:55:30,732 Node[0] Epoch[9] Validation-top_k_accuracy_5=0.991987\n" ] ], [ [ "Even with a \"poor\" initialization, the model can still converge quickly with a proper activation function. 
And the gradient magnitudes differ significantly.\n\n```\n2017-01-07 15:54:12,286 Node[0] Batch: 1 fc_backward_weight 0.000267409\t\n2017-01-07 15:54:12,287 Node[0] Batch: 1 fc_backward_weight 0.00031988\t\n2017-01-07 15:54:12,288 Node[0] Batch: 1 fc_backward_weight 0.000306785\t\n2017-01-07 15:54:12,289 Node[0] Batch: 1 fc_backward_weight 0.000347533\n\n2017-01-07 15:55:25,936 Node[0] Batch: 4213 fc_backward_weight 0.0226081\t\n2017-01-07 15:55:25,937 Node[0] Batch: 4213 fc_backward_weight 0.0039793\t\n2017-01-07 15:55:25,937 Node[0] Batch: 4213 fc_backward_weight 0.0306151\t\n2017-01-07 15:55:25,938 Node[0] Batch: 4213 fc_backward_weight 0.00818676\n```", "_____no_output_____" ], [ "### Xavier and Sigmoid ", "_____no_output_____" ] ], [ [ "# Xavier and sigmoid\nargs = parse_args('xavier', 'xavier_sigmoid')\ndata_shape = (784, )\nnet = get_mlp(\"sigmoid\")\n\n# train\ntrain_model.fit(args, net, get_iterator(data_shape))", "2017-01-07 15:59:10,021 Node[0] start with arguments Namespace(batch_size=128, data_dir='mnist/', gpus=None, init='xavier', kv_store='local', load_epoch=None, lr=0.1, lr_factor=1, lr_factor_epoch=1, model_prefix=None, name='xavier_sigmoid', network='mlp', num_epochs=10, num_examples=60000, save_model_prefix=None)\n2017-01-07 15:59:13,291 Node[0] \u001b[91m[Deprecation Warning] mxnet.model.FeedForward has been deprecated. Please use mxnet.mod.Module instead.\u001b[0m\n2017-01-07 15:59:13,299 Node[0] Start training with [cpu(0), cpu(1), cpu(2), cpu(3)]\n2017-01-07 15:59:15,909 Node[0] Batch: 1 fc_backward_weight 9.27798e-06\t\n2017-01-07 15:59:15,909 Node[0] Batch: 1 fc_backward_weight 8.58008e-06\t\n2017-01-07 15:59:15,910 Node[0] Batch: 1 fc_backward_weight 8.96261e-06\t\n2017-01-07 15:59:15,911 Node[0] Batch: 1 fc_backward_weight 7.33611e-06\t\n2017-01-07 15:59:20,779 Node[0] Epoch[0] Resetting Data Iterator\n2017-01-07 15:59:20,780 Node[0] Epoch[0] Time cost=7.433\n2017-01-07 15:59:21,086 Node[0] Epoch[0] Validation-accuracy=0.105769\n2017-01-07 15:59:21,087 Node[0] Epoch[0] Validation-top_k_accuracy_5=0.509115\n2017-01-07 15:59:23,778 Node[0] Batch: 469 fc_backward_weight 6.76125e-06\t\n2017-01-07 15:59:23,779 Node[0] Batch: 469 fc_backward_weight 6.54805e-06\t\n2017-01-07 15:59:23,780 Node[0] Batch: 469 fc_backward_weight 6.80302e-06\t\n2017-01-07 15:59:23,782 Node[0] Batch: 469 fc_backward_weight 7.39115e-06\t\n2017-01-07 15:59:29,174 Node[0] Epoch[1] Resetting Data Iterator\n2017-01-07 15:59:29,175 Node[0] Epoch[1] Time cost=8.087\n2017-01-07 15:59:29,477 Node[0] Epoch[1] Validation-accuracy=0.105769\n2017-01-07 15:59:29,477 Node[0] Epoch[1] Validation-top_k_accuracy_5=0.504507\n2017-01-07 15:59:32,143 Node[0] Batch: 937 fc_backward_weight 5.83071e-06\t\n2017-01-07 15:59:32,144 Node[0] Batch: 937 fc_backward_weight 5.59626e-06\t\n2017-01-07 15:59:32,145 Node[0] Batch: 937 fc_backward_weight 5.776e-06\t\n2017-01-07 15:59:32,147 Node[0] Batch: 937 fc_backward_weight 6.28738e-06\t\n2017-01-07 15:59:37,783 Node[0] Epoch[2] Resetting Data Iterator\n2017-01-07 15:59:37,784 Node[0] Epoch[2] Time cost=8.305\n2017-01-07 15:59:38,085 Node[0] Epoch[2] Validation-accuracy=0.105769\n2017-01-07 15:59:38,086 Node[0] Epoch[2] Validation-top_k_accuracy_5=0.510216\n2017-01-07 15:59:41,031 Node[0] Batch: 1405 fc_backward_weight 4.951e-06\t\n2017-01-07 15:59:41,032 Node[0] Batch: 1405 fc_backward_weight 4.72836e-06\t\n2017-01-07 15:59:41,033 Node[0] Batch: 1405 fc_backward_weight 4.8514e-06\t\n2017-01-07 15:59:41,034 Node[0] Batch: 1405 fc_backward_weight 5.26915e-06\t\n2017-01-07 
15:59:47,042 Node[0] Epoch[3] Resetting Data Iterator\n2017-01-07 15:59:47,043 Node[0] Epoch[3] Time cost=8.957\n2017-01-07 15:59:47,424 Node[0] Epoch[3] Validation-accuracy=0.105769\n2017-01-07 15:59:47,425 Node[0] Epoch[3] Validation-top_k_accuracy_5=0.509014\n2017-01-07 15:59:50,295 Node[0] Batch: 1873 fc_backward_weight 4.22193e-06\t\n2017-01-07 15:59:50,296 Node[0] Batch: 1873 fc_backward_weight 4.03044e-06\t\n2017-01-07 15:59:50,297 Node[0] Batch: 1873 fc_backward_weight 4.11877e-06\t\n2017-01-07 15:59:50,298 Node[0] Batch: 1873 fc_backward_weight 4.45402e-06\t\n2017-01-07 15:59:56,082 Node[0] Epoch[4] Resetting Data Iterator\n2017-01-07 15:59:56,083 Node[0] Epoch[4] Time cost=8.653\n2017-01-07 15:59:56,378 Node[0] Epoch[4] Validation-accuracy=0.105769\n2017-01-07 15:59:56,379 Node[0] Epoch[4] Validation-top_k_accuracy_5=0.509014\n2017-01-07 15:59:58,837 Node[0] Batch: 2341 fc_backward_weight 3.64564e-06\t\n2017-01-07 15:59:58,838 Node[0] Batch: 2341 fc_backward_weight 3.48901e-06\t\n2017-01-07 15:59:58,839 Node[0] Batch: 2341 fc_backward_weight 3.55765e-06\t\n2017-01-07 15:59:58,840 Node[0] Batch: 2341 fc_backward_weight 3.82692e-06\t\n2017-01-07 16:00:03,458 Node[0] Epoch[5] Resetting Data Iterator\n2017-01-07 16:00:03,459 Node[0] Epoch[5] Time cost=7.078\n2017-01-07 16:00:03,790 Node[0] Epoch[5] Validation-accuracy=0.105769\n2017-01-07 16:00:03,791 Node[0] Epoch[5] Validation-top_k_accuracy_5=0.509014\n2017-01-07 16:00:06,406 Node[0] Batch: 2809 fc_backward_weight 3.19336e-06\t\n2017-01-07 16:00:06,407 Node[0] Batch: 2809 fc_backward_weight 3.06777e-06\t\n2017-01-07 16:00:06,409 Node[0] Batch: 2809 fc_backward_weight 3.12543e-06\t\n2017-01-07 16:00:06,410 Node[0] Batch: 2809 fc_backward_weight 3.34344e-06\t\n2017-01-07 16:00:12,052 Node[0] Epoch[6] Resetting Data Iterator\n2017-01-07 16:00:12,053 Node[0] Epoch[6] Time cost=8.261\n2017-01-07 16:00:12,352 Node[0] Epoch[6] Validation-accuracy=0.107472\n2017-01-07 16:00:12,353 Node[0] Epoch[6] Validation-top_k_accuracy_5=0.509014\n2017-01-07 16:00:14,968 Node[0] Batch: 3277 fc_backward_weight 2.83478e-06\t\n2017-01-07 16:00:14,969 Node[0] Batch: 3277 fc_backward_weight 2.73443e-06\t\n2017-01-07 16:00:14,970 Node[0] Batch: 3277 fc_backward_weight 2.78607e-06\t\n2017-01-07 16:00:14,971 Node[0] Batch: 3277 fc_backward_weight 2.9644e-06\t\n2017-01-07 16:00:20,252 Node[0] Epoch[7] Resetting Data Iterator\n2017-01-07 16:00:20,253 Node[0] Epoch[7] Time cost=7.899\n2017-01-07 16:00:20,541 Node[0] Epoch[7] Validation-accuracy=0.105970\n2017-01-07 16:00:20,542 Node[0] Epoch[7] Validation-top_k_accuracy_5=0.512620\n2017-01-07 16:00:23,036 Node[0] Batch: 3745 fc_backward_weight 2.54587e-06\t\n2017-01-07 16:00:23,037 Node[0] Batch: 3745 fc_backward_weight 2.46527e-06\t\n2017-01-07 16:00:23,038 Node[0] Batch: 3745 fc_backward_weight 2.51372e-06\t\n2017-01-07 16:00:23,039 Node[0] Batch: 3745 fc_backward_weight 2.66109e-06\t\n2017-01-07 16:00:27,410 Node[0] Epoch[8] Resetting Data Iterator\n2017-01-07 16:00:27,411 Node[0] Epoch[8] Time cost=6.868\n2017-01-07 16:00:27,718 Node[0] Epoch[8] Validation-accuracy=0.105970\n2017-01-07 16:00:27,719 Node[0] Epoch[8] Validation-top_k_accuracy_5=0.512620\n2017-01-07 16:00:30,358 Node[0] Batch: 4213 fc_backward_weight 2.30903e-06\t\n2017-01-07 16:00:30,359 Node[0] Batch: 4213 fc_backward_weight 2.24373e-06\t\n2017-01-07 16:00:30,360 Node[0] Batch: 4213 fc_backward_weight 2.29058e-06\t\n2017-01-07 16:00:30,361 Node[0] Batch: 4213 fc_backward_weight 2.41351e-06\t\n2017-01-07 16:00:35,874 Node[0] Epoch[9] Resetting 
Data Iterator\n2017-01-07 16:00:35,875 Node[0] Epoch[9] Time cost=8.156\n2017-01-07 16:00:36,182 Node[0] Epoch[9] Validation-accuracy=0.105970\n2017-01-07 16:00:36,183 Node[0] Epoch[9] Validation-top_k_accuracy_5=0.512620\n" ] ], [ [ "## Visualization\n\nNow start using TensorBoard:\n\n```bash\ntensorboard --logdir=logs/\n```\n\n![Dashboard](https://github.com/zihaolucky/tensorboard/raw/data/docs/tutorial/mnist/pic1.png)\n\n![dist](https://github.com/zihaolucky/tensorboard/raw/data/docs/tutorial/mnist/pic2.png)\n\n![hist](https://github.com/zihaolucky/tensorboard/raw/data/docs/tutorial/mnist/pic3.png)\n\n", "_____no_output_____" ], [ "## References\n\nYou might find these materials useful:\n\n[1] [Rohan #4: The vanishing gradient problem – A Year of Artificial Intelligence](https://ayearofai.com/rohan-4-the-vanishing-gradient-problem-ec68f76ffb9b#.bojpejg3o) \n[2] [On the difficulty of training recurrent and deep neural networks - YouTube](https://www.youtube.com/watch?v=A7poQbTrhxc) \n[3] [What is the vanishing gradient problem? - Quora](https://www.quora.com/What-is-the-vanishing-gradient-problem)", "_____no_output_____" ] ] ]
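A small NumPy sketch (our addition, independent of the MXNet code above) makes the scale difference in those logs plausible: the backward signal is scaled by the activation's derivative at every layer, and sigmoid's derivative never exceeds 0.25, while relu's is exactly 1 on the active side.

```python
# Illustrative only -- the input range and depth are our assumptions.
import numpy as np

x = np.random.RandomState(0).uniform(-1, 1, 10000)
sig = 1 / (1 + np.exp(-x))

sigmoid_slope = (sig * (1 - sig)).mean()   # mean derivative, <= 0.25 by construction
relu_slope = (x > 0).mean()                # derivative is 1 on the active half

# rough shrinkage of a unit gradient after the six activation layers of get_mlp
print(sigmoid_slope ** 6)  # ~2e-4: same ballpark as the tiny uniform/sigmoid values
print(relu_slope ** 6)     # ~1.6e-2: orders of magnitude larger, as in the relu run
```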
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ec5b818a4733a5b6da7b993d7c7bce3cfca83538
2,260
ipynb
Jupyter Notebook
matplotlib/gallery_jupyter/mplot3d/tricontourf3d.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
matplotlib/gallery_jupyter/mplot3d/tricontourf3d.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
matplotlib/gallery_jupyter/mplot3d/tricontourf3d.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
41.851852
1,051
0.592478
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Triangular 3D filled contour plot\n\n\nFilled contour plots of unstructured triangular grids.\n\nThe data used is the same as in the second plot of trisurf3d_demo2.\ntricontour3d_demo shows the unfilled version of this example.\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib.tri as tri\nimport numpy as np\n\n# First create the x, y, z coordinates of the points.\nn_angles = 48\nn_radii = 8\nmin_radius = 0.25\n\n# Create the mesh in polar coordinates and compute x, y, z.\nradii = np.linspace(min_radius, 0.95, n_radii)\nangles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)\nangles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\nangles[:, 1::2] += np.pi/n_angles\n\nx = (radii*np.cos(angles)).flatten()\ny = (radii*np.sin(angles)).flatten()\nz = (np.cos(radii)*np.cos(3*angles)).flatten()\n\n# Create a custom triangulation.\ntriang = tri.Triangulation(x, y)\n\n# Mask off unwanted triangles.\ntriang.set_mask(np.hypot(x[triang.triangles].mean(axis=1),\n y[triang.triangles].mean(axis=1))\n < min_radius)\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.tricontourf(triang, z, cmap=plt.cm.CMRmap)\n\n# Customize the view angle so it's easier to understand the plot.\nax.view_init(elev=45.)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
ec5b82d992144dcebd603c4132f50c04f44aa56b
72,624
ipynb
Jupyter Notebook
LAB/Lab Session 4(Decision Trees)/MLFA_Decision Tree.ipynb
Amarnathpg123/MLFAHandsOnPython
c3592195b4a81752723783b4625c0c39ac5ff4b5
[ "MIT" ]
1
2021-09-19T16:41:59.000Z
2021-09-19T16:41:59.000Z
LAB/Lab Session 4(Decision Trees)/MLFA_Decision Tree.ipynb
Amarnathpg123/MLFAHandsOnPython
c3592195b4a81752723783b4625c0c39ac5ff4b5
[ "MIT" ]
null
null
null
LAB/Lab Session 4(Decision Trees)/MLFA_Decision Tree.ipynb
Amarnathpg123/MLFAHandsOnPython
c3592195b4a81752723783b4625c0c39ac5ff4b5
[ "MIT" ]
null
null
null
73.431749
408
0.548607
[ [ [ "# Tutorial - Decision Tree \n\nDecision trees are a common and powerful prediction approach.\n\nThey are popular because the final model is so easy to understand. The final decision tree can explain why a specific prediction was made, making it attractive for use. The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features. More complex ensemble approaches like random forests are built on top of decision trees.\n\nIn this tutorial, we will learn how to create a Classification Tree.\n\n1. Import necessary libraries.\n2. Import datset and view its shape.\n3. Define the feature attribute and target attribute.\n4. Spliting the dataset 80% for training, and 20% for testing the classifier.\n5. Using entropy and gini index to measure the quality of a split and fit the data.\n6. Making predictions on test data.\n7. Calculating accuracy.\n8. Calculating the confusion matrix.\n9. Decision tree visualization.", "_____no_output_____" ] ], [ [ "#Importing necessary libraries \n\nimport pandas as pd \n\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split # Import train_test_split function\nfrom sklearn import metrics #Import scikit-learn metrics module for accuracy calculation", "_____no_output_____" ], [ "# import the dataset\n\ndata = pd.read_csv(\"wine.csv\")\n# checking the dimensions of the data\nprint(data.shape)\n# To see first 5 rows of the dataset\n#data.head()", "(178, 14)\n" ], [ "# columns present in the dataset\ndata.columns ", "_____no_output_____" ], [ "# define the feature as well as the target datasets / columns\nfeature_col = ['Alcohol', 'Malic_Acid', 'Ash', 'Ash_Alcanity', 'Magnesium','Total_Phenols', \n 'Flavanoids', 'Nonflavanoid_Phenols','Proanthocyanins', 'Color_Intensity', 'Hue', 'OD280', 'Proline']\nfeatures = data[feature_col]\nlabel = data.Customer_Segment", "_____no_output_____" ], [ "# Split the given data into 80 percent training data and 20 percent testing data\nx_train, x_test, y_train, y_test = train_test_split(features, label, test_size=0.2)", "_____no_output_____" ], [ "#Fit the given data\nmodel_tree = DecisionTreeClassifier(criterion = 'entropy')\nmodel_tree = model_tree.fit(x_train, y_train)", "_____no_output_____" ], [ "#Make prediction on the test data\nypred_entropy = model_tree.predict(x_test)", "_____no_output_____" ], [ "# percentage of accuracy\nprint(metrics.accuracy_score(y_test, ypred_entropy))", "0.9166666666666666\n" ], [ "# confusion matrix\nprint(metrics.confusion_matrix(y_test, ypred_entropy))", "[[12 2 0]\n [ 0 11 1]\n [ 0 0 10]]\n" ], [ "# to visualize the decision tree\n\nimport graphviz\nfrom sklearn import tree\ndot_data = tree.export_graphviz(model_tree, feature_names=feature_col, filled=True, rounded=True)\n\n# Draw graph\ngraph = graphviz.Source(dot_data, format=\"png\") \ngraph", "_____no_output_____" ], [ "# Using gini index\n\n#Fit the given data\ntree1 = DecisionTreeClassifier()\ntree1 = tree1.fit(x_train, y_train)\n\n#Make prediction on the test data\nypred_gini = tree1.predict(x_test)\n\n# percentage of accuracy\nprint(metrics.accuracy_score(y_test, ypred_gini))\n#print(f'accuracy obtained is {metrics.accuracy_score(y_test, ypred_gini)*100:.2f}%')\n\n# confusion matrix\nprint(metrics.confusion_matrix(y_test, ypred_gini))\n\n# to visualize the decision tree\nimport graphviz\ndot_data = tree.export_graphviz(tree1, feature_names=feature_col, filled=True, 
rounded=True)\n\n# Draw graph\ngraph = graphviz.Source(dot_data, format=\"png\") \ngraph", "0.8611111111111112\n[[10 4 0]\n [ 0 12 0]\n [ 0 1 9]]\n" ] ], [ [ "Decision Tree Regressor", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_boston\nfrom sklearn.tree import DecisionTreeRegressor", "_____no_output_____" ], [ "boston = load_boston()\nX = pd.DataFrame(boston.data, columns=boston.feature_names)\ny = pd.DataFrame(boston.target, columns=[\"target\"])\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\n\nmodel = DecisionTreeRegressor(criterion = 'mse', max_depth = 3)\nmodel = model.fit(X_train, y_train)", "_____no_output_____" ], [ "y_pred = model.predict(X_test)\nprint(metrics.r2_score(y_test, y_pred))", "0.8021990752059437\n" ], [ "import graphviz\ndot_data = tree.export_graphviz(model, feature_names=feature_col, filled=True, rounded=True)\n\n# Draw graph\ngraph = graphviz.Source(dot_data, format=\"png\") \ngraph", "_____no_output_____" ], [ "# https://scikit-learn.org/stable/datasets.html (list of availbale sklean dataset)", "_____no_output_____" ], [ "# https://scikit-learn.org/stable/modules/tree.html (Decision tree )", "_____no_output_____" ] ] ]
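A hedged aside on the two split criteria used in the notebook above (this sketch is not part of the original tutorial, and the class counts below are invented for illustration): entropy and the Gini index both measure how mixed a node's class distribution is, and both score a pure node as 0.

import numpy as np

def entropy(counts):
    # H = -sum(p * log2(p)) over the non-empty classes
    p = np.asarray(counts, dtype=float)
    p = p[p > 0] / p.sum()
    return -np.sum(p * np.log2(p))

def gini(counts):
    # G = 1 - sum(p^2)
    p = np.asarray(counts, dtype=float) / np.sum(counts)
    return 1.0 - np.sum(p ** 2)

node_counts = [12, 11, 10]  # hypothetical class counts at one tree node
print(entropy(node_counts))  # about 1.58 bits for this near-uniform 3-class node
print(gini(node_counts))     # about 0.66 for the same node

The tree greedily prefers the split that drives these impurity values down the most, which is why criterion='entropy' and the default Gini criterion can grow slightly different trees on the same data.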
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec5ba0ad1877feab16d943e217ae9acca0f337c1
12,385
ipynb
Jupyter Notebook
notebooks/wd-releasegroups.ipynb
loujine/musicbrainz-sparql
f7c355b3f472b7ee55b87deccc92b2bc60f3366c
[ "MIT" ]
null
null
null
notebooks/wd-releasegroups.ipynb
loujine/musicbrainz-sparql
f7c355b3f472b7ee55b87deccc92b2bc60f3366c
[ "MIT" ]
null
null
null
notebooks/wd-releasegroups.ipynb
loujine/musicbrainz-sparql
f7c355b3f472b7ee55b87deccc92b2bc60f3366c
[ "MIT" ]
null
null
null
28.536866
229
0.521437
[ [ [ "%run -i ../startup.py\nENTITY_TYPE = 'release-group'", "_____no_output_____" ] ], [ [ "Wikidata entities:\n\nhttps://www.wikidata.org/wiki/Q482994 album\n\nWikidata properties:\n\nhttps://www.wikidata.org/wiki/Property:P175 performer\n\nhttps://www.wikidata.org/wiki/Property:P214 VIAF\n\nhttps://www.wikidata.org/wiki/Property:P268 BNF\n\nhttps://www.wikidata.org/wiki/Property:P244 LoC\n\nhttps://www.wikidata.org/wiki/Property:P1954 discogs master ID\n\nhttps://www.wikidata.org/wiki/Property:P436 MusicBrainz RG ID\n\nexamples\n\nhttps://www.wikidata.org/wiki/Q7713309\n", "_____no_output_____" ], [ "## Release Groups from Wikidata", "_____no_output_____" ] ], [ [ "links_type_from_wd = sparql(\"\"\"\nSELECT distinct (count(?rg) as ?cnt) ?ins ?insLabel\nWHERE {\n ?rg wdt:P31 ?ins;\n wdt:P436 ?mbid.\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n}\ngroup by ?ins ?insLabel\norder by DESC(?cnt)\n\"\"\")\nlinks_type_from_wd[links_type_from_wd.cnt.astype(int) > 50]", "_____no_output_____" ], [ "# linked to MB RG\nlinks_from_wd = sparql(\"\"\"\nSELECT distinct (count(?rg) as ?cnt)\nWHERE {\n ?rg wdt:P436 ?mbid .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n}\nORDER BY ASC(?rgLabel)\n\"\"\")\nlinks_from_wd", "_____no_output_____" ] ], [ [ "That's too many to be fetched. Try the artists with a discogs link", "_____no_output_____" ] ], [ [ "links_from_wd = sparql(\"\"\"\nSELECT distinct (count(?rg) as ?cnt)\nWHERE {\n ?rg wdt:P436 ?mbid .\n ?rg wdt:P1954 ?discogs .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n}\n\"\"\")\nlinks_from_wd", "_____no_output_____" ], [ "links_from_wd = sparql(\"\"\"\nSELECT (?rg AS ?wd) ?mbid ?rgLabel ?discogs\nWHERE {\n ?rg wdt:P436 ?mbid .\n ?rg wdt:P1954 ?discogs .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n}\nORDER BY ASC(?rgLabel)\n\"\"\")\nlinks_from_wd.rename(columns={'rgLabel': 'name'}, inplace=True)\n\nprint('Count:', len(links_from_wd))\ndisplay_df(links_from_wd.head())", "_____no_output_____" ] ], [ [ "## Release Groups from MusicBrainz with wikidata links", "_____no_output_____" ] ], [ [ "links_from_mb = sql(\"\"\"\nSELECT\n url.url AS wd,\n release_group.gid AS mbid,\n release_group.name\nFROM \n release_group\n JOIN l_release_group_url AS lau ON lau.entity0 = release_group.id\n JOIN url ON lau.entity1 = url.id\nWHERE\n url.url LIKE '%%wikidata.org%%'\nORDER BY\n release_group.name\n;\n\"\"\")\nlinks_from_mb.wd = links_from_mb.wd.apply(lambda s: s.split('/')[-1])\nlinks_from_mb.mbid = links_from_mb.mbid.apply(str)\n\nprint('Count:', len(links_from_mb))\ndisplay_df(links_from_mb.head())", "_____no_output_____" ] ], [ [ "## Duplicate links", "_____no_output_____" ] ], [ [ "duplicate_wd = links_from_mb[[\n 'wd', 'mbid', 'name']].groupby('wd').filter(\n lambda row: len(row.mbid) > 1).sort_values('wd')\n\nprint('Count:', len(duplicate_wd))\ndisplay_df(duplicate_wd.head())", "_____no_output_____" ], [ "duplicate_mb = links_from_mb[['wd', 'mbid', 'name']].groupby('mbid').filter(\n lambda row: len(row.mbid) > 1).sort_values('mbid')\n\nprint('Count:', len(duplicate_mb))\ndisplay_df(duplicate_mb.head())", "_____no_output_____" ] ], [ [ "## Data alignment", "_____no_output_____" ] ], [ [ "merge = pd.merge(links_from_wd, links_from_mb, \n on=['wd', 'mbid'], suffixes=('_wd', '_mb'),\n how='outer', indicator=True)\ndisplay_df(merge.head())", "_____no_output_____" ], [ "# link in mb but missing in wd\nlinks_to_add_to_wd = merge.loc[lambda x : 
x['_merge']=='right_only'][[\n 'name_mb', 'mbid', 'wd']]\n\nprint('Count:', len(links_to_add_to_wd))\ndisplay_df(links_to_add_to_wd.head())", "_____no_output_____" ], [ "# link in wd but missing in mb\nlinks_to_add_to_mb = merge.loc[lambda x : x['_merge']=='left_only'][[\n 'name_wd', 'wd', 'mbid']]\nlinks_to_add_to_mb['edit_link'] = links_to_add_to_mb.apply(\n mb_artist_edit_wd_link, axis=1)\n\nprint('Count:', len(links_to_add_to_mb))\ndisplay_df(links_to_add_to_mb.head())", "_____no_output_____" ] ], [ [ "## Data alignment through Discogs", "_____no_output_____" ], [ "TBD on wd entries with discogs links and no mb link", "_____no_output_____" ] ], [ [ "# linked to Discogs master\ndiscogs_links_from_wd = sparql(\"\"\"\nSELECT (?rg AS ?wd) ?rgLabel ?discogs\nWHERE {\n ?rg wdt:P1954 ?discogs .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n MINUS {\n ?rg wdt:P436 ?mbid . \n }\n}\nORDER BY ASC(?rgLabel)\n\"\"\")\ndiscogs_links_from_wd.rename(columns={'rgLabel': 'name'}, inplace=True)\n\nprint('Count:', len(discogs_links_from_wd))\ndisplay_df(discogs_links_from_wd.head())", "_____no_output_____" ], [ "discogs_links_from_mb = sql(\"\"\"\nSELECT\n url.url AS discogs,\n release_group.gid AS mbid,\n release_group.name\nFROM \n release_group\n JOIN l_release_group_url AS lau ON lau.entity0 = release_group.id\n JOIN url ON lau.entity1 = url.id\nWHERE\n url.url LIKE '%%discogs.com%%'\n AND lau.entity0 IN (\n SELECT\n entity0\n FROM \n l_release_group_url\n JOIN url ON l_release_group_url.entity1 = url.id\n WHERE\n url.url LIKE '%%discogs.com%%'\n EXCEPT\n SELECT\n entity0\n FROM \n l_release_group_url\n JOIN url ON l_release_group_url.entity1 = url.id\n WHERE\n url.url LIKE '%%wikidata.org%%'\n )\nORDER BY\n release_group.name\n;\n\"\"\")\ndiscogs_links_from_mb.discogs = discogs_links_from_mb.discogs.apply(lambda s: s.split('/')[-1])\ndiscogs_links_from_mb.mbid = discogs_links_from_mb.mbid.apply(str)\n\nprint('Count:', len(discogs_links_from_mb))\ndisplay_df(discogs_links_from_mb.head())", "_____no_output_____" ], [ "discogs_merge = pd.merge(discogs_links_from_wd, discogs_links_from_mb, \n on=['discogs'], suffixes=('_wd', '_mb'),\n how='inner', indicator=False)\ndiscogs_merge['edit_link'] = discogs_merge.apply(\n mb_releasegroup_edit_wd_link, axis=1)\n\nprint('Count:', len(discogs_merge))\ndisplay_df(discogs_merge.head())", "_____no_output_____" ] ], [ [ "## Report", "_____no_output_____" ] ], [ [ "import jinja2\n\ntemplate = jinja2.Template(\"\"\"\n<!doctype html>\n\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>Alignment of MusicBrainz and Wikidata Release Groups</title>\n <link href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm\" crossorigin=\"anonymous\">\n </head>\n\n <body style=\"margin: 20px;\">\n <h1>Alignment of MusicBrainz and Wikidata Release Groups</h1>\n\n <p>Latest MB database update: {{ MB_DATABASE_VERSION }}</p>\n <p>Latest update: {{ date.today().isoformat() }}</p>\n\n <ol>\n <li>\n <a href=\"#wddup\">MusicBrainz entities sharing a Wikidata link</a>\n ({{ duplicate_wd.shape[0] }} rows)\n </li>\n <li>\n <a href=\"#mbdup\">Wikidata entities sharing a MusicBrainz link</a>\n ({{ duplicate_mb.shape[0] }} rows)\n </li>\n <li>\n <a href=\"#wd2mb\">Add missing Wikidata links to MusicBrainz</a>\n ({{ links_to_add_to_mb.shape[0] }} rows)\n </li>\n <li>\n <a href=\"#discogs2mb\">Add missing Wikidata links to MusicBrainz 
(through Discogs)</a>\n ({{ discogs_merge.shape[0] }} rows)\n </li>\n <li>\n <a href=\"#mb2wd\">Add missing MusicBrainz links to Wikidata</a>\n </li>\n </ol>\n \n <h2 id=\"wddup\">MusicBrainz entities sharing a Wikidata link</h2>\n {{ df_to_html(duplicate_wd) }}\n\n <h2 id=\"mbdup\">Wikidata entities sharing a MusicBrainz link</h2>\n {{ df_to_html(duplicate_mb) }}\n\n <h2 id=\"wd2mb\">Add missing Wikidata links to MusicBrainz</h2>\n {{ df_to_html(links_to_add_to_mb) }}\n\n <h2 id=\"discogs2mb\">Add missing Wikidata links to MusicBrainz (through Discogs)</h2>\n {{ df_to_html(discogs_merge) }}\n\n <h2 id=\"mb2wd\">Add missing MusicBrainz links to Wikidata</h2>\n \n </body>\n</html>\n\"\"\")\n\nwith open('../docs/wd-releasegroups-report.html', 'w') as f:\n f.write(template.render(**globals())\n .replace('&lt;', '<').replace('&gt;', '>')\n .replace('class=\"dataframe\"', 'class=\"table table-striped table-hover table-sm\"')\n .replace('thead', 'thead class=\"thead-light\"'))", "_____no_output_____" ] ] ]
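A hedged toy example of the alignment step used in the notebook above (the two frames here are invented for illustration): the outer merge's `indicator=True` option adds a `_merge` column, and that column is what drives the `left_only` / `right_only` filters.

import pandas as pd

wd = pd.DataFrame({'wd': ['Q1', 'Q2'], 'mbid': ['a', 'b']})  # toy Wikidata side
mb = pd.DataFrame({'wd': ['Q2', 'Q3'], 'mbid': ['b', 'c']})  # toy MusicBrainz side
m = pd.merge(wd, mb, on=['wd', 'mbid'], how='outer', indicator=True)
print(m)
# left_only  -> present only on the Wikidata side (candidate link to add to MusicBrainz)
# right_only -> present only on the MusicBrainz side (candidate link to add to Wikidata)
print(m.loc[m['_merge'] == 'left_only'])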
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ec5bae699b89891b302ed9d9c07bcb243fe649ac
15,174
ipynb
Jupyter Notebook
10.ipynb
Ruth-W/Week-5
b0bfbaa358a480ff1b7f747823188599b7dabe4b
[ "MIT" ]
1
2021-06-20T09:30:05.000Z
2021-06-20T09:30:05.000Z
10.ipynb
Ruth-W/Week-5
b0bfbaa358a480ff1b7f747823188599b7dabe4b
[ "MIT" ]
null
null
null
10.ipynb
Ruth-W/Week-5
b0bfbaa358a480ff1b7f747823188599b7dabe4b
[ "MIT" ]
null
null
null
20.098013
353
0.471662
[ [ [ "# Exercise Notebook (DS)", "_____no_output_____" ] ], [ [ "# this code conceals irrelevant warning messages\nimport warnings\nwarnings.simplefilter('ignore', FutureWarning)\nimport numpy as np", "_____no_output_____" ], [ "elements = [3,5,6,8,2,9]\nprint(type(elements))\nprint(elements)", "<class 'list'>\n[3, 5, 6, 8, 2, 9]\n" ], [ "elements = np.array(elements)\nprint(type(elements))\nprint(elements)", "<class 'numpy.ndarray'>\n[3 5 6 8 2 9]\n" ] ], [ [ "#### NumPy - Array Attributes\n\n`ndarray.shape`\nThis array attribute returns a tuple consisting of array dimensions. It can also be used to resize the array.\n\n**Examples**", "_____no_output_____" ] ], [ [ "a = np.array([[1,2,3],[4,5,6]]) \nprint (a.shape)\na", "(2, 3)\n" ], [ "# this resizes the ndarray \n\na = np.array([[1,2,3],[4,5,6]]) \na.shape = (3,2) \nprint (a)", "[[1 2]\n [3 4]\n [5 6]]\n" ] ], [ [ "NumPy also provides a reshape function to resize an array.", "_____no_output_____" ] ], [ [ "a = np.array([[1,2,5],[4,5,6]]) \nb = a.reshape(3,2) \nprint (b)", "[[1 2]\n [5 4]\n [5 6]]\n" ] ], [ [ "#### ndarray.ndim\nThis array attribute returns the number of array dimensions.\n\n`Example`", "_____no_output_____" ] ], [ [ "elements.ndim", "_____no_output_____" ], [ "# an array of evenly spaced numbers \na = np.arange(24) \nprint (a)", "[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23]\n" ], [ "# this is one dimensional array \na = np.arange(24) \nprint(a.ndim) \n\n# now reshape it \nb = a.reshape(2,4,3) \nprint (b) \n# b is having three dimensions\nprint(b.ndim)", "1\n[[[ 0 1 2]\n [ 3 4 5]\n [ 6 7 8]\n [ 9 10 11]]\n\n [[12 13 14]\n [15 16 17]\n [18 19 20]\n [21 22 23]]]\n3\n" ], [ "b[1][1][2]", "_____no_output_____" ], [ "# array of five zeros. Default dtype is float \nx = np.zeros(5) \nprint (x)", "[0. 0. 0. 0. 0.]\n" ], [ "# array of five ones. Default dtype is float \nx = np.ones(5) \nprint (x)", "[1. 1. 1. 1. 1.]\n" ] ], [ [ "#### NumPy - Array From Existing Data\n\n**numpy.asarray**\nThis function is similar to numpy.array except for the fact that it has fewer parameters. This routine is useful for converting Python sequence into ndarray.", "_____no_output_____" ] ], [ [ "# convert list to ndarray \n\nx = [1,2,3]\na = np.asarray(x) \nprint (a)", "[1 2 3]\n" ], [ "# ndarray from list of tuples \n\nx = [(1,2,3),(4,5)] \na = np.asarray(x) \nprint (a)", "[(1, 2, 3) (4, 5)]\n" ] ], [ [ "#### numpy.frombuffer\nThis function interprets a buffer as one-dimensional array. Any object that exposes the buffer interface is used as parameter to return an ndarray.\n\n`numpy.frombuffer(buffer, dtype = float, count = -1, offset = 0)`", "_____no_output_____" ], [ "##### The constructor takes the following parameters.\n\nSr.No.\tParameter & Description\n1. buffer\n\nAny object that exposes buffer interface\n\n2. dtype\n\nData type of returned ndarray. Defaults to float\n\n3. count\n\nThe number of items to read, default -1 means all data\n\n4. offset\n\nThe starting position to read from. 
Default is 0", "_____no_output_____" ], [ "#### Operations on Numpy Array", "_____no_output_____" ] ], [ [ "# 2D Numpy Array\na = np.array([[1,2,3], [4,1,5]]) \nprint (a)", "_____no_output_____" ], [ "# Addition\na+3", "_____no_output_____" ], [ "# Multiplication\na*2", "_____no_output_____" ], [ "# Subtraction\na-2", "_____no_output_____" ], [ "# Division\na/3", "_____no_output_____" ] ], [ [ "**NumPy - Indexing & Slicing**\n\nContents of ndarray object can be accessed and modified by indexing or slicing, just like Python's in-built container objects.\n\nAs mentioned earlier, items in ndarray object follows zero-based index. Three types of indexing methods are available − field access, basic slicing and advanced indexing.\n\nBasic slicing is an extension of Python's basic concept of slicing to n dimensions. A Python slice object is constructed by giving **start, stop, and step** parameters to the built-in slice function. This slice object is passed to the array to extract a part of array.", "_____no_output_____" ] ], [ [ "a = np.arange(10) \nprint(a)\ns = slice(2,7,2) \nprint(a[s])", "[0 1 2 3 4 5 6 7 8 9]\n[2 4 6]\n" ] ], [ [ "In the above example, an ndarray object is prepared by arange() function. Then a slice object is defined with start, stop, and step values 2, 7, and 2 respectively. When this slice object is passed to the ndarray, a part of it starting with index 2 up to 7 with a step of 2 is sliced.\n\nThe same result can also be obtained by giving the slicing parameters separated by a colon : (start:stop:step) directly to the ndarray object.", "_____no_output_____" ] ], [ [ "a = np.arange(10) \nprint(a)\nb = a[2:7:2] \nprint (b)", "[0 1 2 3 4 5 6 7 8 9]\n[2 4 6]\n" ], [ "a = np.array([[1,2,3],[3,4,5],[4,5,6]]) \nprint (a ) \n\n# slice items starting from index\nprint('Now we will slice the array from the index a[1:]') \nprint(a[1:])", "[[1 2 3]\n [3 4 5]\n [4 5 6]]\nNow we will slice the array from the index a[1:]\n[[3 4 5]\n [4 5 6]]\n" ] ], [ [ "#### Numpy 2D Array", "_____no_output_____" ] ], [ [ "a = np.array([[1,2,3],[3,4,5]])\nprint(a)", "[[1 2 3]\n [3 4 5]]\n" ], [ "a = np.array([[1,2,3], [4,1,5]]) \nprint (a)", "[[1 2 3]\n [4 1 5]]\n" ], [ "# Addition\na+3", "_____no_output_____" ], [ "# Multiplication\na*2", "_____no_output_____" ], [ "# Subtraction\na-2", "_____no_output_____" ], [ "# Division\na/3", "_____no_output_____" ] ], [ [ "##### Task", "_____no_output_____" ], [ "1. Write a NumPy program to test element-wise for NaN of a given array.", "_____no_output_____" ] ], [ [ "c = np.array([2, 0, np.nan, np.inf])\nprint(c)\nprint(\"Test element-wise for NaN:\")\nprint(np.isnan(a))", "_____no_output_____" ] ], [ [ "2. Write a NumPy program to test element-wise for complex number, real number of a given array. Also test whether a given number is a scalar type or not.", "_____no_output_____" ] ], [ [ "d = np.array([1+1j, 1+0j, 5.6, 4, 3, 1j])\nprint(d)\nprint(\"Checking for complex number:\")\nprint(np.iscomplex(d))\nprint(\"Checking for real number:\")\nprint(np.isreal(d))\nprint(\"Checking for scalar type:\")\nprint(np.isscalar(3.1))\nprint(np.isscalar([3.1]))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ec5bb13601556c019629fb723bf680caa3bd0b7d
620,814
ipynb
Jupyter Notebook
notebooks/pg-ts.ipynb
macks22/cmabeval
ac32437bd23efbb1a559922f457b8adc52833a2b
[ "MIT" ]
3
2019-11-02T22:09:19.000Z
2020-02-20T05:25:28.000Z
notebooks/pg-ts.ipynb
macks22/banditry
ac32437bd23efbb1a559922f457b8adc52833a2b
[ "MIT" ]
null
null
null
notebooks/pg-ts.ipynb
macks22/banditry
ac32437bd23efbb1a559922f457b8adc52833a2b
[ "MIT" ]
1
2021-04-06T13:33:24.000Z
2021-04-06T13:33:24.000Z
342.99116
115,448
0.922209
[ [ [ "# Pólya-Gamma Logistic Regression + Thompson Sampling\n\nThis notebook will walk through an implementation of Polya-Gamma augmented logistic regression + Thompson Sampling. Specifically, it will summarize and reimplement the method described in:\n\n> B. Dumitrascu, K. Feng, and B. E. Engelhardt, “PG-TS: Improved Thompson Sampling for Logistic Contextual Bandits,” arXiv:1805.07458 [cs, stat], May 2018.", "_____no_output_____" ], [ "## Notation\n\nBasical mathematical notation:\n\n- $x \\in \\mathbb{R}^d$ denotes a $d$-dimensional column vector with scalar entries $x_j$, indexed by integers $j = \\{1, 2, \\ldots, d\\}$\n- $\\mathbf{X}$ denotes a square matrix, while $X$ refers to a random variable (RV)\n- We use $||.||$ for the 2-norm, while $||x||_A$ denotes $x^T\\mathbf{A}x$, for a matrix $\\mathbf{A}$\n- Let $\\mathbb{1}_\\mathcal{B}(x)$ be the indicator function of a set $\\mathcal{B}$ defined as 1 if $x \\in \\mathcal{B}$ and 0 otherwise\n- $MVN(\\mathbf{b}, \\mathbf{B})$ denotes the multivariate normal distribution with mean $\\mathbf{b}$ and covariance matrix $\\mathbf{B}$, and $\\mathbf{I}_d$ is the $d \\times d$ identity matrix\n\nContextual bandit notation:\n\n- $\\mathcal{A}$ is the set of arms and $|\\mathcal{A}| = K$ is the number of arms\n- At each time step $t$, the learner observes context $\\mathbf{x}_{t,a} \\in \\mathbb{R}^d$\n- The learner then chooses an arm $a_t$ and receives a reward $r_t \\in \\{0, 1\\}$\n- The expectation of this reward is related to the context through a parameter $\\mathbf{\\theta^*} \\in \\mathbb{R}^d$ and a logistic link function $\\text{logit}^{-1}$, abbreviated as $\\mu$\n- Let $\\mathcal{D}_t$ be the set of triplets $(x_{i, a_i}, a_i, r_i)$ for $i = 1, \\ldots, t$, representing the past $t$ observations fo the contexts, actions chosen, and their corresponding rewards\n- The objective is to minimize cumulative regret given $\\mathcal{D}_{t-1}$ after fixed budget of $t$ steps\n- Regret is the expected difference between optimal reward received by always playing optimal arm $a^*$ and actual reward received:\n\n$$r_t = \\sum_{i=1}^t \\left[\n\\mu(\\mathbf{x}_{i, a^*}^T \\mathbf{\\theta^*}) -\n\\mu(\\mathbf{x}_{i, a_i}^T \\mathbf{\\theta^*})\n\\right]$$\n\nThe parameter $\\theta$ is re-estimated after each round $t$ using a generalized lienar model (GLM) estimator, as $\\theta_t$.", "_____no_output_____" ], [ "## Thompson Sampling for Contextual Logistic Bandits\n\nSpecify prior $p(\\theta)$ and estimate posterior from observed triplets using Bayes' rule:\n\n$$p(\\theta | \\mathcal{D}_{t-1}) \\propto \\prod_{i=1}^{t-1} p(r_i | a_i, \\mathbf{x}_{ia, a_i}, \\theta) p(\\theta)$$\n\nThe optimal arm is then chosen according to the explicit TS probabilities $p(a_t = a^* | \\theta_t, \\mathcal{D}_{t-1})$, computed via:\n\n$$\\int \\mathbb{1}_{\\mathcal{A}_t^\\text{max}}(\\theta_t)\n\\left(E[r_t | a, \\mathbf{x}_{t,a}, \\theta_t] \\right)\np(\\theta_t | \\mathcal{D}_{t-1}) d\\theta_t,$$\n\nwhere $\\mathcal{A}_t^\\text{max}(\\theta_t)$ is the set of arms with maximum rewards at step $t$ if the true parameter were $\\theta_t$.\n\nIn the case of logistic regression, the joint PMF over rewards $r_1, \\ldots, r_t$ observed upon taking actions $a_1, \\ldots, a_t$ is:\n\n$$\\prod_{i=1}^t p(r_i = 1 | a_i, \\mathbf{x}_{i, a_i}, \\theta_i) = \\prod_{i=1}^t \\text{Bernoulli}(\\mu(\\mathbf{x}_{i, a_i}^T \\theta_i))$$", "_____no_output_____" ], [ "### Laplacian approximation\n\nSince the posterior derived from this joint distribution is intractable, Laplace-TS 
(Chapelle and Li, 2011) approximates the MV Gaussian with a diagonal covariance matrix. The mean of this is a MAP estimate. Laplace-TS performs well compared to UCB and other approximate techniques used by Russo et al. in 2017.", "_____no_output_____" ], [ "### Polya-Gamma (PG) Data Augmentation\n\nReframe discrete rewards as functions of latent variables with Pólya-Gamma distributions over a continuous space. The PG latent variable construction relies on the theoretical properties of PG random variables to exploit the fact that **the logistic likelihood is a mixture of Gaussians with PG mixing distributions.**\n\n**So what is a PG-distributed RV?** _Let $X$ be a real-valued RV. $X$ follows a PG distribution with parameters $b > 0$ and $c \\in \\mathbb{R}, X \\sim PG(b, c)$ if_:\n\n$$X =\n\\frac{1}{2\\pi^2}\n\\sum_{k=1}^\\infty \\frac{G_k}{(k - 1/2)^2 + c^2/(4\\pi^2)},$$\n\n_where $G_k \\sim Ga(b,1)$ are independent Gamma variables_.", "_____no_output_____" ], [ "The identity central to the PG augmentation scheme is:\n\n$$\\frac{exp(\\psi)^a}{(1 + exp(\\psi))^b} =\n2^{-b} exp(\\kappa \\psi) \\int_0^\\infty exp(-\\omega \\psi^2 / 2) p(\\omega) d\\omega,$$\n\nwhere $\\psi \\in \\mathbb{R}, a \\in \\mathbb{R}, b > 0, \\kappa = a - b/2$, and $\\omega \\sim PG(b, 0)$. When $\\psi = \\mathbf{x}_t^T \\theta$, the previous identity allows us to write the logistic likelihood contribution of step $t$, $L_t(\\theta)$ as:\n\n\\begin{align}\nL_t(\\theta)\n &= \\frac{exp(\\mathbf{x}_t^T \\theta)^{r_t}}{1 + exp(\\mathbf{x}_t^T \\theta)} \\\\\n &\\propto exp(\\kappa_t \\mathbf{x}_t^T \\theta) \\int_0^\\infty exp[-\\omega_t (\\mathbf{x}_t^T \\theta)^2 / 2] p(\\omega_t; 1, 0) d\\omega_t,\n\\end{align}\n\nwhere $\\kappa_t = r_t - 1/2$ and $p(\\omega_t; 1, 0)$ is the density of a PG-idstributed RV with parameters $(1, 0)$. These values result from appropriate substitution of $a=r_t$ and $b=1$ in the identity above.\n\nWith this formulation of the likelihood, the conditional posterior of $\\theta$ given latent variables $\\mathbf{\\omega} = [\\omega_1, \\ldots, \\omega_t]$ and past rewards $\\mathbf{r} = [r_1, \\ldots, r_t]$ is a Gaussian:\n\n\\begin{align}\np(\\theta | \\mathbf{\\omega}, \\mathbf{r})\n &= p(\\theta) \\prod_{i=1}^t L_i(\\theta | \\omega_i) \\\\\n &\\propto p(\\theta) \\prod_{i=1}^t exp\\left\\{\\kappa_i \\mathbf{x}_i^T \\theta - \\omega_i (\\mathbf{x}_i^T \\theta)^2 / 2 \\right\\} \\\\\n &\\propto p(\\theta) \\prod_{i=1}^t exp\\left\\{ \\frac{\\omega_i}{2} (\\mathbf{x}_i^T \\theta - \\kappa_i / \\omega_i)^2 \\right\\} \\\\\n &\\propto p(\\theta) \\ exp \\left\\{ -\\frac{1}{2} (z - \\mathbf{X}\\theta)^T \\mathbf{\\Omega} (z - \\mathbf{X}\\theta) \\right\\},\n\\end{align}\n\nwhere $z = (\\kappa_1 / \\omega_1, \\ldots, \\kappa_t / \\omega_t)$, and $\\mathbf{\\Omega} = diag(\\omega_1, \\ldots, \\omega_t)$. This is a conditionally Gaussian likelihood in $\\theta$, with working responses $z$, design matrix $\\mathbf{X}$, and diagonal covariance matrix $\\mathbf{\\Omega}^{-1}$.", "_____no_output_____" ], [ "With a multivariate Gaussian prior for $\\theta \\sim MVN(\\mathbf{b}, \\mathbf{B})$:\n\n$$p(\\theta) \\propto exp\\left\\{ -\\frac{1}{2} (\\theta - \\mathbf{b})^T \\mathbf{B}^{-1} (\\theta - \\mathbf{b}) \\right\\},$$\n\nthis identity leads to an efficient Gibbs sampler. The main parameters are drawn from a Gaussian distribution, which is parameterized by latent variables drawn from the PG distribution. 
The two steps are:\n\n\\begin{align}\n(\\omega_i | \\theta) &\\sim PG(1, \\mathbf{x}_i^T \\theta) \\\\\n(\\theta | \\mathbf{r}, \\mathbf{\\omega}) &\\sim N(\\mathbf{m}_\\omega, \\mathbf{V}_\\omega),\n\\end{align}\n\nwith\n\n\\begin{align}\n\\mathbf{V}_\\omega &= (\\mathbf{X}^T \\mathbf{\\Omega} \\mathbf{X} + \\mathbf{B}^{-1})^{-1} \\\\\n\\mathbf{m}_\\omega &= \\mathbf{V}_\\omega (\\mathbf{X}^T \\mathbf{\\kappa} + \\mathbf{B}^{-1}\\mathbf{b}) \\\\\n\\mathbf{\\kappa} &= [r_1 - 1/2, \\ldots, r_t - 1/2]\n\\end{align}", "_____no_output_____" ], [ "![PG-TS-Algorithm](images/pg-ts-algorithm-snapshot.png)", "_____no_output_____" ], [ "Notes on the algorithm:\n\n1. $\\mathbf{b}$ = prior means for coefficients\n2. $\\mathbf{B}$ = prior covariance for coefficients\n3. $M$ = number of burn-in iterations. Set to 1 for \"streaming\" setting. Authors found $M = 100$ gave good mixing\n4. Initial draw $\\theta_0$ is from the prior $MVN(\\mathbf{b}, \\mathbf{B})$", "_____no_output_____" ], [ "## Experiments from the paper\n\n1. Gaussian simulations. 100 arms and 10 features per context across 1,000 trials (time steps).\n - Simulation process:\n - Contexts generated via: $\\mathbf{x}_{t,a} \\sim MVN(-\\mathbf{3}, \\mathbf{I}_{10})$ for all arms $a$.\n - True parameters $\\theta^* \\sim MVN(\\mathbf{0}, \\mathbf{I}_{10})$.\n - **Set the hyperparameters to be $\\mathbf{b} = 0$ and $\\mathbf{B} = \\mathbf{I}_{10}$.**\n - Results averaged over 100 runs. Also looked at variability across runs.\n - Found PG-TS performed better than Laplace-TS with less variability. Also found $M = 1$ is still better.\n2. Mixture of Gaussians simulations to examine prior misspecification. 100 arms, 10 features, 5,000 trials (time steps).\n - Simulation process:\n - Contexts generated via: $\\mathbf{x}_{t,a} \\sim MVN(\\mathbf{0}, \\mathbf{I}_{10})$\n - True parameter $\\theta^*$ sampled from 4-component GMM, specified as follows.\n - variances $\\sigma^2_{j=1:4} \\sim \\text{Inverse-Gamma}(3,1)$\n - means $\\mu_{j=1:4} \\sim N(-3, \\sigma_j^2)$\n - mixture weights $\\phi \\sim \\text{Dirichlet}(1, 3, 5, 7)$\n - Then $\\theta^*(i) \\sim \\sum_{j=1}^4 \\phi_j N(\\mu_j, \\sigma_j^2),$ for $i = 1, \\ldots, 10$.\n - Found that PG-TS dealt with this misspecification better than Laplace-TS\n3. Forest Cover type data from UCI ML repo (Bay et al., 2000). Kind of an odd experiment, so not discussing it here.\n4. Yahoo! News article recommendation.\n - Ran replay on 200K of the clickstream events from the dataset of Li et al., 2010.\n - Did not discuss running it more than once and mention that $\\le 24,000$ events were valid for each of the evaluated algorithms.\n - This may indicate the authors failed to repeat the replay procedure, as should be done.\n - Full dataset contains 45.8M user visits from first 10 days in May 2009. For each user visit, one article of $K \\approx 20$ is shown.\n - Each article is associated with a feature vector $\\mathbf{x} \\in \\mathbb{R}^6$, including constant intercept feature, \"preprocessed using a conjoint analysis with a bilinear model (Chu et al., 2009)...\"\n - Articles represent the pool of arms, binary payoff is CTR, goal is to maximize total CTR.\n - Also include experiments with models updating in batches, introducing time delay (uses actual minutes of delay).\n - Found that PG-TS performed best; has more performance advantage with less delay", "_____no_output_____" ], [ "# Game Plan from here\n\n1. Implement PG-TS algorithm and validate on the same simulated procedure described in Experiment 1\n2. 
Implement Laplace-TS algorithm and validate on Experiment 1\n3. Extend PG-TS to use IW on $\\mathbf{B}$\n4. Relax (3) s.t. we have $\\mathbf{B} = diag(\\sigma^2_1, \\ldots, \\sigma^2_d)$ and $\\sigma^2_j \\sim IG(\\alpha_j, \\beta_j), j = 1, \\ldots, d$\n\nPotential extensions:\n\n- To improve expressiveness / reduce the amount of hyperparameter tuning:\n 1. Add an Inverse-Wishart (IW) prior on $\\mathbf{B}$, since it's unclear how to properly set the prior covariance and that can have a significant impact\n 2. Constrain the covariance to be diagonal and use a separate Inverse-Gamma (IG) prior per variance entry $\\sigma^2_j, j = 1, \\ldots, d$ instead of the IW.\n 3. Constrain this further s.t. the covariance is $\\mathbf{I}_d \\times \\sigma^2$, i.e. each $\\theta$ is distributed according to the same variance, and $\\sigma^2 \\sim IG(\\alpha, \\beta)$.\n 4. Add a Gamma prior on the scale term(s) of the IG distributions, since we may not know what good settings for those are.\n- To handle non-stationarity:\n 1. Add a drift component to the coefficients using the formulation from Sarkka (special case of Kalman Filter).\n 2. Discount old data using some discount factor $\\gamma \\in (0, 1]$\n 3. Use a sliding window method: only fit the model on data in the last $W$ time steps\n 4. (Piecewise stationarity) Incorporate some sort of changepoint detection technique (perhaps the model-based Mahalanobis distance technique)", "## Implement PG-TS algorithm and validate on the same simulated procedure described in Experiment 1" ] ], [ [ "import os\nimport sys\nimport pickle\nimport logging\nfrom concurrent import futures\n\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nfrom scipy import stats\nfrom scipy import special as sps\nfrom pypolyagamma import PyPolyaGamma\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport seaborn as sns\nsns.set_style('whitegrid')\n\nlogging.basicConfig(\n level=logging.INFO, stream=sys.stdout,\n format=\"[%(asctime)s][%(levelname)s]: %(message)s\")\nlogger = logging.getLogger()\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Start by simulating data" ] ], [ [ "# Simulate data\ndef simulate_gaussian_data(num_arms=5, num_predictors=3, num_time_steps=50,\n prior_means=None, prior_cov=None, *, seed=42):\n rng = np.random.RandomState(seed)\n\n # Generate true effects\n if prior_means is None:\n prior_means = np.zeros(num_predictors, dtype=np.float)\n\n if prior_cov is None:\n prior_cov = np.identity(num_predictors, dtype=np.float)\n\n true_effects = rng.multivariate_normal(prior_means, prior_cov)\n print(f'True effects: {np.round(true_effects, 4)}')\n\n # Generate design matrix\n arm_contexts = rng.multivariate_normal(\n np.ones(num_predictors, dtype=np.float) * -3,\n np.identity(num_predictors, dtype=np.float),\n size=num_arms)\n print(f'Context matrix size: {arm_contexts.shape}')\n\n # Generate multiple points for each arm by sampling arms uniformly at random.\n arm_per_time_step = rng.choice(num_arms, size=num_time_steps)\n print(f'Samples per arm: {np.bincount(arm_per_time_step)}')\n\n design_matrix = arm_contexts[arm_per_time_step]\n print(f'Design matrix size: {design_matrix.shape}')\n\n logits = design_matrix.dot(true_effects)\n rates = sps.expit(logits)\n ys = rng.binomial(n=1, p=rates)\n\n return true_effects, rates, design_matrix, ys\n\n\ntrue_effects, rates, design_matrix, ys = simulate_gaussian_data()", "True effects: [ 0.4967 -0.1383 
0.6477]\nContext matrix size: (5, 3)\nSamples per arm: [ 7 14 4 17 8]\nDesign matrix size: (50, 3)\n" ] ], [ [ "### Next implement PG-TS" ] ], [ [ "def draw_omegas(design_matrix, theta, pg_rng):\n num_rows = design_matrix.shape[0]\n omegas = np.ndarray(num_rows)\n logits = design_matrix.dot(theta)\n for i, logit_i in enumerate(logits):\n omegas[i] = pg_rng.pgdraw(1, logit_i)\n\n return omegas\n\n\nrng = np.random.RandomState(42)\npg_rng = PyPolyaGamma(seed=rng.randint(0, 2**32))\nomegas = draw_omegas(design_matrix, true_effects, pg_rng)\nomegas", "_____no_output_____" ], [ "design_matrix.shape, omegas.shape", "_____no_output_____" ] ], [ [ "Can we precompute the scatter matrix and multiply in the omegas later?\n\nI couldn't figure out any way to do this.", "#### Just out of curiosity, what is the fastest way to compute \`X.T.dot(Omega)\`?" ] ], [ [ "Omega = np.diag(omegas)\nnp.array_equal(design_matrix.T.dot(Omega), design_matrix.T * omegas)", "_____no_output_____" ], [ "# What is the fastest way to compute X.T.dot(Omega)?\nfrom scipy import sparse", "_____no_output_____" ], [ "%%timeit\ndesign_matrix.T.dot(np.diag(omegas))", "6.22 µs ± 290 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n" ], [ "%%timeit\ndesign_matrix.T.dot(sparse.diags(omegas))", "3.99 ms ± 196 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ], [ "%%timeit\ndesign_matrix.T * omegas", "1.84 µs ± 15.8 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n" ] ], [ [ "### Taking multivariate draws using Cholesky Decomp.\n\nFor efficiency and numerical stability, it's a good idea to avoid taking explicit matrix inverses if possible. With multivariate normal draws, this can be accomplished by using a Cholesky Decomposition, an eigen-decomposition, or an SVD. SVD may be favorable in cases where singular matrices are common, but we'll proceed with Cholesky here because it is the most efficient: the factorization costs roughly $O(n^3/3)$ and the triangular solves that follow are only $O(n^2)$, versus the higher-constant $O(n^3)$ cost of forming an explicit inverse.\n\nLet's start by taking the explicit draw using an explicit inverse and work from there." ] ], [ [ "rng = np.random.RandomState(42)\npg_rng = PyPolyaGamma(seed=rng.randint(0, 2**32))\n\nX = design_matrix\nnum_predictors = X.shape[1]\nm0 = np.zeros(num_predictors)\nP0 = np.identity(num_predictors, dtype=np.float)\n\n# re-usable calculations\nP0_inv = np.linalg.inv(P0)\nP0_inv_m0 = P0_inv.dot(m0)\nkappas = (ys - 0.5).T\n\n# prelim draws\nbeta_hat = rng.multivariate_normal(m0, P0)\nomegas = draw_omegas(X, beta_hat, pg_rng)\n\n# TODO: speed this up by computing inverse via Cholesky decomposition\nV_omega_inv = (X.T * omegas).dot(X) + P0_inv\nV_omega = np.linalg.inv(V_omega_inv)\nm_omega = V_omega.dot(X.T.dot(kappas) + P0_inv_m0)\n\ndraw = rng.multivariate_normal(m_omega, V_omega)\ndraw", "_____no_output_____" ] ], [ [ "Now the task is to replicate this draw without explicitly using \`multivariate_normal\`. 
We can use this lemma:\n\n\\begin{align}\n\\beta \\sim MVN_p(\\vec{\\mu}, \\Sigma) &\\equiv \\beta = L\\vec{\\gamma} + \\vec{\\mu}, \\\\\n\\text{where } \\Sigma &= L L^T, \\\\\nL &\\text{ is a lower triangular matrix}, \\\\\n\\text{and } \\gamma &\\sim MVN(\\vec{0}, I_p)\n\\end{align}\n\nLet's begin by demonstrating a draw from the multivariate normal using the equivalence on line one.", "_____no_output_____" ] ], [ [ "L = sp.linalg.cholesky(V_omega, lower=True)\ngamma = rng.normal(0, 1, size=(num_predictors, 10000))\ndraws = L.dot(gamma) + m_omega[:, None]\n\nm_omega - draws.mean(axis=1) # the means should be quite close", "_____no_output_____" ] ], [ [ "Now the task is to do the same without relying on the already-inverted precision matrix.\n\n\\begin{align}\n\\mathbf{V}_\\omega^{-1} &= \\mathbf{X}^T \\mathbf{\\Omega} \\mathbf{X} + \\mathbf{B}^{-1} = L L^T \\\\\n\\mathbf{V}_\\omega &= (\\mathbf{V}_\\omega^{-1})^{-1} = (L L^T)^{-1} = L^{-T} L^{-1} \\\\\n\\mathbf{m}_\\omega &= \\mathbf{V}_\\omega (\\mathbf{X}^T \\mathbf{\\kappa} + \\mathbf{B}^{-1}\\mathbf{b}) \\\\\n\\mathbf{m}_\\omega &= (L^{-T} L^{-1}) (\\mathbf{X}^T \\mathbf{\\kappa} + \\mathbf{B}^{-1}\\mathbf{b}) \\\\\n\\mathbf{\\kappa} &= [r_1 - 1/2, \\ldots, r_t - 1/2] \\\\\n\\beta &= L^{-T}\\gamma + \\mathbf{m}_\\omega\n\\end{align}\n\nNow, solving for $\\mathbf{m}_\\omega$ in terms of $L$ and $L^T$:\n\n\\begin{align}\n\\mathbf{y_\\omega} &\\stackrel{def}{=} \\mathbf{X}^T \\mathbf{\\kappa} + \\mathbf{B}^{-1}\\mathbf{b} \\\\\n\\mathbf{m_\\omega} &= L^{-T} L^{-1} \\mathbf{y_\\omega} \\\\\nL^T \\mathbf{m_\\omega} &= L^{-1} \\mathbf{y_\\omega} \\\\\nL^T \\mathbf{m_\\omega} &\\stackrel{def}{=} \\eta \\\\\nL \\eta &= \\mathbf{y_\\omega}\n\\end{align}\n\nThis leads us to a method by which we can solve for $\\eta$ first and then $\\mathbf{m_\\omega}$. The last step is to solve for $L^{-T}$ without inverting:\n\n\\begin{align}\n\\beta &= L^{-T}\\gamma + \\mathbf{m}_\\omega \\\\\n\\beta - \\mathbf{m}_\\omega &= L^{-T}\\gamma \\\\\nL^T (\\beta - \\mathbf{m}_\\omega) &= \\gamma \\\\\nL^T \\beta - L^T \\mathbf{m_\\omega} &= \\gamma \\\\\nL^T \\beta &= L^T \\mathbf{m_\\omega} + \\gamma \\\\\nL^T \\beta &= \\eta + \\gamma\n\\end{align}\n\nThis is pretty cool; it turns out we don't actually even need to solve for $\\mathbf{m_\\omega}$. We can just solve for $\\eta$ and then directly solve for $\\beta$. Let's now demonstrate this.", "_____no_output_____" ] ], [ [ "L = sp.linalg.cholesky(V_omega_inv, lower=True)\ny_omega = X.T.dot(kappas) + P0_inv_m0\neta = sp.linalg.solve_triangular(L, y_omega, lower=True)\n#gamma = rng.normal(0, 1, size=(num_predictors, 10000))\n\ndraws = sp.linalg.solve_triangular(L, eta[:, None] + gamma, lower=True, trans='T')\nm_omega - draws.mean(axis=1) # the means should be quite close", "_____no_output_____" ] ], [ [ "References consulted:\n\n1. https://makarandtapaswi.wordpress.com/2011/07/08/cholesky-decomposition-for-matrix-inversion/\n2. https://scicomp.stackexchange.com/questions/3188/dealing-with-the-inverse-of-a-positive-definite-symmetric-covariance-matrix\n3. https://cs.nyu.edu/courses/spring09/G22.3033-012/Bayesian-Inference.ppt\n4. https://jrnold.github.io/bayesian_notes/appendix.html#miscellaneous-mathematical-background\n5. 
https://stats.stackexchange.com/a/193735/46800", "_____no_output_____" ], [ "## PG-TS Gibbs sampler\n\nAnd now on to the actual PG-TS Gibbs sampler implementation.", "_____no_output_____" ] ], [ [ "# Implement PG-TS Gibbs sampler\nclass NotFitted(Exception):\n pass\n\n\nclass Seedable:\n \n def __init__(self, seed=42):\n self._initial_seed = seed\n self.rng = np.random.RandomState(self._initial_seed)\n\n def seed(self, seed):\n self.rng.seed(seed)\n return self\n\n def reset(self):\n self.seed(self._initial_seed)\n return self\n\n\nclass BaseModel(Seedable):\n \"\"\"Provides some useful helper methods and properties.\"\"\"\n\n @property\n def param_names(self):\n return [name for name in self.__dict__ if name.endswith('_')]\n\n def iter_params(self):\n for name, value in self.__dict__.items():\n if name.endswith('_'):\n yield name, value\n\n def raise_if_not_fitted(self):\n empty_params = []\n for name, value in self.iter_params():\n if value is None:\n empty_params.append(name)\n\n if empty_params:\n raise NotFitted(f\"some parameters are None: {empty_params}\")\n\n\nclass LogisticRegression(BaseModel):\n \"\"\"Bayesian logistic regression model, fitted with PG-augmented Gibbs.\"\"\"\n \n def __init__(self, m0=None, P0=None, num_samples=100, num_burnin=0, **kwargs):\n \"\"\"\n Args:\n m0 (np.ndarray): prior mean\n P0 (np.ndarray): prior covariance matrix\n \"\"\"\n super().__init__(**kwargs)\n\n # Hyperparameters\n self.m0 = m0\n self.P0 = P0\n\n # Set other properties that control fitting\n self.num_samples = num_samples\n self.num_burnin = num_burnin\n self.pg_rng = PyPolyaGamma(seed=self.rng.randint(0, 2**32))\n\n # Set up empty parameters\n self.beta_hat_ = None\n\n def seed(self, seed):\n super().seed(seed)\n self.pg_rng = PyPolyaGamma(seed=self.rng.randint(0, 2**32))\n return self\n\n def reset(self):\n super().reset()\n self.beta_hat_ = None\n return self\n\n # Use custom pickling to handle non-serializable PG RNG\n # WARNING: pickle + unpickle will reset seed\n def __getstate__(self):\n state = self.__dict__.copy()\n del state['pg_rng']\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.reset()\n\n def sample_from_prior(self):\n return self.rng.multivariate_normal(self.m0, self.P0)\n\n def fit(self, X, y):\n \"\"\"Fit the model using Gibbs sampler.\n\n Args:\n X (np.ndarray): design matrix\n y (np.ndarray): responses (binary rewards)\n\n Returns:\n self: reference to fitted model object (this instance).\n \"\"\"\n # Precompute some values that will be re-used in loops\n P0_inv = np.linalg.inv(self.P0)\n P0_inv_m0 = P0_inv.dot(self.m0)\n kappas = (y - 0.5).T\n XTkappa = X.T.dot(kappas)\n y_omega = XTkappa + P0_inv_m0\n num_predictors = X.shape[1]\n\n # Init memory for parameter traces\n beta_hat = np.ndarray((self.num_samples + 1, num_predictors))\n\n # Init trace from prior\n beta_hat[0] = self.sample_from_prior()\n\n # Set fitted parameters on instance\n self.beta_hat_ = beta_hat[1:] # discard initial sample from prior\n\n for i in range(1, self.num_samples + 1):\n omegas = draw_omegas(X, beta_hat[i - 1], self.pg_rng)\n V_omega_inv = (X.T * omegas).dot(X) + P0_inv\n\n L = sp.linalg.cholesky(V_omega_inv, lower=True)\n eta = sp.linalg.solve_triangular(L, y_omega, lower=True)\n gamma = self.rng.normal(0, 1, size=num_predictors)\n beta_hat[i] = sp.linalg.solve_triangular(\n L, eta + gamma, lower=True, trans='T')\n\n return self\n\n def transform(self, X, num_burnin=None):\n self.raise_if_not_fitted()\n\n # Optionally override default burnin.\n 
num_burnin = self.num_burnin if num_burnin is None else num_burnin\n beta_trace = self.beta_hat_[num_burnin:]\n\n # Compute logits and then transform to rates\n logits = X.dot(beta_trace.T)\n return sps.expit(logits)\n\n def choose_arm(self, context):\n if self.beta_hat_ is None:\n beta_hat = self.rng.multivariate_normal(self.m0, self.P0)\n else:\n beta_hat = self.beta_hat_[-1]\n\n # Compute logits and then transform to rates\n logits = context.dot(beta_hat)\n rates = sps.expit(logits)\n \n # Choose best arm for this \"plausible model.\"\n return np.argmax(rates)", "_____no_output_____" ] ], [ [ "### Validate the model on synthetic data" ] ], [ [ "num_predictors = design_matrix.shape[1]\nmodel = LogisticRegression(\n m0=np.zeros(num_predictors),\n P0=np.identity(num_predictors, dtype=np.float),\n num_samples=100)\nmodel.fit(design_matrix, ys)\n\nbeta_trace = model.beta_hat_\npd.DataFrame(beta_trace).plot();", "_____no_output_____" ], [ "# Posterior rates\nrates_trace = model.transform(design_matrix, num_burnin=20)\nexpected_rates = np.mean(rates_trace, axis=-1)\ncredible_bands = [(5.0, 95.0), (10.0, 90.0), (25.0, 75.0)]\nfig, ax = plt.subplots(figsize=(10, 4))\n\nax.plot(rates, 's', color='red', alpha=0.8)\nax.plot(expected_rates, 's', color='blue', alpha=0.7)\n\nxpoints = np.arange(len(rates))\nfor i, q in enumerate(credible_bands, start=1):\n lo, hi = np.percentile(rates_trace, q=q, axis=-1)\n plt.vlines(xpoints, lo, hi, color='blue', alpha=i * 0.2);", "_____no_output_____" ] ], [ [ "Next we'll combine the two plots above into a modular \`ModelValidator\` class that can be re-used elsewhere and doesn't have us working with so much global state." ] ], [ [ "class ModelValidator:\n \"\"\"Fit Bayesian regression models and validate their outputs.\"\"\"\n \n def __init__(self, model, credible_bands=(90, 80, 50)):\n self.model = model\n self.credible_bands = credible_bands\n\n def validate(self, X, y, rates):\n self.model.fit(X, y)\n self.traceplot()\n self.recapture_plot(X, rates)\n\n def traceplot(self, fontsize=14):\n num_params = len(self.model.param_names)\n fig, axes = plt.subplots(nrows=num_params, figsize=(10, num_params * 3.5),\n squeeze=False)\n\n for ax, (param_name, value) in zip(axes.flat, self.model.iter_params()):\n # TODO: only supports 1D parameters\n num_samples, cardinality = value.shape\n individual_names = [f'{param_name}{i}' for i in range(cardinality)]\n param_df = pd.DataFrame(\n value, columns=pd.Index(individual_names, name='Parameters'),\n index=pd.Index(np.arange(num_samples), name='Posterior Samples'))\n param_df.plot(ax=ax)\n\n # Pretty it up\n ax.set_title(param_name[:-1], fontsize=fontsize + 2)\n ax.set_ylabel(f'Support({param_name[:-1]})', fontsize=fontsize)\n ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize)\n\n return axes\n\n def recapture_plot(self, X, rates, fontsize=14):\n rates_trace = self.model.transform(X)\n expected_rates = np.mean(rates_trace, axis=-1)\n\n fig, ax = plt.subplots(figsize=(10, 4))\n ax.set_title('Rate Recapture', fontsize=fontsize + 2)\n ci_labels = ','.join(f'{width}%' for width in self.credible_bands)\n ax.set_xlabel(f'Data (red) and Expected Value with {ci_labels} CIs (blue)',\n fontsize=fontsize)\n ax.set_ylabel('Rate', fontsize=fontsize)\n\n # Plot actual rates along with expected value (estimate)\n ax.plot(rates, 's', color='red', alpha=0.8, label='actual')\n ax.plot(expected_rates, 's', color='blue', alpha=0.7, label='expected')\n ax.legend()\n\n # Plot vertical lines representing credible 
intervals of various widths.\n # First sort widths in descending order; we'll use this ordering to plot\n # with increasingly bold lines to show increasing credibility.\n widths = list(sorted(self.credible_bands, reverse=True))\n credible_bands = [(50 - width / 2, 50 + width / 2)\n for width in widths]\n\n xpoints = np.arange(len(rates))\n for i, q in enumerate(credible_bands, start=1):\n lo, hi = np.percentile(rates_trace, q=q, axis=-1)\n plt.vlines(xpoints, lo, hi, color='blue', alpha=i * 0.2)\n\n return ax", "_____no_output_____" ], [ "ModelValidator(model).validate(design_matrix, ys, rates)", "_____no_output_____" ] ], [ [ "## Implement Laplace-TS algorithm and validate on Experiment 1", "_____no_output_____" ], [ "![Laplace-TS](images/laplace-ts-algorithm-snapshot.png)", "_____no_output_____" ] ], [ [ "# We'll use scipy's optimize routine for the MAP estimate.\n# This is for computing the minimizer in the algorithm above.\nfrom scipy import optimize", "_____no_output_____" ], [ "m0 = np.zeros(num_predictors)\nq0 = np.ones(num_predictors)\n\n# This is the form from the original paper, which\n# didn't work for me.\ndef reg_laplace_log_likelihood(w, X, y, m, q):\n logits = X.dot(w)\n return (0.5 * q.dot((w - m) ** 2) + \n np.sum(np.log1p(np.exp(-y * logits))))\n\n# This is the form from Bishop's PRML (p. 218)\ndef posterior_neg_log_likelihood(w, X, t, m, q):\n logits = X.dot(w)\n y = sps.expit(logits)\n # diff = w - m\n # P0 = np.diag(1 / q)\n # diff.dot(P0).dot(diff) - \n return (0.5 * q.dot((w - m) ** 2) -\n np.sum(t * np.log(y) + (1 - t) * np.log1p(-y)))\n\noptimization_result = optimize.minimize(\n posterior_neg_log_likelihood,\n x0=np.random.normal(0, 0.001, size=num_predictors),\n args=(design_matrix, ys, m0, q0)) \nprint(optimization_result)\n\nw_hat = optimization_result.x\nlist(zip(true_effects, w_hat))", " fun: 8.219890974034909\n hess_inv: array([[ 0.52679597, -0.29611805, -0.02775451],\n [-0.29611805, 0.46026717, -0.24328135],\n [-0.02775451, -0.24328135, 0.26892651]])\n jac: array([4.76837158e-07, 7.15255737e-07, 2.38418579e-07])\n message: 'Optimization terminated successfully.'\n nfev: 70\n nit: 11\n njev: 14\n status: 0\n success: True\n x: array([0.36772034, 0.04376781, 0.60563244])\n" ], [ "m = w_hat\np = sps.expit(design_matrix.dot(m))[:, None]\nq = q0 + np.sum(design_matrix ** 2 * p * (1 - p), axis=0)\nm, q", "_____no_output_____" ], [ "dists = [stats.norm(m[i], np.sqrt(1 / q[i]))\n for i in range(num_predictors)]\n[dist.interval(.95) for dist in dists]", "_____no_output_____" ], [ "dist = stats.multivariate_normal(m, np.diag(1 / q))\ndist.rvs(100).mean(axis=0)", "_____no_output_____" ] ], [ [ "Now let's convert this code into a class that fits our interface.", "_____no_output_____" ] ], [ [ "class LaplaceLogisticRegression(BaseModel):\n \"\"\"Bayesian logistic regression model, fitted with Laplace approximation.\"\"\"\n \n def __init__(self, m0=None, q0=None, num_samples=100, **kwargs):\n \"\"\"\n Args:\n m0 (np.ndarray): prior mean\n q0 (np.ndarray): prior precision matrix diagonal\n \"\"\"\n super().__init__(**kwargs)\n\n # Hyperparameters\n self.m0 = m0\n self.q0 = q0\n\n # Set other properties that control fitting\n self.num_samples = num_samples\n\n # Set up empty parameters\n self.beta_dist = stats.multivariate_normal(self.m0, np.diag(1 / self.q0))\n self.beta_hat_ = None\n\n def reset(self):\n super().reset()\n self.beta_dist = stats.multivariate_normal(self.m0, np.diag(1 / self.q0))\n self.beta_hat_ = None\n return self\n\n def fit(self, X, y):\n \"\"\"Fit 
the model using the Laplace approximation.\n\n Args:\n X (np.ndarray): design matrix\n y (np.ndarray): responses (binary rewards)\n\n Returns:\n self: reference to fitted model object (this instance).\n \"\"\"\n # First we need to find the mode of the posterior distribution.\n num_predictors = X.shape[1]\n optimization_result = optimize.minimize(\n posterior_neg_log_likelihood,\n # use the instance RNG so replicate seeding stays reproducible\n x0=self.rng.normal(0, 0.001, size=num_predictors),\n args=(X, y, self.m0, self.q0))\n mean_map_estimate = optimization_result.x\n \n # Next we \"fit\" a Gaussian centered at this posterior mode.\n # The computations below compute the covariance matrix by\n # taking the inverse of the matrix of second derivatives of\n # the negative log likelihood (see Bishop 4.5 for more details).\n m = mean_map_estimate # retain notation from Chapelle paper\n p = sps.expit(X.dot(m))[:, None]\n q = self.q0 + np.sum(X ** 2 * p * (1 - p), axis=0)\n cov = np.diag(1 / q) # q is precision\n\n # Set fitted parameters on instance\n self.beta_dist = stats.multivariate_normal(m, cov)\n self.beta_hat_ = self.beta_dist.rvs(self.num_samples, random_state=self.rng)\n return self\n\n def transform(self, X):\n self.raise_if_not_fitted()\n\n # Compute logits and then transform to rates\n logits = X.dot(self.beta_hat_.T)\n return sps.expit(logits)\n\n def choose_arm(self, context):\n beta_sample = self.beta_dist.rvs(random_state=self.rng)\n logits = context.dot(beta_sample)\n rates = sps.expit(logits)\n return np.argmax(rates)", "_____no_output_____" ], [ "model = LaplaceLogisticRegression(\n m0=np.zeros(num_predictors), q0=np.ones(num_predictors))\nmodel.fit(design_matrix, ys)", "_____no_output_____" ], [ "ModelValidator(model).validate(design_matrix, ys, rates)", "_____no_output_____" ] ], [ [ "### Replicate Experiment 1 from Paper" ] ], [ [ "class GaussianSimulationFactory(Seedable):\n \"\"\"Simulate data according to contextual Gaussian distributions.\n\n A factory creates individual environments.\n This particular factory creates \`GaussianSimulationEnvironment\`s.\n \"\"\"\n\n def __init__(self, num_arms=100, num_predictors=10, num_time_steps=1000,\n prior_effect_means=None, prior_effect_cov=None, **kwargs):\n super().__init__(**kwargs)\n\n self.num_arms = num_arms\n self.num_predictors = num_predictors\n self.num_time_steps = num_time_steps\n\n # Set prior parameters for effects\n self.prior_effect_means = prior_effect_means\n if self.prior_effect_means is None:\n self.prior_effect_means = np.zeros(\n self.num_predictors, dtype=np.float)\n\n self.prior_effect_cov = prior_effect_cov\n if self.prior_effect_cov is None:\n self.prior_effect_cov = np.identity(\n self.num_predictors, dtype=np.float)\n\n def __call__(self):\n # Generate true effects\n true_effects = self.rng.multivariate_normal(\n self.prior_effect_means, self.prior_effect_cov)\n logger.info(f'True effects: {np.round(true_effects, 4)}')\n\n # Generate design matrix\n arm_contexts = self.rng.multivariate_normal(\n np.ones(self.num_predictors, dtype=np.float) * -3,\n np.identity(self.num_predictors, dtype=np.float),\n size=self.num_arms)\n logger.info(f'Context matrix size: {arm_contexts.shape}')\n\n return GaussianSimulationEnvironment(\n true_effects, arm_contexts, seed=self.rng.randint(0, 2**32))\n\n\nclass GaussianSimulationEnvironment(Seedable):\n \"\"\"An environment with Gaussian-distributed rewards related to\n contextual covariates linearly through a logistic link function.\n\n To replicate an experiment with the same environment but different\n random 
seeds, simply change the random seed after the first experiment\n is complete. If running in parallel, create several of these objects\n with different random seeds but the same parameters otherwise.\n \"\"\"\n\n def __init__(self, true_effects, arm_contexts, **kwargs):\n super().__init__(**kwargs)\n\n self.true_effects = true_effects\n self.arm_contexts = arm_contexts\n self.arm_rates = self._recompute_arm_rates()\n self.optimal_arm = np.argmax(self.arm_rates)\n self.optimal_rate = self.arm_rates[self.optimal_arm]\n\n def _recompute_arm_rates(self):\n logits = self.arm_contexts.dot(self.true_effects)\n return sps.expit(logits)\n\n @property\n def num_arms(self):\n return self.arm_contexts.shape[0]\n\n @property\n def num_predictors(self):\n return self.arm_contexts.shape[1]\n\n def __str__(self):\n return (f'{self.__class__.__name__}'\n f', num_predictors={self.num_predictors}'\n f', num_arms={self.num_arms}'\n f', max_arm_rate={np.round(np.max(self.arm_rates), 5)}'\n f', mean_arm_rate={np.round(np.mean(self.arm_rates), 5)}')\n\n def __repr__(self):\n return self.__str__()\n\n def choose_arm(self, i):\n self._validate_arm_index(i)\n\n # Generate data for optimal arm.\n y_optimal = self.rng.binomial(n=1, p=self.optimal_rate)\n\n # Generate data for selected arm.\n context = self.arm_contexts[i]\n if i == self.optimal_arm:\n y = y_optimal\n else:\n y = self.rng.binomial(n=1, p=self.arm_rates[i])\n\n return context, y, y_optimal\n\n def _validate_arm_index(self, i):\n if i < 0 or i >= self.num_arms:\n raise ValueError(\n f'arm a must satisfy: 0 <= a < {self.num_arms}; got {i}')\n\n\nclass Experiment(Seedable):\n \"\"\"Run one or more replicates of agent-environment interaction\n and record the resulting metrics.\n \"\"\"\n\n def __init__(self, environment_factory, model,\n num_time_steps=1000, logging_frequency=100,\n max_workers=7, **kwargs):\n super().__init__(**kwargs)\n\n self.environment = environment_factory()\n self.model = model\n self.num_time_steps = num_time_steps\n self.logging_frequency = logging_frequency\n self.max_workers = max_workers\n\n def run(self, num_replications=1):\n rep_nums = np.arange(num_replications)\n with futures.ProcessPoolExecutor(max_workers=self.max_workers) as pool:\n all_rewards = pool.map(self.run_once, rep_nums)\n\n rewards, optimal_rewards = list(zip(*all_rewards))\n return np.array(rewards), np.array(optimal_rewards)\n\n def run_once(self, seed):\n design_matrix = np.ndarray(\n (self.num_time_steps, self.environment.num_predictors))\n rewards = np.ndarray(self.num_time_steps)\n optimal_rewards = np.ndarray(self.num_time_steps)\n arm_selected = np.ndarray(self.num_time_steps, dtype=np.uint)\n\n # reset first, then apply the replicate seed; reset() restores the\n # initial seed, so seeding last keeps replicates distinct\n self.model.reset().seed(seed)\n self.environment.seed(seed)\n\n logger.info(f'Experiment_{seed} beginning...')\n for t in range(self.num_time_steps):\n if (t + 1) % self.logging_frequency == 0:\n logger.info(f'Experiment_{seed} at t={t + 1}')\n\n arm_selected[t] = self.model.choose_arm(self.environment.arm_contexts)\n design_matrix[t], rewards[t], optimal_rewards[t] = \\\n self.environment.choose_arm(arm_selected[t])\n # fit on all observations so far, including the newest one\n self.model.fit(design_matrix[:t + 1], rewards[:t + 1])\n\n logger.info(f'Experiment_{seed} complete.')\n return rewards, optimal_rewards", "_____no_output_____" ], [ "factory = GaussianSimulationFactory(seed=2135)\nenvironment = factory()\nplt.bar(np.arange(environment.num_arms), environment.arm_rates)\nenvironment", "[2019-03-03 06:13:54,276][INFO]: True effects: [ 0.3705 0.5342 2.9009 -0.4323 0.275 1.428 0.4041 -0.0516 -2.7478\n -0.181 ]\n[2019-03-03 
06:13:54,278][INFO]: Context matrix size: (100, 10)\n" ] ], [ [ "### PG-TS with M=100", "_____no_output_____" ] ], [ [ "def contains_all(dirpath, *filenames):\n listing = os.listdir(dirpath)\n return all(fname in listing for fname in filenames)", "_____no_output_____" ], [ "if not contains_all('data', 'pg_ts_rewards.pkl', 'pg_ts_optimal_rewards.pkl'):\n factory.reset()\n model = LogisticRegression(\n m0=np.zeros(factory.num_predictors),\n P0=np.identity(factory.num_predictors, dtype=np.float),\n num_samples=100)\n experiment = Experiment(factory, model, logging_frequency=500)\n pg_ts_rewards, pg_ts_optimal_rewards = experiment.run(num_replications=100)\n\n # Save results from PG-TS experiments.\n with open('data/pg_ts_rewards.pkl', 'wb') as f:\n pickle.dump(pg_ts_rewards, f)\n\n with open('data/pg_ts_optimal_rewards.pkl', 'wb') as f:\n pickle.dump(pg_ts_optimal_rewards, f)\nelse:\n # Load results from PG-TS experiments.\n with open('data/pg_ts_rewards.pkl', 'rb') as f:\n pg_ts_rewards = pickle.load(f)\n\n with open('data/pg_ts_optimal_rewards.pkl', 'rb') as f:\n pg_ts_optimal_rewards = pickle.load(f)", "_____no_output_____" ], [ "def plot_cum_regret(rewards, optimal_rewards, ax=None, **kwargs):\n if ax is None:\n fig, ax = plt.subplots(figsize=kwargs.pop('figsize', None))\n\n regret = optimal_rewards - rewards\n cum_regret = np.cumsum(regret, axis=-1)\n pd.DataFrame(cum_regret.T).plot(\n ax=ax,\n color=kwargs.get('color', 'red'),\n alpha=kwargs.get('alpha', 0.5))\n\n fontsize = kwargs.pop('fontsize', 14)\n ax.set_ylabel('Cumulative Regret', fontsize=fontsize)\n ax.set_xlabel('Trial Number', fontsize=fontsize)\n ax.get_legend().remove()\n ax.set_title(kwargs.get('title', ''), fontsize=fontsize + 2)\n\n return ax", "_____no_output_____" ], [ "plot_cum_regret(pg_ts_rewards, pg_ts_optimal_rewards, title='PG-TS');", "_____no_output_____" ] ], [ [ "### PG-TS with M=1 (PG-TS-Stream)", "_____no_output_____" ] ], [ [ "if not contains_all('data', 'pg_ts_stream_rewards.pkl', 'pg_ts_stream_optimal_rewards.pkl'):\n factory.reset()\n model = LogisticRegression(\n m0=np.zeros(factory.num_predictors),\n P0=np.identity(factory.num_predictors, dtype=np.float),\n num_samples=2) # first sample discarded as burnin\n experiment = Experiment(factory, model, logging_frequency=500)\n pg_ts_stream_rewards, pg_ts_stream_optimal_rewards = experiment.run(num_replications=100)\n\n # Save results from PG-TS-Stream experiments.\n with open('data/pg_ts_stream_rewards.pkl', 'wb') as f:\n pickle.dump(pg_ts_stream_rewards, f)\n\n with open('data/pg_ts_stream_optimal_rewards.pkl', 'wb') as f:\n pickle.dump(pg_ts_stream_optimal_rewards, f)\nelse:\n # Load results from PG-TS-Stream experiments.\n with open('data/pg_ts_stream_rewards.pkl', 'rb') as f:\n pg_ts_stream_rewards = pickle.load(f)\n\n with open('data/pg_ts_stream_optimal_rewards.pkl', 'rb') as f:\n pg_ts_stream_optimal_rewards = pickle.load(f)", "_____no_output_____" ], [ "plot_cum_regret(pg_ts_stream_rewards, pg_ts_stream_optimal_rewards,\n color='steelblue', title='PG-TS-Stream');", "_____no_output_____" ] ], [ [ "#### Combined PG-TS plots", "_____no_output_____" ] ], [ [ "ax = plot_cum_regret(pg_ts_rewards, pg_ts_optimal_rewards, figsize=(10, 5))\nplot_cum_regret(pg_ts_stream_rewards, pg_ts_stream_optimal_rewards,\n ax=ax, color='steelblue');\n\nax.legend([Line2D([0], [0], color='red', alpha=0.5, lw=4),\n Line2D([0], [0], color='steelblue', alpha=0.5, lw=4)],\n ['PG-TS', 'PG-TS-Stream']);", "_____no_output_____" ], [ "if not contains_all('data', 
'laplace_ts_rewards.pkl', 'laplace_ts_optimal_rewards.pkl'):\n factory.reset()\n model = LaplaceLogisticRegression(\n m0=np.zeros(factory.num_predictors),\n q0=np.ones(factory.num_predictors, dtype=np.float),\n num_samples=1)\n experiment = Experiment(factory, model, logging_frequency=500)\n laplace_ts_rewards, laplace_ts_optimal_rewards = experiment.run(num_replications=100)\n\n # Save results from PG-TS experiments.\n with open('data/laplace_ts_rewards.pkl', 'wb') as f:\n pickle.dump(laplace_ts_rewards, f)\n\n with open('data/laplace_ts_optimal_rewards.pkl', 'wb') as f:\n pickle.dump(laplace_ts_optimal_rewards, f)\nelse:\n # Load results from Laplace-TS experiments.\n with open('data/laplace_ts_rewards.pkl', 'rb') as f:\n laplace_ts_rewards = pickle.load(f)\n\n with open('data/laplace_ts_optimal_rewards.pkl', 'rb') as f:\n laplace_ts_optimal_rewards = pickle.load(f)", "_____no_output_____" ], [ "plot_cum_regret(laplace_ts_rewards, laplace_ts_optimal_rewards,\n color='green', title='Laplace-TS');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec5bbd4f9f277157723c9f6a92b940b79fe8ef43
144,684
ipynb
Jupyter Notebook
Python-Intermediate .ipynb
Nusan7/DataScience
4ee3f9dff89858197b13101503e93f25aa0c5d39
[ "MIT" ]
null
null
null
Python-Intermediate .ipynb
Nusan7/DataScience
4ee3f9dff89858197b13101503e93f25aa0c5d39
[ "MIT" ]
null
null
null
Python-Intermediate .ipynb
Nusan7/DataScience
4ee3f9dff89858197b13101503e93f25aa0c5d39
[ "MIT" ]
null
null
null
68.182846
59,016
0.751804
[ [ [ "## Basics of matplotlib ", "_____no_output_____" ], [ "It is used for visualization. Contains many plots ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt \nimport pandas as pd \nimport numpy as np \ngapminder=pd.read_csv('Data/gapminder.csv') #Importing the data. \nprint(gapminder.head())\ngapminder['life_exp'].astype(float) # Converting the column life_exp to float. \nlife_exp=gapminder['life_exp'].values\ngdp_cap=gapminder['gdp_cap'].values\nprint(len(life_exp))\nprint(len(gdp_cap))", " Unnamed: 0 country year population cont life_exp gdp_cap\n0 11 Afghanistan 2007 31889923.0 Asia 43.828 974.580338\n1 23 Albania 2007 3600523.0 Europe 76.423 5937.029526\n2 35 Algeria 2007 33333216.0 Africa 72.301 6223.367465\n3 47 Angola 2007 12420476.0 Africa 42.731 4797.231267\n4 59 Argentina 2007 40301927.0 Americas 75.320 12779.379640\n142\n142\n" ], [ "plt.plot(gdp_cap,life_exp)\nplt.show() \n# In situations like this it is better to plot a scatter plot instead. ", "_____no_output_____" ], [ "plt.scatter(gdp_cap,life_exp)\nplt.xscale('log') # For a range of values logarithmic scale can be used.\nplt.show()", "_____no_output_____" ] ], [ [ "### Observations: \n1. There is a correlation between the gdp_cap and life_exp. Higher the GDP, greater the life_exp ", "_____no_output_____" ], [ "## Histograms \nThey are used to show the distribution and explore the data set. ", "_____no_output_____" ] ], [ [ "plt.hist(life_exp,bins=15) #setting the bins increases the intervals\nplt.show()\n", "_____no_output_____" ] ], [ [ "## Customizing the plot ", "_____no_output_____" ] ], [ [ "print(life_exp)\nprint(gdp_cap)\n\n", "[43.828 76.423 72.301 42.731 75.32 81.235 79.829 75.635 64.062 79.441\n 56.728 65.554 74.852 50.728 72.39 73.005 52.295 49.58 59.723 50.43\n 80.653 44.741 50.651 78.553 72.961 72.889 65.152 46.462 55.322 78.782\n 48.328 75.748 78.273 76.486 78.332 54.791 72.235 74.994 71.338 71.878\n 51.579 58.04 52.947 79.313 80.657 56.735 59.448 79.406 60.022 79.483\n 70.259 56.007 46.388 60.916 70.198 82.208 73.338 81.757 64.698 70.65\n 70.964 59.545 78.885 80.745 80.546 72.567 82.603 72.535 54.11 67.297\n 78.623 77.588 71.993 42.592 45.678 73.952 59.443 48.303 74.241 54.467\n 64.164 72.801 76.195 66.803 74.543 71.164 42.082 62.069 52.906 63.785\n 79.762 80.204 72.899 56.867 46.859 80.196 75.64 65.483 75.537 71.752\n 71.421 71.688 75.563 78.098 78.746 76.442 72.476 46.242 65.528 72.777\n 63.062 74.002 42.568 79.972 74.663 77.926 48.159 49.339 80.941 72.396\n 58.556 39.613 80.884 81.701 74.143 78.4 52.517 70.616 58.42 69.819\n 73.923 71.777 51.542 79.425 78.242 76.384 73.747 74.249 73.422 62.698\n 42.384 43.487]\n[ 974.5803384 5937.029526 6223.367465 4797.231267 12779.37964\n 34435.36744 36126.4927 29796.04834 1391.253792 33692.60508\n 1441.284873 3822.137084 7446.298803 12569.85177 9065.800825\n 10680.79282 1217.032994 430.0706916 1713.778686 2042.09524\n 36319.23501 706.016537 1704.063724 13171.63885 4959.114854\n 7006.580419 986.1478792 277.5518587 3632.557798 9645.06142\n 1544.750112 14619.22272 8948.102923 22833.30851 35278.41874\n 2082.481567 6025.374752 6873.262326 5581.180998 5728.353514\n 12154.08975 641.3695236 690.8055759 33207.0844 30470.0167\n 13206.48452 752.7497265 32170.37442 1327.60891 27538.41188\n 5186.050003 942.6542111 579.231743 1201.637154 3548.330846\n 39724.97867 18008.94444 36180.78919 2452.210407 3540.651564\n 11605.71449 4471.061906 40675.99635 25523.2771 28569.7197\n 7320.880262 31656.06806 4519.461171 1463.249282 1593.06548\n 23348.13973 47306.98978 
10461.05868 1569.331442 414.5073415\n 12057.49928 1044.770126 759.3499101 12451.6558 1042.581557\n 1803.151496 10956.99112 11977.57496 3095.772271 9253.896111\n 3820.17523 823.6856205 944. 4811.060429 1091.359778\n 36797.93332 25185.00911 2749.320965 619.6768924 2013.977305\n 49357.19017 22316.19287 2605.94758 9809.185636 4172.838464\n 7408.905561 3190.481016 15389.92468 20509.64777 19328.70901\n 7670.122558 10808.47561 863.0884639 1598.435089 21654.83194\n 1712.472136 9786.534714 862.5407561 47143.17964 18678.31435\n 25768.25759 926.1410683 9269.657808 28821.0637 3970.095407\n 2602.394995 4513.480643 33859.74835 37506.41907 4184.548089\n 28718.27684 1107.482182 7458.396327 882.9699438 18008.50924\n 7092.923025 8458.276384 1056.380121 33203.26128 42951.65309\n 10611.46299 11415.80569 2441.576404 3025.349798 2280.769906\n 1271.211593 469.7092981]\n" ], [ "plt.scatter(gdp_cap,life_exp,c='r',alpha=0.8)#color customization\nplt.xscale('log')\nplt.xlabel('GDP') # lAbeling \nplt.ylabel('Life Expectancy')\nplt.title('Life Expectancy VS GDP')\nplt.yticks([0,20,40,60,80])\nplt.grid()\nplt.show()", "_____no_output_____" ] ], [ [ "## Dictionaries\n1. Consists of key value pairs. \n2. Created using {}. \n3. Useful if we find out information on something that is unordered and using the key name.\n4. Mutable\n5. Indexed by keys", "_____no_output_____" ] ], [ [ "Asia={'India':'Delhi','Nepal':'Kathmandu','China':'Beijing','Japan':'Tokyo'}\nprint(Asia)", "{'India': 'Delhi', 'Nepal': 'Kathmandu', 'China': 'Beijing', 'Japan': 'Tokyo'}\n" ], [ "dir(dict)", "_____no_output_____" ], [ "Asia.keys() # Getting all the keys in the dictionaries ", "_____no_output_____" ], [ "print(Asia['Nepal']) # Getting the value from the key name.\nprint(Asia.get('Nepal')) #Does not give any error message.", "Kathmandu\nKathmandu\n" ], [ "Asia.values()", "_____no_output_____" ], [ "Asia.update({'Indonesia':'Jakarta','Bangladesh':'jhaka','Austrlia':'Canberra'})", "_____no_output_____" ], [ "Asia", "_____no_output_____" ], [ "'Indonesia' in Asia", "_____no_output_____" ], [ "Asia['Bangladesh']='Dhaka'\nprint(Asia)", "{'India': 'Delhi', 'Nepal': 'Kathmandu', 'China': 'Beijing', 'Japan': 'Tokyo', 'Indonesia': 'Jakarta', 'Bangladesh': 'Dhaka', 'Austrlia': 'Canberra'}\n" ], [ "del(Asia['Austrlia'])", "_____no_output_____" ] ], [ [ "## Pandas\n1. Used for tabular data. \n2. Can store mulitple date types. \n3. 
Manipulation is easy", "_____no_output_____" ] ], [ [ "import pandas as pd \ncountries=pd.DataFrame(Asia,index=['country','capitals'])\ncountries", "_____no_output_____" ], [ "cars=pd.read_csv('Data/cars.csv',index_col=0)\ncars.shape", "_____no_output_____" ], [ "cars.head()", "_____no_output_____" ], [ "cars[cars['cars_per_cap']>700] # All the countries with cars_per_cap greater than 700", "_____no_output_____" ], [ "cars[cars['drives_right']==True] #Countries that drives on the right side", "_____no_output_____" ], [ "#.iloc and .loc can be used to subeset the dataframe.\nprint(cars.loc[['US'],['country','drives_right']])\nprint(cars.iloc[:,[1]])", " country drives_right\nUS United States True\n country\nUS United States\nAUS Australia\nJAP Japan\nIN India\nRU Russia\nMOR Morocco\nEG Egypt\n" ], [ "brics=pd.read_csv('Data/brics.csv',index_col=0)\nprint(brics.head())\nbrics.loc[['BR','IN','SA'],['capital']]", " country capital area population\nBR Brazil Brasilia 8.516 200.40\nRU Russia Moscow 17.100 143.50\nIN India New Delhi 3.286 1252.00\nCH China Beijing 9.597 1357.00\nSA South Africa Pretoria 1.221 52.98\n" ], [ "brics.iloc[:,1:]", "_____no_output_____" ] ], [ [ "## Control flow and Filtering\n\n#### Comparison Operators \n1. <\n3. ==\n4. !=\n5. <=", "_____no_output_____" ] ], [ [ "a=23\nb=33\nprint(a==b)\nprint(a<=b)\nprint(a>=b)\na1='Hello'\nb1='Hellos'\nprint(a1==b1)", "False\nTrue\nFalse\nFalse\n" ], [ "new=np.arange(0,100,5)\nlen(new)\nnew1=np.linspace(0,100,20)\nlen(new1)\nnew>new1\nnew==new1 # For numpy all the values get compared \nnew>4", "_____no_output_____" ] ], [ [ "### Boolean operators: \n1. and\n2. or \n3. not", "_____no_output_____" ] ], [ [ "a2=160\nb2=1240\nprint(a2>a and b2<b)\nprint(a2==a or a2>a)\nprint(a2>a and not(b2<b))", "False\nTrue\nTrue\n" ] ], [ [ "### For numpy:\n1. np.logical_or()\n2. np.logical_and()\n3. np.logical_not()\n", "_____no_output_____" ] ], [ [ "arr1=np.array([30,40,50,60,70,80])\narr2=np.array([324,234234,50,60,34543,60])\nprint(np.logical_or(arr1>30,arr2<20))", "[False True True True True True]\n" ] ], [ [ "### if,elif and else ", "_____no_output_____" ] ], [ [ "num=2\nif num>=5 and num <=10:\n print(f'The num is greater than or equal to : {num}')\nelif num<5:\n print(f'The num is less than 5 as it is equal to : {num}')\nelse:\n print(f'The num is equal to : {num}')", "The num is less than 5 as it is equal to : 2\n" ] ], [ [ "### Filtering Pandas DataFrame", "_____no_output_____" ] ], [ [ "brics.head()\n", "_____no_output_____" ], [ "#getting the countries with the area greater than 8:\nprint(brics[brics['area']>8])\n#Getting the countries with the area greater than 8 and population 1000:\nbrics[np.logical_and(brics['area']>8,brics['population']>1000)]", " country capital area population\nBR Brazil Brasilia 8.516 200.4\nRU Russia Moscow 17.100 143.5\nCH China Beijing 9.597 1357.0\n" ] ], [ [ "### Loops \n1. While loop: loop until the condition is True. \n2. 
For loop: loop until there is no more element to loop.", "_____no_output_____" ] ], [ [ "number=int(input('Please enter the number : \\n'))\nwhile number!=100:\n if number<100:\n print(number)\n number=number+1\n else:\n print(number)\n number=number-1", "Please enter the number : \n96\n96\n97\n98\n99\n" ], [ "# For loop: \nlist1=['Nusan','Prapti','Dubey','Daemon','Brad','Liz']\nfor var in list1:\n print(var)\nfor index, var in enumerate(list1):\n print(f'The index is {index} and the variable is {var}')\n", "Nusan\nPrapti\nDubey\nDaemon\nBrad\nLiz\nThe index is 0 and the variable is Nusan\nThe index is 1 and the variable is Prapti\nThe index is 2 and the variable is Dubey\nThe index is 3 and the variable is Daemon\nThe index is 4 and the variable is Brad\nThe index is 5 and the variable is Liz\n" ], [ "# For loop for dictionaries and numpy arrays:\ndict1={'country':['US','China','Nepal','India'],'capitals':['Washington DC','Beijing','Kathmandu','Delhi']}\nfor s,t in dict1.items():\n print(s,t)\n", "country ['US', 'China', 'Nepal', 'India']\ncapitals ['Washington DC', 'Beijing', 'Kathmandu', 'Delhi']\n" ], [ "type(life_exp)\nlife_exp", "_____no_output_____" ], [ "for i in np.nditer(life_exp):\n print(i)", "43.828\n76.423\n72.301\n42.731\n75.32\n81.235\n79.829\n75.635\n64.062\n79.441\n56.728\n65.554\n74.852\n50.728\n72.39\n73.005\n52.295\n49.58\n59.723\n50.43\n80.653\n44.74100000000001\n50.651\n78.553\n72.961\n72.889\n65.152\n46.461999999999996\n55.321999999999996\n78.782\n48.328\n75.748\n78.273\n76.486\n78.332\n54.791000000000004\n72.235\n74.994\n71.33800000000001\n71.878\n51.57899999999999\n58.04\n52.946999999999996\n79.313\n80.657\n56.735\n59.448\n79.406\n60.022\n79.483\n70.259\n56.007\n46.388000000000005\n60.916000000000004\n70.19800000000001\n82.208\n73.33800000000001\n81.757\n64.69800000000001\n70.65\n70.964\n59.545\n78.885\n80.745\n80.546\n72.567\n82.603\n72.535\n54.11\n67.297\n78.623\n77.58800000000001\n71.993\n42.592\n45.678000000000004\n73.952\n59.443000000000005\n48.303000000000004\n74.241\n54.467\n64.164\n72.801\n76.195\n66.803\n74.543\n71.164\n42.082\n62.068999999999996\n52.906000000000006\n63.785\n79.762\n80.204\n72.899\n56.867\n46.858999999999995\n80.196\n75.64\n65.483\n75.53699999999999\n71.752\n71.421\n71.688\n75.563\n78.098\n78.74600000000001\n76.442\n72.476\n46.242\n65.528\n72.777\n63.062\n74.002\n42.568000000000005\n79.972\n74.663\n77.926\n48.159\n49.339\n80.941\n72.396\n58.556000000000004\n39.613\n80.884\n81.70100000000001\n74.143\n78.4\n52.516999999999996\n70.616\n58.42\n69.819\n73.923\n71.777\n51.542\n79.425\n78.242\n76.384\n73.747\n74.249\n73.422\n62.698\n42.38399999999999\n43.486999999999995\n" ], [ "cars.head()", "_____no_output_____" ], [ "# looping over the pandas dataframe", "_____no_output_____" ], [ "for lab,row in cars.iterrows():\n print(lab)\n print(row)", "US\ncars_per_cap 809\ncountry United States\ndrives_right True\nName: US, dtype: object\nAUS\ncars_per_cap 731\ncountry Australia\ndrives_right False\nName: AUS, dtype: object\nJAP\ncars_per_cap 588\ncountry Japan\ndrives_right False\nName: JAP, dtype: object\nIN\ncars_per_cap 18\ncountry India\ndrives_right False\nName: IN, dtype: object\nRU\ncars_per_cap 200\ncountry Russia\ndrives_right True\nName: RU, dtype: object\nMOR\ncars_per_cap 70\ncountry Morocco\ndrives_right True\nName: MOR, dtype: object\nEG\ncars_per_cap 45\ncountry Egypt\ndrives_right True\nName: EG, dtype: object\n" ], [ "for lab, row in cars.iterrows():\n print(lab + ' '+ str(row['country']))\n", "US United States\nAUS Australia\nJAP 
Japan\nIN India\nRU Russia\nMOR Morocco\nEG Egypt\n" ], [ "for lab,row in cars.iterrows():\n cars['COUNTRY']=cars['country'].apply(str.upper)\ncars", "_____no_output_____" ], [ "cars.head()", "_____no_output_____" ], [ "for lab, row in cars.iterrows():\n cars['LENGTH']=cars['COUNTRY'].apply(len)", "_____no_output_____" ], [ "cars", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5bbfaaea79638b2e3e176d9b472bc7eafa556f
31,591
ipynb
Jupyter Notebook
vilio/data/all_results/sanity_check_pred_samples.ipynb
VAIBHAV-2303/Hateful-Memes-Classification
16c775061d94fae7bf10bbe8fda2dd1dc3e6b697
[ "MIT" ]
4
2021-06-23T06:40:35.000Z
2022-01-11T19:09:53.000Z
vilio/data/all_results/sanity_check_pred_samples.ipynb
VAIBHAV-2303/Hateful-Memes-Classification
16c775061d94fae7bf10bbe8fda2dd1dc3e6b697
[ "MIT" ]
null
null
null
vilio/data/all_results/sanity_check_pred_samples.ipynb
VAIBHAV-2303/Hateful-Memes-Classification
16c775061d94fae7bf10bbe8fda2dd1dc3e6b697
[ "MIT" ]
2
2021-07-25T12:43:20.000Z
2021-07-27T17:53:41.000Z
29.859168
112
0.399164
[ [ [ "import os\nfrom shutil import copyfile\nimport pandas as pd", "_____no_output_____" ], [ "ref = pd.read_csv('VLMDB_sa_train_SA.csv')\nref", "_____no_output_____" ], [ "df = pd.read_csv('EL365072_sa_train_SA.csv')\ndf", "_____no_output_____" ], [ "delta = ref[~ref.id.isin(df.id)]\ndf.append(delta).sort_values(by='id')", "_____no_output_____" ], [ "df3 = pd.read_csv('EL365072_sa_dev_seen_SA.csv')\ndf3", "_____no_output_____" ], [ "df4 = pd.read_csv('VLMDB_sa_dev_seen_SA.csv')\ndf4", "_____no_output_____" ], [ "(df3.id.values == df4.id.values).sum()", "_____no_output_____" ], [ "df3 = pd.read_csv('EL365072_sa_test_seen_SA.csv')\ndf4 = pd.read_csv('VLMDB_sa_test_seen_SA.csv')\n(df3.id.values == df4.id.values).sum()", "_____no_output_____" ], [ "df3 = pd.read_csv('EL365072_sa_test_unseen_SA.csv')\ndf4 = pd.read_csv('VLMDB_sa_test_unseen_SA.csv')\n(df3.id.values == df4.id.values).sum()", "_____no_output_____" ], [ "os.listdir(os.path.join(os.getcwd(), 'res'))", "_____no_output_____" ], [ "keep_cols = ['id', 'proba_x', 'label_x']\ncol_map = {'proba_x': 'proba', 'label_x': 'label'}\ne_folders = ['ES36', 'ESVCR36', 'ESV50', 'ES72', 'ESVCR72', 'EL36', 'ELVCR36', 'ELV50', 'EL72', 'ELVCR72']", "_____no_output_____" ], [ "for ef in e_folders:\n print('-'*10 , ef, '-'*10)\n df = pd.read_csv(os.path.join('.', 'res', ef, ef+'_train.csv'))\n print(df.shape)\n if ef[:2] == 'ES':\n os.makedirs(os.path.join('.', 'res', 'ES_RECT', ef), exist_ok=True)\n for eff in os.listdir(os.path.join('.', 'res', ef)): \n src = os.path.join('.', 'res', ef, eff)\n dest = os.path.join('.', 'res', 'ES_RECT', ef, eff)\n copyfile(src, dest)\n \n if 'train' in src:\n df = pd.read_csv(src)\n df_new = pd.merge(df, ref, on=['id'], how='inner')[keep_cols]\n df_new = df_new.rename(columns=col_map)\n df_new.to_csv(dest)\n \n elif ef[:2] == 'EL':\n os.makedirs(os.path.join('.', 'res', 'EL_RECT', ef), exist_ok=True)\n for eff in os.listdir(os.path.join('.', 'res', ef)):\n src = os.path.join('.', 'res', ef, eff)\n dest = os.path.join('.', 'res', 'EL_RECT', ef, eff)\n copyfile(src, dest)\n \n if 'train' in src:\n df = pd.read_csv(src)\n df_new = pd.merge(df, ref, on=['id'], how='inner')[keep_cols]\n df_new = df_new.rename(columns=col_map)\n df_new.to_csv(dest)", "---------- ES36 ----------\n(8574, 3)\n---------- ESVCR36 ----------\n(8574, 3)\n---------- ESV50 ----------\n(8574, 3)\n---------- ES72 ----------\n(8574, 3)\n---------- ESVCR72 ----------\n(8574, 3)\n---------- EL36 ----------\n(8574, 3)\n---------- ELVCR36 ----------\n(8574, 3)\n---------- ELV50 ----------\n(8574, 3)\n---------- EL72 ----------\n(8574, 3)\n---------- ELVCR72 ----------\n(8574, 3)\n" ], [ "for ef in os.listdir(os.path.join('.', 'res')):\n os.makedirs(os.path.join('.', 'newres', ef), exist_ok=True)\n for file in os.listdir(os.path.join('.', 'res', ef)):\n if 'dev' in file or 'test' in file:\n print(os.path.join('.', 'res', ef, file))\n df = pd.read_csv(os.path.join('.', 'res', ef, file)).sort_values(by='id')\n elif 'train' in file:\n print(os.path.join('.', 'res', ef, file))\n df = pd.read_csv(os.path.join('.', 'res', ef, file))\n delta = ref[~ref.id.isin(df.id)]\n df = df.append(delta).sort_values(by='id')\n df.to_csv(os.path.join('.', 'newres', ef, file))", 
"./res/OV50/OV50_train.csv\n./res/OV50/OV50_test_seen.csv\n./res/OV50/OV50_dev_seen.csv\n./res/OV50/OV50_test_unseen.csv\n./res/ES36/ES36_train.csv\n./res/ES36/ES36_test_unseen.csv\n./res/ES36/ES36_test_seen.csv\n./res/ES36/ES36_dev_seen.csv\n./res/ESV50/ESV50_dev_seen.csv\n./res/ESV50/ESV50_test_unseen.csv\n./res/ESV50/ESV50_test_seen.csv\n./res/ESV50/ESV50_train.csv\n./res/ELV50/ELV50_test_unseen.csv\n./res/ELV50/ELV50_train.csv\n./res/ELV50/ELV50_dev_seen.csv\n./res/ELV50/ELV50_test_seen.csv\n./res/V45/V45_dev_seen.csv\n./res/V45/V45_train.csv\n./res/V45/V45_test_seen.csv\n./res/V45/V45_test_unseen.csv\n./res/U36/U36_train.csv\n./res/U36/U36_test_seen.csv\n./res/U36/U36_test_unseen.csv\n./res/U36/U36_dev_seen.csv\n./res/ELVCR72/ELVCR72_test_seen.csv\n./res/ELVCR72/ELVCR72_dev_seen.csv\n./res/ELVCR72/ELVCR72_test_unseen.csv\n./res/ELVCR72/ELVCR72_train.csv\n./res/EL72/EL72_train.csv\n./res/EL72/EL72_test_unseen.csv\n./res/EL72/EL72_test_seen.csv\n./res/EL72/EL72_dev_seen.csv\n./res/U50/U50_test_unseen.csv\n./res/U50/U50_dev_seen.csv\n./res/U50/U50_test_seen.csv\n./res/U50/U50_train.csv\n./res/EL36/EL36_test_seen.csv\n./res/EL36/EL36_test_unseen.csv\n./res/EL36/EL36_dev_seen.csv\n./res/EL36/EL36_train.csv\n./res/V135/V135_test_unseen.csv\n./res/V135/V135_test_seen.csv\n./res/V135/V135_dev_seen.csv\n./res/V135/V135_train.csv\n./res/ESVCR36/ESVCR36_test_seen.csv\n./res/ESVCR36/ESVCR36_test_unseen.csv\n./res/ESVCR36/ESVCR36_train.csv\n./res/ESVCR36/ESVCR36_dev_seen.csv\n./res/V90/V90_test_seen.csv\n./res/V90/V90_train.csv\n./res/V90/V90_dev_seen.csv\n./res/V90/V90_test_unseen.csv\n./res/U72/U72_dev_seen.csv\n./res/U72/U72_test_seen.csv\n./res/U72/U72_train.csv\n./res/U72/U72_test_unseen.csv\n./res/O36/O36_train.csv\n./res/O36/O36_dev_seen.csv\n./res/O36/O36_test_seen.csv\n./res/O36/O36_test_unseen.csv\n./res/O50/O50_test_seen.csv\n./res/O50/O50_train.csv\n./res/O50/O50_dev_seen.csv\n./res/O50/O50_test_unseen.csv\n./res/ESVCR72/ESVCR72_test_unseen.csv\n./res/ESVCR72/ESVCR72_test_seen.csv\n./res/ESVCR72/ESVCR72_train.csv\n./res/ESVCR72/ESVCR72_dev_seen.csv\n./res/ELVCR36/ELVCR36_dev_seen.csv\n./res/ELVCR36/ELVCR36_test_seen.csv\n./res/ELVCR36/ELVCR36_test_unseen.csv\n./res/ELVCR36/ELVCR36_train.csv\n./res/ES72/ES72_train.csv\n./res/ES72/ES72_test_seen.csv\n./res/ES72/ES72_dev_seen.csv\n./res/ES72/ES72_test_unseen.csv\n" ], [ "for ef in os.listdir(os.path.join('.', 'res_ens')):\n os.makedirs(os.path.join('.', 'newres_ens', ef), exist_ok=True)\n for file in os.listdir(os.path.join('.', 'res_ens', ef)):\n if 'dev' in file or 'test' in file:\n print(os.path.join('.', 'res_ens', ef, file))\n df = pd.read_csv(os.path.join('.', 'res_ens', ef, file)).sort_values(by='id')\n elif 'train' in file:\n print(os.path.join('.', 'res_ens', ef, file))\n df = pd.read_csv(os.path.join('.', 'res_ens', ef, file))\n delta = ref[~ref.id.isin(df.id)]\n df = df.append(delta).sort_values(by='id')\n df.to_csv(os.path.join('.', 'newres_ens', ef, file))", 
"./res_ens/EL365072_loop/FIN_test_unseen_EL365072_loop_2.csv\n./res_ens/EL365072_loop/FIN_test_seen_EL365072_loop_2.csv\n./res_ens/EL365072_loop/FIN_dev_seen_EL365072_loop_2.csv\n./res_ens/EL365072_loop/FIN_train_EL365072_loop_2.csv\n./res_ens/U365072_loop/FIN_dev_seen_U365072_loop_2.csv\n./res_ens/U365072_loop/FIN_test_seen_U365072_loop_2.csv\n./res_ens/U365072_loop/FIN_test_unseen_U365072_loop_2.csv\n./res_ens/U365072_loop/FIN_train_U365072_loop_2.csv\n./res_ens/EL365072_sa/EL365072_sa_train_SA.csv\n./res_ens/EL365072_sa/EL365072_sa_test_unseen_SA.csv\n./res_ens/EL365072_sa/EL365072_sa_test_seen_SA.csv\n./res_ens/EL365072_sa/EL365072_sa_dev_seen_SA.csv\n./res_ens/U365072_sa/U365072_sa_test_unseen_SA.csv\n./res_ens/U365072_sa/U365072_sa_dev_seen_SA.csv\n./res_ens/U365072_sa/U365072_sa_train_SA.csv\n./res_ens/U365072_sa/U365072_sa_test_seen_SA.csv\n./res_ens/O365050_sa/O365050_sa_dev_seen_SA.csv\n./res_ens/O365050_sa/O365050_sa_train_SA.csv\n./res_ens/O365050_sa/O365050_sa_test_unseen_SA.csv\n./res_ens/O365050_sa/O365050_sa_test_seen_SA.csv\n./res_ens/ES365072_loop/FIN_test_seen_ES365072_loop_2.csv\n./res_ens/ES365072_loop/FIN_dev_seen_ES365072_loop_2.csv\n./res_ens/ES365072_loop/FIN_train_ES365072_loop_2.csv\n./res_ens/ES365072_loop/FIN_test_unseen_ES365072_loop_2.csv\n./res_ens/ES365072_sa/ES365072_sa_test_unseen_SA.csv\n./res_ens/ES365072_sa/ES365072_sa_train_SA.csv\n./res_ens/ES365072_sa/ES365072_sa_dev_seen_SA.csv\n./res_ens/ES365072_sa/ES365072_sa_test_seen_SA.csv\n./res_ens/VLMDB_sa/VLMDB_sa_dev_seen_SA.csv\n./res_ens/VLMDB_sa/VLMDB_sa_train_SA.csv\n./res_ens/VLMDB_sa/VLMDB_sa_test_seen_SA.csv\n./res_ens/VLMDB_sa/VLMDB_sa_test_unseen_SA.csv\n./res_ens/O365050_loop/FIN_train_O365050_loop_2.csv\n./res_ens/O365050_loop/FIN_dev_seen_O365050_loop_2.csv\n./res_ens/O365050_loop/FIN_test_unseen_O365050_loop_2.csv\n./res_ens/O365050_loop/FIN_test_seen_O365050_loop_2.csv\n./res_ens/VLMDB_loop/FIN_train_VLMDB_loop_2.csv\n./res_ens/VLMDB_loop/FIN_dev_seen_VLMDB_loop_2.csv\n./res_ens/VLMDB_loop/FIN_test_unseen_VLMDB_loop_2.csv\n./res_ens/VLMDB_loop/FIN_test_seen_VLMDB_loop_2.csv\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5bcaa6de6394d5b4748836e294cf59171a2704
85,935
ipynb
Jupyter Notebook
AI_algorithms_in_trading/project_6_starter.ipynb
Echo9k/nd880-AI_for_Trading
085c566dcb543cb376e03b720d43742e6d28aeda
[ "MIT" ]
1
2021-06-24T21:01:06.000Z
2021-06-24T21:01:06.000Z
AI_algorithms_in_trading/project_6_starter.ipynb
Echo9k/nd880-AI_for_Trading
085c566dcb543cb376e03b720d43742e6d28aeda
[ "MIT" ]
null
null
null
AI_algorithms_in_trading/project_6_starter.ipynb
Echo9k/nd880-AI_for_Trading
085c566dcb543cb376e03b720d43742e6d28aeda
[ "MIT" ]
1
2021-06-24T21:01:14.000Z
2021-06-24T21:01:14.000Z
37.723881
8,644
0.562053
[ [ [ "# Project 6: Analyzing Stock Sentiment from Twits\n## Instructions\nEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment.\n\n## Packages\nWhen you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.\n\n### Load Packages", "_____no_output_____" ] ], [ [ "import json\nimport nltk\nimport os\nimport random\nimport re\nimport torch\nfrom tqdm import tqdm # new\n\nfrom torch import nn, optim\nimport torch.nn.functional as F", "_____no_output_____" ] ], [ [ "## Introduction\nWhen deciding the value of a company, it's important to follow the news. For example, a product recall or natural disaster in a company's product chain. You want to be able to turn this information into a signal. Currently, the best tool for the job is a Neural Network. \n\nFor this project, you'll use posts from the social media site [StockTwits](https://en.wikipedia.org/wiki/StockTwits). The community on StockTwits is full of investors, traders, and entrepreneurs. Each message posted is called a Twit. This is similar to Twitter's version of a post, called a Tweet. You'll build a model around these twits that generate a sentiment score.\n\nWe've collected a bunch of twits, then hand labeled the sentiment of each. To capture the degree of sentiment, we'll use a five-point scale: very negative, negative, neutral, positive, very positive. Each twit is labeled -2 to 2 in steps of 1, from very negative to very positive respectively. You'll build a sentiment analysis model that will learn to assign sentiment to twits on its own, using this labeled data.\n\nThe first thing we should to do, is load the data.\n\n## Import Twits \n### Load Twits Data \nThis JSON file contains a list of objects for each twit in the `'data'` field:\n\n```\n{'data':\n {'message_body': 'Neutral twit body text here',\n 'sentiment': 0},\n {'message_body': 'Happy twit body text here',\n 'sentiment': 1},\n ...\n}\n```\n\nThe fields represent the following:\n\n* `'message_body'`: The text of the twit.\n* `'sentiment'`: Sentiment score for the twit, ranges from -2 to 2 in steps of 1, with 0 being neutral.\n\n\nTo see what the data look like by printing the first 10 twits from the list. ", "_____no_output_____" ] ], [ [ "with open(os.path.join('..', '..', 'data', 'project_6_stocktwits', 'twits.json'), 'r') as f:\n twits = json.load(f)\n\nprint(twits['data'][:10])", "[{'message_body': '$FITB great buy at 26.00...ill wait', 'sentiment': 2, 'timestamp': '2018-07-01T00:00:09Z'}, {'message_body': '@StockTwits $MSFT', 'sentiment': 1, 'timestamp': '2018-07-01T00:00:42Z'}, {'message_body': '#STAAnalystAlert for $TDG : Jefferies Maintains with a rating of Hold setting target price at USD 350.00. Our own verdict is Buy http://www.stocktargetadvisor.com/toprating', 'sentiment': 2, 'timestamp': '2018-07-01T00:01:24Z'}, {'message_body': '$AMD I heard there’s a guy who knows someone who thinks somebody knows something - on StockTwits.', 'sentiment': 1, 'timestamp': '2018-07-01T00:01:47Z'}, {'message_body': '$AMD reveal yourself!', 'sentiment': 0, 'timestamp': '2018-07-01T00:02:13Z'}, {'message_body': '$AAPL Why the drop? 
I warren Buffet taking out his position?', 'sentiment': 1, 'timestamp': '2018-07-01T00:03:10Z'}, {'message_body': '$BA bears have 1 reason on 06-29 to pay more attention https://dividendbot.com?s=BA', 'sentiment': -2, 'timestamp': '2018-07-01T00:04:09Z'}, {'message_body': '$BAC ok good we&#39;re not dropping in price over the weekend, lol', 'sentiment': 1, 'timestamp': '2018-07-01T00:04:17Z'}, {'message_body': '$AMAT - Daily Chart, we need to get back to above 50.', 'sentiment': 2, 'timestamp': '2018-07-01T00:08:01Z'}, {'message_body': '$GME 3% drop per week after spike... if no news in 3 months, back to 12s... if BO, then bingo... what is the odds?', 'sentiment': -2, 'timestamp': '2018-07-01T00:09:03Z'}]\n" ] ], [ [ "### Length of Data\nNow let's look at the number of twits in dataset. Print the number of twits below.", "_____no_output_____" ] ], [ [ "\"\"\"print out the number of twits\"\"\"\nlen(twits['data'])", "_____no_output_____" ] ], [ [ "### Split Message Body and Sentiment Score", "_____no_output_____" ] ], [ [ "messages = [twit['message_body'] for twit in twits['data']]\n\n# Since the sentiment scores are discrete, we'll scale the sentiments to 0 to 4 for use in our network\nsentiments = [twit['sentiment'] + 2 for twit in twits['data']]", "_____no_output_____" ] ], [ [ "## Preprocessing the Data\nWith our data in hand we need to preprocess our text. These twits are collected by filtering on ticker symbols where these are denoted with a leader $ symbol in the twit itself. For example,\n\n`{'message_body': 'RT @google Our annual look at the year in Google blogging (and beyond) http://t.co/sptHOAh8 $GOOG',\n 'sentiment': 0}`\n\nThe ticker symbols don't provide information on the sentiment, and they are in every twit, so we should remove them. This twit also has the `@google` username, again not providing sentiment information, so we should also remove it. We also see a URL `http://t.co/sptHOAh8`. Let's remove these too.\n\nThe easiest way to remove specific words or phrases is with regex using the `re` module. You can sub out specific patterns with a space:\n\n```python\nre.sub(pattern, ' ', text)\n```\nThis will substitute a space with anywhere the pattern matches in the text. Later when we tokenize the text, we'll split appropriately on those spaces.", "_____no_output_____" ], [ "### Pre-Processing", "_____no_output_____" ] ], [ [ "nltk.download('wordnet')\n\n\ndef preprocess(message):\n \"\"\"\n This function takes a string as input, then performs these operations: \n - lowercase\n - remove URLs\n - remove ticker symbols \n - removes punctuation\n - tokenize by splitting the string on whitespace \n - removes any single character tokens\n \n Parameters\n ----------\n message : The text message to be preprocessed.\n \n Returns\n -------\n tokens: The preprocessed text into tokens.\n \"\"\" \n # Lowercase the twit message\n text = message.lower()\n\n #\n regex_url = r\"https?\\:\\/\\/\\S+\"\n subst = \"\\\" \\\"\"\n\n # Replace URLs with a space in the message\n text = re.sub(regex_url, subst, text, 0, re.MULTILINE | re.IGNORECASE)\n \n # Replace ticker symbols with a space. The ticker symbols are any stock symbol that starts with $.\n text = re.sub(r'\\$\\w*', \" \", text)\n \n # Replace StockTwits usernames with a space. 
The usernames are any word that starts with @.\n    text = re.sub(r'\\@\\w*', \" \", text)\n\n    # Replace everything not a letter with a space\n    text = re.sub(r\"[^a-zA-Z]+\", \" \", text)\n    \n    # Tokenize by splitting the string on whitespace into a list of words\n    words = text.split(' ')\n\n    # Lemmatize words using the WordNetLemmatizer. You can ignore any word that is not longer than one character.\n    wnl = nltk.stem.WordNetLemmatizer()\n    tokens = [wnl.lemmatize(w) for w in words if len(w) > 1]\n    \n    return tokens", "[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data]   Unzipping corpora/wordnet.zip.\n" ] ], [ [ "### Preprocess All the Twits \nNow we can preprocess each of the twits in our dataset. Apply the function `preprocess` to all the twit messages.", "_____no_output_____" ] ], [ [ "proccessed_messages = [preprocess(message) for message in messages]", "_____no_output_____" ] ], [ [ "### Bag of Words\nNow with all of our messages tokenized, we want to create a vocabulary and count up how often each word appears in our entire corpus. Use the [`Counter`](https://docs.python.org/3.1/library/collections.html#collections.Counter) function to count up all the tokens.", "_____no_output_____" ] ], [ [ "# Create a vocabulary by using Bag of words\nfrom collections import Counter\nbow=Counter([i for y in proccessed_messages for i in y])", "_____no_output_____" ] ], [ [ "### Frequency of Words Appearing in Message\nWith our vocabulary, now we'll remove some of the most common words such as 'the', 'and', 'it', etc. These words don't contribute to identifying sentiment and are really common, resulting in a lot of noise in our input. If we can filter these out, then our network should have an easier time learning.\n\nWe also want to remove really rare words that show up in only a few twits. Here you'll want to divide the count of each word by the number of messages. Then remove words that only appear in some small fraction of the messages.", "_____no_output_____" ] ], [ [ "\"\"\"\nSet the following variables:\n    freqs\n    low_cutoff\n    high_cutoff\n    K_most_common\n\"\"\"\nfrom IPython.display import display\n\n# Dictionary that contains the frequency of words appearing in messages.\n# The key is the token and the value is its count divided by the number of unique tokens.\nword_count = len(bow)\nfreq_dic = {word:count/word_count for word, count in bow.items()}\n\n# Float that is the frequency cutoff. Drop words with a frequency that is lower or equal to this number.\nlow_cutoff = 0.00007\n\n# Integer that is the cut off for most common words. Drop words that are the `high_cutoff` most common words.\nhigh_cutoff = 20\n\n# The k most common words in the corpus. Use `high_cutoff` as the k.\n# Keep just the words (not the (word, count) tuples) so the membership test below actually filters them out.\nK_most_common = [word for word, count in bow.most_common(n=high_cutoff)]\n\nkept_words = [word for word in freq_dic if (freq_dic[word] > low_cutoff and word not in K_most_common)]\nprint('Kept {n} words:'.format(n=len(kept_words)))\ndisplay(kept_words)", "Kept 22863 words:\n" ] ], [ [ "### Updating Vocabulary by Removing Filtered Words\nLet's create three variables that will help with our vocabulary.", "_____no_output_____" ] ], [ [ "\"\"\"\nSet the following variables:\n    vocab\n    id2vocab\n    filtered\n\"\"\"\n\n# A dictionary for the `filtered_words`. The key is the word and value is an id that represents the word. \nvocab = {word:index for index, word in enumerate(freq_dic, 1) if word in kept_words}\n\n# Reverse of the `vocab` dictionary. The key is word id and value is the word. 
\nid2vocab = {word: ii for word,ii in enumerate(kept_words,1)}\n# tokenized with the words not in `filtered_words` removed.\nproccessed_messages = [[word for word in message if word in vocab] for message in tqdm(proccessed_messages)]", "100%|██████████| 1548010/1548010 [00:08<00:00, 187822.45it/s]\n" ] ], [ [ "### Balancing the classes\nLet's do a few last pre-processing steps. If we look at how our twits are labeled, we'll find that 50% of them are neutral. This means that our network will be 50% accurate just by guessing 0 every single time. To help our network learn appropriately, we'll want to balance our classes.\nThat is, make sure each of our different sentiment scores show up roughly as frequently in the data.\n\nWhat we can do here is go through each of our examples and randomly drop twits with neutral sentiment. What should be the probability we drop these twits if we want to get around 20% neutral twits starting at 50% neutral? We should also take this opportunity to remove messages with length 0.", "_____no_output_____" ] ], [ [ "balanced = {'messages': [], 'sentiments':[]}\n\nn_neutral = sum(1 for each in sentiments if each == 2)\nN_examples = len(sentiments)\nkeep_prob = (N_examples - n_neutral)/4/n_neutral\n\nfor idx, sentiment in enumerate(sentiments):\n message = proccessed_messages[idx]\n if len(message) == 0:\n # skip this message because it has length zero\n continue\n elif sentiment != 2 or random.random() < keep_prob:\n balanced['messages'].append(message)\n balanced['sentiments'].append(sentiment) ", "_____no_output_____" ], [ "%matplotlib inline\nfrom sklearn.utils import resample\nimport pandas as pd\n\nlen(sentiments)==len(proccessed_messages)\nmessage_sentiment_df = pd.DataFrame({'proccessed_messages':proccessed_messages, 'sentiments':sentiments})\n\nval_counts = message_sentiment_df.sentiments.value_counts()\nsample_size=int(min(val_counts)*0.8)\n\ndisplay(val_counts.plot(kind='bar', title='Count per class'),\n val_counts)", "_____no_output_____" ], [ "def balance(df, sentiment, size):\n sentiment_df = df[df.sentiments==sentiment]\n return resample(sentiment_df, n_samples=sample_size, replace=True, random_state=0)\n\nbalanced_df = pd.concat([balance(message_sentiment_df, sentiment, sample_size) for sentiment in set(sentiments)])\nval_counts = balanced_df.sentiments.value_counts()\nsample_size=int(min(val_counts)*0.8)\n\ndisplay(val_counts.plot(kind='bar', title='Count per class'),\n val_counts)\n\nbalanced_df.sample(4)", "_____no_output_____" ], [ "class TextClassifier(nn.Module):\n def __init__(self, vocab_size, embed_size, lstm_size, output_size, lstm_layers=1, dropout=0.1):\n \"\"\"\n Initialize the model by setting up the layers.\n \n Parameters\n ----------\n vocab_size : The vocabulary size.\n embed_size : The embedding layer size.\n lstm_size : The LSTM layer size.\n output_size : The output size.\n lstm_layers : The number of LSTM layers.\n dropout : The dropout probability.\n \"\"\"\n \n super().__init__()\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.lstm_size = lstm_size\n self.output_size = output_size\n self.lstm_layers = lstm_layers\n self.dropout = dropout\n \n # Setup embedding layer\n self.embedding = nn.Embedding(vocab_size, embed_size)\n\n # LSTM layer\n self.lstm = nn.LSTM(embed_size,lstm_size,lstm_layers,dropout=dropout,batch_first=False)\n\n # Dropout layer\n self.dropout = nn.Dropout(dropout)\n\n # Liner\n self.fc = nn.Linear(lstm_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n\n def init_hidden(self, 
batch_size):\n \"\"\" \n Initializes hidden state\n \n Parameters\n ----------\n batch_size : The size of batches.\n \n Returns\n -------\n hidden_state\n \n \"\"\"\n \n # TODO Implement \n \n # Create two new tensors with sizes n_layers x batch_size x hidden_dim,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n return (weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_(),\n weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_()) \n\n\n def forward(self, nn_input, hidden_state):\n \"\"\"\n Perform a forward pass of our model on nn_input.\n \n Parameters\n ----------\n nn_input : The batch of input to the NN.\n hidden_state : The LSTM hidden state.\n\n Returns\n -------\n logps: log softmax output\n hidden_state: The new hidden state.\n\n \"\"\"\n \n embeds = self.embedding(nn_input)\n lstm_out, hidden_state = self.lstm(embeds,hidden_state)\n\n lstm_out = lstm_out[-1,:,:]\n out = self.dropout(lstm_out)\n out = self.fc(out)\n\n return self.softmax(out), hidden_state", "_____no_output_____" ] ], [ [ "If you did it correctly, you should see the following result ", "_____no_output_____" ], [ "Finally let's convert our tokens into integer ids which we can pass to the network.", "_____no_output_____" ] ], [ [ "token_ids = [[vocab[word] for word in message] for message in balanced_df['proccessed_messages']]\nsentiments = balanced_df['sentiments']", "_____no_output_____" ] ], [ [ "## Neural Network\nNow we have our vocabulary which means we can transform our tokens into ids, which are then passed to our network. So, let's define the network now!\n\nHere is a nice diagram showing the network we'd like to build: \n\n#### Embed -> RNN -> Dense -> Softmax\n### Implement the text classifier\nBefore we build text classifier, if you remember from the other network that you built in \"Sentiment Analysis with an RNN\" exercise - which there, the network called \" SentimentRNN\", here we named it \"TextClassifer\" - consists of three main parts: 1) init function `__init__` 2) forward pass `forward` 3) hidden state `init_hidden`. \n\nThis network is pretty similar to the network you built expect in the `forward` pass, we use softmax instead of sigmoid. The reason we are not using sigmoid is that the output of NN is not a binary. In our network, sentiment scores have 5 possible outcomes. 
We are looking for an outcome with the highest probability thus softmax is a better choice.", "_____no_output_____" ] ], [ [ "class TextClassifier(nn.Module):\n def __init__(self, vocab_size, embed_size, lstm_size, output_size, lstm_layers=1, dropout=0.1):\n \"\"\"\n Initialize the model by setting up the layers.\n \n Parameters\n ----------\n vocab_size : The vocabulary size.\n embed_size : The embedding layer size.\n lstm_size : The LSTM layer size.\n output_size : The output size.\n lstm_layers : The number of LSTM layers.\n dropout : The dropout probability.\n \"\"\"\n \n super().__init__()\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.lstm_size = lstm_size\n self.output_size = output_size\n self.lstm_layers = lstm_layers\n self.dropout = dropout\n \n # Setup embedding layer\n self.embedding = nn.Embedding(vocab_size, embed_size)\n\n # LSTM layer\n self.lstm = nn.LSTM(embed_size,lstm_size,lstm_layers,dropout=dropout,batch_first=False)\n\n # Dropout layer\n self.dropout = nn.Dropout(dropout)\n\n # Liner\n self.fc = nn.Linear(lstm_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n\n def init_hidden(self, batch_size):\n \"\"\" \n Initializes hidden state\n \n Parameters\n ----------\n batch_size : The size of batches.\n \n Returns\n -------\n hidden_state\n \n \"\"\"\n \n # TODO Implement \n \n # Create two new tensors with sizes n_layers x batch_size x hidden_dim,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n return (weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_(),\n weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_()) \n\n\n def forward(self, nn_input, hidden_state):\n \"\"\"\n Perform a forward pass of our model on nn_input.\n \n Parameters\n ----------\n nn_input : The batch of input to the NN.\n hidden_state : The LSTM hidden state.\n\n Returns\n -------\n logps: log softmax output\n hidden_state: The new hidden state.\n\n \"\"\"\n \n embeds = self.embedding(nn_input)\n lstm_out, hidden_state = self.lstm(embeds,hidden_state)\n\n lstm_out = lstm_out[-1,:,:]\n out = self.dropout(lstm_out)\n out = self.fc(out)\n\n return self.softmax(out), hidden_state", "_____no_output_____" ] ], [ [ "### View Model", "_____no_output_____" ] ], [ [ "model = TextClassifier(len(vocab), 10, 6, 5, dropout=0.1, lstm_layers=2)\nmodel.embedding.weight.data.uniform_(-1, 1)\ninput = torch.randint(0, 1000, (5, 4), dtype=torch.int64)\nhidden = model.init_hidden(4)\n\nlogps, _ = model.forward(input, hidden)\nprint(logps)", "tensor([[-1.3388, -1.5820, -1.6625, -1.7398, -1.7894],\n [-1.3531, -1.5783, -1.6491, -1.7431, -1.7836],\n [-1.3427, -1.5810, -1.6534, -1.7541, -1.7800],\n [-1.3426, -1.5795, -1.6679, -1.7240, -1.7973]])\n" ] ], [ [ "## Training\n### DataLoaders and Batching\nNow we should build a generator that we can use to loop through our data. It'll be more efficient if we can pass our sequences in as batches. Our input tensors should look like `(sequence_length, batch_size)`. So if our sequences are 40 tokens long and we pass in 25 sequences, then we'd have an input size of `(40, 25)`.\n\nIf we set our sequence length to 40, what do we do with messages that are more or less than 40 tokens? For messages with fewer than 40 tokens, we will pad the empty spots with zeros. We should be sure to **left** pad so that the RNN starts from nothing before going through the data. If the message has 20 tokens, then the first 20 spots of our 40 long sequence will be 0. 
If a message has more than 40 tokens, we'll just keep the first 40 tokens.", "_____no_output_____" ] ], [ [ "def dataloader(messages, labels, sequence_length=30, batch_size=32, shuffle=False):\n    \"\"\" \n    Build a dataloader.\n    \"\"\"\n    if shuffle:\n        indices = list(range(len(messages)))\n        random.shuffle(indices)\n        messages = [messages[idx] for idx in indices]\n        labels = [labels[idx] for idx in indices]\n\n    total_sequences = len(messages)\n\n    for ii in range(0, total_sequences, batch_size):\n        batch_messages = messages[ii: ii+batch_size]\n        \n        # First initialize a tensor of all zeros\n        batch = torch.zeros((sequence_length, len(batch_messages)), dtype=torch.int64)\n        for batch_num, tokens in enumerate(batch_messages):\n            token_tensor = torch.tensor(tokens)\n            # Left pad!\n            start_idx = max(sequence_length - len(token_tensor), 0)\n            batch[start_idx:, batch_num] = token_tensor[:sequence_length]\n        \n        label_tensor = torch.tensor(labels[ii: ii+len(batch_messages)])\n        \n        yield batch, label_tensor", "_____no_output_____" ] ], [ [ "### Training and Validation\nWith our data in nice shape, we'll split it into training and validation sets.", "_____no_output_____" ] ], [ [ "\"\"\"\nSplit data into training and validation datasets. Use an appropriate split size.\nThe features are the `token_ids` and the labels are the `sentiments`.\n\"\"\" \n\n## train_features\nsplit = int(len(token_ids) * .8)\ntrain_features, remaining_x = token_ids[:split], token_ids[split:]\n\n## valid_features\nval = int(len(token_ids[split:]) * .5)\nvalid_features = remaining_x[:val]\n\n## train_labels\n# Convert to plain lists so the positional indexing inside `dataloader` is safe\n# (the resampled Series carries a non-sequential index).\ntrain_labels, remaining_y = list(sentiments[:split]), list(sentiments[split:])\n\n## valid_labels\nvalid_labels = remaining_y[:val]", "_____no_output_____" ], [ "train_labels", "_____no_output_____" ], [ "text_batch, labels = next(iter(dataloader(train_features, train_labels, sequence_length=20, batch_size=64)))\nmodel = TextClassifier(len(vocab)+1, 200, 128, 5, dropout=0.)\nhidden = model.init_hidden(64)\nlogps, hidden = model.forward(text_batch, hidden)", "_____no_output_____" ] ], [ [ "### Training\nIt's time to train the neural network!", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = TextClassifier(len(vocab)+1, 1024, 512, 5, lstm_layers=2, dropout=0.2)\nmodel.embedding.weight.data.uniform_(-1, 1)\nmodel.to(device)", "_____no_output_____" ], [ "\"\"\"\nTrain your model with dropout. Make sure to clip your gradients.\nPrint the training loss, validation loss, and validation accuracy for every 100 steps.\n\"\"\"\n\n# Reasonable starting hyperparameters (an assumption; tune for your data and hardware).\nepochs = 3\nbatch_size = 512\nlearning_rate = 0.001\n\nprint_every = 100\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nmodel.train()\n\nfor epoch in range(epochs):\n    print('Starting epoch {}'.format(epoch + 1))\n    \n    steps = 0\n    for text_batch, labels in dataloader(\n        train_features, train_labels, batch_size=batch_size, sequence_length=20, shuffle=True):\n        steps += 1\n        # Re-initialize the hidden state for each batch (the last batch may be smaller).\n        hidden = model.init_hidden(labels.shape[0])\n        \n        # Set Device; `.to(device)` returns new tensors, so the hidden state must be reassigned.\n        text_batch, labels = text_batch.to(device), labels.to(device)\n        hidden = tuple(each.to(device) for each in hidden)\n        \n        # Train step: forward pass, NLL loss, backprop with gradient clipping.\n        model.zero_grad()\n        log_ps, hidden = model(text_batch, hidden)\n        loss = criterion(log_ps, labels)\n        loss.backward()\n        nn.utils.clip_grad_norm_(model.parameters(), 5)\n        optimizer.step()\n        \n        if steps % print_every == 0:\n            model.eval()\n            \n            # Validation loss and accuracy over the held-out set.\n            val_losses, val_accs = [], []\n            with torch.no_grad():\n                for val_batch, val_labels in dataloader(\n                    valid_features, valid_labels, batch_size=batch_size, sequence_length=20):\n                    val_batch, val_labels = val_batch.to(device), val_labels.to(device)\n                    val_hidden = model.init_hidden(val_labels.shape[0])\n                    val_hidden = tuple(each.to(device) for each in val_hidden)\n                    val_ps, val_hidden = model(val_batch, val_hidden)\n                    val_losses.append(criterion(val_ps, val_labels).item())\n                    val_accs.append((val_ps.argmax(dim=1) == val_labels).float().mean().item())\n            \n            print('Epoch: {}/{}'.format(epoch + 1, epochs),\n                  'Step: {}'.format(steps),\n                  'Train Loss: {:.4f}'.format(loss.item()),\n                  'Val Loss: {:.4f}'.format(sum(val_losses)/len(val_losses)),\n                  'Val Acc: {:.4f}'.format(sum(val_accs)/len(val_accs)))\n            \n            model.train()", "_____no_output_____" ] ], [ [ "## Making Predictions\n### Prediction \nOkay, now that you have a trained model, try it on some new twits and see if it works appropriately. 
Remember that for any new text, you'll need to preprocess it first before passing it to the network. Implement the `predict` function to generate the prediction vector from a message.", "_____no_output_____" ] ], [ [ "def predict(text, model, vocab):\n    \"\"\" \n    Make a prediction on a single sentence.\n\n    Parameters\n    ----------\n        text : The string to make a prediction on.\n        model : The model to use for making the prediction.\n        vocab : Dictionary for word to word ids. The key is the word and the value is the word id.\n\n    Returns\n    -------\n        pred : Prediction vector\n    \"\"\" \n    \n    tokens = preprocess(text)\n    \n    # Filter non-vocab words\n    tokens = [word for word in tokens if word in vocab]\n    # Convert words to ids\n    tokens = [vocab[word] for word in tokens]\n    \n    # Adding a batch dimension (shape: sequence_length x 1)\n    text_input = torch.tensor(tokens).unsqueeze(1)\n    # Get the NN output\n    hidden = model.init_hidden(1)\n    logps, _ = model.forward(text_input, hidden)\n    # Take the exponent of the NN output to get a range of 0 to 1 for each label.\n    pred = torch.exp(logps)\n    \n    return pred", "_____no_output_____" ], [ "text = \"Google is working on self driving cars, I'm bullish on $goog\"\nmodel.eval()\nmodel.to(\"cpu\")\npredict(text, model, vocab)", "_____no_output_____" ] ], [ [ "### Questions: What is the prediction of the model? What is the uncertainty of the prediction?\n**TODO: Answer Question**", "_____no_output_____" ], [ "Now we have a trained model and we can make predictions. We can use this model to track the sentiments of various stocks by predicting the sentiments of twits as they are coming in. Now we have a stream of twits. For each of those twits, pull out the stocks mentioned in them and keep track of the sentiments. Remember that in the twits, ticker symbols are encoded with a dollar sign as the first character, all caps, and 2-4 letters, like $AAPL. Ideally, you'd want to track the sentiments of the stocks in your universe and use this as a signal in your larger model(s).\n\n## Testing\n### Load the Data ", "_____no_output_____" ] ], [ [ "with open(os.path.join('..', '..', 'data', 'project_6_stocktwits', 'test_twits.json'), 'r') as f:\n    test_data = json.load(f)", "_____no_output_____" ] ], [ [ "### Twit Stream", "_____no_output_____" ] ], [ [ "def twit_stream():\n    for twit in test_data['data']:\n        yield twit\n\nnext(twit_stream())", "_____no_output_____" ] ], [ [ "Using the `predict` function, let's apply it to a stream of twits.", "_____no_output_____" ] ], [ [ "def score_twits(stream, model, vocab, universe):\n    \"\"\" \n    Given a stream of twits and a universe of tickers, return sentiment scores for tickers in the universe.\n    \"\"\"\n    for twit in stream:\n\n        # Get the message text\n        text = twit['message_body']\n        symbols = re.findall(r'\\\\$[A-Z]{2,4}', text)\n        score = predict(text, model, vocab)\n\n        for symbol in symbols:\n            if symbol in universe:\n                yield {'symbol': symbol, 'score': score, 'timestamp': twit['timestamp']}", "_____no_output_____" ], [ "universe = {'$BBRY', '$AAPL', '$AMZN', '$BABA', '$YHOO', '$LQMT', '$FB', '$GOOG', '$BBBY', '$JNUG', '$SBUX', '$MU'}\nscore_stream = score_twits(twit_stream(), model, vocab, universe)\n\nnext(score_stream)", "_____no_output_____" ] ], [ [ "That's it. You have successfully built a model for sentiment analysis! ", "_____no_output_____" ], [ "## Submission\nNow that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a passed or not passed grade. You can continue to the next section while you wait for feedback.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ec5bcb8e0d4add5b19aaa34aad885472506fb7fd
253,565
ipynb
Jupyter Notebook
ipython/embedding-tree-illustrator-experiments.ipynb
OpenTreeOfLife/tree-illustrator
d97024e57fc446f17c0cf0cd0090a83e5ac94394
[ "BSD-2-Clause" ]
5
2015-06-17T07:14:04.000Z
2018-04-06T08:13:48.000Z
ipython/embedding-tree-illustrator-experiments.ipynb
OpenTreeOfLife/tree-illustrator
d97024e57fc446f17c0cf0cd0090a83e5ac94394
[ "BSD-2-Clause" ]
11
2015-04-08T15:55:49.000Z
2018-04-13T05:34:44.000Z
ipython/embedding-tree-illustrator-experiments.ipynb
OpenTreeOfLife/tree-illustrator
d97024e57fc446f17c0cf0cd0090a83e5ac94394
[ "BSD-2-Clause" ]
2
2015-06-04T20:13:41.000Z
2016-03-04T22:07:06.000Z
378.455224
112,684
0.726847
[ [ [ "# How can we embed n instances of Tree Illustrator into a notebook?\n\nThis is an attempt to do a simple thing.", "_____no_output_____" ] ], [ [ "from pprint import pprint\nprint(\"hello\")", "hello\n" ] ], [ [ "## Will this work with static notebooks?", "_____no_output_____" ], [ "## Can we enable collaboration (esp. real-time) in these notebooks?\n\nsee Sage Notebooks for a pretty-good implementation, but won't show much inside of a TI instance.", "_____no_output_____" ] ], [ [ "test = \"HI MOM\"\nprint(test)\n%qtconsole\n", "_____no_output_____" ], [ "# use a server-side support class to load the static stylist into an IFrame\nfrom IPython.lib.display import IFrame\nIFrame(\n src=\"http://rawgit.com/OpenTreeOfLife/tree-illustrator/master/stylist/stylist.html\", \n width=\"100%\", \n height=500,\n trees=[],\n data=[],\n ornaments=[]\n)\n\n", "_____no_output_____" ], [ "# Let's try another, as a proper subclass of IFrame above\nfrom IPython.lib.display import IFrame\nclass TreeIllustrator(IFrame):\n \"\"\"Class for embedding an instance of Tree Illustrator in an IPython session\n This could optionally use keyword arguments to pass tree data, Vega specs, etc.\n ti2 = TreeIllustrator()\n display(ti2)\n To load an initial tree (in this case, a Newick string in the IPython session)::\n ti3 = TreeIllustrator(\n trees=[ myNewick ]\n )\n display(ti3)\n TODO: Add more options for insertion of style, Vega specs, maybe a complete illustration?\n We might consider using our Illustration JSON as the model for even partial specs here.\n \"\"\"\n\n def __init__(self, id, width='100%', height=400, **kwargs):\n # tokenize? eg, src = \"https://www.youtube.com/embed/{0}\".format(id)\n src=\"http://rawgit.com/OpenTreeOfLife/tree-illustrator/master/stylist/stylist.html\"\n super(TreeIllustrator, self).__init__(src, width, height, **kwargs)", "_____no_output_____" ], [ "TreeIllustrator('ti4')", "_____no_output_____" ], [ "%%javascript\n// Load a JS file (for example, general support for Tree Illustrator instances) from a second gist at\n// https://gist.github.com/jimallman/f16ef46d4d447e4bb74e\n$.getScript('https://rawgit.com/jimallman/f16ef46d4d447e4bb74e/raw/4874dd6acd55df390e1aa6dde308914dbba4746b/js-import-test.js');\n\n// here's a JS-only implementation of the TreeIllustrator class above\nvar elementID = 'ti5'; // expect this as an incoming argument? scan the DOM to pick a sensible default?\nthis.append_display_data({\n 'data': {\n 'text/html': '<iframe id=\"'+ elementID +'\" width=\"100%\" height=\"500\" \\\n src=\"http://rawgit.com/OpenTreeOfLife/tree-illustrator/master/stylist/stylist.html?data=%5B%5D&amp;ornaments=%5B%5D&amp;trees=%5B%5D\" \\\n frameborder=\"0\" allowfullscreen=\"allowfullscreen\"> \\\n </iframe>'\n } \n})\n\n// Assign serial IDs? or maybe base these on the ID of the current cell (if it can be determined from JS)\nvar tiDomain = 'http://rawgit.com';\nvar ti5 = $('#ti5')[0];\nvar ti5window = ti5.contentWindow;\n\n// add a listener for messages from the Tree Illustrator instance (its window)\nwindow.addEventListener(\"message\", receiveMessage, false);\n// TODO: make sure we're not duplicating this? 
, [ "%%javascript\n// Load a JS file (for example, general support for Tree Illustrator instances) from a second gist at\n// https://gist.github.com/jimallman/f16ef46d4d447e4bb74e\n$.getScript('https://rawgit.com/jimallman/f16ef46d4d447e4bb74e/raw/4874dd6acd55df390e1aa6dde308914dbba4746b/js-import-test.js');\n\n// here's a JS-only implementation of the TreeIllustrator class above\nvar elementID = 'ti5'; // expect this as an incoming argument? scan the DOM to pick a sensible default?\nthis.append_display_data({\n    'data': {\n        'text/html': '<iframe id=\"'+ elementID +'\" width=\"100%\" height=\"500\" \\\n            src=\"http://rawgit.com/OpenTreeOfLife/tree-illustrator/master/stylist/stylist.html?data=%5B%5D&amp;ornaments=%5B%5D&amp;trees=%5B%5D\" \\\n            frameborder=\"0\" allowfullscreen=\"allowfullscreen\"> \\\n            </iframe>'\n    }\n})\n\n// Assign serial IDs? or maybe base these on the ID of the current cell (if it can be determined from JS)\nvar tiDomain = 'http://rawgit.com';\nvar ti5 = $('#ti5')[0];\nvar ti5window = ti5.contentWindow;\n\n// add a listener for messages from the Tree Illustrator instance (its window)\n// TODO: make sure we're not duplicating this -- or allow one listener per instance?\nwindow.addEventListener(\"message\", receiveMessage, false);\n\nfunction receiveMessage(event) {\n    // the dispatched message has origin, data, source [sending window]\n    if (event.origin !== tiDomain) {\n        alert(\"Attempted inter-window message from an unexpected domain: [\"+ event.origin +\"], expected: [\"+ tiDomain +\"]\");\n        return;\n    }\n\n    // examine the payload to see what's what\n    if (event.source !== ti5window) {\n        alert(\"Attempted inter-window message from an unexpected window: [\"+ event.source +\"], expected: [\"+ ti5window +\"]\");\n        return;\n    }\n\n    console.log(event);\n    debugger;\n}\n\n// define methods for TreeIllustrator instances\n\nfunction injectTree( data, treeIndex, options ) {\n    // pass newick, other formats? bounce to peyotl for conversion, as needed?\n    // specify nth tree to REPLACE an existing tree?\n    ti5window.postMessage(\n        {\n            treeData: data,\n            treeIndex: treeIndex,\n            options: options\n        },\n        tiDomain  // TODO: restrict to the domain extracted from 'src' URL above?\n    );\n    // TODO: consider a more general message 'addOrReplaceElement' with friendly JS wrappers\n}\n\nfunction useStyleGuide( data ) {\n    // specify its name/label; complain if not found!\n}\n\nfunction listAllPossibleInputVars() {\n    // return a list of variables in the current IPython session, so TI\n    // can offer these as sources for its trees, supplemental data, etc.\n}\n\nfunction saveIllustration() {\n    // save everything? or just the main, monolithic JSON?\n}\n\nfunction loadIllustration(data) {\n    // based on URL? or assume it's a local variable\n}\n\nfunction dumpSVG() {\n    /* possibly options for\n     * - put SVG into (or append to?) output of this cell\n     * - render it as literal SVG (plus available source?)\n     * - save it to a \"local\" file for persistence & display\n     * - show it in a new window/frame (current behavior)\n     */\n}\n\n// smoke-test the bridge with a dummy payload\ninjectTree( {'size':'small', 'color':'blue'} );\n\n/* Documentation used to build this:\n\n   OutputArea class ('this' above) and its methods:\n   https://github.com/jupyter/notebook/blob/master/notebook/static/notebook/js/outputarea.js\n*/", "_____no_output_____" ] ] ], [ [ "### SVG output\nHere's an example of literal SVG output from the Tree Illustrator above. We should support extracting this from a button (within the Tree Illustrator instance, or in the hosting page?), or perhaps a simple API call like\n\n```javascript\nti_3.dumpSVG( {showDiagnostics: true} );\n```", "_____no_output_____" ] ]
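, [ [ "Before the literal dump below, here's a sketch of how `dumpSVG()` *might* work (an assumption, not a decided API): with a same-origin frame, the hosting page could serialize the `<svg>` directly; with the cross-origin rawgit URL used above, TI itself would have to postMessage the markup out.", "_____no_output_____" ] ], [ [ "%%javascript\n// Sketch of one possible dumpSVG() implementation -- NOT the decided API.\n// It assumes a SAME-ORIGIN Tree Illustrator frame; the rawgit-hosted frame\n// above is cross-origin, so there TI would need to postMessage the markup out.\nvar frame = $('#ti5')[0];\nvar svgNode = frame.contentDocument && frame.contentDocument.querySelector('svg.marks');\nif (svgNode) {\n    // XMLSerializer yields the literal markup, like the example below\n    var markup = new XMLSerializer().serializeToString(svgNode);\n    element.append($('<pre/>').text(markup.substring(0, 500) + ' ...'));\n} else {\n    element.append('Cannot reach the frame document (probably cross-origin).');\n}", "_____no_output_____" ] ]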
, [ [ "%%html\n<svg class=\"marks\" width=\"9.010416666666666in\" height=\"12.126953125in\" viewBox=\"-50 -124.1875 865 1164.1875\">\n<!-- defs: viewport/illustration masks, printing crop marks, a description block (illustration-name placeholder + \"Generated 8/3/2015 - 11:11:35 PM\"), and cm/inch rulers -->\n<!-- body: the clipped illustration group ('tree-2'), a radial tree drawn as hundreds of machine-generated grey 2px <path> edges plus a small black circle symbol per node -->\n[... several thousand characters of generated SVG path data omitted ...]
transform=\"translate(-112.96870108464074,199.77067496319148)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-101.37979059467105,205.89411856335445)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-47.90393997585463,187.2321039091205)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-77.24880942605976,216.10847147267575)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-32.2403196039316,202.79532042367785)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-52.11077188975116,223.50551996103883)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-18.73156494600014,216.6126556831286)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-26.2933907487051,227.98883218907)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(-13.23489764129942,229.1180645091616)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(27.053776538342717,117.7208143359619)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(18.480045628091254,216.63425869655867)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(12.968862128952532,229.13327697015166)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(26.02865757393943,228.01920749116425)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(47.636027269631796,124.03558448681692)\" 
d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(51.85122683939201,223.56587457671156)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(70.18569280683673,126.82156022306587)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(78.76439753425785,202.65261855818503)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(76.99783606676138,216.1980185872113)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(89.21474839025461,211.44970718746237)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(93.80473833441583,125.92829275594625)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(112.73667329267538,199.9017070835079)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(117.3514585168568,121.75888144881785)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(134.7889032502044,185.74768251745982)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(142.6217182270043,111.7441883679922)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(155.08395372080665,169.1721528453271)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(169.56921381039027,92.71963075407565)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(164.31335598544754,123.1523497084218)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 
0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(173.35724768431834,150.3912054453914)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(182.83795668387202,117.65286109181696)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(189.3705648085231,129.6496786887687)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(196.463267082953,118.62729317948838)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(192.35353690343788,71.87139232437048)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(208.7051599148463,95.45892428117156)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(208.57837612331417,61.37568851903107)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(218.22626181384328,71.04610231924016)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path><path transform=\"translate(221.92629495807702,58.4719557239247)\" d=\"M0,1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,-1.5957691216057308A1.5957691216057308,1.5957691216057308 0 1,1 0,1.5957691216057308Z\" style=\"fill: rgb(0, 0, 0);\"></path></g><g id=\"g40\" class=\"type-text\"><text x=\"3.9997806774620486\" y=\"0.04188713646498323\" text-anchor=\"start\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"230.74191655348386\" y=\"35.782928125365736\" text-anchor=\"start\" transform=\"rotate(8.21508095481181 230.74191655348386,35.782928125365736)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Cycas</text><text x=\"232.40840528356588\" y=\"22.551788256140302\" text-anchor=\"start\" transform=\"rotate(4.942353682084558 232.40840528356588,22.551788256140302)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Ginkgo</text><text x=\"15.694315672770797\" 
y=\"-3.4958552658649578\" text-anchor=\"start\" transform=\"rotate(346.84256536310363 15.694315672770797,-3.4958552658649578)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"233.46421592957657\" y=\"-4.0877720567625175\" text-anchor=\"start\" transform=\"rotate(358.39689913663005 233.46421592957657,-4.0877720567625175)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Pinus</text><text x=\"25.701126528233978\" y=\"-11.503005311256192\" text-anchor=\"start\" transform=\"rotate(335.28823158957744 25.701126528233978,-11.503005311256192)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"231.47646348344284\" y=\"-30.674042009463783\" text-anchor=\"start\" transform=\"rotate(351.85144459117555 231.47646348344284,-30.674042009463783)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Amborella</text><text x=\"30.516385837176806\" y=\"-26.22505782720859\" text-anchor=\"start\" transform=\"rotate(318.7250185879793 30.516385837176806,-26.22505782720859)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"213.12842694648444\" y=\"-60.02962748338105\" text-anchor=\"start\" transform=\"rotate(343.6696264093573 213.12842694648444,-60.02962748338105)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"226.47106133817567\" y=\"-56.860428914670294\" text-anchor=\"start\" transform=\"rotate(345.3059900457208 226.47106133817567,-56.860428914670294)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Nuphar</text><text x=\"222.85561630150985\" y=\"-69.69665905102057\" text-anchor=\"start\" transform=\"rotate(342.03326277299357 222.85561630150985,-69.69665905102057)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Nymphaea</text><text x=\"21.595594091774846\" y=\"-47.65052092137238\" text-anchor=\"start\" transform=\"rotate(293.78041076660156 21.595594091774846,-47.65052092137238)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"213.45816398550855\" y=\"-94.64598368623861\" text-anchor=\"start\" transform=\"rotate(335.48780822753906 213.45816398550855,-94.64598368623861)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Illicium</text><text x=\"-20.461510569377932\" y=\"-61.05742148324991\" text-anchor=\"end\" transform=\"rotate(72.07301330566406 
-20.461510569377932,-61.05742148324991)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"157.5349375491483\" y=\"-118.72445794355016\" text-anchor=\"start\" transform=\"rotate(322.39689913663005 157.5349375491483,-118.72445794355016)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"201.27795805976697\" y=\"-118.36145318172923\" text-anchor=\"start\" transform=\"rotate(328.94235368208456 201.27795805976697,-118.36145318172923)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Chloranthus</text><text x=\"151.72922426810345\" y=\"-144.23023101628397\" text-anchor=\"start\" transform=\"rotate(315.85144459117555 151.72922426810345,-144.23023101628397)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"172.9498957389181\" y=\"-138.25923517933194\" text-anchor=\"start\" transform=\"rotate(320.7605355002663 172.9498957389181,-138.25923517933194)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"186.47378613575148\" y=\"-140.53390012448247\" text-anchor=\"start\" transform=\"rotate(322.39689913663005 186.47378613575148,-140.53390012448247)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Calycanthus</text><text x=\"178.14675352605036\" y=\"-150.9502706460928\" text-anchor=\"start\" transform=\"rotate(319.1241718639028 178.14675352605036,-150.9502706460928)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Liriodendron</text><text x=\"146.84057524131225\" y=\"-165.72606316230704\" text-anchor=\"start\" transform=\"rotate(310.94235368208456 146.84057524131225,-165.72606316230704)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"159.77851166546415\" y=\"-170.27353643467072\" text-anchor=\"start\" transform=\"rotate(312.57871731844807 159.77851166546415,-170.27353643467072)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Drimys</text><text x=\"149.7972158016475\" y=\"-179.11740322502067\" text-anchor=\"start\" transform=\"rotate(309.3059900457208 149.7972158016475,-179.11740322502067)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Piper</text><text x=\"-76.45830412991033\" y=\"-1.5336578201934001\" text-anchor=\"end\" transform=\"rotate(1.7491274746980707 -76.45830412991033,-1.5336578201934001)\" style=\"font-style: normal; 
font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"25.950645464849018\" y=\"-84.66482480649503\" text-anchor=\"start\" transform=\"rotate(286.44084444912994 25.950645464849018,-84.66482480649503)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"116.42995240529945\" y=\"-188.33838889450064\" text-anchor=\"start\" transform=\"rotate(301.1241718639027 116.42995240529945,-188.33838889450064)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"128.4029525754494\" y=\"-195.02546441402694\" text-anchor=\"start\" transform=\"rotate(302.7605355002663 128.4029525754494,-195.02546441402694)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Nandina</text><text x=\"117.0597688528657\" y=\"-202.0377700236113\" text-anchor=\"start\" transform=\"rotate(299.48780822753906 117.0597688528657,-202.0377700236113)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Ranunculus</text><text x=\"4.139462394767885\" y=\"-100.54640487119738\" text-anchor=\"start\" transform=\"rotate(271.7575170343573 4.139462394767885,-100.54640487119738)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"93.26617204331299\" y=\"-214.06464246200758\" text-anchor=\"start\" transform=\"rotate(292.94235368208456 93.26617204331299,-214.06464246200758)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Platanus</text><text x=\"-38.59976724037909\" y=\"-105.89485686930614\" text-anchor=\"end\" transform=\"rotate(70.57268038663005 -38.59976724037909,-105.89485686930614)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"68.2567080489482\" y=\"-223.30085491623325\" text-anchor=\"start\" transform=\"rotate(286.39689913662994 68.2567080489482,-223.30085491623325)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Buxus</text><text x=\"-103.27400159453316\" y=\"-70.049220816756\" text-anchor=\"end\" transform=\"rotate(34.748461636630054 -103.27400159453316,-70.049220816756)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-26.783908529037195\" y=\"-146.51942125010237\" text-anchor=\"end\" transform=\"rotate(80.24064913663005 -26.783908529037195,-146.51942125010237)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; 
font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"42.35741347998403\" y=\"-229.62599923198084\" text-anchor=\"start\" transform=\"rotate(279.85144459117544 42.35741347998403,-229.62599923198084)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Vitis</text><text x=\"-80.44048573646539\" y=\"-139.49481220179055\" text-anchor=\"end\" transform=\"rotate(60.62985368208456 -80.44048573646539,-139.49481220179055)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-32.73086443435102\" y=\"-182.26870916763244\" text-anchor=\"end\" transform=\"rotate(80.4196264093573 -32.73086443435102,-182.26870916763244)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-5.059598462116648\" y=\"-209.28095350368898\" text-anchor=\"end\" transform=\"rotate(89.21508095481181 -5.059598462116648,-209.28095350368898)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"15.905925248992867\" y=\"-232.95761748003318\" text-anchor=\"start\" transform=\"rotate(273.30599004572093 15.905925248992867,-232.95761748003318)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Pelargonium</text><text x=\"-21.129282775266482\" y=\"-220.4106076346583\" text-anchor=\"end\" transform=\"rotate(85.12417186390257 -21.129282775266482,-220.4106076346583)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-15.63542987135665\" y=\"-232.9759286551679\" text-anchor=\"end\" transform=\"rotate(86.76053550026631 -15.63542987135665,-232.9759286551679)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Eucalyptus</text><text x=\"-28.91024883673896\" y=\"-231.70336102913535\" text-anchor=\"end\" transform=\"rotate(83.48780822753906 -28.91024883673896,-231.70336102913535)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Oenothera</text><text x=\"-64.14390944261\" y=\"-186.54305761411257\" text-anchor=\"end\" transform=\"rotate(71.62417186390257 -64.14390944261,-186.54305761411257)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-55.133996983769954\" y=\"-226.89753717613078\" text-anchor=\"end\" transform=\"rotate(76.94235368208456 -55.133996983769954,-226.89753717613078)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Citrus</text><text x=\"-86.1273348393052\" 
y=\"-190.80408598740564\" text-anchor=\"end\" transform=\"rotate(66.30599004572105 -86.1273348393052,-190.80408598740564)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-80.638989163536\" y=\"-219.13375693097382\" text-anchor=\"end\" transform=\"rotate(70.39689913663005 -80.638989163536,-219.13375693097382)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Gossypium</text><text x=\"-105.26194280507448\" y=\"-194.80042593736206\" text-anchor=\"end\" transform=\"rotate(62.21508095481181 -105.26194280507448,-194.80042593736206)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-105.09272879811537\" y=\"-208.5132330423318\" text-anchor=\"end\" transform=\"rotate(63.85144459117555 -105.09272879811537,-208.5132330423318)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Arabidopsis</text><text x=\"-116.82510600286486\" y=\"-202.17355071180648\" text-anchor=\"end\" transform=\"rotate(60.57871731844807 -116.82510600286486,-202.17355071180648)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Brassica</text><text x=\"-132.13891858463649\" y=\"-111.82458731534315\" text-anchor=\"end\" transform=\"rotate(40.84008095481181 -132.13891858463649,-111.82458731534315)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-136.39429561548027\" y=\"-158.81030558363665\" text-anchor=\"end\" transform=\"rotate(49.94235368208456 -136.39429561548027,-158.81030558363665)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-139.10965713222015\" y=\"-187.53867145886514\" text-anchor=\"end\" transform=\"rotate(54.03326277299357 -139.10965713222015,-187.53867145886514)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Manihot</text><text x=\"-155.8797165836126\" y=\"-157.25392365944026\" text-anchor=\"end\" transform=\"rotate(45.85144459117555 -155.8797165836126,-157.25392365944026)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-159.58070114005477\" y=\"-170.45893882002352\" text-anchor=\"end\" transform=\"rotate(47.48780822753906 -159.58070114005477,-170.45893882002352)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Passiflora</text><text x=\"-169.05173971926533\" y=\"-161.07066554121445\" text-anchor=\"end\" transform=\"rotate(44.21508095481181 
-169.05173971926533,-161.07066554121445)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Populus</text><text x=\"-158.503989688282\" y=\"-95.75843086199694\" text-anchor=\"end\" transform=\"rotate(31.737808227539062 -158.503989688282,-95.75843086199694)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-186.310487997038\" y=\"-140.7503181605838\" text-anchor=\"end\" transform=\"rotate(37.66962640935708 -186.310487997038,-140.7503181605838)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Cucumis</text><text x=\"-178.4802598630418\" y=\"-84.00922748022951\" text-anchor=\"end\" transform=\"rotate(25.80599004572082 -178.4802598630418,-84.00922748022951)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-201.14039413859962\" y=\"-118.59507513286039\" text-anchor=\"end\" transform=\"rotate(31.124171863902575 -201.14039413859962,-118.59507513286039)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Morus</text><text x=\"-196.85705387719833\" y=\"-71.21388470516763\" text-anchor=\"end\" transform=\"rotate(20.487808227539062 -196.85705387719833,-71.21388470516763)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-213.34812763797663\" y=\"-94.89376393298788\" text-anchor=\"end\" transform=\"rotate(24.57871731844807 -213.34812763797663,-94.89376393298788)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Glycine</text><text x=\"-213.0585835107765\" y=\"-60.277048209562636\" text-anchor=\"end\" transform=\"rotate(16.396899136630054 -213.0585835107765,-60.277048209562636)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-222.77454201968243\" y=\"-69.95536739894054\" text-anchor=\"end\" transform=\"rotate(18.033262772993567 -222.77454201968243,-69.95536739894054)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Lotus</text><text x=\"-226.40488863201915\" y=\"-57.123343770502395\" text-anchor=\"end\" transform=\"rotate(14.760535500266315 -226.40488863201915,-57.123343770502395)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Medicago</text><text x=\"-134.1946738208793\" y=\"26.92125924893301\" text-anchor=\"end\" transform=\"rotate(349.25627413663005 -134.1946738208793,26.92125924893301)\" style=\"font-style: normal; font-variant: normal; font-weight: 
normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-231.44069214359524\" y=\"-30.942786236755875\" text-anchor=\"end\" transform=\"rotate(8.21508095481181 -231.44069214359524,-30.942786236755875)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Spinacia</text><text x=\"-128.59717322641444\" y=\"75.15374639851531\" text-anchor=\"end\" transform=\"rotate(330.29746731844807 -128.59717322641444,75.15374639851531)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-181.25971445352823\" y=\"37.92239107018771\" text-anchor=\"end\" transform=\"rotate(348.78326277299357 -181.25971445352823,37.92239107018771)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-209.05112507924687\" y=\"11.033772659981816\" text-anchor=\"end\" transform=\"rotate(357.57871731844807 -209.05112507924687,11.033772659981816)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-233.459312290158\" y=\"-4.358842163520528\" text-anchor=\"end\" transform=\"rotate(1.6696264093573063 -233.459312290158,-4.358842163520528)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Panax</text><text x=\"-219.7173537359568\" y=\"27.414722609300437\" text-anchor=\"end\" transform=\"rotate(353.48780822753906 -219.7173537359568,27.414722609300437)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-232.43443327101534\" y=\"22.281926083306267\" text-anchor=\"end\" transform=\"rotate(355.1241718639028 -232.43443327101534,22.281926083306267)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Anethum</text><text x=\"-230.78330820253467\" y=\"35.5149919765426\" text-anchor=\"end\" transform=\"rotate(351.85144459117555 -230.78330820253467,35.5149919765426)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Daucus</text><text x=\"-184.63528728121662\" y=\"69.44468412475135\" text-anchor=\"end\" transform=\"rotate(339.98780822753906 -184.63528728121662,69.44468412475135)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-225.23059751487608\" y=\"61.59081054095562\" text-anchor=\"end\" transform=\"rotate(345.3059900457208 -225.23059751487608,61.59081054095562)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 
0);\">Trachelium</text><text x=\"-188.26681816433234\" y=\"91.54082266556986\" text-anchor=\"end\" transform=\"rotate(334.6696264093573 -188.26681816433234,91.54082266556986)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-216.74166154280533\" y=\"86.86369869896195\" text-anchor=\"end\" transform=\"rotate(338.7605355002663 -216.74166154280533,86.86369869896195)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Scaevola</text><text x=\"-191.71511954120817\" y=\"110.78174708758084\" text-anchor=\"end\" transform=\"rotate(330.57871731844807 -191.71511954120817,110.78174708758084)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-205.42716654887255\" y=\"111.0041857035211\" text-anchor=\"end\" transform=\"rotate(332.2150809548118 -205.42716654887255,111.0041857035211)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Helianthus</text><text x=\"-198.75503922687915\" y=\"122.55074207005747\" text-anchor=\"end\" transform=\"rotate(328.94235368208456 -198.75503922687915,122.55074207005747)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Lactuca</text><text x=\"-106.09101632727847\" y=\"121.13699117683437\" text-anchor=\"end\" transform=\"rotate(311.8116718639026 -106.09101632727847,121.13699117683437)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-170.01648201288148\" y=\"141.8509019796491\" text-anchor=\"end\" transform=\"rotate(320.7605355002663 -170.01648201288148,141.8509019796491)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-183.48976940869113\" y=\"144.40829104433502\" text-anchor=\"end\" transform=\"rotate(322.39689913663005 -183.48976940869113,144.40829104433502)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Coffea</text><text x=\"-174.94641904972258\" y=\"154.6479888704631\" text-anchor=\"end\" transform=\"rotate(319.1241718639026 -174.94641904972258,154.6479888704631)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Jasminum</text><text x=\"-92.40420597670727\" y=\"146.3792842268955\" text-anchor=\"end\" transform=\"rotate(302.86280822753906 -92.40420597670727,146.3792842268955)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-156.17752970245243\" y=\"173.58234131397006\" text-anchor=\"end\" 
transform=\"rotate(312.57871731844807 -156.17752970245243,173.58234131397006)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Ipomoea</text><text x=\"-71.00694858294905\" y=\"171.02983681566585\" text-anchor=\"end\" transform=\"rotate(293.14689913663005 -71.00694858294905,171.02983681566585)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-108.88942069915863\" y=\"178.79376693781228\" text-anchor=\"end\" transform=\"rotate(301.94235368208456 -108.88942069915863,178.79376693781228)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-135.37262741276183\" y=\"190.25378247847155\" text-anchor=\"end\" transform=\"rotate(306.03326277299357 -135.37262741276183,190.25378247847155)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Nicotiana tomentosa</text><text x=\"-101.3878873066047\" y=\"196.84455505799417\" text-anchor=\"end\" transform=\"rotate(297.85144459117555 -101.3878873066047,196.84455505799417)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-112.80293589899401\" y=\"204.44497463270517\" text-anchor=\"end\" transform=\"rotate(299.48780822753906 -112.80293589899401,204.44497463270517)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Nicotiana sylvestris</text><text x=\"-100.947445545066\" y=\"210.55133159855805\" text-anchor=\"end\" transform=\"rotate(296.2150809548118 -100.947445545066,210.55133159855805)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Nicotiana tabacum</text><text x=\"-46.891500730961575\" y=\"191.60882187885323\" text-anchor=\"end\" transform=\"rotate(284.35144459117555 -46.891500730961575,191.60882187885323)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-76.28840023586532\" y=\"220.68604393901404\" text-anchor=\"end\" transform=\"rotate(289.6696264093573 -76.28840023586532,220.68604393901404)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Atropa</text><text x=\"-30.701552315199596\" y=\"207.07856412832342\" text-anchor=\"end\" transform=\"rotate(279.03326277299357 -30.701552315199596,207.07856412832342)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-50.63481896151807\" y=\"227.94377620091822\" text-anchor=\"end\" transform=\"rotate(283.1241718639026 -50.63481896151807,227.94377620091822)\" 
style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Solanum lycopersicum</text><text x=\"-16.76508026203134\" y=\"220.78544932192457\" text-anchor=\"end\" transform=\"rotate(274.94235368208456 -16.76508026203134,220.78544932192457)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"-24.321135386031976\" y=\"232.22991274496556\" text-anchor=\"end\" transform=\"rotate(276.57871731844807 -24.321135386031976,232.22991274496556)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Solanum bulbocastanum</text><text x=\"-11.023740576444446\" y=\"233.23963459005688\" text-anchor=\"end\" transform=\"rotate(273.3059900457208 -11.023740576444446,233.23963459005688)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Solanum tuberosum</text><text x=\"26.67457257572557\" y=\"121.90520874960443\" text-anchor=\"start\" transform=\"rotate(77.05741050026631 26.67457257572557,121.90520874960443)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"16.50871728255856\" y=\"220.8047662577985\" text-anchor=\"start\" transform=\"rotate(85.12417186390269 16.50871728255856,220.8047662577985)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"10.752921042596652\" y=\"233.25227692147337\" text-anchor=\"start\" transform=\"rotate(86.76053550026631 10.752921042596652,233.25227692147337)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Acorus americanus</text><text x=\"24.051479266365757\" y=\"232.25799522319915\" text-anchor=\"start\" transform=\"rotate(83.48780822753906 24.051479266365757,232.25799522319915)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Acorus calamus</text><text x=\"47.729441533525886\" y=\"128.27651808627402\" text-anchor=\"start\" transform=\"rotate(68.99064913663005 47.729441533525886,128.27651808627402)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"50.3701216930918\" y=\"228.002414111393\" text-anchor=\"start\" transform=\"rotate(76.94235368208444 50.3701216930918,228.002414111393)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Dioscorea</text><text x=\"70.75390124936091\" y=\"131.06946256681027\" text-anchor=\"start\" transform=\"rotate(61.03894459117544 70.75390124936091,131.06946256681027)\" style=\"font-style: normal; font-variant: normal; font-weight: 
normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"78.04788959658657\" y=\"207.20957863476173\" text-anchor=\"start\" transform=\"rotate(68.76053550026631 78.04788959658657,207.20957863476173)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"76.03211255237481\" y=\"220.7744728468919\" text-anchor=\"start\" transform=\"rotate(70.39689913663005 76.03211255237481,220.7744728468919)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Phalaenopsis</text><text x=\"88.51186420220913\" y=\"216.07382973291723\" text-anchor=\"start\" transform=\"rotate(67.12417186390269 88.51186420220913,216.07382973291723)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Yucca</text><text x=\"94.83670749423939\" y=\"130.13636420456578\" text-anchor=\"start\" transform=\"rotate(53.31735368208456 94.83670749423939,130.13636420456578)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"112.56548093824993\" y=\"204.5758111340158\" text-anchor=\"start\" transform=\"rotate(60.57871731844807 112.56548093824993,204.5758111340158)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Elaeis</text><text x=\"118.81550000656\" y=\"125.89006748411543\" text-anchor=\"start\" transform=\"rotate(46.05599004572082 118.81550000656,125.89006748411543)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"135.1516344835439\" y=\"190.41083397860154\" text-anchor=\"start\" transform=\"rotate(54.03326277299357 135.1516344835439,190.41083397860154)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Musa</text><text x=\"144.5663875775445\" y=\"115.73137608718753\" text-anchor=\"start\" transform=\"rotate(38.07871731844807 144.5663875775445,115.73137608718753)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"155.9758797855584\" y=\"173.76356040643574\" text-anchor=\"start\" transform=\"rotate(47.48780822753906 155.9758797855584,173.76356040643574)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Typha</text><text x=\"172.07829058980536\" y=\"96.4459194071703\" text-anchor=\"start\" transform=\"rotate(28.669626409357306 172.07829058980536,96.4459194071703)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text 
x=\"166.19019756547704\" y=\"127.29860670548914\" text-anchor=\"start\" transform=\"rotate(36.85144459117532 166.19019756547704,127.29860670548914)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"174.76674096020352\" y=\"154.85101308725473\" text-anchor=\"start\" transform=\"rotate(40.942353682084445 174.76674096020352,154.85101308725473)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Zea</text><text x=\"184.93680351398876\" y=\"121.76067203536937\" text-anchor=\"start\" transform=\"rotate(32.760535500266315 184.93680351398876,121.76067203536937)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"191.27925039514813\" y=\"133.91974599837863\" text-anchor=\"start\" transform=\"rotate(34.39689913662994 191.27925039514813,133.91974599837863)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Saccharum</text><text x=\"198.61261286675813\" y=\"122.7814318626366\" text-anchor=\"start\" transform=\"rotate(31.124171863902802 198.61261286675813,122.7814318626366)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Sorghum</text><text x=\"195.32248879716917\" y=\"75.32092940272854\" text-anchor=\"start\" transform=\"rotate(20.487808227539062 195.32248879716917,75.32092940272854)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"211.31403181937804\" y=\"99.34097823274594\" text-anchor=\"start\" transform=\"rotate(24.57871731844807 211.31403181937804,99.34097823274594)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Oryza</text><text x=\"211.74950898965247\" y=\"64.72579077243903\" text-anchor=\"start\" transform=\"rotate(16.396899136630054 211.74950898965247,64.72579077243903)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\"></text><text x=\"221.26064920407063\" y=\"74.60546302914558\" text-anchor=\"start\" transform=\"rotate(18.033262772993567 221.26064920407063,74.60546302914558)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Hordeum</text><text x=\"225.15893324818325\" y=\"61.852281918617805\" text-anchor=\"start\" transform=\"rotate(14.760535500266315 225.15893324818325,61.852281918617805)\" style=\"font-style: normal; font-variant: normal; font-weight: normal; font-stretch: normal; font-size: 12px; line-height: normal; font-family: sans-serif; fill: rgb(0, 0, 0);\">Triticum</text></g></g></g></g></g></g></g></g></g></g></svg>", "_____no_output_____" ], [ "%%javascript \n// Load JS support for 
Tree Illustrator widgets \n$.getScript('https://rawgit.com/OpenTreeOfLife/tree-illustrator/master/stylist/ipynb-tree-illustrator.js');\n// https://rawgit.com/OpenTreeOfLife/tree-illustrator/83999e2f924abac059042452e60c62152c262950/stylist/ipynb-tree-illustrator.js');\nvar ti = new IPythonTreeIllustrator.IllustratorWidget(this);\n\n", "_____no_output_____" ], [ "# define some variables in Python\ntest_int = 23\ntest_list = ['a','b','c']\ntest_tuple = ('A','B','C')\ntest_dict = {'a': 'Aa', 'b': 'Bb', 'c': 'Cc'}\ntest_dict", "_____no_output_____" ], [ "%%javascript\ntest_int", "_____no_output_____" ], [ "%%javascript\nIPython.kernel\n", "_____no_output_____" ] ] ]
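The two `%%javascript` probes above show that Python names such as `test_int` are not automatically visible from the browser side. A minimal sketch of one way to bridge that gap, using only stock IPython display machinery (this cell is an editorial addition, not part of the stored notebook; the log message text is made up for illustration):

```python
# Hand a Python value to the browser by embedding it in generated JS source.
# json.dumps guarantees the value is serialized as a valid JavaScript literal.
import json
from IPython.display import Javascript, display

test_int = 23  # mirrors the scratch cell above

display(Javascript("console.log('test_int from Python:', %s);" % json.dumps(test_int)))
```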
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec5bcee5deac00e20324cb3f927b7f76e26d6696
5,730
ipynb
Jupyter Notebook
HW4.ipynb
avadijstelbloem/HW4
a864b8e62911552b08af963fe679e21c2df1e14b
[ "MIT" ]
null
null
null
HW4.ipynb
avadijstelbloem/HW4
a864b8e62911552b08af963fe679e21c2df1e14b
[ "MIT" ]
null
null
null
HW4.ipynb
avadijstelbloem/HW4
a864b8e62911552b08af963fe679e21c2df1e14b
[ "MIT" ]
null
null
null
30.31746
425
0.521815
[ [ [ "<a href=\"https://colab.research.google.com/github/avadijstelbloem/HW4/blob/main/HW4.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import ee\n\n# Trigger the authentication flow.\nee.Authenticate()\n\n# Initialize the library.\nee.Initialize()", "To authorize access needed by Earth Engine, open the following URL in a web browser and follow the instructions. If the web browser does not start automatically, please manually browse the URL below.\n\n https://accounts.google.com/o/oauth2/auth?client_id=517222506229-vsmmajv00ul0bs7p89v5m89qs8eb9359.apps.googleusercontent.com&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fearthengine+https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&code_challenge=13dGnL-Ybm71bEWwGg5HtnJH0_4GELNBcTMQ5IMBkCw&code_challenge_method=S256\n\nThe authorization workflow will generate a code, which you should paste in the box below. \nEnter verification code: 4/1AX4XfWj7B8Zc_2TD05EERoCzUXTipB9OUkGWZjeRwBpnG4s10KKBLVAlUXw\n\nSuccessfully saved authorization token.\n" ], [ "# Import the MODIS land cover collection.\nlc = ee.ImageCollection('MODIS/006/MCD12Q1')\n\n# Import the MODIS land surface temperature collection.\nlst = ee.ImageCollection('MODIS/006/MOD11A1')\n\n# Import the USGS ground elevation image.\nelv = ee.Image('USGS/SRTMGL1_003')", "_____no_output_____" ], [ "# Initial date of interest (inclusive).\ni_date = '2019-01-01'\n\n# Final date of interest (exclusive).\nf_date = '2022-01-01'\n\n# Selection of appropriate bands and dates for LST.\nlst = lst.select('LST_Day_1km', 'QC_Day').filterDate(i_date, f_date)", "_____no_output_____" ], [ "# Antwerp, Belgium.\nu_lon = 4.4051\nu_lat = 51.2213\nu_poi = ee.Geometry.Point(u_lon, u_lat)", "_____no_output_____" ], [ "# Define a region of interest with a buffer zone of 1000 km around Antwerp.\nroi = u_poi.buffer(1e6)", "_____no_output_____" ], [ "# Reduce the LST collection by mean.\nlst_img = lst.mean()\n\n# Adjust for scale factor.\nlst_img = lst_img.select('LST_Day_1km').multiply(0.02)\n\n# Convert Kelvin to Celsius.\nlst_img = lst_img.select('LST_Day_1km').add(-273.15)", "_____no_output_____" ], [ "from IPython.display import Image\n\n", "_____no_output_____" ], [ "# Create a buffer zone of 10 km around Antwerp.\nantwerp = u_poi.buffer(10000) # meters\n\nurl = elv_img.getThumbUrl({\n 'min': 0, 'max': 25, 'region': antwerp, 'dimensions': 512,\n 'palette': ['pink', 'green', 'blue', 'red', 'yellow']})\nImage(url=url)\nprint(url)", "https://earthengine.googleapis.com/v1alpha/projects/earthengine-legacy/thumbnails/71049b3e2e854f4390653369948f93ad-9bb75d4b24880bf2aba5a78fb74160d5:getPixels\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5bde4cac67aca52debcd92f7fb6d5995ea0ba3
7,676
ipynb
Jupyter Notebook
docs/tutorials/point_source.ipynb
AstroJacobLi/scarlet
134fac69465c2eea46b6909c6f401e1b17cdd85b
[ "MIT" ]
23
2019-08-09T19:10:06.000Z
2022-03-03T11:47:21.000Z
docs/tutorials/point_source.ipynb
AstroJacobLi/scarlet
134fac69465c2eea46b6909c6f401e1b17cdd85b
[ "MIT" ]
131
2019-08-16T17:38:16.000Z
2022-03-29T16:01:06.000Z
docs/tutorials/point_source.ipynb
AstroJacobLi/scarlet
134fac69465c2eea46b6909c6f401e1b17cdd85b
[ "MIT" ]
11
2018-10-30T22:55:53.000Z
2019-06-25T16:06:46.000Z
32.252101
437
0.543252
[ [ [ "# Point Source Tutorial\n\nThis is a quick demonstration of how to model both extended objects and point sources in the same scence. This may or may not be robust enough for crowded field photometry. We're curious about reports.\n\nFirst we load a simulated image cube, for which we know which sources are galaxies and which ones are stars, so we can use the appropriate source type. In practice, this would have to be guessed and potentially revised after the first attempt.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\n# use a better colormap and don't interpolate the pixels\nmatplotlib.rc('image', cmap='inferno', interpolation='none', origin='lower')\n\nimport numpy as np\nimport scarlet\nfrom scarlet.display import AsinhMapping", "_____no_output_____" ], [ "# Use this to point to the location of the data on your system\n# Load the sample images\ndata = np.load(\"../../data/psf_unmatched_sim.npz\")\nimages = data[\"images\"]\nfilters = data[\"filters\"]\npsf = data[\"psfs\"]\ncatalog = data[\"catalog\"]\n# Estimate of the background noise level\nweights = np.ones_like(images) / 2**2\n\n# display psfs\npnorm = AsinhMapping(minimum=psf.min(), stretch=psf.max()/20, Q=20)\nprgb = scarlet.display.img_to_rgb(psf, norm=pnorm)\nplt.imshow(prgb)\nplt.show()\n\n# Use Asinh scaling for the images\nnorm = AsinhMapping(minimum=images.min(), stretch=10, Q=20)\n# Map i,r,g -> RGB\n# Convert the image to an RGB image\nimg_rgb = scarlet.display.img_to_rgb(images, norm=norm)\nplt.imshow(img_rgb)\nfor src in catalog:\n if src[\"is_star\"]:\n plt.plot(src[\"x\"], src[\"y\"], \"rx\", mew=2)\n else:\n plt.plot(src[\"x\"], src[\"y\"], \"bx\", mew=2)\nplt.show()", "_____no_output_____" ] ], [ [ "## Create Frame and Observation", "_____no_output_____" ] ], [ [ "model_psf = scarlet.GaussianPSF(sigma=0.9)\nmodel_frame = scarlet.Frame(images.shape, psf=model_psf, channels=filters)\n\nobservation = scarlet.Observation(images, \n psf=scarlet.ImagePSF(psf), \n weights=weights,\n channels=filters)\nobservation = observation.match(model_frame)", "_____no_output_____" ] ], [ [ "## Define Sources\n\n**You have to define what sources you want to fit.** \nSince we know which sources are stars (it's stored in the catalog), we can pick the proper source type. This is the situations e.g. *Gaia*-confirmed stars. 
In deeper observations or more crowded fields, additional logic will be needed (such as color priors on stars vs galaxies) to perform the star-galaxy separation.", "_____no_output_____" ] ], [ [ "# Initalize the sources\nsources = []\nfor idx in np.unique(catalog[\"index\"]):\n src = catalog[catalog[\"index\"]==idx][0]\n if src[\"is_star\"]:\n new_source = scarlet.PointSource(\n model_frame, \n (src[\"y\"], src[\"x\"]),\n observation\n )\n else:\n new_source = scarlet.ExtendedSource(\n model_frame,\n (src[\"y\"], src[\"x\"]),\n observation,\n )\n sources.append(new_source)", "_____no_output_____" ] ], [ [ "## Create Blend", "_____no_output_____" ] ], [ [ "# Initialize the Blend object, which later fits the model\nblend = scarlet.Blend(sources, observation)\n\n# Display the initial model\nmodel = blend.get_model()\nmodel_ = observation.render(model)\nimg_rgb = scarlet.display.img_to_rgb(model_, norm=norm)\nplt.imshow(img_rgb)\nfor src in catalog:\n if src[\"is_star\"]:\n plt.plot(src[\"x\"], src[\"y\"], \"rx\", mew=2)\n else:\n plt.plot(src[\"x\"], src[\"y\"], \"bx\", mew=2)\nplt.show()", "_____no_output_____" ] ], [ [ "Our three stars (the red x's) are initialized to match their peak value with the peak of the image while the extended sources are initialized in the usual way. Note that the star centers may be a bit off, but this will be corrected by recentering them during the fit.\n\n## Fit Model and Display Results", "_____no_output_____" ] ], [ [ "%time it, logL = blend.fit(200, e_rel=1e-4)\nprint(f\"scarlet ran for {it} iterations to logL = {logL}\")\nscarlet.display.show_likelihood(blend)\nplt.show()", "_____no_output_____" ], [ "scarlet.display.show_scene(sources, \n norm=norm, \n observation=observation, \n show_rendered=True, \n show_observed=True, \n show_residual=True)\nplt.show()", "_____no_output_____" ], [ "scarlet.display.show_sources(sources, \n norm=norm, \n observation=observation,\n show_rendered=True, \n show_observed=True,\n add_boxes=True\n )\nplt.show()", "_____no_output_____" ] ], [ [ "We can see that the model overall performs well, especially in fitting the amplitude and locations of the point sources. It is worth noting that the stellar residuals have red rings and blue cores, which suggest that the PSF model of the observation is not quite correct across all bands. The extended sources are less convincing. The colors of the two galaxies are not very different, so these two sources remain poorly separated.", "_____no_output_____" ] ] ]
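Since the tutorial notes that the star/galaxy split usually has to be guessed and revised, it can help to keep that guess in one place. The sketch below is an illustrative addition, not part of the scarlet tutorial; `make_source` and `star_guess` are hypothetical names, and only the source types shown above are used.

```python
def make_source(model_frame, center, observation, star_guess=False):
    """Create a scarlet source from a (possibly wrong) star/galaxy guess."""
    if star_guess:
        return scarlet.PointSource(model_frame, center, observation)
    # Fall back to the more flexible model when in doubt.
    return scarlet.ExtendedSource(model_frame, center, observation)

# Rebuild the source list from the catalog flag, mirroring the loop above.
sources = []
for idx in np.unique(catalog["index"]):
    src = catalog[catalog["index"] == idx][0]
    sources.append(make_source(model_frame, (src["y"], src["x"]), observation,
                               star_guess=bool(src["is_star"])))
```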
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
ec5bf4af9741036b5e4005a99ae044f9bd24f4a3
29,176
ipynb
Jupyter Notebook
scripts/gnomad_bigquery_notebooks/get_gnomad_data.ipynb
andrewcboardman/gnomad_lof
876798a64525eaa9c74618884fe1b318e0cf3bd1
[ "MIT" ]
null
null
null
scripts/gnomad_bigquery_notebooks/get_gnomad_data.ipynb
andrewcboardman/gnomad_lof
876798a64525eaa9c74618884fe1b318e0cf3bd1
[ "MIT" ]
null
null
null
scripts/gnomad_bigquery_notebooks/get_gnomad_data.ipynb
andrewcboardman/gnomad_lof
876798a64525eaa9c74618884fe1b318e0cf3bd1
[ "MIT" ]
null
null
null
42.592701
110
0.667295
[ [ [ "import requests\nimport json\nimport pandas as pd\nimport time", "_____no_output_____" ], [ "def request_gene_variants(gene_id, gnomad_version='gnomad_r2_1',reference_genome='GRCh37'):\n QUERY = \"\"\"\n query ($geneId: String!, $refGenome: ReferenceGenomeId!, $dataSet:DatasetId!) {\n gene(gene_id: $geneId, reference_genome: $refGenome) {\n gencode_symbol\n gene_id\n gnomad_constraint {\n oe_lof\n oe_lof_lower\n oe_lof_upper\n oe_mis\n oe_mis_lower\n oe_mis_upper\n }\n variants(dataset: $dataSet) {\n variant_id\n chrom\n pos\n ref\n alt\n rsids\n exome {\n ac\n an\n }\n genome {\n ac\n an\n }\n transcript_consequence {\n hgvsp\n major_consequence\n is_canonical\n transcript_id\n polyphen_prediction\n sift_prediction\n lof\n lof_filter\n lof_flags\n }\n lof_curation {\n verdict\n flags\n }\n flags\n }\n clinvar_variants {\n hgvsp\n major_consequence\n review_status\n clinical_significance\n }\n }\n }\n \"\"\"\n VARIABLES = {\n \"geneId\": gene_id,\n \"refGenome\": reference_genome,\n \"dataSet\": gnomad_version\n }\n\n response = requests.post(\n \"https://gnomad.broadinstitute.org/api\",\n data=json.dumps({\n \"query\": QUERY,\n \"variables\": VARIABLES\n }),\n headers={\n \"Content-Type\": \"application/json\",\n },\n )\n \n return response", "_____no_output_____" ], [ "gpcr_targets = pd.read_csv('../data/Ensembl_gene_sequences/Ensembl_Grch37_gpcr_genome_locations.csv')\n\ntarget_gene_symbols = gpcr_targets['HGNC symbol']\ntarget_gene_ids = gpcr_targets['Ensembl id GRCh37']\n\ngnomad_variants_r2_1_GRCh37 = []\nfor gene_symbol, gene_id in zip(target_gene_symbols,target_gene_ids):\n print('Fetching gnomad annotations for {}'.format(gene_id))\n response = request_gene_variants(gene_id)\n while response.status_code != 200:\n print()\n time.sleep(10) # Limit request rate to avoid rate limit\n response = request_gene_variants(gene_id)\n gnomad_variants_r2_1_GRCh37.append(response.json())", "Fetching gnomad annotations for ENSG00000213088\nFetching gnomad annotations for ENSG00000144648\nFetching gnomad annotations for ENSG00000144476\nFetching gnomad annotations for ENSG00000129048\nFetching gnomad annotations for ENSG00000078549\nFetching gnomad annotations for ENSG00000197177\nFetching gnomad annotations for ENSG00000020181\nFetching gnomad annotations for ENSG00000152990\nFetching gnomad annotations for ENSG00000181790\nFetching gnomad annotations for ENSG00000121753\nFetching gnomad annotations for ENSG00000135298\nFetching gnomad annotations for ENSG00000111452\nFetching gnomad annotations for ENSG00000180264\nFetching gnomad annotations for ENSG00000174837\nFetching gnomad annotations for ENSG00000127507\n\n\n\n\n\nFetching gnomad annotations for ENSG00000131355\nFetching gnomad annotations for ENSG00000123146\nFetching gnomad annotations for ENSG00000153292\nFetching gnomad annotations for ENSG00000164393\nFetching gnomad annotations for ENSG00000173567\nFetching gnomad annotations for ENSG00000153294\nFetching gnomad annotations for ENSG00000069122\nFetching gnomad annotations for ENSG00000205336\nFetching gnomad annotations for ENSG00000173698\nFetching gnomad annotations for ENSG00000182885\nFetching gnomad annotations for ENSG00000156920\nFetching gnomad annotations for ENSG00000159618\nFetching gnomad annotations for ENSG00000112414\nFetching gnomad annotations for ENSG00000144820\n\n\n\n\n\nFetching gnomad annotations for ENSG00000072071\nFetching gnomad annotations for ENSG00000117114\nFetching gnomad annotations for ENSG00000150471\nFetching gnomad annotations for 
ENSG00000162618\nFetching gnomad annotations for ENSG00000164199\nFetching gnomad annotations for ENSG00000163485\nFetching gnomad annotations for ENSG00000128271\nFetching gnomad annotations for ENSG00000170425\nFetching gnomad annotations for ENSG00000121933\nFetching gnomad annotations for ENSG00000120907\nFetching gnomad annotations for ENSG00000170214\nFetching gnomad annotations for ENSG00000171873\nFetching gnomad annotations for ENSG00000150594\nFetching gnomad annotations for ENSG00000222040\n\n\n\n\n\nFetching gnomad annotations for ENSG00000184160\nFetching gnomad annotations for ENSG00000043591\nFetching gnomad annotations for ENSG00000169252\nFetching gnomad annotations for ENSG00000188778\nFetching gnomad annotations for ENSG00000144891\nFetching gnomad annotations for ENSG00000180772\nFetching gnomad annotations for ENSG00000134817\nFetching gnomad annotations for ENSG00000166148\nFetching gnomad annotations for ENSG00000198049\nFetching gnomad annotations for ENSG00000126895\nFetching gnomad annotations for ENSG00000100739\nFetching gnomad annotations for ENSG00000168398\nFetching gnomad annotations for ENSG00000102239\nFetching gnomad annotations for ENSG00000171860\n\n\n\n\n\nFetching gnomad annotations for ENSG00000197405\nFetching gnomad annotations for ENSG00000134830\nFetching gnomad annotations for ENSG00000004948\nFetching gnomad annotations for ENSG00000064989\nFetching gnomad annotations for ENSG00000036828\nFetching gnomad annotations for ENSG00000163394\nFetching gnomad annotations for ENSG00000110148\nFetching gnomad annotations for ENSG00000163823\nFetching gnomad annotations for ENSG00000184451\nFetching gnomad annotations for ENSG00000121807\nFetching gnomad annotations for ENSG00000183625\nFetching gnomad annotations for ENSG00000183813\nFetching gnomad annotations for ENSG00000160791\nFetching gnomad annotations for ENSG00000112486\n\n\n\n\n\n\nFetching gnomad annotations for ENSG00000126353\nFetching gnomad annotations for ENSG00000179934\nFetching gnomad annotations for ENSG00000173585\nFetching gnomad annotations for ENSG00000121797\nFetching gnomad annotations for ENSG00000075275\nFetching gnomad annotations for ENSG00000143126\nFetching gnomad annotations for ENSG00000008300\nFetching gnomad annotations for ENSG00000168539\nFetching gnomad annotations for ENSG00000181072\nFetching gnomad annotations for ENSG00000133019\nFetching gnomad annotations for ENSG00000180720\nFetching gnomad annotations for ENSG00000184984\nFetching gnomad annotations for ENSG00000174600\nFetching gnomad annotations for ENSG00000118432\n\n\n\n\nFetching gnomad annotations for ENSG00000188822\nFetching gnomad annotations for ENSG00000120088\nFetching gnomad annotations for ENSG00000106113\nFetching gnomad annotations for ENSG00000168329\nFetching gnomad annotations for ENSG00000163464\nFetching gnomad annotations for ENSG00000180871\nFetching gnomad annotations for ENSG00000186810\nFetching gnomad annotations for ENSG00000121966\nFetching gnomad annotations for ENSG00000160683\nFetching gnomad annotations for ENSG00000172215\nFetching gnomad annotations for ENSG00000173198\nFetching gnomad annotations for ENSG00000152207\nFetching gnomad annotations for ENSG00000184845\nFetching gnomad annotations for ENSG00000149295\n\n\n\n\n\nFetching gnomad annotations for ENSG00000151577\nFetching gnomad annotations for ENSG00000069696\nFetching gnomad annotations for ENSG00000169676\nFetching gnomad annotations for ENSG00000151617\nFetching gnomad annotations for ENSG00000136160\nFetching 
gnomad annotations for ENSG00000181104\nFetching gnomad annotations for ENSG00000164251\nFetching gnomad annotations for ENSG00000164220\nFetching gnomad annotations for ENSG00000127533\nFetching gnomad annotations for ENSG00000126266\nFetching gnomad annotations for ENSG00000126262\nFetching gnomad annotations for ENSG00000185897\nFetching gnomad annotations for ENSG00000186188\nFetching gnomad annotations for ENSG00000171051\n\n\n\n\n\nFetching gnomad annotations for ENSG00000171049\nFetching gnomad annotations for ENSG00000187474\nFetching gnomad annotations for ENSG00000170820\nFetching gnomad annotations for ENSG00000157240\nFetching gnomad annotations for ENSG00000111432\nFetching gnomad annotations for ENSG00000180340\nFetching gnomad annotations for ENSG00000104290\nFetching gnomad annotations for ENSG00000174804\nFetching gnomad annotations for ENSG00000163251\nFetching gnomad annotations for ENSG00000164930\nFetching gnomad annotations for ENSG00000155760\nFetching gnomad annotations for ENSG00000177283\nFetching gnomad annotations for ENSG00000188763\nFetching gnomad annotations for ENSG00000204681\n\n\n\n\n\nFetching gnomad annotations for ENSG00000136928\nFetching gnomad annotations for ENSG00000166573\nFetching gnomad annotations for ENSG00000182687\nFetching gnomad annotations for ENSG00000128310\nFetching gnomad annotations for ENSG00000215644\nFetching gnomad annotations for ENSG00000106128\nFetching gnomad annotations for ENSG00000121853\nFetching gnomad annotations for ENSG00000010310\nFetching gnomad annotations for ENSG00000112164\nFetching gnomad annotations for ENSG00000065325\nFetching gnomad annotations for ENSG00000109163\nFetching gnomad annotations for ENSG00000179921\nFetching gnomad annotations for ENSG00000164850\nFetching gnomad annotations for ENSG00000183671\n\n\n\n\n\nFetching gnomad annotations for ENSG00000165370\nFetching gnomad annotations for ENSG00000148358\nFetching gnomad annotations for ENSG00000147262\nFetching gnomad annotations for ENSG00000132975\nFetching gnomad annotations for ENSG00000183484\nFetching gnomad annotations for ENSG00000181619\nFetching gnomad annotations for ENSG00000173264\nFetching gnomad annotations for ENSG00000180269\nFetching gnomad annotations for ENSG00000187037\nFetching gnomad annotations for ENSG00000257008\nFetching gnomad annotations for ENSG00000101850\nFetching gnomad annotations for ENSG00000164849\nFetching gnomad annotations for ENSG00000173302\nFetching gnomad annotations for ENSG00000174948\n\n\n\n\nFetching gnomad annotations for ENSG00000154165\nFetching gnomad annotations for ENSG00000178015\nFetching gnomad annotations for ENSG00000173250\nFetching gnomad annotations for ENSG00000175514\nFetching gnomad annotations for ENSG00000158292\nFetching gnomad annotations for ENSG00000175697\nFetching gnomad annotations for ENSG00000180758\nFetching gnomad annotations for ENSG00000151025\nFetching gnomad annotations for ENSG00000173890\nFetching gnomad annotations for ENSG00000143147\nFetching gnomad annotations for ENSG00000250510\nFetching gnomad annotations for ENSG00000144230\nFetching gnomad annotations for ENSG00000174946\nFetching gnomad annotations for ENSG00000184194\n\n\n\n\nFetching gnomad annotations for ENSG00000147138\nFetching gnomad annotations for ENSG00000166073\nFetching gnomad annotations for ENSG00000188888\nFetching gnomad annotations for ENSG00000125245\nFetching gnomad annotations for ENSG00000166856\nFetching gnomad annotations for ENSG00000169508\nFetching gnomad annotations for 
ENSG00000183150\nFetching gnomad annotations for ENSG00000204882\nFetching gnomad annotations for ENSG00000188394\nFetching gnomad annotations for ENSG00000172209\nFetching gnomad annotations for ENSG00000170128\nFetching gnomad annotations for ENSG00000154478\nFetching gnomad annotations for ENSG00000170837\nFetching gnomad annotations for ENSG00000181773\n\n\n\n\n\nFetching gnomad annotations for ENSG00000120436\nFetching gnomad annotations for ENSG00000142511\nFetching gnomad annotations for ENSG00000214943\nFetching gnomad annotations for ENSG00000171659\nFetching gnomad annotations for ENSG00000178623\nFetching gnomad annotations for ENSG00000170775\nFetching gnomad annotations for ENSG00000170075\nFetching gnomad annotations for ENSG00000183840\nFetching gnomad annotations for ENSG00000177464\nFetching gnomad annotations for ENSG00000126251\nFetching gnomad annotations for ENSG00000135973\nFetching gnomad annotations for ENSG00000102195\nFetching gnomad annotations for ENSG00000203737\nFetching gnomad annotations for ENSG00000135898\n\n\n\n\n\nFetching gnomad annotations for ENSG00000146360\nFetching gnomad annotations for ENSG00000156097\nFetching gnomad annotations for ENSG00000180929\nFetching gnomad annotations for ENSG00000112218\nFetching gnomad annotations for ENSG00000140030\nFetching gnomad annotations for ENSG00000119714\nFetching gnomad annotations for ENSG00000119737\nFetching gnomad annotations for ENSG00000155269\nFetching gnomad annotations for ENSG00000171657\nFetching gnomad annotations for ENSG00000123901\nFetching gnomad annotations for ENSG00000139572\nFetching gnomad annotations for ENSG00000164604\nFetching gnomad annotations for ENSG00000138271\nFetching gnomad annotations for ENSG00000181656\n\n\n\n\n\n\nFetching gnomad annotations for ENSG00000013588\nFetching gnomad annotations for ENSG00000167191\nFetching gnomad annotations for ENSG00000170412\nFetching gnomad annotations for ENSG00000111291\nFetching gnomad annotations for ENSG00000173612\nFetching gnomad annotations for ENSG00000152822\nFetching gnomad annotations for ENSG00000164082\nFetching gnomad annotations for ENSG00000198822\nFetching gnomad annotations for ENSG00000124493\nFetching gnomad annotations for ENSG00000168959\nFetching gnomad annotations for ENSG00000113262\nFetching gnomad annotations for ENSG00000196277\nFetching gnomad annotations for ENSG00000179603\nFetching gnomad annotations for ENSG00000126010\n\n\n\n\n\nFetching gnomad annotations for ENSG00000196917\nFetching gnomad annotations for ENSG00000182782\nFetching gnomad annotations for ENSG00000255398\nFetching gnomad annotations for ENSG00000121764\nFetching gnomad annotations for ENSG00000137252\nFetching gnomad annotations for ENSG00000196639\nFetching gnomad annotations for ENSG00000113749\nFetching gnomad annotations for ENSG00000101180\nFetching gnomad annotations for ENSG00000134489\nFetching gnomad annotations for ENSG00000178394\nFetching gnomad annotations for ENSG00000135312\nFetching gnomad annotations for ENSG00000179546\nFetching gnomad annotations for ENSG00000168830\nFetching gnomad annotations for ENSG00000179097\n\n\n\n\n\nFetching gnomad annotations for ENSG00000102468\nFetching gnomad annotations for ENSG00000135914\nFetching gnomad annotations for ENSG00000147246\nFetching gnomad annotations for ENSG00000164270\nFetching gnomad annotations for ENSG00000157219\nFetching gnomad annotations for ENSG00000158748\nFetching gnomad annotations for ENSG00000148680\nFetching gnomad annotations for 
ENSG00000116014\nFetching gnomad annotations for ENSG00000205213\nFetching gnomad annotations for ENSG00000139292\nFetching gnomad annotations for ENSG00000133067\nFetching gnomad annotations for ENSG00000138039\nFetching gnomad annotations for ENSG00000198121\nFetching gnomad annotations for ENSG00000064547\n\n\n\n\nFetching gnomad annotations for ENSG00000171517\nFetching gnomad annotations for ENSG00000147145\nFetching gnomad annotations for ENSG00000184574\nFetching gnomad annotations for ENSG00000139679\nFetching gnomad annotations for ENSG00000213903\nFetching gnomad annotations for ENSG00000213906\nFetching gnomad annotations for ENSG00000130368\nFetching gnomad annotations for ENSG00000204687\nFetching gnomad annotations for ENSG00000258839\nFetching gnomad annotations for ENSG00000185231\nFetching gnomad annotations for ENSG00000124089\nFetching gnomad annotations for ENSG00000166603\nFetching gnomad annotations for ENSG00000176136\nFetching gnomad annotations for ENSG00000128285\n\n\n\n\n\nFetching gnomad annotations for ENSG00000152034\nFetching gnomad annotations for ENSG00000102539\nFetching gnomad annotations for ENSG00000172938\nFetching gnomad annotations for ENSG00000184350\nFetching gnomad annotations for ENSG00000172935\nFetching gnomad annotations for ENSG00000182170\nFetching gnomad annotations for ENSG00000170255\nFetching gnomad annotations for ENSG00000183695\nFetching gnomad annotations for ENSG00000179826\nFetching gnomad annotations for ENSG00000179817\nFetching gnomad annotations for ENSG00000168412\nFetching gnomad annotations for ENSG00000134640\nFetching gnomad annotations for ENSG00000135577\nFetching gnomad annotations for ENSG00000171596\nFetching gnomad annotations for ENSG00000132911\n\n\n\n\n\nFetching gnomad annotations for ENSG00000183729\nFetching gnomad annotations for ENSG00000125522\nFetching gnomad annotations for ENSG00000148734\nFetching gnomad annotations for ENSG00000056291\nFetching gnomad annotations for ENSG00000187258\nFetching gnomad annotations for ENSG00000164128\nFetching gnomad annotations for ENSG00000185149\nFetching gnomad annotations for ENSG00000204174\nFetching gnomad annotations for ENSG00000164129\nFetching gnomad annotations for ENSG00000101188\nFetching gnomad annotations for ENSG00000169006\nFetching gnomad annotations for ENSG00000102076\nFetching gnomad annotations for ENSG00000147380\nFetching gnomad annotations for ENSG00000128617\n\n\n\n\n\n\nFetching gnomad annotations for ENSG00000054277\nFetching gnomad annotations for ENSG00000122375\nFetching gnomad annotations for ENSG00000124818\nFetching gnomad annotations for ENSG00000116329\nFetching gnomad annotations for ENSG00000082556\nFetching gnomad annotations for ENSG00000125510\nFetching gnomad annotations for ENSG00000112038\nFetching gnomad annotations for ENSG00000162881\nFetching gnomad annotations for ENSG00000165621\nFetching gnomad annotations for ENSG00000180914\nFetching gnomad annotations for ENSG00000169860\nFetching gnomad annotations for ENSG00000078589\nFetching gnomad annotations for ENSG00000244165\nFetching gnomad annotations for ENSG00000169313\n\n\n\n\n\nFetching gnomad annotations for ENSG00000181631\nFetching gnomad annotations for ENSG00000174944\nFetching gnomad annotations for ENSG00000175591\nFetching gnomad annotations for ENSG00000186912\nFetching gnomad annotations for ENSG00000171631\nFetching gnomad annotations for ENSG00000182162\nFetching gnomad annotations for ENSG00000119973\nFetching gnomad annotations for ENSG00000169618\nFetching 
gnomad annotations for ENSG00000101292\nFetching gnomad annotations for ENSG00000169403\nFetching gnomad annotations for ENSG00000168229\nFetching gnomad annotations for ENSG00000183134\nFetching gnomad annotations for ENSG00000160951\nFetching gnomad annotations for ENSG00000125384\n\n\n\n\nFetching gnomad annotations for ENSG00000050628\nFetching gnomad annotations for ENSG00000171522\nFetching gnomad annotations for ENSG00000122420\nFetching gnomad annotations for ENSG00000160013\nFetching gnomad annotations for ENSG00000160801\nFetching gnomad annotations for ENSG00000144407\nFetching gnomad annotations for ENSG00000186867\nFetching gnomad annotations for ENSG00000163914\nFetching gnomad annotations for ENSG00000171509\nFetching gnomad annotations for ENSG00000133105\nFetching gnomad annotations for ENSG00000182631\nFetching gnomad annotations for ENSG00000173080\nFetching gnomad annotations for ENSG00000170989\nFetching gnomad annotations for ENSG00000267534\n\n\n\n\n\nFetching gnomad annotations for ENSG00000213694\nFetching gnomad annotations for ENSG00000125910\nFetching gnomad annotations for ENSG00000180739\nFetching gnomad annotations for ENSG00000080293\nFetching gnomad annotations for ENSG00000128602\nFetching gnomad annotations for ENSG00000139874\nFetching gnomad annotations for ENSG00000180616\nFetching gnomad annotations for ENSG00000183473\nFetching gnomad annotations for ENSG00000132671\nFetching gnomad annotations for ENSG00000162009\nFetching gnomad annotations for ENSG00000198829\nFetching gnomad annotations for ENSG00000146399\nFetching gnomad annotations for ENSG00000146378\nFetching gnomad annotations for ENSG00000135569\n\n\n\n\n\n\nFetching gnomad annotations for ENSG00000146383\nFetching gnomad annotations for ENSG00000146385\nFetching gnomad annotations for ENSG00000237110\nFetching gnomad annotations for ENSG00000115353\nFetching gnomad annotations for ENSG00000075073\nFetching gnomad annotations for ENSG00000169836\nFetching gnomad annotations for ENSG00000173662\nFetching gnomad annotations for ENSG00000179002\nFetching gnomad annotations for ENSG00000169962\nFetching gnomad annotations for ENSG00000169777\nFetching gnomad annotations for ENSG00000121318\nFetching gnomad annotations for ENSG00000212128\nFetching gnomad annotations for ENSG00000212127\nFetching gnomad annotations for ENSG00000128519\n\n\n\n\n\nFetching gnomad annotations for ENSG00000212124\nFetching gnomad annotations for ENSG00000255837\nFetching gnomad annotations for ENSG00000127362\nFetching gnomad annotations for ENSG00000256188\nFetching gnomad annotations for ENSG00000256436\nFetching gnomad annotations for ENSG00000257138\nFetching gnomad annotations for ENSG00000236398\nFetching gnomad annotations for ENSG00000127364\nFetching gnomad annotations for ENSG00000221937\nFetching gnomad annotations for ENSG00000221855\nFetching gnomad annotations for ENSG00000186136\nFetching gnomad annotations for ENSG00000255374\nFetching gnomad annotations for ENSG00000226761\nFetching gnomad annotations for ENSG00000127366\n\n\n\n\n\nFetching gnomad annotations for ENSG00000212126\nFetching gnomad annotations for ENSG00000185899\nFetching gnomad annotations for ENSG00000121377\nFetching gnomad annotations for ENSG00000121314\nFetching gnomad annotations for ENSG00000121381\nFetching gnomad annotations for ENSG00000006638\nFetching gnomad annotations for ENSG00000163870\nFetching gnomad annotations for ENSG00000174417\nFetching gnomad annotations for ENSG00000165409\nFetching gnomad annotations for 
ENSG00000181408\nFetching gnomad annotations for ENSG00000114812\nFetching gnomad annotations for ENSG00000106018\nFetching gnomad annotations for ENSG00000173578\n" ], [ "with open('../data/gnomAD_population_variants/gnomad_r2_1_GRCh37_raw_data.json', 'w') as outfile:\n    json.dump(gnomad_variants_r2_1_GRCh37, outfile)", "_____no_output_____" ] ] ]
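The `pandas` import in the first cell is never used; a natural next step is to flatten the saved responses into a table. This sketch is an editorial addition: the key paths simply follow the GraphQL query above, and responses that errored out are skipped defensively.

```python
# Flatten each gene's variant list into rows of a DataFrame.
records = []
for payload in gnomad_variants_r2_1_GRCh37:
    gene = (payload.get('data') or {}).get('gene')
    if gene is None:
        continue  # the query failed for this gene; skip it
    for variant in gene['variants']:
        row = {'gene_id': gene['gene_id'],
               'gene_symbol': gene['gencode_symbol']}
        row.update({key: variant[key]
                    for key in ('variant_id', 'chrom', 'pos', 'ref', 'alt')})
        records.append(row)

variants_df = pd.DataFrame(records)
variants_df.head()
```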
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
ec5bfcc3cafb73f76817aa1062fd8931d2227715
95,790
ipynb
Jupyter Notebook
examples/notebooks/pourbaix.ipynb
yw-fang/MPInterfaces
ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e
[ "MIT" ]
56
2015-06-23T03:03:18.000Z
2022-02-06T16:41:34.000Z
examples/notebooks/pourbaix.ipynb
yw-fang/MPInterfaces
ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e
[ "MIT" ]
21
2015-09-03T17:50:18.000Z
2022-03-01T02:26:34.000Z
examples/notebooks/pourbaix.ipynb
yw-fang/MPInterfaces
ca2e43b590fdfbcf87a116c5c758e54cb7cb2d2e
[ "MIT" ]
50
2015-09-17T19:09:36.000Z
2021-11-15T19:13:20.000Z
309
85,813
0.910408
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec5c16dd35fbea8561f0850aaa4b53dd6ca23593
192,332
ipynb
Jupyter Notebook
docs/examples/general/data_loading/external_input.ipynb
npanpaliya/DALI
dc13d3b9d1a2fe67931b05a2861e81cd50da4d2b
[ "ECL-2.0", "Apache-2.0" ]
11
2021-03-16T05:09:16.000Z
2022-03-29T12:48:44.000Z
docs/examples/general/data_loading/external_input.ipynb
npanpaliya/DALI
dc13d3b9d1a2fe67931b05a2861e81cd50da4d2b
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/examples/general/data_loading/external_input.ipynb
npanpaliya/DALI
dc13d3b9d1a2fe67931b05a2861e81cd50da4d2b
[ "ECL-2.0", "Apache-2.0" ]
3
2021-05-08T16:51:55.000Z
2021-07-22T09:02:44.000Z
843.561404
185,856
0.950346
[ [ [ "# ExternalSource operator\n\nIn this example, we will see how to use `ExternalSource` operator, that allows us to\nuse an external data source as an input to the Pipeline.\n\nIn order to achieve that, we have to define a Iterator or Generator class which `next` function will\nreturn one or several `numpy` arrays.", "_____no_output_____" ] ], [ [ "import types\nimport collections\nimport numpy as np\nfrom random import shuffle\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops \nimport nvidia.dali.types as types\n\nbatch_size = 16", "_____no_output_____" ] ], [ [ "### Defining the iterator", "_____no_output_____" ] ], [ [ "class ExternalInputIterator(object):\n def __init__(self, batch_size):\n self.images_dir = \"../../data/images/\"\n self.batch_size = batch_size\n with open(self.images_dir + \"file_list.txt\", 'r') as f:\n self.files = [line.rstrip() for line in f if line is not '']\n shuffle(self.files)\n\n def __iter__(self):\n self.i = 0\n self.n = len(self.files)\n return self\n\n def __next__(self):\n batch = []\n labels = []\n for _ in range(self.batch_size):\n jpeg_filename, label = self.files[self.i].split(' ')\n f = open(self.images_dir + jpeg_filename, 'rb')\n batch.append(np.frombuffer(f.read(), dtype = np.uint8))\n labels.append(np.array([label], dtype = np.uint8))\n self.i = (self.i + 1) % self.n\n return (batch, labels)\n \n next = __next__", "_____no_output_____" ] ], [ [ "### Defining the pipeline\n\nThe next step is to define the Pipeline.\n\nWe override `Pipeline.iter_setup`, a method called by the pipeline before every `Pipeline.run`, to call the iterator and feed the result to `ExternalSource()` operator, referenced by `self.jpeg`, by using `feed_input`. DALI makes sure that the `iter_setup` is called an appropriate number of times ahead when DALI does prefetching. Calling `feed_input` outside `iter_setup` will lead to undefined results.", "_____no_output_____" ] ], [ [ "eii = ExternalInputIterator(batch_size)\niterator = iter(eii)", "_____no_output_____" ], [ " class ExternalSourcePipeline(Pipeline): \n def __init__(self, batch_size, num_threads, device_id):\n super(ExternalSourcePipeline, self).__init__(batch_size,\n num_threads,\n device_id,\n seed=12)\n self.input = ops.ExternalSource()\n self.input_label = ops.ExternalSource()\n self.decode = ops.ImageDecoder(device = \"mixed\", output_type = types.RGB)\n self.cast = ops.Cast(device = \"gpu\",\n dtype = types.INT32)\n\n def define_graph(self): \n self.jpegs = self.input()\n self.labels = self.input_label()\n images = self.decode(self.jpegs) \n output = self.cast(images)\n return (output, self.labels)\n\n def iter_setup(self):\n (images, labels) = iterator.next()\n self.feed_input(self.jpegs, images, layout=\"HWC\")\n self.feed_input(self.labels, labels)", "_____no_output_____" ] ], [ [ "### Using the pipeline", "_____no_output_____" ] ], [ [ "pipe = ExternalSourcePipeline(batch_size=batch_size, num_threads=2, device_id = 0)\npipe.build() \npipe_out = pipe.run()", "_____no_output_____" ] ], [ [ "Notice that labels are still on CPU and no as_cpu call is needed to show them.", "_____no_output_____" ] ], [ [ "batch_cpu = pipe_out[0].as_cpu()\nlabels_cpu = pipe_out[1]", "_____no_output_____" ], [ "from __future__ import print_function\nimport matplotlib.pyplot as plt\nimg = batch_cpu.at(2)\nprint(img.shape)\nprint(labels_cpu.at(2))\nplt.imshow(img.astype('uint8'))", "(427, 640, 3)\n[0]\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec5c2a6f364de3cd95c289fe896cbb6f333d74a7
18,882
ipynb
Jupyter Notebook
babble.ipynb
harrisonpim/babbling
bd332eb2cbd3fd05b7d75a35261db37a8ba8dbfc
[ "MIT" ]
1
2018-01-20T14:19:41.000Z
2018-01-20T14:19:41.000Z
babble.ipynb
harrisonpim/babbling
bd332eb2cbd3fd05b7d75a35261db37a8ba8dbfc
[ "MIT" ]
null
null
null
babble.ipynb
harrisonpim/babbling
bd332eb2cbd3fd05b7d75a35261db37a8ba8dbfc
[ "MIT" ]
null
null
null
38.378049
1,762
0.588497
[ [ [ "# Babbling\nLets write a stupid LSTM RNN which learns to generate text based on a corpus fed to it. Keras has a lovely API so we'll use that, backed up by the brunt of Tensorflow.", "_____no_output_____" ] ], [ [ "import math\nimport pandas as pd\nimport numpy as np\nimport nltk\n\nfrom numpy.random import choice\nfrom keras.layers import *\nfrom keras.models import Sequential", "_____no_output_____" ] ], [ [ "Let's load in a big lump of text for the LSTM to read", "_____no_output_____" ] ], [ [ "book_path = './data/hp_philosophers_stone.txt'\n\nwith open(book_path) as f:\n text = f.read().lower()", "_____no_output_____" ], [ "print('corpus length:', len(text))", "corpus length: 439400\n" ] ], [ [ "Then get a set of the unique characters in the text, and call it our vocabulary. Even in normal text the vocabulary is likely to be quite large - 26 upper case characters, 26 lower case characters, and loads of punctuation", "_____no_output_____" ] ], [ [ "characters = sorted(list(set(text)))\nvocab_size = len(characters)\n\nvocab_size", "_____no_output_____" ] ], [ [ "To make our data computationally interpretable, we should make some kind of index mapping each character to a unique numeric id. We can then represent our full book text as a list of character indicies. In other words, the output will be a long sequence of numbers which spell out the book.", "_____no_output_____" ] ], [ [ "character_to_index = dict((c, i) for i, c in enumerate(characters))\nindex_to_character = dict((i, c) for i, c in enumerate(characters))\n\ntext_as_indicies = [character_to_index[c] for c in text]", "_____no_output_____" ] ], [ [ "Now we can start splitting that massively long series of numbers into a load of training sequences. We'll use a sequence length of 40, because, having tested this with a bunch of lengths, 40 is a nice round number that seems to work well. 
It also gives us enough context to start picking up on grammar and sentence cadence without being excessive.", "_____no_output_____" ] ], [ [ "sequence_length = 40\nnum_sequences = len(text) - sequence_length + 1\n\nsequences = [text_as_indicies[i : i + sequence_length] \n             for i in range(num_sequences)]\n\nnext_characters = [text_as_indicies[i + 1 : i + sequence_length + 1] \n                   for i in range(num_sequences)]\n\nlen(sequences)", "_____no_output_____" ] ], [ [ "Now we need to come up with the series of next-characters that follow each sequence.", "_____no_output_____" ] ], [ [ "sequences = np.concatenate([[np.array(seq)] for seq in sequences[:-2]])\nnext_characters = np.concatenate([[np.array(char)] for char in next_characters[:-2]])", "_____no_output_____" ] ], [ [ "Here's an example of the two things we'll be using to train the network", "_____no_output_____" ] ], [ [ "print('sequence:\\n' + str(sequences[0]) + '\\n')\nprint('next characters:\\n' + str(next_characters[0]))", "sequence:\n[44 32 29  1 26 39 49  1 47 32 39  1 36 33 46 29 28 52 52 37 42 10  1 25 38\n 28  1 37 42 43 10  1 28 45 42 43 36 29 49  8]\n\nnext characters:\n[32 29  1 26 39 49  1 47 32 39  1 36 33 46 29 28 52 52 37 42 10  1 25 38 28\n  1 37 42 43 10  1 28 45 42 43 36 29 49  8  1]\n" ] ], [ [ "# Building the model\nWe're going to use a pretty generic model structure: \n- embedding\n- lstm\n- dropout\n- lstm \n- dropout\n- dense (time distributed)\n- softmax\n\nWe're also going to use the ADAM optimizer because it's the best and most clever mashup of things (AdaGrad and RMSProp) ever, sparse categorical cross entropy as our loss function, and the mean absolute error as our metric.", "_____no_output_____" ] ], [ [ "model = Sequential([Embedding(vocab_size, \n                              24, \n                              input_length=sequence_length),\n                    LSTM(512, \n                         input_dim=24, \n                         return_sequences=True, \n                         dropout_U=0.2, \n                         dropout_W=0.2, \n                         consume_less='gpu'),\n                    Dropout(0.2),\n                    LSTM(512, \n                         return_sequences=True, \n                         dropout_U=0.2, \n                         dropout_W=0.2, \n                         consume_less='gpu'),\n                    Dropout(0.2),\n                    TimeDistributed(Dense(vocab_size)),\n                    Activation('softmax')])", "/home/ubuntu/anaconda2/envs/py36/lib/python3.6/site-packages/ipykernel/__main__.py:9: UserWarning: The \`input_dim\` and \`input_length\` arguments in recurrent layers are deprecated. 
Use `input_shape` instead.\n/home/ubuntu/anaconda2/envs/py36/lib/python3.6/site-packages/ipykernel/__main__.py:9: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(512, return_sequences=True, input_shape=(None, 24), dropout=0.2, recurrent_dropout=0.2, implementation=2)`\n/home/ubuntu/anaconda2/envs/py36/lib/python3.6/site-packages/ipykernel/__main__.py:15: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(512, return_sequences=True, dropout=0.2, recurrent_dropout=0.2, implementation=2)`\n" ], [ "model.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['mae'])", "_____no_output_____" ] ], [ [ "# Training the model", "_____no_output_____" ] ], [ [ "model.optimizer.lr = 0.001\n\nmodel.fit(sequences, \n np.expand_dims(next_characters,-1), \n batch_size=64, \n nb_epoch=1)", "/home/ubuntu/anaconda2/envs/py36/lib/python3.6/site-packages/keras/models.py:851: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.\n warnings.warn('The `nb_epoch` argument in `fit` '\n" ] ], [ [ "Now that we've trained the model and optimised all of the weights in the network, we can save them to an `.h5` file.", "_____no_output_____" ] ], [ [ "model.save_weights('models/weights.h5')", "_____no_output_____" ] ], [ [ "# Reloading a pretrained model\nIf you've build the model and trained it elsewhere, you can reload it by calling `.load_weights()` with the path to the `.h5` file, as follows", "_____no_output_____" ] ], [ [ "model.load_weights('models/weights.h5')", "_____no_output_____" ] ], [ [ "# Babbling", "_____no_output_____" ] ], [ [ "def babble(seed_string=' '*40, output_length=500):\n '''\n Say a lot of stupid stuff based on all of the input text \n that we trained the model on\n \n Parameters\n ----------\n seed_string : string (optional)\n The story that you want your idiot network to be \n inspired by\n default = 40 spaces\n \n output_length : int (optional)\n how long do you want the idiot network to talk for\n default = 500\n \n Returns\n -------\n seed_string : string\n the original seed string with 500 characters of new \n stuff attached to the end of it\n '''\n for i in range(output_length):\n x = np.array([character_to_index[c] for c in seed_string[-40:]])[np.newaxis,:]\n preds = model.predict(x, verbose=0)[0][-1]\n preds = preds / np.sum(preds)\n next_character = choice(characters, p=preds)\n seed_string += next_character\n print(seed_string)", "_____no_output_____" ], [ "babble()", " in it and walked up by trouble glumping on his tricks as harry left harry's broom and back.  \"let's everyone else had to go bit to look at each other.  \"just then,\" harry. but harry, too, ron, and ron fruffled so back for us,\" ron sighed, as they telling himself against the stone.  \"then the armchairs wouldn't over his mouth. the flash of the days to give us them id it, just a wafd.\"  this is it must be sort. i dungeon had left professor mcgonagall noticing making the first i've got to said. \"co\n" ] ], [ [ "Hooray we did the thing", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ec5c513cde90b9ac2363cc32d70d0573d2418472
182,639
ipynb
Jupyter Notebook
Image-Captioning-Project-P2/1_Preliminaries.ipynb
rayryeng/Udacity_Computer_Vision_Nanodegree
ca29b6b99c62fce7b03ab178bf54118402348c44
[ "MIT" ]
null
null
null
Image-Captioning-Project-P2/1_Preliminaries.ipynb
rayryeng/Udacity_Computer_Vision_Nanodegree
ca29b6b99c62fce7b03ab178bf54118402348c44
[ "MIT" ]
null
null
null
Image-Captioning-Project-P2/1_Preliminaries.ipynb
rayryeng/Udacity_Computer_Vision_Nanodegree
ca29b6b99c62fce7b03ab178bf54118402348c44
[ "MIT" ]
null
null
null
182,639
182,639
0.590131
[ [ [ "# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.\n\nNote that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Explore the Data Loader\n- [Step 2](#step2): Use the Data Loader to Obtain Batches\n- [Step 3](#step3): Experiment with the CNN Encoder\n- [Step 4](#step4): Implement the RNN Decoder", "_____no_output_____" ], [ "<a id='step1'></a>\n## Step 1: Explore the Data Loader\n\nWe have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. \n\nIn the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. \n\n> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.\n\nThe `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:\n1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.\n2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.\n3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.\n4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. \n5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. \n\nWe will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. 
Be patient - it may take a couple of minutes to run!", "_____no_output_____" ] ], [ [ "import sys\n#sys.path.append('/opt/cocoapi/PythonAPI')\nsys.path.append('./cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\n!pip install nltk\nimport nltk\nnltk.download('punkt')\nfrom data_loader import get_loader\nfrom torchvision import transforms", "Requirement already satisfied: nltk in /opt/venv/lib/python3.7/site-packages (3.5)\nRequirement already satisfied: tqdm in /opt/venv/lib/python3.7/site-packages (from nltk) (4.46.1)\nRequirement already satisfied: joblib in /opt/venv/lib/python3.7/site-packages (from nltk) (0.15.1)\nRequirement already satisfied: regex in /opt/venv/lib/python3.7/site-packages (from nltk) (2020.6.8)\nRequirement already satisfied: click in /opt/venv/lib/python3.7/site-packages (from nltk) (7.1.2)\n[nltk_data] Downloading package punkt to /home/jovyan/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n" ], [ "# Define a transform to pre-process the training images.\ntransform_train = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n# Set the minimum word count threshold.\nvocab_threshold = 5\n\n# Specify the batch size.\nbatch_size = 10", "_____no_output_____" ], [ "# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False,\n cocoapi_loc='.') # Added by Ray to reference local cocoapi install from Github", "loading annotations into memory...\nDone (t=1.18s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\nDone (t=0.97s)\ncreating index...\n\n 0%| | 0/414113 [00:00<?, ?it/s]\u001b[A\n 0%| | 586/414113 [00:00<01:10, 5850.85it/s]\u001b[Aindex created!\nObtaining caption lengths...\n\n 0%| | 1079/414113 [00:00<01:14, 5538.72it/s]\u001b[A\n 0%| | 1602/414113 [00:00<01:15, 5440.80it/s]\u001b[A\n 0%| | 2044/414113 [00:00<01:20, 5087.73it/s]\u001b[A\n 1%| | 2478/414113 [00:00<01:25, 4834.66it/s]\u001b[A\n 1%| | 3120/414113 [00:00<01:18, 5221.38it/s]\u001b[A\n 1%| | 3750/414113 [00:00<01:14, 5504.00it/s]\u001b[A\n 1%| | 4267/414113 [00:00<01:18, 5242.36it/s]\u001b[A\n 1%| | 4790/414113 [00:00<01:18, 5236.19it/s]\u001b[A\n 1%|▏ | 5299/414113 [00:01<01:21, 5047.00it/s]\u001b[A\n 1%|▏ | 5795/414113 [00:01<01:22, 4941.59it/s]\u001b[A\n 2%|▏ | 6308/414113 [00:01<01:21, 4995.35it/s]\u001b[A\n 2%|▏ | 6883/414113 [00:01<01:18, 5199.78it/s]\u001b[A\n 2%|▏ | 7447/414113 [00:01<01:16, 5322.49it/s]\u001b[A\n 2%|▏ | 8022/414113 [00:01<01:14, 5443.68it/s]\u001b[A\n 2%|▏ | 8568/414113 [00:01<01:24, 4814.04it/s]\u001b[A\n 2%|▏ | 9189/414113 [00:01<01:18, 5161.54it/s]\u001b[A\n 2%|▏ | 9926/414113 [00:01<01:11, 5670.77it/s]\u001b[A\n 3%|▎ | 10623/414113 [00:01<01:07, 6006.37it/s]\u001b[A\n 3%|▎ | 11328/414113 [00:02<01:04, 6284.83it/s]\u001b[A\n 3%|▎ | 12022/414113 [00:02<01:02, 6466.83it/s]\u001b[A\n 3%|▎ | 12685/414113 [00:02<01:02, 6459.39it/s]\u001b[A\n 3%|▎ | 13408/414113 
[00:02<01:00, 6672.39it/s]\n100%|██████████| 414113/414113 [01:14<00:00, 5534.56it/s]\n" ] ], [ [ "When you ran the code cell above, the data loader was stored in the variable `data_loader`. \n\nYou can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n### Exploring the `__getitem__` Method\n\nThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. 
This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). \n\nWhen the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).\n\n#### Image Pre-Processing \n\nImage pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):\n```python\n# Convert image to tensor and pre-process using transform\nimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\nimage = self.transform(image)\n```\nAfter loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. \n\n#### Caption Pre-Processing \n\nThe captions also need to be pre-processed and prepped for training. In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.\n\nTo understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:\n```python\ndef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n ...\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n ...\n```\nFrom the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. \n\nWe use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):\n\n```python\n# Convert caption to tensor of word ids.\ntokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1\ncaption = [] # line 2\ncaption.append(self.vocab(self.vocab.start_word)) # line 3\ncaption.extend([self.vocab(token) for token in tokens]) # line 4\ncaption.append(self.vocab(self.vocab.end_word)) # line 5\ncaption = torch.Tensor(caption).long() # line 6\n```\n\nAs you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.", "_____no_output_____" ] ], [ [ "sample_caption = 'A person doing a trick on a rail while riding a skateboard.'", "_____no_output_____" ] ], [ [ "In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. 
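(Note: `nltk.tokenize.word_tokenize` relies on NLTK's Punkt tokenizer models; if the call below raises a `LookupError` about missing resources, running `nltk.download('punkt')` once should fetch them.) 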
Run the next code cell to visualize the effect on `sample_caption`.", "_____no_output_____" ] ], [ [ "import nltk\n\nsample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())\nprint(sample_tokens)", "['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']\n" ] ], [ [ "In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.\n\nThis special start word (`\"<start>\"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). You are **required** to keep this parameter at its default value (`start_word=\"<start>\"`).\n\nAs you will see below, the integer `0` is always used to mark the start of a caption.", "_____no_output_____" ] ], [ [ "sample_caption = []\n\nstart_word = data_loader.dataset.vocab.start_word\nprint('Special start word:', start_word)\nsample_caption.append(data_loader.dataset.vocab(start_word))\nprint(sample_caption)", "Special start word: <start>\n[0]\n" ] ], [ [ "In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.", "_____no_output_____" ] ], [ [ "sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])\nprint(sample_caption)", "[0, 3, 98, 754, 3, 396, 39, 3, 1010, 207, 139, 3, 753, 18]\n" ] ], [ [ "In **`line 5`**, we append a final integer to mark the end of the caption. \n\nAs with the special start word (above), the special end word (`\"<end>\"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word=\"<end>\"`).\n\nAs you will see below, the integer `1` is always used to mark the end of a caption.", "_____no_output_____" ] ], [ [ "end_word = data_loader.dataset.vocab.end_word\nprint('Special end word:', end_word)\n\nsample_caption.append(data_loader.dataset.vocab(end_word))\nprint(sample_caption)", "Special end word: <end>\n[0, 3, 98, 754, 3, 396, 39, 3, 1010, 207, 139, 3, 753, 18, 1]\n" ] ], [ [ "Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).", "_____no_output_____" ] ], [ [ "import torch\n\nsample_caption = torch.Tensor(sample_caption).long()\nprint(sample_caption)", "tensor([   0,    3,   98,  754,    3,  396,   39,    3, 1010,  207,  139,    3,\n         753,   18,    1])\n" ] ], [ [ "And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:\n```\n[<start>, 'a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.', <end>]\n```\nThis list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:\n```\n[0, 3, 98, 754, 3, 396, 39, 3, 1010, 207, 139, 3, 753, 18, 1]\n```\nFinally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above. 
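\n\nIf you would like to experiment with the pipeline on captions of your own, the six lines above can be collected into a small helper. This is only a sketch for exploration (the name `caption_to_tensor` is ours, not part of the project code); it assumes `nltk`, `torch`, and the `data_loader` from above are available:\n```python\nimport nltk\nimport torch\n\ndef caption_to_tensor(caption, vocab):\n    # Mirror lines 1-6 of the __getitem__ method in CoCoDataset.\n    tokens = nltk.tokenize.word_tokenize(str(caption).lower())  # line 1\n    ids = [vocab(vocab.start_word)]                             # lines 2-3\n    ids.extend([vocab(token) for token in tokens])              # line 4\n    ids.append(vocab(vocab.end_word))                           # line 5\n    return torch.Tensor(ids).long()                             # line 6\n\n# e.g. print(caption_to_tensor('A person riding a skateboard.', data_loader.dataset.vocab))\n```\n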
\n\nAs you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**. \n\n```python\ndef __call__(self, word):\n if not word in self.word2idx:\n return self.word2idx[self.unk_word]\n return self.word2idx[word]\n```\n\nThe `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.\n\nUse the code cell below to view a subset of this dictionary.", "_____no_output_____" ] ], [ [ "# Preview the word2idx dictionary.\ndict(list(data_loader.dataset.vocab.word2idx.items())[:10])", "_____no_output_____" ] ], [ [ "We also print the total number of keys.", "_____no_output_____" ] ], [ [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))", "Total number of tokens in vocabulary: 8852\n" ] ], [ [ "As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader. 
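\n\nBefore moving on, note one consequence of the `unk_word` fallback in the `__call__` method above: every token that did not clear `vocab_threshold` (or never appeared in the training captions at all) is mapped to a single shared "unknown" id. A quick sanity check, assuming the nonsense string `'zyxvwq'` is not in the vocabulary (which is the point of the example):\n```python\nvocab = data_loader.dataset.vocab\n# Both lines should print the same integer: the id reserved for unknown words.\nprint(vocab(vocab.unk_word))\nprint(vocab('zyxvwq'))\n```\n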
", "_____no_output_____" ] ], [ [ "# Modify the minimum word count threshold.\nvocab_threshold = 4\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False,\n cocoapi_loc='.') # Added by Ray to reference local cocoapi install", "loading annotations into memory...\nDone (t=1.10s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\nDone (t=0.98s)\ncreating index...\n\n 0%| | 0/414113 [00:00<?, ?it/s]\u001b[A\n 0%| | 540/414113 [00:00<01:16, 5395.44it/s]\u001b[Aindex created!\nObtaining caption lengths...\n\n 0%| | 1149/414113 [00:00<01:13, 5585.99it/s]\u001b[A\n 0%| | 1817/414113 [00:00<01:10, 5873.18it/s]\u001b[A\n 1%| | 2536/414113 [00:00<01:06, 6213.63it/s]\u001b[A\n 1%| | 3037/414113 [00:00<01:13, 5578.58it/s]\u001b[A\n 1%| | 3787/414113 [00:00<01:07, 6042.36it/s]\u001b[A\n 1%| | 4517/414113 [00:00<01:04, 6371.41it/s]\u001b[A\n 1%|▏ | 5300/414113 [00:00<01:00, 6746.70it/s]\u001b[A\n 1%|▏ | 5965/414113 [00:00<01:02, 6552.33it/s]\u001b[A\n 2%|▏ | 6615/414113 [00:01<01:05, 6255.90it/s]\u001b[A\n 2%|▏ | 7240/414113 [00:01<01:05, 6234.15it/s]\u001b[A\n 2%|▏ | 7912/414113 [00:01<01:03, 6370.61it/s]\u001b[A\n 2%|▏ | 8576/414113 [00:01<01:02, 6448.06it/s]\u001b[A\n 2%|▏ | 9222/414113 [00:01<01:06, 6085.72it/s]\u001b[A\n 2%|▏ | 9836/414113 [00:01<01:13, 5470.28it/s]\u001b[A\n 3%|▎ | 10398/414113 [00:01<01:18, 5123.00it/s]\u001b[A\n 3%|▎ | 10970/414113 [00:01<01:16, 5288.03it/s]\u001b[A\n 3%|▎ | 11698/414113 [00:01<01:09, 5759.76it/s]\u001b[A\n 3%|▎ | 12383/414113 [00:02<01:06, 6045.98it/s]\u001b[A\n 3%|▎ | 13006/414113 [00:02<01:08, 5865.10it/s]\u001b[A\n 3%|▎ | 13607/414113 [00:02<01:13, 5440.04it/s]\u001b[A\n 3%|▎ | 14191/414113 [00:02<01:12, 5552.76it/s]\u001b[A\n 4%|▎ | 14758/414113 [00:02<01:12, 5539.57it/s]\u001b[A\n 4%|▎ | 15321/414113 [00:02<01:17, 5152.11it/s]\u001b[A\n 4%|▍ | 16075/414113 [00:02<01:09, 5692.81it/s]\u001b[A\n 4%|▍ | 16802/414113 [00:02<01:05, 6088.35it/s]\u001b[A\n 4%|▍ | 17540/414113 [00:02<01:01, 6424.45it/s]\u001b[A\n 4%|▍ | 18257/414113 [00:02<00:59, 6630.28it/s]\u001b[A\n 5%|▍ | 18938/414113 [00:03<01:03, 6227.26it/s]\u001b[A\n 5%|▍ | 19579/414113 [00:03<01:06, 5936.07it/s]\u001b[A\n 5%|▍ | 20188/414113 [00:03<01:11, 5539.35it/s]\u001b[A\n 5%|▌ | 20758/414113 [00:03<01:12, 5400.88it/s]\u001b[A\n 5%|▌ | 21341/414113 [00:03<01:11, 5521.89it/s]\u001b[A\n 5%|▌ | 21903/414113 [00:03<01:12, 5435.78it/s]\u001b[A\n 5%|▌ | 22454/414113 [00:03<01:12, 5376.00it/s]\u001b[A\n 6%|▌ | 23037/414113 [00:03<01:11, 5503.59it/s]\u001b[A\n 6%|▌ | 23592/414113 [00:03<01:11, 5451.14it/s]\u001b[A\n 6%|▌ | 24170/414113 [00:04<01:10, 5545.31it/s]\u001b[A\n 6%|▌ | 24728/414113 [00:04<01:10, 5546.96it/s]\u001b[A\n 6%|▌ | 25299/414113 [00:04<01:09, 5593.18it/s]\u001b[A\n 6%|▋ | 26057/414113 [00:04<01:03, 6069.99it/s]\u001b[A\n 6%|▋ | 26677/414113 [00:04<01:06, 5824.48it/s]\u001b[A\n 7%|▋ | 27271/414113 [00:04<02:07, 3025.28it/s]\u001b[A\n 7%|▋ | 27763/414113 [00:05<01:52, 3419.39it/s]\u001b[A\n 7%|▋ | 28441/414113 [00:05<01:36, 4015.95it/s]\u001b[A\n 7%|▋ | 29152/414113 [00:05<01:23, 4618.50it/s]\u001b[A\n 7%|▋ | 29897/414113 [00:05<01:13, 5211.60it/s]\u001b[A\n 7%|▋ | 30530/414113 [00:05<01:12, 5293.82it/s]\u001b[A\n 8%|▊ | 31138/414113 [00:05<01:09, 
5471.42it/s]\u001b[A\n 8%|▊ | 31742/414113 [00:05<01:13, 5206.39it/s]\u001b[A\n 8%|▊ | 32356/414113 [00:05<01:10, 5452.09it/s]\u001b[A\n 8%|▊ | 32970/414113 [00:05<01:07, 5640.90it/s]\u001b[A\n 8%|▊ | 33559/414113 [00:05<01:07, 5640.51it/s]\u001b[A\n 8%|▊ | 34224/414113 [00:06<01:04, 5909.48it/s]\u001b[A\n 8%|▊ | 34831/414113 [00:06<01:05, 5832.06it/s]\u001b[A\n 9%|▊ | 35426/414113 [00:06<01:04, 5859.39it/s]\u001b[A\n 9%|▊ | 36020/414113 [00:06<01:09, 5407.93it/s]\u001b[A\n 9%|▉ | 36574/414113 [00:06<01:10, 5363.69it/s]\u001b[A\n 9%|▉ | 37120/414113 [00:06<01:12, 5193.58it/s]\u001b[A\n 9%|▉ | 37739/414113 [00:06<01:08, 5457.05it/s]\u001b[A\n 9%|▉ | 38294/414113 [00:06<01:09, 5433.44it/s]\u001b[A\n 9%|▉ | 38882/414113 [00:06<01:07, 5557.87it/s]\u001b[A\n 10%|▉ | 39443/414113 [00:07<01:09, 5361.83it/s]\u001b[A\n 10%|▉ | 39985/414113 [00:07<01:09, 5371.39it/s]\u001b[A\n 10%|▉ | 40564/414113 [00:07<01:08, 5490.33it/s]\u001b[A\n 10%|▉ | 41172/414113 [00:07<01:05, 5654.74it/s]\u001b[A\n 10%|█ | 41741/414113 [00:07<01:08, 5429.10it/s]\u001b[A\n 10%|█ | 42289/414113 [00:07<01:11, 5207.67it/s]\u001b[A\n 10%|█ | 43051/414113 [00:07<01:04, 5754.14it/s]\u001b[A\n 11%|█ | 43800/414113 [00:07<00:59, 6183.04it/s]\u001b[A\n 11%|█ | 44570/414113 [00:07<00:56, 6569.44it/s]\u001b[A\n 11%|█ | 45304/414113 [00:07<00:54, 6782.07it/s]\u001b[A\n 11%|█ | 46008/414113 [00:08<00:53, 6856.71it/s]\u001b[A\n 11%|█▏ | 46707/414113 [00:08<00:57, 6427.58it/s]\u001b[A\n 11%|█▏ | 47365/414113 [00:08<00:57, 6423.53it/s]\u001b[A\n 12%|█▏ | 48018/414113 [00:08<00:58, 6272.38it/s]\u001b[A\n 12%|█▏ | 48654/414113 [00:08<00:59, 6171.93it/s]\u001b[A\n 12%|█▏ | 49278/414113 [00:08<01:01, 5902.25it/s]\u001b[A\n 12%|█▏ | 49875/414113 [00:08<01:05, 5561.31it/s]\u001b[A\n 12%|█▏ | 50440/414113 [00:08<01:10, 5170.00it/s]\u001b[A\n 12%|█▏ | 51016/414113 [00:08<01:08, 5332.14it/s]\u001b[A\n 12%|█▏ | 51559/414113 [00:09<01:08, 5256.39it/s]\u001b[A\n 13%|█▎ | 52207/414113 [00:09<01:04, 5570.88it/s]\u001b[A\n 13%|█▎ | 52794/414113 [00:09<01:03, 5655.47it/s]\u001b[A\n 13%|█▎ | 53384/414113 [00:09<01:02, 5726.52it/s]\u001b[A\n 13%|█▎ | 53962/414113 [00:09<01:04, 5588.69it/s]\u001b[A\n 13%|█▎ | 54539/414113 [00:09<01:03, 5640.08it/s]\u001b[A\n 13%|█▎ | 55172/414113 [00:09<01:01, 5829.95it/s]\u001b[A\n 13%|█▎ | 55759/414113 [00:09<01:04, 5570.99it/s]\u001b[A\n 14%|█▎ | 56322/414113 [00:09<01:07, 5273.69it/s]\u001b[A\n 14%|█▍ | 56945/414113 [00:10<01:04, 5528.10it/s]\u001b[A\n 14%|█▍ | 57506/414113 [00:10<01:09, 5110.74it/s]\u001b[A\n 14%|█▍ | 58029/414113 [00:10<01:13, 4831.69it/s]\u001b[A\n 14%|█▍ | 58524/414113 [00:10<01:16, 4633.16it/s]\u001b[A\n 14%|█▍ | 59004/414113 [00:10<01:15, 4679.18it/s]\u001b[A\n 14%|█▍ | 59479/414113 [00:10<01:16, 4643.46it/s]\u001b[A\n 14%|█▍ | 59949/414113 [00:10<01:17, 4599.09it/s]\u001b[A\n 15%|█▍ | 60673/414113 [00:10<01:08, 5162.83it/s]\u001b[A\n 15%|█▍ | 61295/414113 [00:10<01:04, 5438.44it/s]\u001b[A\n 15%|█▍ | 61930/414113 [00:11<01:01, 5682.14it/s]\u001b[A\n 15%|█▌ | 62517/414113 [00:11<01:08, 5170.20it/s]\u001b[A\n 15%|█▌ | 63056/414113 [00:11<01:08, 5131.75it/s]\u001b[A\n 15%|█▌ | 63585/414113 [00:11<01:10, 4964.35it/s]\u001b[A\n 16%|█▌ | 64251/414113 [00:11<01:05, 5374.13it/s]\u001b[A\n 16%|█▌ | 64984/414113 [00:11<00:59, 5840.31it/s]\u001b[A\n 16%|█▌ | 65593/414113 [00:11<00:59, 5836.01it/s]\u001b[A\n 16%|█▌ | 66194/414113 [00:11<01:05, 5318.38it/s]\u001b[A\n 16%|█▌ | 66747/414113 [00:11<01:05, 5290.25it/s]\u001b[A\n 16%|█▋ | 67300/414113 [00:12<01:04, 5356.37it/s]\u001b[A\n 16%|█▋ | 67957/414113 
[00:12<01:01, 5665.15it/s]\u001b[A\n
100%|██████████| 414113/414113 [01:13<00:00, 5598.02it/s]\u001b[A\n" ], [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))", "Total number of tokens in vocabulary: 9947\n" ] ],
[ [ "There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`\"<start>\"`) and special end word (`\"<end>\"`). There is one more special token, corresponding to unknown words (`\"<unk>\"`). All tokens that do not appear anywhere in the `word2idx` dictionary are treated as unknown words, and in the pre-processing step every unknown token is mapped to the integer `2`.", "_____no_output_____" ] ],
[ [ "unk_word = data_loader.dataset.vocab.unk_word\nprint('Special unknown word:', unk_word)\n\nprint('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))", "Special unknown word: <unk>\nAll unknown words are mapped to this integer: 2\n" ] ],
[ [ "Check this for yourself below by pre-processing the provided nonsense words, which never appear in the training captions.", "_____no_output_____" ] ],
[ [ "print(data_loader.dataset.vocab('jfkafejw'))\nprint(data_loader.dataset.vocab('ieowoqjf'))", "2\n2\n" ] ],
[ [ "The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. When you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.\n\nIf you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` for your changes to take effect.\n\nOnce you are happy with the value you have chosen for `vocab_threshold`, run the data loader *one more time* with that value to save the new vocabulary to file. From then on, you can set `vocab_from_file=True` to load the vocabulary from file, which speeds up the instantiation of the data loader considerably: building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.\n\nNote that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` is completely ignored.", "_____no_output_____" ] ],
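[ [ "To see the special tokens working together, the sketch below (an added illustration, not part of the original walkthrough) numericalizes a hand-tokenized caption the same way the pre-processing step does: known words map to their integer indices, anything outside the vocabulary collapses to the `<unk>` index, and the sequence is wrapped with the `<start>` and `<end>` tokens, which are themselves keys in `word2idx`.", "_____no_output_____" ] ],
[ [ "# Added sketch: numericalize a hand-tokenized caption.\n# Known tokens map to their indices; unknown tokens collapse to <unk> (integer 2).\nvocab = data_loader.dataset.vocab\n\ntokens = ['a', 'person', 'doing', 'a', 'jfkafejw', 'trick']\ncaption_ids = [vocab('<start>')] + [vocab(token) for token in tokens] + [vocab('<end>')]\nprint(caption_ids)", "_____no_output_____" ] ],
[ [ "You can also verify the save-and-reload round trip directly. The cell below is another added check: it assumes only what was stated above, namely that the vocabulary object is pickled to `vocab.pkl`, and confirms that reloading the file yields a vocabulary of the same size as the one held by the current data loader.", "_____no_output_____" ] ],
[ [ "import pickle\n\n# Added check: reload the vocabulary that the data loader saved to disk.\n# (Assumption: vocab.pkl holds the pickled vocabulary object, as described above.)\nwith open('vocab.pkl', 'rb') as f:\n    saved_vocab = pickle.load(f)\n\nprint('Tokens in reloaded vocabulary:', len(saved_vocab))\nprint('Sizes match:', len(saved_vocab) == len(data_loader.dataset.vocab))", "_____no_output_____" ] ],
[ [ "# Obtain the data loader (from file). 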
Note that it runs much faster than before!\ndata_loader = get_loader(transform=transform_train,\n                         mode='train',\n                         batch_size=batch_size,\n                         vocab_from_file=True,\n                         cocoapi_loc='.')  # Added: location of the folder containing the cocoapi", "Vocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\nDone (t=0.96s)\ncreating index...\nindex created!\nObtaining caption lengths...\n\n  0%|          | 0/414113 [00:00<?, ?it/s]\u001b[A\n
 76%|███████▌ 
| 315219/414113 [00:55<00:15, 6323.79it/s]\u001b[A\n 76%|███████▋ | 315854/414113 [00:55<00:16, 6009.62it/s]\u001b[A\n 76%|███████▋ | 316460/414113 [00:56<00:17, 5595.21it/s]\u001b[A\n 77%|███████▋ | 317029/414113 [00:56<00:18, 5354.12it/s]\u001b[A\n 77%|███████▋ | 317574/414113 [00:56<00:19, 5065.48it/s]\u001b[A\n 77%|███████▋ | 318141/414113 [00:56<00:18, 5231.31it/s]\u001b[A\n 77%|███████▋ | 318730/414113 [00:56<00:17, 5410.62it/s]\u001b[A\n 77%|███████▋ | 319437/414113 [00:56<00:16, 5819.88it/s]\u001b[A\n 77%|███████▋ | 320033/414113 [00:56<00:17, 5386.01it/s]\u001b[A\n 77%|███████▋ | 320731/414113 [00:56<00:16, 5781.41it/s]\u001b[A\n 78%|███████▊ | 321329/414113 [00:56<00:16, 5545.07it/s]\u001b[A\n 78%|███████▊ | 322017/414113 [00:56<00:15, 5886.82it/s]\u001b[A\n 78%|███████▊ | 322623/414113 [00:57<00:17, 5338.19it/s]\u001b[A\n 78%|███████▊ | 323179/414113 [00:57<00:17, 5214.70it/s]\u001b[A\n 78%|███████▊ | 323916/414113 [00:57<00:15, 5715.81it/s]\u001b[A\n 78%|███████▊ | 324663/414113 [00:57<00:14, 6148.38it/s]\u001b[A\n 79%|███████▊ | 325379/414113 [00:57<00:13, 6420.22it/s]\u001b[A\n 79%|███████▊ | 326044/414113 [00:57<00:13, 6400.08it/s]\u001b[A\n 79%|███████▉ | 326700/414113 [00:57<00:14, 5930.96it/s]\u001b[A\n 79%|███████▉ | 327311/414113 [00:57<00:14, 5942.13it/s]\u001b[A\n 79%|███████▉ | 327970/414113 [00:57<00:14, 6121.60it/s]\u001b[A\n 79%|███████▉ | 328593/414113 [00:58<00:14, 5736.35it/s]\u001b[A\n 79%|███████▉ | 329179/414113 [00:58<00:15, 5524.34it/s]\u001b[A\n 80%|███████▉ | 329754/414113 [00:58<00:15, 5589.09it/s]\u001b[A\n 80%|███████▉ | 330321/414113 [00:58<00:15, 5557.21it/s]\u001b[A\n 80%|███████▉ | 330882/414113 [00:58<00:15, 5344.40it/s]\u001b[A\n 80%|████████ | 331441/414113 [00:58<00:15, 5413.61it/s]\u001b[A\n 80%|████████ | 331987/414113 [00:58<00:15, 5346.39it/s]\u001b[A\n 80%|████████ | 332525/414113 [00:58<00:16, 5061.52it/s]\u001b[A\n 80%|████████ | 333037/414113 [00:58<00:17, 4754.82it/s]\u001b[A\n 81%|████████ | 333718/414113 [00:59<00:15, 5227.31it/s]\u001b[A\n 81%|████████ | 334261/414113 [00:59<00:15, 5021.00it/s]\u001b[A\n 81%|████████ | 334862/414113 [00:59<00:15, 5280.68it/s]\u001b[A\n 81%|████████ | 335570/414113 [00:59<00:13, 5715.20it/s]\u001b[A\n 81%|████████ | 336162/414113 [00:59<00:14, 5297.04it/s]\u001b[A\n 81%|████████▏ | 336849/414113 [00:59<00:13, 5687.25it/s]\u001b[A\n 82%|████████▏ | 337555/414113 [00:59<00:12, 6037.53it/s]\u001b[A\n 82%|████████▏ | 338181/414113 [00:59<00:14, 5338.62it/s]\u001b[A\n 82%|████████▏ | 338745/414113 [00:59<00:14, 5190.23it/s]\u001b[A\n 82%|████████▏ | 339443/414113 [01:00<00:13, 5620.98it/s]\u001b[A\n 82%|████████▏ | 340172/414113 [01:00<00:12, 6034.53it/s]\u001b[A\n 82%|████████▏ | 340802/414113 [01:00<00:12, 6063.07it/s]\u001b[A\n 82%|████████▏ | 341513/414113 [01:00<00:11, 6342.26it/s]\u001b[A\n 83%|████████▎ | 342164/414113 [01:00<00:11, 6244.65it/s]\u001b[A\n 83%|████████▎ | 342801/414113 [01:00<00:12, 5654.46it/s]\u001b[A\n 83%|████████▎ | 343386/414113 [01:00<00:12, 5592.18it/s]\u001b[A\n 83%|████████▎ | 343959/414113 [01:00<00:12, 5433.02it/s]\u001b[A\n 83%|████████▎ | 344513/414113 [01:00<00:12, 5396.78it/s]\u001b[A\n 83%|████████▎ | 345087/414113 [01:01<00:12, 5492.91it/s]\u001b[A\n 83%|████████▎ | 345669/414113 [01:01<00:12, 5586.17it/s]\u001b[A\n 84%|████████▎ | 346257/414113 [01:01<00:11, 5669.70it/s]\u001b[A\n 84%|████████▍ | 346828/414113 [01:01<00:12, 5279.43it/s]\u001b[A\n 84%|████████▍ | 347364/414113 [01:01<00:13, 5038.30it/s]\u001b[A\n 84%|████████▍ | 347921/414113 [01:01<00:12, 
5185.47it/s]\u001b[A\n 84%|████████▍ | 348530/414113 [01:01<00:12, 5427.20it/s]\u001b[A\n 84%|████████▍ | 349107/414113 [01:01<00:11, 5524.68it/s]\u001b[A\n 84%|████████▍ | 349666/414113 [01:01<00:12, 5030.15it/s]\u001b[A\n 85%|████████▍ | 350311/414113 [01:02<00:11, 5385.59it/s]\u001b[A\n 85%|████████▍ | 350867/414113 [01:02<00:11, 5436.64it/s]\u001b[A\n 85%|████████▍ | 351612/414113 [01:02<00:10, 5915.93it/s]\u001b[A\n 85%|████████▌ | 352340/414113 [01:02<00:09, 6266.65it/s]\u001b[A\n 85%|████████▌ | 352987/414113 [01:02<00:11, 5547.38it/s]\u001b[A\n 85%|████████▌ | 353571/414113 [01:02<00:11, 5088.92it/s]\u001b[A\n 86%|████████▌ | 354173/414113 [01:02<00:11, 5334.93it/s]\u001b[A\n 86%|████████▌ | 354730/414113 [01:02<00:11, 5255.37it/s]\u001b[A\n 86%|████████▌ | 355272/414113 [01:02<00:11, 5265.38it/s]\u001b[A\n 86%|████████▌ | 355826/414113 [01:03<00:10, 5342.05it/s]\u001b[A\n 86%|████████▌ | 356369/414113 [01:03<00:11, 4959.97it/s]\u001b[A\n 86%|████████▌ | 356877/414113 [01:03<00:22, 2496.31it/s]\u001b[A\n 86%|████████▋ | 357353/414113 [01:03<00:19, 2911.63it/s]\u001b[A\n 86%|████████▋ | 357887/414113 [01:03<00:16, 3370.87it/s]\u001b[A\n 87%|████████▋ | 358449/414113 [01:03<00:14, 3813.86it/s]\u001b[A\n 87%|████████▋ | 359012/414113 [01:04<00:13, 4221.79it/s]\u001b[A\n 87%|████████▋ | 359597/414113 [01:04<00:11, 4605.71it/s]\u001b[A\n 87%|████████▋ | 360127/414113 [01:04<00:11, 4552.96it/s]\u001b[A\n 87%|████████▋ | 360656/414113 [01:04<00:11, 4749.48it/s]\u001b[A\n 87%|████████▋ | 361233/414113 [01:04<00:10, 5014.71it/s]\u001b[A\n 87%|████████▋ | 361764/414113 [01:04<00:10, 4875.71it/s]\u001b[A\n 87%|████████▋ | 362273/414113 [01:04<00:10, 4891.04it/s]\u001b[A\n 88%|████████▊ | 362994/414113 [01:04<00:09, 5412.46it/s]\u001b[A\n 88%|████████▊ | 363563/414113 [01:04<00:09, 5432.37it/s]\u001b[A\n 88%|████████▊ | 364225/414113 [01:04<00:08, 5740.70it/s]\u001b[A\n 88%|████████▊ | 364952/414113 [01:05<00:08, 6126.35it/s]\u001b[A\n 88%|████████▊ | 365585/414113 [01:05<00:08, 5727.00it/s]\u001b[A\n 88%|████████▊ | 366364/414113 [01:05<00:07, 6220.26it/s]\u001b[A\n 89%|████████▊ | 367060/414113 [01:05<00:07, 6424.95it/s]\u001b[A\n 89%|████████▉ | 367723/414113 [01:05<00:07, 6184.10it/s]\u001b[A\n 89%|████████▉ | 368367/414113 [01:05<00:07, 6256.59it/s]\u001b[A\n 89%|████████▉ | 369004/414113 [01:05<00:07, 6249.68it/s]\u001b[A\n 89%|████████▉ | 369637/414113 [01:05<00:07, 6159.95it/s]\u001b[A\n 89%|████████▉ | 370322/414113 [01:05<00:06, 6350.65it/s]\u001b[A\n 90%|████████▉ | 370963/414113 [01:06<00:07, 5476.96it/s]\u001b[A\n 90%|████████▉ | 371536/414113 [01:06<00:07, 5530.04it/s]\u001b[A\n 90%|████████▉ | 372107/414113 [01:06<00:07, 5513.86it/s]\u001b[A\n 90%|████████▉ | 372691/414113 [01:06<00:07, 5606.41it/s]\u001b[A\n 90%|█████████ | 373261/414113 [01:06<00:07, 5569.44it/s]\u001b[A\n 90%|█████████ | 373825/414113 [01:06<00:07, 5552.36it/s]\u001b[A\n 90%|█████████ | 374385/414113 [01:06<00:07, 5243.56it/s]\u001b[A\n 91%|█████████ | 374917/414113 [01:06<00:07, 5016.53it/s]\u001b[A\n 91%|█████████ | 375494/414113 [01:06<00:07, 5219.61it/s]\u001b[A\n 91%|█████████ | 376091/414113 [01:07<00:07, 5422.78it/s]\u001b[A\n 91%|█████████ | 376641/414113 [01:07<00:07, 5345.01it/s]\u001b[A\n 91%|█████████ | 377181/414113 [01:07<00:07, 5069.34it/s]\u001b[A\n 91%|█████████▏| 377927/414113 [01:07<00:06, 5608.33it/s]\u001b[A\n 91%|█████████▏| 378687/414113 [01:07<00:05, 6086.61it/s]\u001b[A\n 92%|█████████▏| 379450/414113 [01:07<00:05, 6478.93it/s]\u001b[A\n 92%|█████████▏| 380156/414113 [01:07<00:05, 
6641.43it/s]\u001b[A\n 92%|█████████▏| 380840/414113 [01:07<00:05, 6468.05it/s]\u001b[A\n 92%|█████████▏| 381530/414113 [01:07<00:04, 6589.83it/s]\u001b[A\n 92%|█████████▏| 382200/414113 [01:07<00:05, 6366.02it/s]\u001b[A\n 92%|█████████▏| 382885/414113 [01:08<00:04, 6503.06it/s]\u001b[A\n 93%|█████████▎| 383543/414113 [01:08<00:04, 6354.60it/s]\u001b[A\n 93%|█████████▎| 384185/414113 [01:08<00:04, 6001.03it/s]\u001b[A\n 93%|█████████▎| 384794/414113 [01:08<00:05, 5522.61it/s]\u001b[A\n 93%|█████████▎| 385360/414113 [01:08<00:05, 5432.76it/s]\u001b[A\n 93%|█████████▎| 385991/414113 [01:08<00:04, 5668.77it/s]\u001b[A\n 93%|█████████▎| 386572/414113 [01:08<00:04, 5709.73it/s]\u001b[A\n 93%|█████████▎| 387150/414113 [01:08<00:04, 5536.87it/s]\u001b[A\n 94%|█████████▎| 387710/414113 [01:08<00:04, 5451.68it/s]\u001b[A\n 94%|█████████▍| 388260/414113 [01:09<00:05, 5079.06it/s]\u001b[A\n 94%|█████████▍| 388842/414113 [01:09<00:04, 5278.97it/s]\u001b[A\n 94%|█████████▍| 389378/414113 [01:09<00:04, 5302.68it/s]\u001b[A\n 94%|█████████▍| 390017/414113 [01:09<00:04, 5587.04it/s]\u001b[A\n 94%|█████████▍| 390773/414113 [01:09<00:03, 6060.79it/s]\u001b[A\n 95%|█████████▍| 391397/414113 [01:09<00:03, 5746.10it/s]\u001b[A\n 95%|█████████▍| 392152/414113 [01:09<00:03, 6189.21it/s]\u001b[A\n 95%|█████████▍| 392792/414113 [01:09<00:03, 5433.03it/s]\u001b[A\n 95%|█████████▍| 393367/414113 [01:09<00:04, 4995.84it/s]\u001b[A\n 95%|█████████▌| 394067/414113 [01:10<00:03, 5465.05it/s]\u001b[A\n 95%|█████████▌| 394647/414113 [01:10<00:03, 5197.87it/s]\u001b[A\n 95%|█████████▌| 395193/414113 [01:10<00:03, 4971.27it/s]\u001b[A\n 96%|█████████▌| 395711/414113 [01:10<00:03, 4889.41it/s]\u001b[A\n 96%|█████████▌| 396215/414113 [01:10<00:03, 4923.24it/s]\u001b[A\n 96%|█████████▌| 396802/414113 [01:10<00:03, 5172.12it/s]\u001b[A\n 96%|█████████▌| 397355/414113 [01:10<00:03, 5273.40it/s]\u001b[A\n 96%|█████████▌| 397900/414113 [01:10<00:03, 5323.75it/s]\u001b[A\n 96%|█████████▌| 398438/414113 [01:10<00:03, 5178.27it/s]\u001b[A\n 96%|█████████▋| 398961/414113 [01:11<00:03, 5031.85it/s]\u001b[A\n 96%|█████████▋| 399527/414113 [01:11<00:02, 5204.53it/s]\u001b[A\n 97%|█████████▋| 400066/414113 [01:11<00:02, 5257.96it/s]\u001b[A\n 97%|█████████▋| 400617/414113 [01:11<00:02, 5331.07it/s]\u001b[A\n 97%|█████████▋| 401196/414113 [01:11<00:02, 5459.68it/s]\u001b[A\n 97%|█████████▋| 401745/414113 [01:11<00:02, 5266.53it/s]\u001b[A\n 97%|█████████▋| 402312/414113 [01:11<00:02, 5379.53it/s]\u001b[A\n 97%|█████████▋| 402856/414113 [01:11<00:02, 5395.69it/s]\u001b[A\n 97%|█████████▋| 403415/414113 [01:11<00:01, 5451.95it/s]\u001b[A\n 98%|█████████▊| 403962/414113 [01:11<00:01, 5179.60it/s]\u001b[A\n 98%|█████████▊| 404484/414113 [01:12<00:01, 5138.83it/s]\u001b[A\n 98%|█████████▊| 405259/414113 [01:12<00:01, 5716.14it/s]\u001b[A\n 98%|█████████▊| 405854/414113 [01:12<00:01, 5128.28it/s]\u001b[A\n 98%|█████████▊| 406394/414113 [01:12<00:01, 4804.32it/s]\u001b[A\n 98%|█████████▊| 406952/414113 [01:12<00:01, 5013.24it/s]\u001b[A\n 98%|█████████▊| 407743/414113 [01:12<00:01, 5630.81it/s]\u001b[A\n 99%|█████████▊| 408344/414113 [01:12<00:01, 5344.50it/s]\u001b[A\n 99%|█████████▊| 408909/414113 [01:12<00:00, 5212.92it/s]\u001b[A\n 99%|█████████▉| 409484/414113 [01:13<00:00, 5361.75it/s]\u001b[A\n 99%|█████████▉| 410037/414113 [01:13<00:00, 5017.19it/s]\u001b[A\n 99%|█████████▉| 410555/414113 [01:13<00:00, 5037.36it/s]\u001b[A\n 99%|█████████▉| 411071/414113 [01:13<00:00, 5070.45it/s]\u001b[A\n 99%|█████████▉| 411637/414113 [01:13<00:00, 
5233.59it/s]\u001b[A\n100%|█████████▉| 412242/414113 [01:13<00:00, 5453.50it/s]\u001b[A\n100%|█████████▉| 412795/414113 [01:13<00:00, 5428.78it/s]\u001b[A\n100%|█████████▉| 413343/414113 [01:13<00:00, 4967.18it/s]\u001b[A\n100%|██████████| 414113/414113 [01:13<00:00, 5603.20it/s]\u001b[A\n" ] ], [ [ "In the next section, you will learn how to use the data loader to obtain batches of training data.", "_____no_output_____" ], [ "<a id='step2'></a>\n## Step 2: Use the Data Loader to Obtain Batches\n\nThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). \n\nIn the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare. ", "_____no_output_____" ] ], [ [ "from collections import Counter\n\n# Tally the total number of training captions with each length.\ncounter = Counter(data_loader.dataset.caption_lengths)\nlengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\nfor value, count in lengths:\n print('value: %2d --- count: %5d' % (value, count))", "value: 10 --- count: 86302\nvalue: 11 --- count: 79970\nvalue: 9 --- count: 71920\nvalue: 12 --- count: 57652\nvalue: 13 --- count: 37669\nvalue: 14 --- count: 22342\nvalue: 8 --- count: 20742\nvalue: 15 --- count: 12840\nvalue: 16 --- count: 7736\nvalue: 17 --- count: 4845\nvalue: 18 --- count: 3101\nvalue: 19 --- count: 2017\nvalue: 7 --- count: 1594\nvalue: 20 --- count: 1453\nvalue: 21 --- count: 997\nvalue: 22 --- count: 683\nvalue: 23 --- count: 534\nvalue: 24 --- count: 384\nvalue: 25 --- count: 277\nvalue: 26 --- count: 214\nvalue: 27 --- count: 160\nvalue: 28 --- count: 114\nvalue: 29 --- count: 87\nvalue: 30 --- count: 58\nvalue: 31 --- count: 49\nvalue: 32 --- count: 44\nvalue: 34 --- count: 40\nvalue: 37 --- count: 32\nvalue: 35 --- count: 31\nvalue: 33 --- count: 30\nvalue: 36 --- count: 26\nvalue: 38 --- count: 18\nvalue: 39 --- count: 18\nvalue: 43 --- count: 16\nvalue: 44 --- count: 16\nvalue: 48 --- count: 12\nvalue: 45 --- count: 11\nvalue: 42 --- count: 10\nvalue: 40 --- count: 9\nvalue: 49 --- count: 9\nvalue: 46 --- count: 9\nvalue: 47 --- count: 7\nvalue: 50 --- count: 6\nvalue: 51 --- count: 6\nvalue: 41 --- count: 6\nvalue: 52 --- count: 5\nvalue: 54 --- count: 3\nvalue: 56 --- count: 2\nvalue: 6 --- count: 2\nvalue: 53 --- count: 2\nvalue: 55 --- count: 2\nvalue: 57 --- count: 1\n" ] ], [ [ "To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.\n\nRun the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. 
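\n\nIn rough pseudocode, the idea behind this sampling scheme can be sketched as follows (an illustration only, not the actual `CoCoDataset` code; the function name below is hypothetical, and the real implementation lives in **data_loader.py**):\n\n```python\nimport numpy as np\n\ndef sketch_get_train_indices(caption_lengths, batch_size):\n    lengths = np.array(caption_lengths)\n    # Drawing one entry from the raw list of lengths is automatically\n    # proportional to how often each length occurs in the training data.\n    sel_length = np.random.choice(lengths)\n    # Indices of all training captions that have the sampled length.\n    candidates = np.where(lengths == sel_length)[0]\n    # Draw batch_size of them (with replacement, for simplicity).\n    return list(np.random.choice(candidates, size=batch_size))\n```\n\n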
These indices are stored below in `indices`.\n\nThese indices are supplied to the data loader, which is then used to retrieve the corresponding data points. The pre-processed images and captions in the batch are stored in `images` and `captions`.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch.utils.data as data\n\n# Randomly sample a caption length, and sample indices with that length.\nindices = data_loader.dataset.get_train_indices()\nprint('sampled indices:', indices)\n\n# Create and assign a batch sampler to retrieve a batch with the sampled indices.\nnew_sampler = data.sampler.SubsetRandomSampler(indices=indices)\ndata_loader.batch_sampler.sampler = new_sampler\n \n# Obtain the batch.\nimages, captions = next(iter(data_loader))\n \nprint('images.shape:', images.shape)\nprint('captions.shape:', captions.shape)\n\n# Print the pre-processed images and captions. (Optional: comment out the two lines below to suppress the long output.)\nprint('images:', images)\nprint('captions:', captions)", "sampled indices: [136419, 364634, 234087, 155450, 402281, 256511, 138858, 7970, 235383, 337948]\nimages.shape: torch.Size([10, 3, 224, 224])\ncaptions.shape: torch.Size([10, 13])\nimages: tensor([[[[ 0.9817, 0.9817, 0.9817, ..., 1.7694, 1.7180, 1.6667],\n [ 0.9817, 0.9817, 0.9646, ..., 1.8379, 1.8379, 1.8037],\n [ 0.9988, 0.9988, 0.9817, ..., 1.9749, 2.0263, 2.0434],\n ...,\n [-0.1143, -0.1143, -0.1657, ..., 0.7933, 0.7762, 0.6906],\n [-0.1143, -0.1143, -0.2171, ..., 0.8276, 0.8104, 0.7077],\n [-0.1486, -0.1486, -0.1999, ..., 0.8104, 0.7591, 0.7248]],\n\n [[ 1.3081, 1.3256, 1.3256, ..., 1.8859, 1.8333, 1.8158],\n [ 1.3256, 1.3256, 1.3431, ..., 1.9034, 1.8859, 1.9384],\n [ 1.3081, 1.3081, 1.3431, ..., 2.1134, 2.1485, 2.1835],\n ...,\n [-0.3025, -0.3025, -0.3025, ..., 0.9580, 0.9230, 0.8529],\n [-0.3025, -0.2850, -0.3375, ..., 0.9755, 0.9580, 0.8704],\n [-0.3025, -0.2850, -0.3375, ..., 0.9755, 0.9230, 0.8704]],\n\n [[ 1.8905, 1.8905, 1.8905, ..., 1.9603, 1.8731, 1.8208],\n [ 1.8731, 1.8905, 1.8905, ..., 1.9428, 1.9603, 1.9777],\n [ 1.8383, 1.8557, 1.8731, ..., 2.1520, 2.2043, 2.2566],\n ...,\n [-0.3055, -0.3404, -0.3055, ..., 1.1237, 1.0888, 0.9668],\n [-0.3055, -0.3404, -0.3404, ..., 1.1585, 1.1411, 1.0191],\n [-0.3230, -0.3404, -0.3578, ..., 1.1759, 1.1411, 1.0365]]],\n\n\n [[[ 2.1804, 2.0948, 1.5810, ..., 1.1358, 1.1358, 1.1358],\n [ 2.2318, 2.1633, 1.2385, ..., 1.1358, 1.1529, 1.1358],\n [ 2.2147, 2.1462, 2.0092, ..., 1.1358, 1.1529, 1.1358],\n ...,\n [-0.2513, -0.1828, -0.0801, ..., 1.1358, 1.0673, 1.0844],\n [-0.2513, -0.1828, -0.0972, ..., 1.1187, 1.1187, 1.1187],\n [-0.2513, -0.1828, -0.0801, ..., 1.0844, 1.1529, 1.0844]],\n\n [[ 1.7283, 1.6583, 1.0455, ..., 1.3081, 1.3081, 1.3081],\n [ 2.0609, 1.9034, 0.7129, ..., 1.3256, 1.3256, 1.3081],\n [ 2.4111, 2.3761, 1.9909, ..., 1.3431, 1.3431, 1.3256],\n ...,\n [-0.0049, 0.0651, 0.1702, ..., 1.1506, 1.0805, 1.0980],\n [-0.0049, 0.0651, 0.1527, ..., 1.1506, 1.1506, 1.1331],\n [-0.0049, 0.0651, 0.1702, ..., 1.1155, 1.1681, 1.1155]],\n\n [[ 2.1520, 2.2740, 1.8905, ..., 1.4374, 1.4374, 1.4374],\n [ 2.4308, 2.4308, 1.6291, ..., 1.4548, 1.4548, 1.4374],\n [ 2.6226, 2.5877, 2.5006, ..., 1.4897, 1.4897, 1.4722],\n ...,\n [ 0.0779, 0.1476, 0.2696, ..., 0.9842, 0.9145, 0.9494],\n [ 0.0779, 0.1476, 0.2522, ..., 1.0539, 1.0539, 0.9319],\n [ 0.0779, 0.1651, 0.2522, ..., 1.0714, 1.0888, 0.9145]]],\n\n\n [[[-0.6281, -0.7993, -1.2103, ..., 0.4851, 0.1254, -0.0116],\n [-1.1418, -1.1418, -1.1760, ..., 1.8379, 1.7352, 1.7009],\n [-0.0801, 0.1254, 0.1597, ..., 2.0777, 2.0948, 
2.0605],\n ...,\n [ 1.3242, 1.2899, 1.2214, ..., 1.2899, 1.4783, 1.5468],\n [ 1.1187, 1.2385, 1.2214, ..., 1.2728, 1.3927, 1.4440],\n [ 1.1358, 1.1358, 1.2043, ..., 1.3070, 1.3584, 1.3927]],\n\n [[-0.5126, -0.6877, -1.1078, ..., 0.6254, 0.2577, 0.1176],\n [-1.0378, -1.0378, -1.0728, ..., 2.0084, 1.9034, 1.8683],\n [ 0.0476, 0.2577, 0.2927, ..., 2.2535, 2.2710, 2.2360],\n ...,\n [ 1.4832, 1.4482, 1.3782, ..., 1.4482, 1.6408, 1.7108],\n [ 1.2731, 1.3957, 1.3782, ..., 1.4307, 1.5532, 1.6057],\n [ 1.2906, 1.2906, 1.3606, ..., 1.4657, 1.5182, 1.5532]],\n\n [[-0.2881, -0.4624, -0.8807, ..., 0.8448, 0.4788, 0.3393],\n [-0.8110, -0.8110, -0.8458, ..., 2.2217, 2.1171, 2.0823],\n [ 0.2696, 0.4788, 0.5136, ..., 2.4657, 2.4831, 2.4483],\n ...,\n [ 1.6988, 1.6640, 1.5942, ..., 1.6640, 1.8557, 1.9254],\n [ 1.4897, 1.6117, 1.5942, ..., 1.6465, 1.7685, 1.8208],\n [ 1.5071, 1.5071, 1.5768, ..., 1.6814, 1.7337, 1.7685]]],\n\n\n ...,\n\n\n [[[-1.6042, -1.7240, -1.5699, ..., -1.3815, -1.6555, -0.8507],\n [-0.9534, -1.0048, -0.9192, ..., -1.0390, -1.6727, -0.8335],\n [-1.4158, -1.3987, -1.2274, ..., -0.9363, -1.3815, -0.6281],\n ...,\n [ 0.7419, 0.8961, 0.8276, ..., -1.4843, -1.4672, -1.4158],\n [ 0.5707, 0.6563, 0.5878, ..., -1.5185, -1.5014, -1.5014],\n [ 0.7419, 0.7248, 0.6563, ..., -1.5357, -1.5185, -1.5185]],\n\n [[-1.4055, -1.4405, -1.3354, ..., -1.0378, -1.3704, -0.6176],\n [-0.6352, -0.6001, -0.5476, ..., -0.5651, -1.3179, -0.5651],\n [-1.1779, -1.0903, -0.9328, ..., -0.6702, -1.2129, -0.5301],\n ...,\n [ 0.9755, 1.1331, 1.0630, ..., -1.4405, -1.4230, -1.3880],\n [ 0.8354, 0.9230, 0.9055, ..., -1.4755, -1.4580, -1.4580],\n [ 0.9930, 0.9930, 0.9405, ..., -1.4930, -1.4755, -1.4755]],\n\n [[-1.3339, -1.3513, -1.2990, ..., -0.9853, -1.1421, -0.3230],\n [-0.6541, -0.6193, -0.6018, ..., -0.6541, -1.1770, -0.2707],\n [-1.0724, -1.0550, -0.9330, ..., -0.6890, -1.0724, -0.3404],\n ...,\n [-0.3230, -0.1487, -0.1487, ..., -1.3687, -1.3687, -1.3513],\n [-0.5844, -0.4275, -0.4798, ..., -1.3687, -1.3513, -1.3513],\n [-0.2881, -0.3055, -0.3578, ..., -1.3861, -1.3687, -1.3687]]],\n\n\n [[[-0.8678, -0.8164, -0.7993, ..., -0.7308, -0.7479, -0.8164],\n [-0.8678, -0.8164, -0.7137, ..., -0.7308, -0.7308, -0.7993],\n [-0.8164, -0.8164, -0.7479, ..., -0.6623, -0.7137, -0.7479],\n ...,\n [ 0.0227, 0.0912, 0.1254, ..., 0.3309, 0.2967, 0.2453],\n [-0.0458, -0.0287, -0.0116, ..., 0.2967, 0.2796, 0.2967],\n [-0.0458, 0.0227, 0.0912, ..., 0.2624, 0.2624, 0.2111]],\n\n [[-0.1450, -0.1450, -0.1625, ..., -0.1275, -0.1275, -0.1450],\n [-0.1275, -0.1450, -0.1450, ..., -0.1099, -0.1099, -0.1275],\n [-0.1450, -0.1450, -0.1275, ..., -0.1275, -0.0924, -0.0749],\n ...,\n [ 0.1352, 0.2052, 0.2577, ..., 0.4328, 0.3978, 0.3803],\n [ 0.1176, 0.1176, 0.1176, ..., 0.3452, 0.3803, 0.4153],\n [ 0.1527, 0.2052, 0.2227, ..., 0.3803, 0.3978, 0.3627]],\n\n [[ 1.0191, 1.0191, 1.0017, ..., 1.0539, 1.0539, 1.0714],\n [ 1.0539, 1.0191, 1.0365, ..., 1.0365, 1.0365, 1.0365],\n [ 1.0191, 1.0191, 1.0191, ..., 0.8274, 0.9668, 0.9842],\n ...,\n [ 0.4614, 0.5311, 0.5659, ..., 0.6705, 0.6008, 0.6008],\n [ 0.4265, 0.4439, 0.4265, ..., 0.5659, 0.5659, 0.5834],\n [ 0.4439, 0.4962, 0.5136, ..., 0.5311, 0.5485, 0.5136]]],\n\n\n [[[-0.8335, -1.5699, -1.4843, ..., -1.4843, -1.4329, -1.5185],\n [-0.7308, -1.5870, -1.5357, ..., -1.5528, -1.5357, -1.5014],\n [-0.5767, -1.5528, -1.5870, ..., -1.4672, -1.4500, -1.3987],\n ...,\n [ 2.0948, 2.0948, 2.0605, ..., 2.1290, 2.1462, 2.0948],\n [ 2.1119, 2.1804, 2.0948, ..., 2.0948, 2.0434, 2.1119],\n [ 2.1119, 2.0948, 2.0777, ..., 
2.1290, 2.0434, 2.0948]],\n\n [[-0.6702, -1.4755, -1.4580, ..., -1.4230, -1.3704, -1.4580],\n [-0.5301, -1.4930, -1.4755, ..., -1.4755, -1.4580, -1.4405],\n [-0.3375, -1.4580, -1.4930, ..., -1.3704, -1.3529, -1.3004],\n ...,\n [ 1.8859, 2.0609, 1.9734, ..., 1.9734, 1.9559, 1.8859],\n [ 2.0609, 2.1835, 1.9559, ..., 2.0084, 1.9209, 1.9559],\n [ 1.9909, 1.9734, 1.9209, ..., 1.9384, 1.8683, 1.7983]],\n\n [[-0.4101, -1.2990, -1.2293, ..., -1.1770, -1.1247, -1.2119],\n [-0.3055, -1.2816, -1.2467, ..., -1.2293, -1.2293, -1.1944],\n [-0.1138, -1.2293, -1.2816, ..., -1.1421, -1.1247, -1.0724],\n ...,\n [ 1.5071, 1.7337, 1.5942, ..., 1.6640, 1.6640, 1.5245],\n [ 1.6640, 1.9080, 1.6465, ..., 1.6640, 1.4897, 1.5768],\n [ 1.5420, 1.6640, 1.6117, ..., 1.7163, 1.4897, 1.4548]]]])\ncaptions: tensor([[ 0, 32, 1366, 13, 3, 14, 21, 3, 2238, 124, 63, 18,\n 1],\n [ 0, 3, 569, 407, 64, 130, 39, 257, 13, 3, 1367, 18,\n 1],\n [ 0, 3, 169, 39, 3, 755, 360, 3, 9157, 13, 1751, 18,\n 1],\n [ 0, 3, 1020, 170, 101, 2620, 77, 32, 392, 13, 3, 204,\n 1],\n [ 0, 20, 2403, 1619, 224, 77, 121, 13, 3, 123, 876, 18,\n 1],\n [ 0, 3, 98, 224, 77, 121, 13, 73, 739, 13, 109, 18,\n 1],\n [ 0, 3, 330, 13, 1026, 13, 3, 98, 175, 294, 753, 18,\n 1],\n [ 0, 47, 718, 169, 728, 160, 1493, 1575, 732, 3, 147, 18,\n 1],\n [ 0, 3, 254, 1692, 368, 160, 755, 207, 111, 3, 2569, 18,\n 1],\n [ 0, 3, 1826, 115, 324, 39, 257, 13, 3, 1168, 13, 1255,\n 1]])\n" ] ], [ [ "Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!\n\nYou will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.\n\n> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__\n\nIn the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.", "_____no_output_____" ], [ "<a id='step3'></a>\n## Step 3: Experiment with the CNN Encoder\n\nRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**. ", "_____no_output_____" ] ], [ [ "# Watch for any changes in model.py, and re-load it automatically.\n%load_ext autoreload\n%autoreload 2\n\n# Import EncoderCNN and DecoderRNN. \nfrom model import EncoderCNN, DecoderRNN", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ] ], [ [ "In the next code cell we define a `device` that you will use to move PyTorch tensors to the GPU (if CUDA is available). Run this code cell before continuing.", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(device)", "cuda\n" ] ], [ [ "Run the code cell below to instantiate the CNN encoder in `encoder`. 
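\n\nFor orientation, a typical encoder of this kind looks roughly like the sketch below (an illustration only, with a hypothetical class name; the actual class used here is the `EncoderCNN` defined in **model.py**, which is described after the next code cell):\n\n```python\nimport torch.nn as nn\nimport torchvision.models as models\n\nclass EncoderSketch(nn.Module):\n    # Illustrative sketch only, not the provided EncoderCNN.\n    def __init__(self, embed_size):\n        super().__init__()\n        resnet = models.resnet50(pretrained=True)\n        for param in resnet.parameters():\n            param.requires_grad_(False)  # freeze the pre-trained CNN\n        # Drop the final fully-connected layer of ResNet-50.\n        self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n        self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n\n    def forward(self, images):\n        features = self.backbone(images)                 # [B, 2048, 1, 1]\n        features = features.view(features.size(0), -1)   # flatten to [B, 2048]\n        return self.embed(features)                      # [B, embed_size]\n```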
\n\nThe pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.", "_____no_output_____" ] ], [ [ "# Specify the dimensionality of the image embedding.\nembed_size = 256\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Initialize the encoder. (Optional: Add additional arguments if necessary.)\nencoder = EncoderCNN(embed_size)\n\n# Move the encoder to GPU if CUDA is available.\nencoder.to(device)\n \n# Move last batch of images (from Step 2) to GPU if CUDA is available. \nimages = images.to(device)\n\n# Pass the images through the encoder.\nfeatures = encoder(images)\n\nprint('type(features):', type(features))\nprint('features.shape:', features.shape)\n\n# Check that your encoder satisfies some requirements of the project! :D\nassert type(features)==torch.Tensor, \"Encoder output needs to be a PyTorch Tensor.\" \nassert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), \"The shape of the encoder output is incorrect.\"", "type(features): <class 'torch.Tensor'>\nfeatures.shape: torch.Size([10, 256])\n" ], [ "print('type(features):', type(features))\nprint('features.shape:', features.shape)", "type(features): <class 'torch.Tensor'>\nfeatures.shape: torch.Size([10, 256])\n" ] ], [ [ "The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.\n\n![Encoder](images/encoder.png)\n\nYou are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers). \n\n> You are **not** required to change anything about the encoder.\n\nFor this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.\n\nIf you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.", "_____no_output_____" ], [ "<a id='step4'></a>\n## Step 4: Implement the RNN Decoder\n\nBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)\n\n> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. 
You will write more implementations in the notebooks that appear later in the sequence.\n\nYour decoder will be an instance of the `DecoderRNN` class and must accept as input:\n- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with\n- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.\n\nNote that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. \n> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. \n\nAlthough you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. \n\n![Decoder](images/decoder.png)\n\nIn the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) loss function in PyTorch.", "_____no_output_____" ] ], [ [ "# Specify the number of features in the hidden state of the RNN decoder.\nhidden_size = 512\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Store the size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the decoder.\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move the decoder to GPU if CUDA is available.\ndecoder.to(device)\n \n# Move last batch of captions (from Step 2) to GPU if CUDA is available.\ncaptions = captions.to(device)\n\n# Pass the encoder output and captions through the decoder.\noutputs = decoder(features, captions)\n\nprint('type(outputs):', type(outputs))\nprint('outputs.shape:', outputs.shape)\n\n# Check that your decoder satisfies some requirements of the project! :D\nassert type(outputs)==torch.Tensor, \"Decoder output needs to be a PyTorch Tensor.\"\nassert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), \"The shape of the decoder output is incorrect.\"", "type(outputs): <class 'torch.Tensor'>\noutputs.shape: torch.Size([10, 13, 9947])\n" ] ], [ [ "When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec5c53e3e1d37193705bc15094d2ed738a32e6f5
6,781
ipynb
Jupyter Notebook
hw0_release/numpy learning.ipynb
MrMorning/solutionOfCS131
36dc9f197335d70601973b024e3ad5cb816bd26e
[ "MIT" ]
1
2021-11-05T09:55:14.000Z
2021-11-05T09:55:14.000Z
hw0_release/numpy learning.ipynb
MrMorning/solutionOfCS131
36dc9f197335d70601973b024e3ad5cb816bd26e
[ "MIT" ]
26
2020-03-24T18:07:06.000Z
2022-03-12T00:12:27.000Z
hw0_release/numpy learning.ipynb
MrMorning/solutionOfCS131
36dc9f197335d70601973b024e3ad5cb816bd26e
[ "MIT" ]
null
null
null
21.124611
70
0.431647
[ [ [ "import numpy as np", "_____no_output_____" ], [ "a = np.zeros(3)", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "type(a)", "_____no_output_____" ], [ "a = np.ones((4,5,3))\nprint(a.T.shape)\na = np.floor(10*np.random.random((3,4)))\nprint(a)", "(3, 5, 4)\n[[0. 6. 8. 5.]\n [0. 8. 4. 8.]\n [8. 8. 7. 1.]]\n" ], [ "def mat(n):\n ret = np.arange(2, n+2, 1)\n for i in range(1, n):\n tmp = np.arange(2, n+2, 1)\n tmp += i\n ret = np.vstack((ret, tmp))\n return np.sin(ret)\nnp.linalg.det(mat(3))", "_____no_output_____" ], [ "b = np.random.random((4, 4, 3))\nprint(b)\n#print(b * [0, 1, 1])\nc = b.T\nc[0] *= 0\nprint(c.T)", "[[[0.22163 0.56397798 0.51889627]\n [0.35344946 0.91259537 0.7831207 ]\n [0.34226994 0.99375776 0.98266259]\n [0.91472113 0.6946419 0.90019873]]\n\n [[0.50804138 0.83322357 0.24753943]\n [0.68040563 0.20922316 0.96180657]\n [0.79343135 0.23875182 0.95990027]\n [0.53087301 0.43498326 0.69569669]]\n\n [[0.64519865 0.8984017 0.60234422]\n [0.33165848 0.36131434 0.08728744]\n [0.74633227 0.51646522 0.44240583]\n [0.97824882 0.58178182 0.28241009]]\n\n [[0.30795047 0.27525321 0.21248003]\n [0.79703423 0.33782183 0.88318138]\n [0.2580018 0.56652543 0.48698822]\n [0.90060013 0.27556447 0.60549407]]]\n[[[0. 0.56397798 0.51889627]\n [0. 0.91259537 0.7831207 ]\n [0. 0.99375776 0.98266259]\n [0. 0.6946419 0.90019873]]\n\n [[0. 0.83322357 0.24753943]\n [0. 0.20922316 0.96180657]\n [0. 0.23875182 0.95990027]\n [0. 0.43498326 0.69569669]]\n\n [[0. 0.8984017 0.60234422]\n [0. 0.36131434 0.08728744]\n [0. 0.51646522 0.44240583]\n [0. 0.58178182 0.28241009]]\n\n [[0. 0.27525321 0.21248003]\n [0. 0.33782183 0.88318138]\n [0. 0.56652543 0.48698822]\n [0. 0.27556447 0.60549407]]]\n" ], [ "a = np.arange(27).reshape((3,3,3))", "_____no_output_____" ], [ "x = [[0, 1], [0, 1]]\ny = [[0, 1], [0, 1]]", "_____no_output_____" ], [ "a[x,y]", "_____no_output_____" ], [ "a.argmax(axis = 0)", "_____no_output_____" ], [ "r = np.random.random((2,3,4))", "_____no_output_____" ], [ "r", "_____no_output_____" ], [ "r.argmax(axis = 1)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5c5c3c0c99c9ad3fe0c6933de644e24da12a74
260,864
ipynb
Jupyter Notebook
The Sparks Foundation/Task 3/Task 4 - Desion tree on Iris Dataset.ipynb
shadow09rj/Machine-Learning
da3bb8140be3608d9953046bbe22fc3871787b02
[ "MIT" ]
1
2020-10-03T17:50:13.000Z
2020-10-03T17:50:13.000Z
The Sparks Foundation/Task 3/Task 4 - Desion tree on Iris Dataset.ipynb
shadow09rj/Machine-Learning
da3bb8140be3608d9953046bbe22fc3871787b02
[ "MIT" ]
null
null
null
The Sparks Foundation/Task 3/Task 4 - Desion tree on Iris Dataset.ipynb
shadow09rj/Machine-Learning
da3bb8140be3608d9953046bbe22fc3871787b02
[ "MIT" ]
null
null
null
686.484211
152,664
0.946727
[ [ [ "# <h1 align='center'>Task 4- Decision tree Classifier on Iris dataset</h1>\n\n\n\n\n\n## <h2 align='center'>By</h2>\n\n\n\n \n## <h3 align='center'>Rohit Joshi</h3>\n\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "# data loading\ndata = pd.read_csv(\"iris.csv\")\ndata.drop('Id',axis=1,inplace=True)\ndata.head()", "_____no_output_____" ], [ "data.Species.unique()", "_____no_output_____" ], [ "# class distribution \nplt.figure()\nax = plt.gca()\nsns.countplot(data['Species'],palette='bright',color='viridis',ax= ax);\nplt.show()", "_____no_output_____" ], [ "\nfigure,axes = plt.subplots(2,2,figsize=(15,10))\naxes =axes.flatten()\nfor ax,col in zip(axes,data.columns):\n sns.boxplot(x=col,y='Species',data=data,ax=ax)\n ax.set_xlabel(col)\nplt.show()", "_____no_output_____" ], [ "\nfigure,axes = plt.subplots(2,2,figsize=(15,10))\naxes =axes.flatten()\n\nfor ax,col in zip(axes,data.columns):\n sns.distplot(data[col],ax=ax)\n ax.set_xlabel(col)\nplt.show()", "_____no_output_____" ] ], [ [ "# Model development", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test,y_train,y_test = train_test_split(data.iloc[:,[0,1,2,3]],data.iloc[:,-1],test_size=0.20,random_state=0)", "_____no_output_____" ], [ "#Grid search and cv\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report,confusion_matrix\n\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train,y_train)\n\nprint(\"Train score: {}\".format(model.score(X_train,y_train)))\nprint(\"Test score: {}\".format(model.score(X_test,y_test)))\npred = model.predict(X_test)\nprint(classification_report(y_test,pred))\nprint(confusion_matrix(y_test,pred))\n", "Train score: 1.0\nTest score: 1.0\n precision recall f1-score support\n\n Iris-setosa 1.00 1.00 1.00 11\nIris-versicolor 1.00 1.00 1.00 13\n Iris-virginica 1.00 1.00 1.00 6\n\n accuracy 1.00 30\n macro avg 1.00 1.00 1.00 30\n weighted avg 1.00 1.00 1.00 30\n\n[[11 0 0]\n [ 0 13 0]\n [ 0 0 6]]\n" ], [ "#printing decision tree graph\n\nfrom sklearn.tree import plot_tree\n\nplt.figure(figsize=(20,10))\n_ = plot_tree(model,feature_names =[i for i in data.columns if i !='Species'],\n class_names=data['Species'].unique(),\n filled=True)\n", "_____no_output_____" ], [ "plt.savefig(\"Iris.png\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec5c661c6577e2749eb6b3039eef263a5915ccfb
180,160
ipynb
Jupyter Notebook
notebooks/02_FastText.ipynb
medinaalonso/NLP
c8a0a3ce372c4289b14d526eae0e55ca2549a892
[ "MIT" ]
3
2019-09-02T02:08:36.000Z
2020-08-30T06:53:34.000Z
notebooks/02_FastText.ipynb
medinaalonso/NLP
c8a0a3ce372c4289b14d526eae0e55ca2549a892
[ "MIT" ]
null
null
null
notebooks/02_FastText.ipynb
medinaalonso/NLP
c8a0a3ce372c4289b14d526eae0e55ca2549a892
[ "MIT" ]
1
2020-08-29T00:14:11.000Z
2020-08-29T00:14:11.000Z
113.308176
23,154
0.768645
[ [ [ "# fastText y clustering espectral\n\nfasText es un método, creado por Facebook, para generar representaciones vectoriales de palabras (embeddings). Es similar a Word2Vec pero tiene la característica de modelar no sólo palabras completas sino también subcadenas (subwords). Gracias a esto, y a la noción de \"composición\", puede construir la representación de una palabra que nunca vio en el entrenamiento, a partir de la combinación de l representaciones de sus partes/subcadenas. \n\nEn este notebook entrenaremos representaciones fastText a partir de un corpus en español y posteriormente realizaremos agrupamiento usando la técnica de clustering espectral.\n\n\n## 1. Representación vectorial de palabras con fastText\n\nVamos a instalar fastText de manera nativa usando el repositorio de github y la línea de comandos:\n\n", "_____no_output_____" ] ], [ [ "!wget https://github.com/facebookresearch/fastText/archive/v0.9.1.zip\n!unzip v0.9.1.zip\n%cd fastText-0.9.1\n!make", "--2019-08-19 16:34:13-- https://github.com/facebookresearch/fastText/archive/v0.9.1.zip\nResolving github.com (github.com)... 52.74.223.119\nConnecting to github.com (github.com)|52.74.223.119|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://codeload.github.com/facebookresearch/fastText/zip/v0.9.1 [following]\n--2019-08-19 16:34:19-- https://codeload.github.com/facebookresearch/fastText/zip/v0.9.1\nResolving codeload.github.com (codeload.github.com)... 13.229.189.0\nConnecting to codeload.github.com (codeload.github.com)|13.229.189.0|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: unspecified [application/zip]\nSaving to: ‘v0.9.1.zip’\n\nv0.9.1.zip [ <=> ] 4.13M 2.18MB/s in 1.9s \n\n2019-08-19 16:34:22 (2.18 MB/s) - ‘v0.9.1.zip’ saved [4327207]\n\nArchive: v0.9.1.zip\nb5b7d307274ce00ef52198fbc692ed3bd11d9856\n creating: fastText-0.9.1/\n creating: fastText-0.9.1/.circleci/\n inflating: fastText-0.9.1/.circleci/cmake_test.sh \n inflating: fastText-0.9.1/.circleci/config.yml \n inflating: fastText-0.9.1/.circleci/gcc_test.sh \n inflating: fastText-0.9.1/.circleci/pip_test.sh \n inflating: fastText-0.9.1/.circleci/pull_data.sh \n inflating: fastText-0.9.1/.circleci/python_test.sh \n inflating: fastText-0.9.1/.circleci/run_locally.sh \n inflating: fastText-0.9.1/.circleci/setup_circleimg.sh \n inflating: fastText-0.9.1/.circleci/setup_debian.sh \n inflating: fastText-0.9.1/.gitignore \n inflating: fastText-0.9.1/CMakeLists.txt \n inflating: fastText-0.9.1/CODE_OF_CONDUCT.md \n inflating: fastText-0.9.1/CONTRIBUTING.md \n inflating: fastText-0.9.1/LICENSE \n inflating: fastText-0.9.1/MANIFEST.in \n inflating: fastText-0.9.1/Makefile \n inflating: fastText-0.9.1/README.md \n creating: fastText-0.9.1/alignment/\n inflating: fastText-0.9.1/alignment/README.md \n inflating: fastText-0.9.1/alignment/align.py \n inflating: fastText-0.9.1/alignment/eval.py \n inflating: fastText-0.9.1/alignment/example.sh \n inflating: fastText-0.9.1/alignment/unsup_align.py \n inflating: fastText-0.9.1/alignment/utils.py \n inflating: fastText-0.9.1/classification-example.sh \n inflating: fastText-0.9.1/classification-results.sh \n creating: fastText-0.9.1/crawl/\n inflating: fastText-0.9.1/crawl/README.md \n inflating: fastText-0.9.1/crawl/dedup.cc \n inflating: fastText-0.9.1/crawl/download_crawl.sh \n inflating: fastText-0.9.1/crawl/filter_dedup.sh \n inflating: fastText-0.9.1/crawl/filter_utf8.cc \n inflating: fastText-0.9.1/crawl/process_wet_file.sh \n creating: 
fastText-0.9.1/docs/\n inflating: 
fastText-0.9.1/website/static/docs/en/html/search/defines_3.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/defines_3.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_0.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_1.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_1.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_2.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enums_2.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_0.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_1.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_1.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_2.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_2.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_3.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_3.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_4.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_4.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_5.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/enumvalues_5.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_0.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_1.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_1.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_2.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_2.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_3.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_3.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_4.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_4.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_5.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_5.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_6.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_6.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_7.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_7.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_8.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/files_8.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_0.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_1.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_1.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_10.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_10.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_11.html \n inflating: 
fastText-0.9.1/website/static/docs/en/html/search/functions_11.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_12.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_12.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_13.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_13.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_14.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_14.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_15.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_15.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_16.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_16.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_17.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_17.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_2.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_2.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_3.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_3.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_4.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_4.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_5.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_5.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_6.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_6.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_7.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_7.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_8.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_8.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_9.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_9.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_a.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_a.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_b.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_b.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_c.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_c.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_d.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_d.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_e.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_e.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_f.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/functions_f.js \n extracting: fastText-0.9.1/website/static/docs/en/html/search/mag_sel.png \n inflating: fastText-0.9.1/website/static/docs/en/html/search/namespaces_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/namespaces_0.js \n inflating: 
fastText-0.9.1/website/static/docs/en/html/search/nomatches.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/search.css \n inflating: fastText-0.9.1/website/static/docs/en/html/search/search.js \n extracting: fastText-0.9.1/website/static/docs/en/html/search/search_l.png \n inflating: fastText-0.9.1/website/static/docs/en/html/search/search_m.png \n extracting: fastText-0.9.1/website/static/docs/en/html/search/search_r.png \n inflating: fastText-0.9.1/website/static/docs/en/html/search/searchdata.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_0.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_1.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/typedefs_1.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_0.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_0.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_1.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_1.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_10.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_10.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_11.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_11.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_12.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_12.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_13.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_13.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_2.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_2.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_3.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_3.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_4.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_4.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_5.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_5.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_6.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_6.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_7.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_7.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_8.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_8.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_9.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_9.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_a.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_a.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_b.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_b.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_c.html \n inflating: 
fastText-0.9.1/website/static/docs/en/html/search/variables_c.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_d.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_d.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_e.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_e.js \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_f.html \n inflating: fastText-0.9.1/website/static/docs/en/html/search/variables_f.js \n inflating: fastText-0.9.1/website/static/docs/en/html/splitbar.png \n inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1Node-members.html \n inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1Node.html \n inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1Node.js \n inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1entry-members.html \n inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1entry.html \n inflating: fastText-0.9.1/website/static/docs/en/html/structfasttext_1_1entry.js \n extracting: fastText-0.9.1/website/static/docs/en/html/sync_off.png \n extracting: fastText-0.9.1/website/static/docs/en/html/sync_on.png \n extracting: fastText-0.9.1/website/static/docs/en/html/tab_a.png \n extracting: fastText-0.9.1/website/static/docs/en/html/tab_b.png \n extracting: fastText-0.9.1/website/static/docs/en/html/tab_h.png \n extracting: fastText-0.9.1/website/static/docs/en/html/tab_s.png \n inflating: fastText-0.9.1/website/static/docs/en/html/tabs.css \n inflating: fastText-0.9.1/website/static/docs/en/html/utils_8cc.html \n inflating: fastText-0.9.1/website/static/docs/en/html/utils_8cc.js \n inflating: fastText-0.9.1/website/static/docs/en/html/utils_8h.html \n inflating: fastText-0.9.1/website/static/docs/en/html/utils_8h.js \n inflating: fastText-0.9.1/website/static/docs/en/html/utils_8h_source.html \n inflating: fastText-0.9.1/website/static/docs/en/html/vector_8cc.html \n inflating: fastText-0.9.1/website/static/docs/en/html/vector_8cc.js \n inflating: fastText-0.9.1/website/static/docs/en/html/vector_8h.html \n inflating: fastText-0.9.1/website/static/docs/en/html/vector_8h.js \n inflating: fastText-0.9.1/website/static/docs/en/html/vector_8h_source.html \n inflating: fastText-0.9.1/website/static/fasttext.css \n creating: fastText-0.9.1/website/static/img/\n creating: fastText-0.9.1/website/static/img/authors/\n inflating: fastText-0.9.1/website/static/img/authors/armand_joulin.jpg \n inflating: fastText-0.9.1/website/static/img/authors/christian_puhrsch.png \n inflating: fastText-0.9.1/website/static/img/authors/edouard_grave.jpeg \n inflating: fastText-0.9.1/website/static/img/authors/piotr_bojanowski.jpg \n inflating: fastText-0.9.1/website/static/img/authors/tomas_mikolov.jpg \n creating: fastText-0.9.1/website/static/img/blog/\n inflating: fastText-0.9.1/website/static/img/blog/2016-08-18-blog-post-img1.png \n inflating: fastText-0.9.1/website/static/img/blog/2016-08-18-blog-post-img2.png \n inflating: fastText-0.9.1/website/static/img/blog/2017-05-02-blog-post-img1.jpg \n inflating: fastText-0.9.1/website/static/img/blog/2017-05-02-blog-post-img2.jpg \n inflating: fastText-0.9.1/website/static/img/blog/2017-10-02-blog-post-img1.png \n inflating: fastText-0.9.1/website/static/img/cbo_vs_skipgram.png \n inflating: fastText-0.9.1/website/static/img/fasttext-icon-api.png \n inflating: 
fastText-0.9.1/website/static/img/fasttext-icon-bg-web.png \n inflating: [... remaining fastText logo, author and blog image assets omitted ...] \n inflating: fastText-0.9.1/wikifil.pl \n inflating: fastText-0.9.1/word-vector-example.sh \n/content/fastText-0.9.1\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/args.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/matrix.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/dictionary.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/loss.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/productquantizer.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/densematrix.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/quantmatrix.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/vector.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/model.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/utils.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/meter.cc\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG -c src/fasttext.cc\nsrc/fasttext.cc: In member function 'void fasttext::FastText::quantize(const fasttext::Args&)':\nsrc/fasttext.cc:323:45: warning: 'std::vector<int> fasttext::FastText::selectEmbeddings(int32_t) const' is deprecated: selectEmbeddings is being deprecated. [-Wdeprecated-declarations]\n auto idx = selectEmbeddings(qargs.cutoff);\n ^\nsrc/fasttext.cc:293:22: note: declared here\n std::vector<int32_t> FastText::selectEmbeddings(int32_t cutoff) const {\n ^~~~~~~~\nsrc/fasttext.cc: In member function 'void fasttext::FastText::lazyComputeWordVectors()':\nsrc/fasttext.cc:551:40: warning: 'void fasttext::FastText::precomputeWordVectors(fasttext::DenseMatrix&)' is deprecated: precomputeWordVectors is being deprecated. [-Wdeprecated-declarations]\n precomputeWordVectors(*wordVectors_);\n ^\nsrc/fasttext.cc:534:6: note: declared here\n void FastText::precomputeWordVectors(DenseMatrix& wordVectors) {\n ^~~~~~~~\nc++ -pthread -std=c++0x -march=native -O3 -funroll-loops -DNDEBUG args.o matrix.o dictionary.o loss.o productquantizer.o densematrix.o quantmatrix.o vector.o model.o utils.o meter.o fasttext.o src/main.cc -o fasttext\n" ] ], 
[ [ "Once fastText is installed, we can start training word-embedding models from a corpus.\n\nWe mount the Drive file system:\n", "_____no_output_____" ] ], [ [ "# Mount the contents of Google Drive\nfrom google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=email%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdocs.test%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive.photos.readonly%20https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fpeopleapi.readonly&response_type=code\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ] ], [ [ "To train fastText we will use the combination of two small corpora: a) the Corpus del Español Mexicano Contemporáneo (CEMC); and b) the Nahuatl-Spanish parallel corpus Axolotl (its Spanish side).\n\nTraining is run from the command line, and several hyperparameters can be specified ([see the documentation](https://fasttext.cc/docs/en/unsupervised-tutorial.html)). Two required parameters are the input file and the output file that training will generate.", "_____no_output_____" ] ], [ [ "# Two files are generated: cemc.bin (the model) and cemc.vec (the latter is literally a text file with one vector per line)\n# They can be downloaded with the file browser (left tab): fastText-0.9.1/result\n!mkdir result \n!./fasttext cbow -input /content/drive/My\\ Drive/Curso_RIIAA/data/cemcytodo.txt -output result/cemc", "/bin/bash: ./fasttext: No such file or directory\n" ] ], [ [ "Once the model has been generated, we can use these vectors in several ways, for example returning the nearest (most similar) words for a given query word:\n", "_____no_output_____" ] ], [ [ "!./fasttext nn result/cemc.bin", "Query word? mole\natole 0.975429\nmetate 0.946135\ntomate 0.943589\nole 0.939852\néchale 0.937425\nhuele 0.935428\nhule 0.934699\nhíjole 0.932914\nlate 0.930711\njitomate 0.92915\nQuery word? ^C\n" ] ], 
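[ [ "For reference, the cell below is an addition to the original notebook and was not run in this session: it shows the same cbow training call with fastText's main hyperparameter flags spelled out (the values shown are fastText's documented defaults).", "_____no_output_____" ] ], [ [ "# Illustrative only: cbow training with explicit hyperparameters\n# -dim: embedding size; -minn/-maxn: subword n-gram range; -epoch: passes over the corpus; -lr: learning rate\n!./fasttext cbow -input /content/drive/My\\ Drive/Curso_RIIAA/data/cemcytodo.txt -output result/cemc -dim 100 -minn 3 -maxn 6 -epoch 5 -lr 0.05", "_____no_output_____" ] ], 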
[ [ "#2. Loading the vectors in Python\nWe can load the models produced by fastText and manipulate them directly from Python, using the gensim package.", "_____no_output_____" ] ], [ [ "# Install / Upgrade Gensim\n!pip install --upgrade gensim", "Collecting gensim\n Downloading https://files.pythonhosted.org/packages/40/3d/89b27573f56abcd1b8c9598b240f53c45a3c79aa0924a24588e99716043b/gensim-3.8.0-cp36-cp36m-manylinux1_x86_64.whl (24.2MB)\n[... requirement-already-satisfied lines omitted ...]\nInstalling collected packages: gensim\n Found existing installation: gensim 3.6.0\n Uninstalling gensim-3.6.0:\n Successfully uninstalled gensim-3.6.0\nSuccessfully installed gensim-3.8.0\n" ] ], [ [ "We load the model:", "_____no_output_____" ] ], [ [ "from gensim.models.wrappers import FastText\n\n# Load the pre-trained model\nmodel = FastText.load_fasttext_format('result/cemc')\n", "_____no_output_____" ] ], 
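[ [ "A note on the loader (this cell is our addition): the `gensim.models.wrappers.FastText` wrapper used above is deprecated and was removed in gensim 4.x. From gensim 3.8 onwards the supported path is the native loader below; the file name assumes the `cemc.bin` produced by the training step.", "_____no_output_____" ] ], [ [ "# Alternative, non-deprecated loader (gensim >= 3.8)\nfrom gensim.models.fasttext import load_facebook_model\nmodel_native = load_facebook_model('result/cemc.bin')", "_____no_output_____" ] ], 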
[ [ "Once the model is loaded we can work with the vectors directly from Python. Here we can look up the words most similar to a query, or measure the similarity between two words:\n", "_____no_output_____" ] ], [ [ "# Look up the words most similar to a query\nprint(model.most_similar('azteca'))\nprint(model.most_similar('mexicano'))\n\n# Similarity between two words\nprint(model.similarity('mexico', 'país'))", "[('zapoteca', 0.9707117676734924), ('barbilla', 0.9341251850128174), ('polla', 0.9327656626701355), ('chilena', 0.9314919114112854), ('azuela', 0.9312282800674438), ('orilla', 0.9310535192489624), ('bonilla', 0.9304966330528259), ('zorrilla', 0.9283484220504761), ('borbolla', 0.9271571040153503), ('chueca', 0.9267408847808838)]\n[('americano', 0.9683598279953003), ('mexico', 0.9493384957313538), ('norteamericano', 0.9465785622596741), ('africano', 0.933608889579773), ('republicano', 0.9239773750305176), ('mexicanismo', 0.9033154249191284), ('latinoamericano', 0.9010395407676697), ('universitario', 0.9005328416824341), ('organizador', 0.8923201560974121), ('italiano', 0.8891371488571167)]\n0.3114357\n" ] ], [ [ "One of the advantages of fastText is that, besides producing vectors for the words seen in the training vocabulary, it can build vector representations for words that were not in that vocabulary (*Out-Of-Vocabulary words, OOV*). It does this by composing subword (character n-gram) vectors.", "_____no_output_____" ] ], [ [ "# Word inside the vocabulary\nexistent_word = \"computadora\"\nprint(existent_word in model.wv.vocab)\n# Get this word's vector\nvector_computadora = model.wv[existent_word]\n\n# OOV word\noov_word = \"computadorsota\"\nprint(oov_word in model.wv.vocab)\n# Get the OOV vector\nvector_oov = model.wv[oov_word]\n\n# Similarity between the two\nprint(model.similarity(existent_word, oov_word))", "True\nFalse\n0.96310055\n" ] ], 
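[ [ "To make the subword composition concrete, the sketch below (our addition, not part of the original notebook) enumerates the character n-grams that fastText averages for an OOV word. The `<` and `>` boundary markers and the 3-6 n-gram range follow fastText's defaults; the helper `char_ngrams` is ours, not a gensim API.", "_____no_output_____" ] ], [ [ "# Sketch: character n-grams that back an OOV word's vector (assumes default minn=3, maxn=6)\ndef char_ngrams(word, minn=3, maxn=6):\n    # fastText wraps the word with boundary markers before slicing\n    token = '<' + word + '>'\n    grams = []\n    for n in range(minn, maxn + 1):\n        for i in range(len(token) - n + 1):\n            grams.append(token[i:i + n])\n    return grams\n\n# An OOV vector is the average of the vectors of these pieces\nprint(char_ngrams('computadorsota')[:10])", "_____no_output_____" ] ], 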
[ [ "#3. Clustering with spectral clustering\n\nOnce we have the fastText vectors, we can apply the spectral clustering algorithm to group and visualize the data.", "_____no_output_____" ] ], [ [ "# Required packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nimport pandas as pd\nimport networkx as nx\nfrom scipy.linalg import eig\nfrom operator import itemgetter", "_____no_output_____" ] ], [ [ "Since fastText can represent OOV words, we can give it any arbitrary word list and obtain vectors that represent its items.", "_____no_output_____" ] ], [ [ "# List of words to work with\nvocab_words = ['amo','amas','amamos','ama','aman','come','como','comemos','comen','toco','tocas','tocan','tocamos','gato','minino','gatito','gatos','mininos',\n 'flor','flores','mininito','computadora','computadoras']", "_____no_output_____" ] ], [ [ "We obtain the vectors that represent the words above and store them in an array.", "_____no_output_____" ] ], [ [ "# Vocabulary size\nN = len(vocab_words)\n\n# Matrix of size N x number of dimensions\nX = np.zeros((N,100))\n# Fill the matrix with the word vectors\nfor i,w in enumerate(vocab_words):\n    X[i] = model.wv[w]\n\nprint(X.shape)", "(23, 100)\n" ] ], [ [ "We can visualize these data with the following function:", "_____no_output_____" ] ], [ [ "# Plotting function\ndef plot_words(Z,ids,color='blue'):\n    # Reduce to two dimensions with PCA\n    Z = PCA(n_components=2).fit_transform(Z)\n    r=0\n    # Plot the two dimensions\n    plt.scatter(Z[:,0],Z[:,1], marker='o', c=color)\n    for label,x,y in zip(ids, Z[:,0], Z[:,1]):\n        # Add the labels\n        plt.annotate(label, xy=(x,y), xytext=(-1,1), textcoords='offset points', ha='center', va='bottom')\n        r+=1\n\n# Plot the data\nplot_words(X, vocab_words)\nplt.show()", "_____no_output_____" ] ], [ [ "From this matrix of vectors we now apply spectral clustering. First we build an adjacency matrix that represents the graph, using the following kernel function to quantify the similarity between two vectors: $$k(u,v) = \\frac{1}{||u-v||+1}$$\nIn addition, we use a k-nearest-neighbour construction, in which only the k nearest neighbours of each node are considered adjacent (the code below uses k=1). A small sanity check of the kernel follows.", "_____no_output_____" ] ], 
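[ [ "The check below is our addition: it writes the kernel as a one-line helper (named `kernel_sim` here, not part of the notebook's original code) and confirms that it maps distance 0 to similarity 1 and decays towards 0 for distant vectors.", "_____no_output_____" ] ], [ [ "# Sketch: the similarity kernel used to weight graph edges\ndef kernel_sim(u, v):\n    # k(u, v) = 1 / (||u - v|| + 1), so k lies in (0, 1] and k(u, u) = 1\n    return 1.0 / (np.linalg.norm(u - v) + 1)\n\nprint(kernel_sim(X[0], X[0]))  # exactly 1.0\nprint(kernel_sim(X[0], X[1]))  # some value in (0, 1)", "_____no_output_____" ] ], 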
[ [ "# Adjacency matrix\nA = np.zeros((N,N))\n\nfor k,u in enumerate(X):\n    # Neighbours of u, with their distances\n    candidates_for_k = {}\n    \n    for j,v in enumerate(X):\n        # Euclidean distance\n        dist = np.linalg.norm(u-v)\n        # Store the neighbour's distance\n        candidates_for_k[j] = dist\n    \n    # Sort the neighbours by distance\n    neighbors = sorted(candidates_for_k.items(), key=itemgetter(1))\n    # The first neighbour is always the node itself (||u-u||=0)\n    neighbors.pop(0)\n    \n    # Take the single nearest neighbour (k=1)\n    nn = neighbors[:1]\n    \n    for neigh, weight in nn:\n        # Fill the adjacency matrix with the kernel similarity\n        A[k,neigh] = 1./(weight+1)\n        A[neigh,k] = 1./(weight+1)\n\nprint(A.shape) ", "(23, 23)\n" ], [ "# Check that the matrix is symmetric\nprint((A == A.T).all())", "True\n" ] ], [ [ "We can display the adjacency matrix as a table:", "_____no_output_____" ] ], [ [ "# Table format with pandas\ndf = pd.DataFrame(A, index=vocab_words, columns=vocab_words)\nprint(df.to_string())", " amo amas amamos ama aman come como comemos comen toco tocas tocan tocamos gato minino gatito gatos mininos flor flores mininito computadora computadoras\n[... full 23 x 23 table of kernel weights omitted; it is mostly zeros, with entries such as amo-ama = 0.143727, amo-gatito = 0.161970 and computadora-computadoras = 0.233841 ...]\n" ] ], 
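[ [ "Because the matrix is sparse, a more compact view (our addition) lists only its non-zero edges:", "_____no_output_____" ] ], [ [ "# Compact view: only the non-zero entries of the adjacency matrix\nedge_weights = df.stack()\nprint(edge_weights[edge_weights > 0])", "_____no_output_____" ] ], 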
[ [ "We can also visualize the graph as a network:", "_____no_output_____" ] ], [ [ "# Index the labels\nedges = {i:dat for i,dat in enumerate(vocab_words)}\n\n# Network layout with NetworkX\nnx.draw_networkx(nx.from_numpy_array(A), with_labels=True, labels=edges, font_size=8)", "_____no_output_____" ] ], [ [ "Now that we have the adjacency matrix, we can compute the Laplacian matrix and obtain its spectral decomposition.", "_____no_output_____" ] ], 
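[ [ "Before the decomposition, one structural fact is worth checking (our addition, stated without proof): the multiplicity of the eigenvalue 0 of the graph Laplacian equals the number of connected components of the graph, which is why the smallest eigenvalues and their eigenvectors carry the cluster structure.", "_____no_output_____" ] ], [ [ "# Sketch: count near-zero Laplacian eigenvalues and compare with the number of connected components\nL_check = np.diag(A.sum(0)) - A\nnum_zero = int(np.sum(np.abs(np.linalg.eigvalsh(L_check)) < 1e-8))\nnum_components = nx.number_connected_components(nx.from_numpy_array(A))\nprint(num_zero, num_components)  # the two counts should match", "_____no_output_____" ] ], 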
[ [ "# Compute the Laplacian matrix\nL = np.diag(A.sum(0)) - A\n\n# Compute the eigenvalues and eigenvectors of L\neig_vals, eig_vecs = eig(L)\n\n# Sort by eigenvalue; note that eigenvectors are the *columns* of eig_vecs, hence the transpose\nvalues = sorted(zip(eig_vals.real, eig_vecs.T), key=itemgetter(0))\n\n# Unpack eigenvalues and eigenvectors\nvals, vecs = zip(*values)\n\n# Build a matrix of eigenvectors (one eigenvector per row)\nmatrix = np.array(vecs)\n\n# Visualize the eigenvalues\nplt.plot(np.array(vals),'o')\nplt.show()", "_____no_output_____" ] ], [ [ "We then take the k eigenvectors that best represent our data, and visualize the result with the plotting function defined above.", "_____no_output_____" ] ], [ [ "# Matrix of Laplacian eigenvectors\nX_hat = matrix[5:17].T.real\n\n# Plot the data\nprint(X_hat.shape)\nplot_words(X_hat,vocab_words)\nplt.show()", "(23, 12)\n" ] ], [ [ "Finally, we apply k-means to discover groups in the projected data:", "_____no_output_____" ] ], [ [ "# Number of centroids\ncentroids=10\n# Apply k-means\nkmeans = KMeans(n_clusters=centroids).fit(X_hat)\n\n# Get the cluster assignments\npred_labels = kmeans.predict(X_hat)\n\n# Plot the clusters\nplot_words(X_hat, vocab_words, color=pred_labels)\nplt.show()", "_____no_output_____" ] ], [ [ "\n---\n\n### Optional alternative\n\nfastText can also be applied to a fraction of a real corpus, English Wikipedia (training is very slow):\n", "_____no_output_____" ] ], [ [ "!mkdir data\n!wget -c http://mattmahoney.net/dc/enwik9.zip -P data\n!unzip data/enwik9.zip -d data\n", "--2019-08-15 21:52:28-- http://mattmahoney.net/dc/enwik9.zip\nResolving mattmahoney.net (mattmahoney.net)... 67.195.197.75\nConnecting to mattmahoney.net (mattmahoney.net)|67.195.197.75|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 322592222 (308M) [application/zip]\nSaving to: ‘data/enwik9.zip’\n\nenwik9.zip 18%[==> ] 57.90M 855KB/s eta 5m 1s ^C\nArchive: data/enwik9.zip\n End-of-central-directory signature not found. Either this file is not\n a zipfile, or it constitutes one disk of a multi-part archive. In the\n latter case the central directory and zipfile comment will be found on\n the last disk(s) of this archive.\nunzip: cannot find zipfile directory in one of data/enwik9.zip or\n data/enwik9.zip.zip, and cannot find data/enwik9.zip.ZIP, period.\n" ], [ "!perl wikifil.pl data/enwik9 > data/fil9", "Can't open data/enwik9: No such file or directory at wikifil.pl line 12.\n" ], [ "!mkdir result\n!./fasttext cbow -input data/fil9 -output result/fil9", "mkdir: cannot create directory ‘result’: File exists\nRead 0M words\nNumber of words: 0\nNumber of labels: 0\nterminate called after throwing an instance of 'std::invalid_argument'\n what(): Empty vocabulary. Try a smaller -minCount value.\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec5c6e66b23f77df31bcd6d4ca9a304ddb2bcc1a
11,372
ipynb
Jupyter Notebook
ReinforcementLearning/DAT257x/library/LabFiles/Module 6/Ex 6.2 Deep Q-Learning.ipynb
iCalculated/ReinforcementLearningLearning
1d12fee1628b02ae874cdc8bd90b010292054229
[ "MIT" ]
48
2018-05-07T11:17:04.000Z
2022-03-30T18:16:27.000Z
ReinforcementLearning/DAT257x/library/LabFiles/Module 6/Ex 6.2 Deep Q-Learning.ipynb
iCalculated/ReinforcementLearningLearning
1d12fee1628b02ae874cdc8bd90b010292054229
[ "MIT" ]
5
2018-02-26T00:23:24.000Z
2019-10-09T21:29:39.000Z
ReinforcementLearning/DAT257x/library/LabFiles/Module 6/Ex 6.2 Deep Q-Learning.ipynb
iCalculated/ReinforcementLearningLearning
1d12fee1628b02ae874cdc8bd90b010292054229
[ "MIT" ]
55
2018-04-09T16:32:27.000Z
2022-01-14T22:50:29.000Z
35.873817
115
0.527084
[ [ [ "# DAT257x: Reinforcement Learning Explained\n\n## Lab 6: Function Approximation\n\n### Exercise 6.2: Deep Q-Learning", "_____no_output_____" ] ], [ [ "import numpy as np\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \n\nfrom lib.envs.simple_rooms import SimpleRoomsEnv\nfrom lib.simulation import Experiment\n\ntry:\n import chainer\nexcept ImportError as e:\n !pip install chainer\n import chainer\n \nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import initializers, optimizers, Chain, Variable", "_____no_output_____" ], [ "class Agent(object): \n \n def __init__(self, actions):\n self.actions = actions\n self.num_actions = len(actions)\n\n def act(self, state):\n raise NotImplementedError", "_____no_output_____" ], [ "class DQLearningAgent(Agent):\n \"\"\"Q-Learning agent with function approximation.\"\"\"\n\n def __init__(self, actions, obs_size, **kwargs):\n super(DQLearningAgent, self).__init__(actions)\n\n self.obs_size = obs_size\n \n self.step_counter = 0\n self.epsilon = kwargs.get('epsilon', .01) \n # if epsilon set to 1, it will be decayed over time\n if self.epsilon == 1:\n self.epsilon_decay = True\n else:\n self.epsilon_decay = False\n \n self.gamma = kwargs.get('gamma', .99)\n \n self.minibatch_size = kwargs.get('minibatch_size', 32)\n self.epoch_length = kwargs.get('epoch_length', 100)\n self.tau = kwargs.get('tau', .001)\n self.model_network = QNetwork(self.obs_size, self.num_actions, kwargs.get('nhidden', 512))\n self.target_network = QNetwork(self.obs_size, self.num_actions, kwargs.get('nhidden', 512))\n self.target_network.copyparams(self.model_network)\n \n self.optimizer = self.init_optimizer(self.model_network, kwargs.get('learning_rate', .5))\n\n self.memory = ReplayMemory(self.obs_size, kwargs.get('mem_size', 10000))\n \n self.current_loss = .0\n \n def act(self, state):\n \n if np.random.random() < self.epsilon:\n i = np.random.randint(0,len(self.actions))\n else: \n Q = self.model_network(Variable(state.reshape(1, state.shape[0])))\n i = Q.data.argmax()\n \n self.step_counter += 1 \n # decay epsilon after each epoch\n if self.epsilon_decay:\n if self.step_counter % self.epoch_length == 0:\n self.epsilon = max(.01, self.epsilon * .95)\n \n action = self.actions[i] \n return action \n \n def learn(self, state1, action1, reward, state2, done):\n self.memory.observe(state1, action1, reward, done)\n # start training after 1 epoch\n if self.step_counter > self.epoch_length:\n self.current_loss = self.update_model()\n\n def init_optimizer(self, model, learning_rate):\n\n optimizer = optimizers.SGD(learning_rate)\n # optimizer = optimizers.Adam(alpha=learning_rate)\n # optimizer = optimizers.AdaGrad(learning_rate)\n # optimizer = optimizers.RMSpropGraves(learning_rate, 0.95, self.momentum, 1e-2)\n\n optimizer.setup(model)\n return optimizer\n \n def update_model(self):\n (s, action, reward, s_next, is_terminal) = self.memory.sample_minibatch(self.minibatch_size)\n\n # compute Q targets (max_a' Q_hat(s_next, a'))\n Q_hat = self.target_network(s_next)\n Q_hat_max = F.max(Q_hat, axis=1, keepdims=True)\n y = (1-is_terminal)*self.gamma*Q_hat_max + reward\n\n # compute Q(s, action)\n Q = self.model_network(s)\n Q_subset = F.reshape(F.select_item(Q, action), (self.minibatch_size, 1))\n\n # compute Huber loss\n error = y - Q_subset\n loss_clipped = abs(error) * (abs(error.data) > 1) + (error**2) * (abs(error.data) <= 1)\n loss = F.sum(loss_clipped) / self.minibatch_size\n\n # perform model update\n 
self.model_network.zerograds() ## zero out the accumulated gradients in all network parameters\n loss.backward()\n self.optimizer.update()\n\n # target network tracks the model\n for dst, src in zip(self.target_network.params(), self.model_network.params()):\n dst.data = self.tau * src.data + (1 - self.tau) * dst.data\n\n return loss.data", "_____no_output_____" ], [ "class QNetwork(Chain):\n \"\"\"The neural network architecture as a Chainer Chain - here: single hidden layer\"\"\"\n\n def __init__(self, obs_size, num_actions, nhidden):\n \"\"\"Initialize weights\"\"\"\n # use LeCunUniform weight initialization for weights\n self.initializer = initializers.LeCunUniform()\n self.bias_initializer = initializers.Uniform(1e-4)\n\n super(QNetwork, self).__init__(\n feature_layer = L.Linear(obs_size, nhidden,\n initialW = self.initializer,\n initial_bias = self.bias_initializer),\n action_values = L.Linear(nhidden, num_actions, \n initialW=self.initializer,\n initial_bias = self.bias_initializer)\n )\n\n def __call__(self, x):\n \"\"\"implements forward pass\"\"\"\n h = F.relu(self.feature_layer(x))\n return self.action_values(h)", "_____no_output_____" ], [ "class ReplayMemory(object):\n \"\"\"Implements basic replay memory\"\"\"\n\n def __init__(self, observation_size, max_size):\n self.observation_size = observation_size\n self.num_observed = 0\n self.max_size = max_size\n self.samples = {\n 'obs' : np.zeros(self.max_size * 1 * self.observation_size,\n dtype=np.float32).reshape(self.max_size, 1, self.observation_size),\n 'action' : np.zeros(self.max_size * 1, dtype=np.int16).reshape(self.max_size, 1),\n 'reward' : np.zeros(self.max_size * 1).reshape(self.max_size, 1),\n 'terminal' : np.zeros(self.max_size * 1, dtype=np.int16).reshape(self.max_size, 1),\n }\n\n def observe(self, state, action, reward, done):\n index = self.num_observed % self.max_size\n self.samples['obs'][index, :] = state\n self.samples['action'][index, :] = action\n self.samples['reward'][index, :] = reward\n self.samples['terminal'][index, :] = done\n\n self.num_observed += 1\n\n def sample_minibatch(self, minibatch_size):\n max_index = min(self.num_observed, self.max_size) - 1\n sampled_indices = np.random.randint(max_index, size=minibatch_size)\n\n s = Variable(np.asarray(self.samples['obs'][sampled_indices, :], dtype=np.float32))\n s_next = Variable(np.asarray(self.samples['obs'][sampled_indices+1, :], dtype=np.float32))\n\n a = Variable(self.samples['action'][sampled_indices].reshape(minibatch_size))\n r = self.samples['reward'][sampled_indices].reshape((minibatch_size, 1))\n done = self.samples['terminal'][sampled_indices].reshape((minibatch_size, 1))\n\n return (s, a, r, s_next, done)", "_____no_output_____" ], [ "interactive = True\n%matplotlib nbagg\nenv = SimpleRoomsEnv()\nagent = DQLearningAgent(range(env.action_space.n), obs_size=16)\nexperiment = Experiment(env, agent)\nexperiment.run_qlearning(10, interactive)", "_____no_output_____" ], [ "interactive = False\n%matplotlib inline\nenv = SimpleRoomsEnv()\nagent = DQLearningAgent(range(env.action_space.n), obs_size=16)\nexperiment = Experiment(env, agent)\nexperiment.run_qlearning(50, interactive)", "_____no_output_____" ], [ "interactive = False\n%matplotlib inline\nenv = SimpleRoomsEnv()\nagent = DQLearningAgent(range(env.action_space.n), obs_size=16)\nexperiment = Experiment(env, agent)\nexperiment.run_qlearning(200, interactive)", "_____no_output_____" ], [ "interactive = False\n%matplotlib inline\nenv = SimpleRoomsEnv()\nagent = 
DQLearningAgent(range(env.action_space.n), obs_size=16, epsilon=1)\nexperiment = Experiment(env, agent)\nexperiment.run_qlearning(200, interactive)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5c80882416b4ef65e7c43db86fb2969903f598
173,171
ipynb
Jupyter Notebook
src/DQI model1 optimal theta simulations.ipynb
JakartaLaw/speciale
95d89c281b9d8f73065a823cba97a5bedcbf129d
[ "MIT" ]
null
null
null
src/DQI model1 optimal theta simulations.ipynb
JakartaLaw/speciale
95d89c281b9d8f73065a823cba97a5bedcbf129d
[ "MIT" ]
null
null
null
src/DQI model1 optimal theta simulations.ipynb
JakartaLaw/speciale
95d89c281b9d8f73065a823cba97a5bedcbf129d
[ "MIT" ]
null
null
null
119.263774
36,684
0.795341
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle", "_____no_output_____" ], [ "fig_scale = 0.7\nFIGSIZE = (14*fig_scale, 8*fig_scale)", "_____no_output_____" ], [ "with open('..//data//women_hours_empirical', 'rb') as f:\n women_hours = pickle.load(f)", "_____no_output_____" ], [ "df_action = pd.read_pickle('..//data//dqi_actions_model1_opt_beta_simulations.pkl')\ndf_states = pd.read_pickle('..//data//dqi_states_model1_opt_beta_simulations.pkl')\n\ndf = pd.read_pickle('..//data//dqi_model1_opt_beta_simulations.pkl')", "_____no_output_____" ], [ "mean = np.array(df_action.drop('episode',axis=1).loc[df_action['actions'] > 0].groupby('Q').mean())\nstd = np.array(df_action.drop('episode',axis=1).loc[df_action['actions'] > 0].groupby('Q').std())\n\nupper_bound = mean + std\nlower_bound = mean - std\n\nf, ax = plt.subplots(1, 1, figsize=FIGSIZE)\nax.plot(range(18,61), mean, label='Simulated')\nax.plot(range(18,61), women_hours[18:61], label='True')\n\nax.fill_between(range(18,61), upper_bound.reshape(-1), lower_bound.reshape(-1), alpha=0.3, label='1 std. of simulation')\nax.set_xlabel('age')\nax.set_ylabel('hours')\nax.legend(loc='lower right')\n\nf.savefig(\"..//figures//dqi_model1_estimation_labour_supply.png\")", "_____no_output_____" ], [ "f, ax = plt.subplots(1,1, figsize=FIGSIZE)\nwomen_frac = df_action.drop('episode',axis=1).loc[df_action['actions'] > 0].groupby('Q').count() / 5000\nax.plot(women_frac)\nax.set_title(\"Fraction Of Women In The Labour Force\")\nax.set_ylabel('fraction')\nax.set_xlabel('age')\nf.savefig('..//figures//dqi_model1_women_in_labour_force_fraction.png')", "_____no_output_____" ], [ "on_child_households = df_states.groupby('episode').max().loc[df_states.groupby('episode').max()['K'] == 1].index\nres = list(on_child_households)", "_____no_output_____" ], [ "def find_first_child(q, k, k_next):\n if k == 0.0 and k_next == 1.0 and q > 23 and q < 50:\n return 1\n else:\n return 0\n ", "_____no_output_____" ], [ "\n\ndf_single_child = df_states.loc[df_states['episode'].isin(res)]", "_____no_output_____" ], [ "df_single_child['K_next'] = df_single_child['K'].shift(-1)\n", "/Users/jeppejohansen/.local/share/virtualenvs/speciale-wQK9LG7Y/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "df_single_child['birth'] = df_single_child.apply(lambda row: find_first_child(row['Q'], row['K'], row['K_next']), axis=1)", "/Users/jeppejohansen/.local/share/virtualenvs/speciale-wQK9LG7Y/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "def get_start_and_finish(e, q, birth):\n if birth == 1:\n start = q - 4\n finish = q + 10\n \n return (e, q, start, finish)\n return None\n\ntmp = df_single_child.apply(lambda row: get_start_and_finish(row['episode'], row['Q'], row['birth']), axis=1)", "_____no_output_____" ], [ "timeline_offsets = [obs for obs in 
tmp if obs is not None]\ntimeline_offsets\n", "_____no_output_____" ], [ "def offset_marker(e, q, timeline_offsets):\n for row in timeline_offsets:\n if row[0] == e:\n if row[2] <= q <= row[3]:\n return q - row[1]\n return None\n ", "_____no_output_____" ], [ "df_single_child['first_child_offset'] = df_single_child.apply(lambda x: offset_marker( \\\n x['episode'], x['Q'], \\\n timeline_offsets), axis=1)", "/Users/jeppejohansen/.local/share/virtualenvs/speciale-wQK9LG7Y/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n" ], [ "df_single_child_plot = df_single_child.loc[df_single_child['first_child_offset'].notnull()]", "_____no_output_____" ], [ "rel_episodes = list(set(df_single_child_plot.episode))", "_____no_output_____" ], [ "f, ax = plt.subplots(1,1, figsize=FIGSIZE)\nsingle_child_plot = df_single_child_plot.groupby('first_child_offset').mean()['action']\nax.plot(single_child_plot, label='single-child mothers')\nax.axvline(0, c='red', ls='--', label='birth')\nax.set_xlabel('years from birth')\nax.set_ylabel('hours')\nax.set_title('Average Number Of Hours Of Single-child Women In Relations To Birth')\nf.savefig('..//figures//women_supplied_hours_dqi_model1_birth_onset.png')", "_____no_output_____" ], [ "plt.plot(df_single_child.groupby('Q').sum()['birth'])", "_____no_output_____" ] ], [ [ "# Women at thirty before and after child", "_____no_output_____" ] ], [ [ "episodes_30_first_child = list(set(df_single_child.loc[(df_single_child['birth'] == 1) & \\\n (df_single_child['Q'] == 30.0)]['episode']))", "_____no_output_____" ], [ "len(episodes_30_first_child)", "_____no_output_____" ], [ "zero_child_households = df_states.groupby('episode').max().loc[df_states.groupby('episode').max()['K'] == 0].index\nzero_res = list(zero_child_households)", "_____no_output_____" ], [ "f, ax = plt.subplots(1,1, figsize=FIGSIZE)\nax.plot(df_states.loc[df_states['episode'].isin(zero_res)].groupby('Q').mean()['action'], label='no children')\nax.plot(df_single_child.groupby('Q').mean()['action'], label='1 child')\nax.axvline(30, ls='--', label='child birth', color='black')\nax.set_title(\"Single Child Vs. No Children Women\")\nax.legend()\nf.savefig('..//figures//dqi_single_child_vs_no_child_model1.png')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
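A note on the record above: the first-child event-study alignment (`get_start_and_finish` plus `offset_marker`) is built from row-wise `apply` calls and a linear scan over `timeline_offsets`, which scales poorly with the number of episodes. Below is a minimal vectorized sketch of the same offset logic, assuming the notebook's `episode`/`Q`/`birth` columns and an illustrative window of 4 years before to 10 years after the birth; the helper name is hypothetical, not part of the record:

```python
import pandas as pd

def add_event_offset(df, before=4, after=10):
    # One birth year per episode; episodes without a birth get NaN after the merge.
    birth_year = (
        df.loc[df["birth"] == 1]
        .groupby("episode")["Q"]
        .first()
        .rename("birth_Q")
        .reset_index()
    )
    out = df.merge(birth_year, on="episode", how="left")
    offset = out["Q"] - out["birth_Q"]
    # Keep only observations inside the event window around the first birth.
    out["first_child_offset"] = offset.where(offset.between(-before, after))
    return out
```

Averaging `action` by `first_child_offset` on the result reproduces the event-time plot without the nested loop inside `offset_marker`.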
ec5c887dc76dc598a8ab3496b3b9a03366e0c2b5
438,914
ipynb
Jupyter Notebook
train_xgb_ranker_in_eraboost.ipynb
mst-projects/numerai
5b06f92044e5fe4f2c8412aaf80b68fe2bb7b322
[ "MIT" ]
null
null
null
train_xgb_ranker_in_eraboost.ipynb
mst-projects/numerai
5b06f92044e5fe4f2c8412aaf80b68fe2bb7b322
[ "MIT" ]
null
null
null
train_xgb_ranker_in_eraboost.ipynb
mst-projects/numerai
5b06f92044e5fe4f2c8412aaf80b68fe2bb7b322
[ "MIT" ]
null
null
null
123.290449
18,282
0.754594
[ [ [ "<a href=\"https://colab.research.google.com/github/mst-projects/numerai/blob/main/train_xgb_ranker_in_eraboost.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Connect to Google Drive", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "import gc\nimport pandas as pd\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "Read training data", "_____no_output_____" ] ], [ [ "training_data = pd.read_csv(\"https://numerai-public-datasets.s3-us-west-2.amazonaws.com/latest_numerai_training_data.csv.xz\")\ntraining_data", "_____no_output_____" ], [ "feature_names = [f for f in training_data.columns if f.startswith(\"feature\")]", "_____no_output_____" ], [ "TOURNAMENT_NAME = \"\"\nTARGET_NAME = f\"target{TOURNAMENT_NAME}\"\nPREDICTION_NAME = f\"prediction{TOURNAMENT_NAME}\"\ntrain_features = training_data[feature_names]\ntrain_target = training_data[TARGET_NAME]", "_____no_output_____" ] ], [ [ "Defining necessary functions for valuation in training", "_____no_output_____" ] ], [ [ "def ar1(x):\n return np.corrcoef(x[:-1], x[1:])[0,1]\n\ndef autocorr_penalty(x):\n n = len(x)\n p = ar1(x)\n return np.sqrt(1 + 2*np.sum([((n - i)/n)*p**i for i in range(1,n)]))\n\ndef smart_sharpe(x):\n return np.mean(x)/(np.std(x, ddof=1)*autocorr_penalty(x))\n\ndef spearmanr(target, pred):\n return np.corrcoef(\n target,\n pred.rank(pct=True, method=\"first\")\n )[0, 1]", "_____no_output_____" ] ], [ [ "Select model type and train", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt \nfrom xgboost import XGBRegressor\nimport xgboost as xgb\n\ndef era_boost_train(X, y, era_col, proportion=0.5, trees_per_step=10, num_iters=200):\n # Choose one from three models:\n # 1 GradientBoostingRegressor\n # model = GradientBoostingRegressor(max_depth=5, learning_rate=0.01, max_features=\"sqrt\", subsample=0.5, n_estimators=trees_per_step, warm_start=(num_iters>1))\n # 2 XGB Ranker\n model= xgb.XGBRanker(\n max_depth=5,\n learning_rate=0.01,\n n_estimators=trees_per_step,\n # early_stopping_rounds=20,\n n_jobs=-1,\n colsample_bytree=0.2) \n # 3 XGB Regressor \n # model = XGBRegressor(max_depth=5, learning_rate=0.01, n_estimators=trees_per_step, n_jobs=-1, colsample_bytree=0.1)\n features = X.columns\n cdf = training_data.groupby('era').agg(['count'])\n group = cdf[cdf.columns[0]].values\n del cdf\n # dtrain = xgb.DMatrix(training_data[feature_names], label=training_data['target'])\n model.fit(X, y, group=group)\n \n # model.fit(X, y)\n new_df = X.copy()\n new_df[\"target\"] = y\n new_df[\"era\"] = era_col\n \n for i in range(num_iters - 1):\n print(\"---------------\")\n print(f\"iteration {i}\")\n \n # peridict on train and show score per each era\n preds = model.predict(X)\n new_df[\"pred\"] = preds\n era_scores = pd.Series(index=new_df[\"era\"].unique())\n \n for era in new_df[\"era\"].unique():\n era_df = new_df[new_df[\"era\"] == era]\n # print(era_df.columns)\n era_scores[era] = spearmanr(era_df[\"pred\"], era_df[\"target\"])\n \n era_scores.sort_values(inplace=True)\n worst_eras = era_scores[era_scores <= era_scores.quantile(proportion)].index\n print(\"worst eras: \", list(worst_eras))\n worst_df = new_df[new_df[\"era\"].isin(worst_eras)]\n 
era_scores.sort_index(inplace=True)\n era_scores.plot(kind=\"bar\")\n \n # show performance\n print(\"\")\n print(\"performance over time\")\n plt.show()\n print(\"\")\n print(\"autocorrelation: \", ar1(era_scores))\n print(\"mean correlation: \", np.mean(era_scores))\n print(\"sharpe: \", np.mean(era_scores)/np.std(era_scores))\n print(\"smart sharpe: \", smart_sharpe(era_scores))\n\n model.n_estimators += trees_per_step\n booster = model.get_booster()\n \n # fitting on worst eras\n cdf = worst_df.groupby('era').agg(['count'])\n group = cdf[cdf.columns[0]].values\n del cdf\n # dtrain = xgb.DMatrix(training_data[feature_names], label=training_data['target_kazutsugi'])\n model.fit(worst_df[features], worst_df[\"target\"], group=group, xgb_model=booster)\n # model.fit(worst_df[features], worst_df[\"target\"])\n \n return model\n\nboost_model = era_boost_train(train_features, train_target, era_col=training_data[\"era\"], proportion=0.5, trees_per_step=10, num_iters=20)", "---------------\niteration 0\nworst eras: ['era103', 'era68', 'era91', 'era104', 'era41', 'era66', 'era58', 'era107', 'era69', 'era85', 'era9', 'era27', 'era84', 'era60', 'era81', 'era74', 'era50', 'era113', 'era7', 'era31', 'era112', 'era106', 'era89', 'era82', 'era46', 'era101', 'era87', 'era40', 'era79', 'era54', 'era116', 'era65', 'era100', 'era67', 'era28', 'era73', 'era49', 'era21', 'era42', 'era80', 'era114', 'era18', 'era33', 'era32', 'era119', 'era70', 'era117', 'era56', 'era110', 'era26', 'era17', 'era57', 'era102', 'era3', 'era15', 'era71', 'era1', 'era37', 'era75', 'era78']\n\nperformance over time\n" ], [ "# if you choose simple XGB Ranker without eraboost, please run this column instead of above/\n# xgbranker = xgb.XGBRanker(\n# max_depth=5,\n# learning_rate=0.05200136293873378,\n# n_estimators=2000,\n# # early_stopping_rounds=20,\n# n_jobs=-1,\n# colsample_bytree=0.17010869551014374 \n# )\n\n# cdf = training_data.groupby('era').agg(['count'])\n# group = cdf[cdf.columns[0]].values\n# del cdf\n# # dtrain = xgb.DMatrix(training_data[feature_names], label=training_data['target_kazutsugi'])\n# model = xgbranker.fit(train_features, train_target, group=group)", "_____no_output_____" ], [ "# import pickle\n# pickle.dump(boost_model, open(\"\", \"wb\"))", "_____no_output_____" ], [ "del training_data\ngc.collect()", "_____no_output_____" ] ], [ [ "Read tournament data", "_____no_output_____" ] ], [ [ "tournament_data = pd.read_csv(\"https://numerai-public-datasets.s3-us-west-2.amazonaws.com/latest_numerai_tournament_data.csv.xz\")\ntournament_data", "_____no_output_____" ] ], [ [ "set validation data", "_____no_output_____" ] ], [ [ "validation_data = tournament_data[tournament_data['data_type'] == 'validation']\nvalidation_data['era']", "_____no_output_____" ], [ "del tournament_data\ngc.collect()", "_____no_output_____" ], [ "np.unique(validation_data['era'].values)", "_____no_output_____" ] ], [ [ "Predict", "_____no_output_____" ] ], [ [ "model = boost_model\npredictions = model.predict(validation_data[feature_names])\nvalidation_data[PREDICTION_NAME] = predictions", "_____no_output_____" ] ], [ [ "define functions for validation", "_____no_output_____" ] ], [ [ "def ar1(x):\n return np.corrcoef(x[:-1], x[1:])[0,1]\n\ndef autocorr_penalty(x):\n n = len(x)\n p = ar1(x)\n return np.sqrt(1 + 2*np.sum([((n - i)/n)*p**i for i in range(1,n)]))\n\ndef sharpe(x):\n return np.mean(x)/np.std(x, ddof=1)\n\ndef smart_sharpe(x):\n return np.mean(x)/(np.std(x, ddof=1)*autocorr_penalty(x))\n\ndef numerai_sharpe(x):\n return 
((np.mean(x) - 0.010415154) / np.std(x)) * np.sqrt(12)\n\ndef spearmanr(target, pred):\n return np.corrcoef(\n target,\n pred.rank(pct=True, method=\"first\")\n )[0, 1]\n\ndef sortino_ratio(x, target=0.010415154):\n xt = x# - target\n return np.mean(xt) / (np.sum(np.minimum(0, xt)**2)/(len(xt)-1))**.5\n\ndef score(df):\n # method=\"first\" breaks ties based on order in array\n pct_ranks = df[PREDICTION_NAME].rank(pct=True, method=\"first\")\n targets = df[TARGET_NAME]\n return np.corrcoef(targets, pct_ranks)[0, 1]", "_____no_output_____" ] ], [ [ "validation", "_____no_output_____" ] ], [ [ "validation_correlations = validation_data.groupby(\"era\").apply(score)\nprint(f\"On validation the correlation has mean {validation_correlations.mean()} and std {validation_correlations.std()}\")", "On validation the correlation has mean 0.02290884753437273 and std 0.022909680997520377\n" ], [ "print(validation_correlations)\nvalidation_correlations.plot()", "_____no_output_____" ], [ "print(\"sharpe: \", sharpe(validation_correlations))\nprint(\"smart sharpe: \", smart_sharpe(validation_correlations))\nprint(\"sortino_ratio: \", sortino_ratio(validation_correlations))", "sharpe: 0.9999636196092063\nsmart sharpe: 0.8342879653469281\nsortino_ratio: 2.736607553221841\n" ] ], [ [ "Neutralization", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler\ndef _neutralize(df, columns, by, proportion=1.0):\n scores = df[columns]\n exposures = df[by].values\n scores = scores - proportion * exposures.dot(np.linalg.pinv(exposures).dot(scores))\n return scores / scores.std()\ndef _normalize(df):\n X = (df.rank(method=\"first\") - 0.5) / len(df)\n return scipy.stats.norm.ppf(X)\ndef normalize_and_neutralize(df, columns, by, proportion=1.0):\n # Convert the scores to a normal distribution\n df[columns] = _normalize(df[columns])\n df[columns] = _neutralize(df, columns, by, proportion)\n return df[columns]", "_____no_output_____" ], [ "import scipy\nvalidation_data[\"preds\"] = predictions\nvalidation_data[\"preds_neutralized\"] = validation_data.groupby(\"era\").apply(\n lambda x: normalize_and_neutralize(x, [\"preds\"], feature_names, 0.5) # neutralize by 50% within each era\n)\nscaler = MinMaxScaler()\nvalidation_data[\"preds_neutralized\"] = scaler.fit_transform(validation_data[[\"preds_neutralized\"]]) # transform back to 0-1", "_____no_output_____" ], [ "validation_data[PREDICTION_NAME] = validation_data['preds_neutralized']\nvalidation_correlations = validation_data.groupby(\"era\").apply(score)\nprint(f\"On validation the correlation has mean {validation_correlations.mean()} and std {validation_correlations.std()}\")", "On validation the correlation has mean 0.02369156724058234 and std 0.019135479911992073\n" ], [ "print(\"sharpe: \", sharpe(validation_correlations))\nprint(\"smart sharpe: \", smart_sharpe(validation_correlations))\nprint(\"sortino_ratio: \", sortino_ratio(validation_correlations))", "sharpe: 1.2380963189606233\nsmart sharpe: 1.061127783235635\nsortino_ratio: 5.131936052235536\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
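For reference, the `autocorr_penalty` used by `smart_sharpe` in the record above is a variance-inflation factor for serially correlated era scores. With p the lag-1 autocorrelation of the score series and n the number of eras, the list comprehension in the code computes:

```latex
\mathrm{penalty}(x) = \sqrt{1 + 2\sum_{i=1}^{n-1} \frac{n-i}{n}\, p^{i}},
\qquad
\mathrm{smart\ sharpe}(x) = \frac{\bar{x}}{s_x \cdot \mathrm{penalty}(x)}
```

so positively autocorrelated era scores are penalized relative to the plain `mean/std` Sharpe reported alongside it.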
ec5c8897d1226edbc49923d726eb23ab73c1d726
6,090
ipynb
Jupyter Notebook
keyphrase_graphs/notebooks/NodeEmbeddings.ipynb
etherlabsio/hinton
eedd99f6a5fbbc38283bbf945dff4f4006a3d1f5
[ "MIT" ]
null
null
null
keyphrase_graphs/notebooks/NodeEmbeddings.ipynb
etherlabsio/hinton
eedd99f6a5fbbc38283bbf945dff4f4006a3d1f5
[ "MIT" ]
null
null
null
keyphrase_graphs/notebooks/NodeEmbeddings.ipynb
etherlabsio/hinton
eedd99f6a5fbbc38283bbf945dff4f4006a3d1f5
[ "MIT" ]
1
2020-04-19T11:08:02.000Z
2020-04-19T11:08:02.000Z
20.644068
96
0.508374
[ [ [ "import os, sys\nsys.path.insert(0, '../src')", "_____no_output_____" ], [ "print(os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'src')))", "_____no_output_____" ], [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "%matplotlib widget", "_____no_output_____" ], [ "from graphrank.graphrank import GraphRank\nfrom graphrank.utils import GraphUtils, TextPreprocess\nfrom graphrank import dgraph as dg\nimport networkx as nx\nimport json as js", "_____no_output_____" ], [ "from scipy.spatial.distance import cosine", "_____no_output_____" ], [ "gr = GraphRank()\ntp = TextPreprocess()\nutils = GraphUtils()", "_____no_output_____" ], [ "def read_json(json_file):\n with open(json_file) as f:\n article = js.load(f)\n return article", "_____no_output_____" ], [ "article = read_json('embed_hr_json.json')", "_____no_output_____" ] ], [ [ "## Build graph from sentences", "_____no_output_____" ] ], [ [ "g = nx.Graph()", "_____no_output_____" ], [ "for i, articles in enumerate(article):\n sent_id = articles['sent_id']\n sentence = articles['sentence']\n embed = articles['embedding_list']\n if sent_id not in g.nodes():\n g.add_node(sent_id, text=sentence, embed_list=embed)\n for j in range(i + 1, len(article)):\n neighbor_s_id = article[j]['sent_id']\n neighbor_sentece = article[j]['sentence']\n neighbor_embed = article[j]['embedding_list']\n similarity_score = cosine(embed, neighbor_embed)\n if neighbor_s_id not in g.nodes():\n g.add_node(neighbor_s_id, text=neighbor_sentece, embed_list=neighbor_embed)\n g.add_edge(sent_id, neighbor_s_id, sim_score=similarity_score)", "_____no_output_____" ], [ "list(g.edges.data())", "_____no_output_____" ] ], [ [ "## Run PageRank on default weights (w=1)", "_____no_output_____" ] ], [ [ "pg_rank_def = nx.pagerank_scipy(g, weight=None)", "_____no_output_____" ], [ "pg_rank_def", "_____no_output_____" ] ], [ [ "## Run PageRank on similarity scores", "_____no_output_____" ] ], [ [ "pg_rank_sim = nx.pagerank_scipy(g, weight='sim_score')", "_____no_output_____" ], [ "pg_rank_sim", "_____no_output_____" ], [ "def sort_by_value(item_list, order='desc'):\n \"\"\"\n A utility function to sort lists by their value.\n Args:\n item_list:\n order:\n\n Returns:\n\n \"\"\"\n\n if order == 'desc':\n sorted_list = sorted(item_list, key=lambda x: (x[1], x[0]), reverse=True)\n else:\n sorted_list = sorted(item_list, key=lambda x: (x[1], x[0]), reverse=False)\n\n return sorted_list", "_____no_output_____" ], [ "sorted_rank = dict(sort_by_value(pg_rank_sim.items()))", "_____no_output_____" ], [ "g.nodes.data()[0]", "_____no_output_____" ], [ "for k, v in sorted_rank.items():\n print(k, g.nodes.data()[k].get('text'))\n print(v)\n print()", "_____no_output_____" ], [ "bet = nx.degree_centrality(g)", "_____no_output_____" ], [ "bet", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
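One caveat worth flagging in the record above: `scipy.spatial.distance.cosine` returns a cosine *distance* (1 minus the cosine similarity), so the edge attribute named `sim_score` grows for *less* similar sentence pairs, and `nx.pagerank_scipy(g, weight='sim_score')` therefore favours dissimilar neighbours. If similarity weighting was intended, a minimal fix is to convert the distance back; the helper below is a sketch, not part of the record:

```python
from scipy.spatial.distance import cosine

def cosine_similarity(u, v):
    # scipy's cosine() is a distance in [0, 2]; 1 - distance recovers the similarity.
    return 1.0 - cosine(u, v)

# e.g. when building the graph:
# g.add_edge(sent_id, neighbor_s_id, sim_score=cosine_similarity(embed, neighbor_embed))
```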
ec5c9b5b7656cf9138b56ea2a0d5dab6fb90a363
36,794
ipynb
Jupyter Notebook
Python/Ejercicios Python/Ejercicios python_2.ipynb
MariaPCampos/Big_Data-Data_Science
5a38e2f1016307dccc9ee61cc6c5104c2b1ba3f4
[ "MIT" ]
null
null
null
Python/Ejercicios Python/Ejercicios python_2.ipynb
MariaPCampos/Big_Data-Data_Science
5a38e2f1016307dccc9ee61cc6c5104c2b1ba3f4
[ "MIT" ]
null
null
null
Python/Ejercicios Python/Ejercicios python_2.ipynb
MariaPCampos/Big_Data-Data_Science
5a38e2f1016307dccc9ee61cc6c5104c2b1ba3f4
[ "MIT" ]
null
null
null
41.202688
651
0.610779
[ [ [ "Programming for *Data Science*\n============================\n\nIntro101 - 05.1: Advanced Python concepts\n--------------------------------------\n\nIn this Notebook you will find two sets of exercises: a first set of practice exercises that are not graded, but which we recommend trying to solve, and a second set that we will assess as an activity.\n\n\nYou will also see that every activity carries a label indicating the resources needed to carry it out. There are three possible labels:\n\n* <span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 3px; \">NM</span> **Materials only**: the tools needed to complete the activity can be found in the course materials. \n\n* <span style=\"font-family: Courier New; background-color: #ffcc5c; color: #000000; padding: 3px; \">EG</span> **Guided external consultation**: the activity may require tools that are not found in the course materials, but the statement includes hints about where or how to find the additional information needed to solve the activity.\n\n* <span style=\"font-family: Courier New; background-color: #f2ae72; color: #000000; padding: 3px; \">EI</span> **Independent external consultation**: the activity may require tools that are not found in the course materials, and the statement may not describe where or how to find this additional information. The student will need to look for it using the resources explained in the course.\n\nIt is important to note that these labels do not indicate the difficulty level of the exercise, only the need to consult external documentation to solve it. Also, remember that the **labels are informative**: you may consult external references at any time (even when it is not stated explicitly), and you may well be able to complete an activity without consulting any documentation at all. For example, to solve an activity that only requires the course materials, you may still consult external references if you wish, whether to help with the solution or to broaden your knowledge!\n", "_____no_output_____" ], [ "---\n\n## Exercises and theory questions for the activity\n\nBelow you will find the **exercises and theory questions you must complete in this activity**, which form part of the assessment of this unit.", "_____no_output_____" ], [ "### Exercise 1\n\nA prime number is one that is only divisible by itself and by 1.\n\na) Write code that checks whether a number `x = 15` is divisible only by 1 or by itself. Write this code using an iterator (a `for` or a `while`) that sweeps all the values from `2` to `x-1`. Create a variable `divisible` that defaults to `False` and is set to `True` if a natural-number divisor is found during the iteration. You can use the modulo operator `a % b` to check whether a number `a` is divisible by `b`.\n\n<span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 2px; \">NM</span>", "_____no_output_____" ] ], [ [ "x = 15\ndivisible = False\nfor i in range(2, x - 1):\n if x % i == 0:\n divisible = True", "_____no_output_____" ] ], [ [ "\nb) Convert your previous code into a function that checks whether its argument is prime or not, returning True if it is prime and False if it is not. \nCheck your function with the values 492366587, 492366585, 48947 and 2.\n\n<span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 2px; \">NM</span>", "_____no_output_____" ] ], [ [ "# We define the function.\ndef function_prime(number):\n prime = True\n for i in range(2, number - 1):\n if number % i == 0:\n prime = False\n return prime\n\n# We check the function.\nprint(function_prime(492366587))\nprint(function_prime(492366585))\nprint(function_prime(48947))\nprint(function_prime(2))", "True\nFalse\nTrue\nTrue\n" ] ], [ [ "\nc) In the previous function, once a divisor has been found within the range there is no point in checking the remaining numbers of the range. For example, if 10 is already divisible by 2, there is no need to try 3 onwards, since we already know the number is not prime. \n\nModify the previous function as follows: \n- Once the divisor is found, the iteration is interrupted so that the remaining integers are not tested. \n- The function returns\n - **If it is prime**: True\n - **If it is not prime**, the first divisor greater than 1.\n \nYou can use the *break* statement inside a loop to interrupt it; you can find more information about break in the Python documentation [here](https://docs.python.org/2/tutorial/controlflow.html).\n\nCheck your function with the values 492366585, 492366587, 48947 and 2.\n\n<span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 2px; \">NM</span>", "_____no_output_____" ] ], [ [ "# We define the function.\ndef function_prime(number):\n prime = True\n for i in range(2, number - 1):\n if number % i == 0:\n prime = i\n break\n return prime\n\n# We check the function.\nprint(function_prime(492366587))\nprint(function_prime(492366585))\nprint(function_prime(48947))\nprint(function_prime(2))", "True\n3\nTrue\nTrue\n" ] ], [ [ "### Exercise 2\n\nCovid-19 is a disease caused by infection with the SARS-CoV-2 virus. The infection is transmissible from person to person, and its contagiousness depends on the amount of virus in the respiratory tract. If each infected person transmits the disease to $\\beta$ contacts on average per time period $t$, the evolution of the contagion can be estimated with a simple mathematical model.\n\nFor $t=1$ day, transmissions in Spain were estimated from the historical record of the February and March 2020 weeks at $\\beta = 0.35$ transmissions per day per infected person.\n\nOver a time period (for example one day $d$), the rate of new infections can be estimated as a proportion of the number of infected in the previous period $N$: \n\n$$ \\Delta N = N_{1} - N = \\beta \\cdot N$$ (1)\n\nTherefore, we can project the future number of affected people as \n\n$$ N_{1} = N + \\beta \\cdot N = (1+\\beta) \\cdot N$$ (2) \n\nIn two days:\n\n$$ N_{2} = (1+\\beta) \\cdot N_{1} = (1+\\beta)^2 \\cdot N$$ (3)\n\nAnd in general, in D days we will have \n\n$$N_{D} = (1+\\beta)^D \\cdot N$$ (4)\n \nAssuming this simple model: \n\na) Implement a two-parameter function (N: initial infected population, D: number of days) that returns the number of affected people after D days following equation (4). Assuming an affected population of 4250 (the affected population in Spain on 13 March 2020), use the function to compute the estimated population in 1, 2, 7 and 30 days. \n\n<span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 2px; \">NM</span>", "_____no_output_____" ] ], [ [ "# We create a two-parameter function (where N is the initial infected population and D the number of days) that returns the\n# number of infected after D days following the equation N_D = (1 + beta)^D * N\n\ndef cal_infected(N, D):\n \"\"\"\n Computes the number of people infected with the SARS-CoV-2 virus based on a model of the spread of the disease.\n \"\"\"\n beta = 0.35\n total_infected = ((1 + beta)**D) * N\n return total_infected\n\n# We try the function with a specific population and several different numbers of days.\nprint(cal_infected(4250, 1))\nprint(cal_infected(4250, 2))\nprint(cal_infected(4250, 7))\nprint(cal_infected(4250, 30))", "5737.5\n7745.625000000001\n34731.64149433595\n34546335.38087164\n" ] ], [ [ "\nb) Knowing that the Intensive Care Services (SMI) have 3363 beds for severe patients, and assuming that 10% of those affected by covid-19 will require intensive care and a 2.5% exitus (death) rate, write code that computes:\n- The current day (Day)\n- The total number of people affected by the virus for each day d (Affected)\n- The total number admitted to the SMI because of the virus for each day d (Critical)\n- The total number of exitus caused by the virus for each day d (Exitus)\n- Whether the SMI services cannot accept the admissions for each day $d$ (State: indicating Saturation/No Saturation)\n\nPrint on screen the information for each day over a three-week simulation, assuming there are no recoveries, with an initial affected population of 4250 and a constant $\\beta = 0.35$. \n\n\n<span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 2px; \">NM</span>", "_____no_output_____" ] ], [ [ "# We print on screen the daily update of the spread of the disease and the pressure on the hospital.\n\nSMI_beds = 3363\ndays = 21\n\n# We iterate over the days of the simulation\nfor day in range(0, days):\n print(\"Day: \" + str(day))\n total_infected = int(cal_infected(4250, day))\n print(\"The current total of people infected by the virus is: \" + str(total_infected))\n total_critical = int(total_infected * 0.1)\n print(\"The current total of people in critical condition in the hospital is: \" + str(total_critical))\n total_exitus = int(total_infected * 0.025)\n print(\"The current total of exitus (deaths) caused by the virus is: \" + str(total_exitus))\n # We set the hospital saturation conditions and print the state on screen.\n if SMI_beds < total_critical:\n print(\"The hospital is saturated, with a total number of critical cases of: \" + str(total_critical))\n else:\n print(\"The hospital is not saturated, with a total number of critical cases of: \" + str(total_critical))", "Day: 0\nThe current total of people infected by the virus is: 4250\nThe current total of people in critical condition in the hospital is: 425\nThe current total of exitus (deaths) caused by the virus is: 106\nThe hospital is not saturated, with a total number of critical cases of: 425\nDay: 1\nThe current total of people infected by the virus is: 5737\nThe current total of people in critical condition in the hospital is: 573\nThe current total of exitus (deaths) caused by the virus is: 143\nThe hospital is not saturated, with a total number of critical cases of: 573\nDay: 2\nThe current total of people infected by the virus is: 7745\nThe current total of people in critical condition in the hospital is: 774\nThe current total of exitus (deaths) caused by the virus is: 193\nThe hospital is not saturated, with a total number of critical cases of: 774\nDay: 3\nThe current total of people infected by the virus is: 10456\nThe current total of people in critical condition in the hospital is: 1045\nThe current total of exitus (deaths) caused by the virus is: 261\nThe hospital is not saturated, with a total number of critical cases of: 1045\nDay: 4\nThe current total of people infected by the virus is: 14116\nThe current total of people in critical condition in the hospital is: 1411\nThe current total of exitus (deaths) caused by the virus is: 352\nThe hospital is not saturated, with a total number of critical cases of: 1411\nDay: 5\nThe current total of people infected by the virus is: 19057\nThe current total of people in critical condition in the hospital is: 1905\nThe current total of exitus (deaths) caused by the virus is: 476\nThe hospital is not saturated, with a total number of critical cases of: 1905\nDay: 6\nThe current total of people infected by the virus is: 25727\nThe current total of people in critical condition in the hospital is: 2572\nThe current total of exitus (deaths) caused by the virus is: 643\nThe hospital is not saturated, with a total number of critical cases of: 2572\nDay: 7\nThe current total of people infected by the virus is: 34731\nThe current total of people in critical condition in the hospital is: 3473\nThe current total of exitus (deaths) caused by the virus is: 868\nThe hospital is saturated, with a total number of critical cases of: 3473\nDay: 8\nThe current total of people infected by the virus is: 46887\nThe current total of people in critical condition in the hospital is: 4688\nThe current total of exitus (deaths) caused by the virus is: 1172\nThe hospital is saturated, with a total number of critical cases of: 4688\nDay: 9\nThe current total of people infected by the virus is: 63298\nThe current total of people in critical condition in the hospital is: 6329\nThe current total of exitus (deaths) caused by the virus is: 1582\nThe hospital is saturated, with a total number of critical cases of: 6329\nDay: 10\nThe current total of people infected by the virus is: 85452\nThe current total of people in critical condition in the hospital is: 8545\nThe current total of exitus (deaths) caused by the virus is: 2136\nThe hospital is saturated, with a total number of critical cases of: 8545\nDay: 11\nThe current total of people infected by the virus is: 115361\nThe current total of people in critical condition in the hospital is: 11536\nThe current total of exitus (deaths) caused by the virus is: 2884\nThe hospital is saturated, with a total number of critical cases of: 11536\nDay: 12\nThe current total of people infected by the virus is: 155737\nThe current total of people in critical condition in the hospital is: 15573\nThe current total of exitus (deaths) caused by the virus is: 3893\nThe hospital is saturated, with a total number of critical cases of: 15573\nDay: 13\nThe current total of people infected by the virus is: 210246\nThe current total of people in critical condition in the hospital is: 21024\nThe current total of exitus (deaths) caused by the virus is: 5256\nThe hospital is saturated, with a total number of critical cases of: 21024\nDay: 14\nThe current total of people infected by the virus is: 283832\nThe current total of people in critical condition in the hospital is: 28383\nThe current total of exitus (deaths) caused by the virus is: 7095\nThe hospital is saturated, with a total number of critical cases of: 28383\nDay: 15\nThe current total of people infected by the virus is: 383173\nThe current total of people in critical condition in the hospital is: 38317\nThe current total of exitus (deaths) caused by the virus is: 9579\nThe hospital is saturated, with a total number of critical cases of: 38317\nDay: 16\nThe current total of people infected by the virus is: 517284\nThe current total of people in critical condition in the hospital is: 51728\nThe current total of exitus (deaths) caused by the virus is: 12932\nThe hospital is saturated, with a total number of critical cases of: 51728\nDay: 17\nThe current total of people infected by the virus is: 698333\nThe current total of people in critical condition in the hospital is: 69833\nThe current total of exitus (deaths) caused by the virus is: 17458\nThe hospital is saturated, with a total number of critical cases of: 69833\nDay: 18\nThe current total of people infected by the virus is: 942750\nThe current total of people in critical condition in the hospital is: 94275\nThe current total of exitus (deaths) caused by the virus is: 23568\nThe hospital is saturated, with a total number of critical cases of: 94275\nDay: 19\nThe current total of people infected by the virus is: 1272713\nThe current total of people in critical condition in the hospital is: 127271\nThe current total of exitus (deaths) caused by the virus is: 31817\nThe hospital is saturated, with a total number of critical cases of: 127271\nDay: 20\nThe current total of people infected by the virus is: 1718162\nThe current total of people in critical condition in the hospital is: 171816\nThe current total of exitus (deaths) caused by the virus is: 42954\nThe hospital is saturated, with a total number of critical cases of: 171816\n" ] ], [ [ "\nc) Convert the previous code into a function that generates a text file named `output.txt`, following this format: \n```\nDay, Affected, Critical, Exitus, State\n0, 4250, 425, 106, No Saturation\n1, 5737, 573, 143, No Saturation\n2, 7745, 774, 193, No Saturation\n...\n```\nWith the input parameters $N$, $D$, $\\beta$, SMI beds.\n\n<span style=\"font-family: Courier New; background-color: #82b74b; color: #000000; padding: 2px; \">NM</span>", "_____no_output_____" ] ], [ [ "# We import the library we will use.\nimport os\n\n# We redefine the cal_infected function so that it takes beta as a parameter.\ndef cal_infected(N, D, beta):\n \"\"\"\n Computes the number of people infected with the SARS-CoV-2 virus based on a model of the spread of the disease.\n \"\"\"\n total_infected = ((1 + beta)**D) * N\n return total_infected\n\n# We create the function that generates the file.\ndef generate_file_covid(N, D, beta, SMI_beds):\n \"\"\"\n Generates a file with updated data on the spread of the SARS-CoV-2 virus.\n \"\"\"\n file_output_covid = open(\"output_covid.txt\",\"w\")\n # We write the header to the file and add a line break. \n file_output_covid.write(\"Days, Infected, Critical, Exitus, State\" + os.linesep)\n \n # We iterate over the days of the simulation.\n for day in range(0, D):\n total_infected = int(cal_infected(N, day, beta))\n total_critical = int(total_infected * 0.1)\n total_exitus = int(total_infected * 0.025)\n # We set the hospital saturation state conditions.\n if SMI_beds < total_critical:\n state = \"Saturation\"\n else:\n state = \"No Saturation\"\n \n # We write to the file, concatenating the different variables.\n file_output_covid.write(str(day) + \",\" + str(total_infected) + \",\" + str(total_critical) + \",\" + str(total_exitus) + \",\" + state + os.linesep)\n\n # We close the file.\n file_output_covid.close() \n\n# We call the function.\ngenerate_file_covid(4250, 21, 0.35, 3363)\n\n# We show the result to check that it was generated correctly.\nwith open(\"output_covid.txt\") as file_output_covid:\n for line in file_output_covid:\n print(line)", "Days, Infected, Critical, Exitus, State\n\n\n\n0,4250,425,106,No Saturation\n\n\n\n1,5737,573,143,No Saturation\n\n\n\n2,7745,774,193,No Saturation\n\n\n\n3,10456,1045,261,No Saturation\n\n\n\n4,14116,1411,352,No Saturation\n\n\n\n5,19057,1905,476,No Saturation\n\n\n\n6,25727,2572,643,No Saturation\n\n\n\n7,34731,3473,868,Saturation\n\n\n\n8,46887,4688,1172,Saturation\n\n\n\n9,63298,6329,1582,Saturation\n\n\n\n10,85452,8545,2136,Saturation\n\n\n\n11,115361,11536,2884,Saturation\n\n\n\n12,155737,15573,3893,Saturation\n\n\n\n13,210246,21024,5256,Saturation\n\n\n\n14,283832,28383,7095,Saturation\n\n\n\n15,383173,38317,9579,Saturation\n\n\n\n16,517284,51728,12932,Saturation\n\n\n\n17,698333,69833,17458,Saturation\n\n\n\n18,942750,94275,23568,Saturation\n\n\n\n19,1272713,127271,31817,Saturation\n\n\n\n20,1718162,171816,42954,Saturation\n\n\n\n" ] ], [ [ "### Exercise 3\n\nGiven the following dictionary:", "_____no_output_____" ] ], [ [ "d = {\"Alex\":344334443, \"Eva\":5533443, \"Cristina\":443355, \"Jonas\":33223324}", "_____no_output_____" ] ], [ [ "Write a function that asks the user to enter a person's name and prints on screen the person's name and their telephone number. \n\nKeep in mind that:\n\n- The function must check that the value entered by the user is a name that exists in the dictionary. Otherwise, it will show an error message (\"The name entered does not correspond to any person\") and return the value False.\n- You must take into account that the names passed as a parameter may be in lowercase, uppercase or a combination of both, and that we must find the telephone number even if the capitalization of the string entered by the user is not exactly the same as the one stored in the dictionary.\n- Assume there are no accents in the names.\n\nNote 1: To complete the activity, you will have to capture a text entered by the user. Check the [official documentation of the input function](https://docs.python.org/3/library/functions.html#input) to see how to do it.\n\nNote 2: You will also have to think about how to handle the fact that the user may use uppercase and lowercase letters when writing the name in the dictionary. We encourage you to use a search engine to try to find an alternative to solve this subproblem! Remember to cite the references you have used to solve it!\n\n<span style=\"font-family: Courier New; background-color: #ffcc5c; color: #000000; padding: 3px; \">EG</span>", "_____no_output_____" ] ], [ [ "d = {\"Alex\":344334443, \"Eva\":5533443, \"Cristina\":443355, \"Jonas\":33223324}\n\n# We create a function that shows on screen a person's name and telephone number.\ndef print_name_tlf():\n \"\"\"\n We ask the user for a name and show on screen the name entered as well as the telephone number associated with that name.\n \"\"\"\n # We use the capitalize function so that any uppercase and lowercase letters the user may type match the\n # dictionary.\n user_name = input(\"Enter a name\").capitalize()\n if user_name in d:\n print(\"The name entered is \" + user_name + \" and the telephone number associated with this name is \" + str(d[user_name]))\n else:\n print(\"The name entered does not correspond to any person\")\n return False\n \n\n# We call the function.\nprint_name_tlf()", "Enter a nameCRISTINA\nThe name entered is Cristina and the telephone number associated with this name is 443355\n" ] ], [ [ "References consulted:\n\n*https://docs.python.org/3/library/stdtypes.html?highlight=capitalize#str.capitalize*", "_____no_output_____" ], [ "### Exercise 4\n\nPython has a very useful **idiom** known as the `list comprehension`. Using this **idiom**, provide an expression that returns the following lists.\n\nNote: To complete this activity you will need to research what `list comprehensions` are and what syntax they use. For that, we recommend first using a search engine to find generic information about this construction. Then, we recommend consulting stackoverflow to see some examples of problems that can be solved with this construction.\n\n\n[stackoverflow](https://stackoverflow.com/) is a question-and-answer site that is very popular among programmers. You will see that for the vast majority of the doubts you may have, someone will already have had (and asked) them before! So, beyond asking your own questions there (we already have the course forum for that!), consulting this site will let you see what solutions other programmers propose for those doubts. Often there will be more than one solution to the same problem, and you will be able to judge which one is the most appropriate for your problem.\n\nTo see examples of problems that are well suited to being solved with **list comprehensions**, we recommend reading the following pages:\n* https://stackoverflow.com/questions/12555443/squaring-all-elements-in-a-list\n* https://stackoverflow.com/questions/18551458/how-to-frame-two-for-loops-in-list-comprehension-python\n* https://stackoverflow.com/questions/24442091/list-comprehension-with-condition\n* https://stackoverflow.com/questions/41676212/i-want-to-return-only-the-odd-numbers-in-a-list\n* https://stackoverflow.com/questions/4260280/if-else-in-a-list-comprehension\n\n<span style=\"font-family: Courier New; background-color: #ffcc5c; color: #000000; padding: 3px; \">EG</span>\n", "_____no_output_____" ], [ "a) A list with the values $4 x^2$ where $x$ is each of the numbers in the list `list_1`:", "_____no_output_____" ] ], [ [ "list_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n[4 * x**2 for x in list_1]", "_____no_output_____" ] ], [ [ "b) A list with the values $x/(x+1)$ where $x$ is each of the numbers in the list `list_1`:", "_____no_output_____" ] ], [ [ "list_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n[x/(x + 1) for x in list_1]", "_____no_output_____" ] ], [ [ "c) A list with the values $4x^2/(4x^2-1)$ where $x$ is each of the numbers in the list `list_1`:", "_____no_output_____" ] ], [ [ "list_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n[(4 * x**2)/(4 * x**2 - 1) for x in list_1]", "_____no_output_____" ] ], [ [ "### Exercise 5\n\n`lambda` functions are a way of expressing and defining small functions without using the `def funcion():` constructor.\n\nRead about lambda functions, for example [here](https://www.w3schools.com/python/python_lambda.asp) or [here](https://realpython.com/python-lambda/)\n\nWrite a function $f$ with argument $n$, $f(n)$, that **returns a lambda function**, which in turn returns $n$ copies of the character string passed as its argument: \n\n<span style=\"font-family: Courier New; background-color: #f2ae72; color: #000000; padding: 3px; \">EI</span>", "_____no_output_____" ] ], [ [ "# We define a function that returns a lambda function.\n\ndef f(n):\n return(lambda x: x * n)\n\nr = f(5)\nprint(r(\"hola\")) # Where we should see 5 copies of the literal \"hola\"\n\n\n# Another way to call the function and check the result.\nprint(f(4)(\"casa\"))\nprint(f(4)(\"pelota\"))\nprint(f(4)(\"árbol\"))", "holaholaholaholahola\ncasacasacasacasa\npelotapelotapelotapelota\nárbolárbolárbolárbol\n" ] ], [ [ "### Optional Exercise\n\nThere is an expression attributed to John Wallis (1616) for estimating the value of $\\pi$, consisting of: \n\n$$\n\\frac{\\pi}{2} = \\prod_{n=1}^{N} (\\frac{4n^2}{4n^2 - 1})\n$$\nif $N$ is sufficiently large, $N \\to \\infty$. \n\nWrite a function that, given an approximation N, computes an estimate of $\\pi$ following the Wallis formula. \n\n\n\n**Considerations:**\n\n- Research the map and reduce functions\n- You can also use a list comprehension\n- Lambda functions may be useful\n\n<span style=\"font-family: Courier New; background-color: #f2ae72; color: #000000; padding: 3px; \">EI</span>", "_____no_output_____" ] ], [ [ "# We import the numpy library that we will use.\nimport numpy\n\n# We define the function for computing the number pi.\ndef cal_pi(N):\n \"\"\"\n Estimates the value of the number pi based on a given number.\n \"\"\"\n # We use a list comprehension to apply John Wallis' expression and store the result in a variable.\n pi = numpy.prod([(4 * n**2 / (4 * n**2 - 1)) for n in range(1,N + 1)]) * 2\n \n # We show on screen the value of pi with a custom message.\n print(\"The estimated value of the number pi with an N of \" + str(N) + \" is: \" + str(pi))\n return pi\n\n# We try the function.\nprint(cal_pi(40))\nprint(cal_pi(1000000))\nprint(cal_pi(100000000))", "The estimated value of the number pi with an N of 40 is: 3.1222603264214372\n3.1222603264214372\nThe estimated value of the number pi with an N of 1000000 is: 3.141591868192149\n3.141591868192149\nThe estimated value of the number pi with an N of 100000000 is: 3.141592643066262\n3.141592643066262\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
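The optional exercise in the record above suggests `map`/`reduce`, while the sample solution uses `numpy.prod` over a list comprehension. For completeness, here is a minimal `functools.reduce` sketch of the same Wallis product (illustrative only, not part of the record):

```python
from functools import reduce

def wallis_pi(N):
    # pi/2 = prod_{n=1..N} 4n^2 / (4n^2 - 1)
    terms = (4 * n * n / (4 * n * n - 1) for n in range(1, N + 1))
    return 2 * reduce(lambda acc, t: acc * t, terms, 1.0)

print(wallis_pi(1000))  # converges slowly towards 3.14159...
```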
ec5caa36c95cfb17b670a60aed1566247edafbb8
1,259
ipynb
Jupyter Notebook
leaf-phenotyping-nb.ipynb
danforthcenter/reu
0c18c271a181df39d614475c303bce2eb6452dee
[ "MIT" ]
null
null
null
leaf-phenotyping-nb.ipynb
danforthcenter/reu
0c18c271a181df39d614475c303bce2eb6452dee
[ "MIT" ]
null
null
null
leaf-phenotyping-nb.ipynb
danforthcenter/reu
0c18c271a181df39d614475c303bce2eb6452dee
[ "MIT" ]
1
2019-05-29T14:54:54.000Z
2019-05-29T14:54:54.000Z
21.706897
91
0.515488
[ [ [ "# Import PlantCV \nfrom plantcv import plantcv as pcv\nimport matplotlib", "_____no_output_____" ], [ "class options:\n def __init__(self):\n self.image = \"examples/shoot-images/VIS_SV_0_z2500_h2_g0_e82_180256.png\"\n self.debug = \"plot\"\n self.writeimg= False\n self.result = \".\"\n self.outdir = \".\"\n \n# Get options\nargs = options()\n\n# Set debug to the global parameter \npcv.params.debug = args.debug", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
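The PlantCV record above stops after constructing the `options` object with `debug` set to `"plot"`. As a sketch of the typical next step in such a workflow (a hypothetical continuation, not part of the record), the referenced image would be loaded with PlantCV's reader:

```python
from plantcv import plantcv as pcv

# Load the VIS side-view image referenced by the options object above.
img, path, filename = pcv.readimage(
    filename="examples/shoot-images/VIS_SV_0_z2500_h2_g0_e82_180256.png"
)
```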
ec5cb64903507eb2e3ede179b6fcb73ee982268a
63,996
ipynb
Jupyter Notebook
Neural Networks/Introduction to Neural Networks/Neural Nets.ipynb
Iamsdt/UdacityDeepLearningNanodegree
507c2ce620f42e36271549471b819d3d7fceb1b6
[ "Apache-2.0" ]
1
2019-11-01T19:26:30.000Z
2019-11-01T19:26:30.000Z
Neural Networks/Introduction to Neural Networks/Neural Nets.ipynb
Iamsdt/UdacityDeepLearningNanodegree
507c2ce620f42e36271549471b819d3d7fceb1b6
[ "Apache-2.0" ]
null
null
null
Neural Networks/Introduction to Neural Networks/Neural Nets.ipynb
Iamsdt/UdacityDeepLearningNanodegree
507c2ce620f42e36271549471b819d3d7fceb1b6
[ "Apache-2.0" ]
1
2020-10-31T13:42:25.000Z
2020-10-31T13:42:25.000Z
63,996
63,996
0.713357
[ [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "# Data", "_____no_output_____" ] ], [ [ "x_data = [1.0, 2.0, 3.0, 4.0, 5.0]\ny_data = [2.0, 4.0, 6.0, 8.0, 10.0]", "_____no_output_____" ] ], [ [ "# random weight", "_____no_output_____" ] ], [ [ "w = 1.0", "_____no_output_____" ], [ "def forward(x):\n return (w * x) + 0 # bias zero", "_____no_output_____" ], [ "def loss(output, y):\n return (output - y) * (output - y)", "_____no_output_____" ], [ "w_list = []\nerror_list = []", "_____no_output_____" ], [ "for w in np.arange(0.0, 4.1, 0.1):\n print(w)\n l_sum = 0\n for x, y in zip(x_data, y_data):\n output = forward(x)\n l = loss(output, y)\n l_sum += l\n print(\"\\t X:\", x) \n print(\"\\t Y:\", y) \n print(\"\\t Output:\", output) \n print(\"\\t loss:\", l)\n \n print(\"Loss\", l_sum / 5)\n w_list.append(w)\n error_list.append(l_sum/5)", "0.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.0\n\t loss: 4.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.0\n\t loss: 16.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.0\n\t loss: 36.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 0.0\n\t loss: 64.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 0.0\n\t loss: 100.0\nLoss 44.0\n0.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.1\n\t loss: 3.61\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.2\n\t loss: 14.44\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.30000000000000004\n\t loss: 32.49\n\t X: 4.0\n\t Y: 8.0\n\t Output: 0.4\n\t loss: 57.76\n\t X: 5.0\n\t Y: 10.0\n\t Output: 0.5\n\t loss: 90.25\nLoss 39.71\n0.2\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.2\n\t loss: 3.24\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.4\n\t loss: 12.96\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.6000000000000001\n\t loss: 29.160000000000004\n\t X: 4.0\n\t Y: 8.0\n\t Output: 0.8\n\t loss: 51.84\n\t X: 5.0\n\t Y: 10.0\n\t Output: 1.0\n\t loss: 81.0\nLoss 35.64\n0.30000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.30000000000000004\n\t loss: 2.8899999999999997\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.6000000000000001\n\t loss: 11.559999999999999\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.9000000000000001\n\t loss: 26.009999999999998\n\t X: 4.0\n\t Y: 8.0\n\t Output: 1.2000000000000002\n\t loss: 46.239999999999995\n\t X: 5.0\n\t Y: 10.0\n\t Output: 1.5000000000000002\n\t loss: 72.25\nLoss 31.79\n0.4\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.4\n\t loss: 2.5600000000000005\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.8\n\t loss: 10.240000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 1.2000000000000002\n\t loss: 23.04\n\t X: 4.0\n\t Y: 8.0\n\t Output: 1.6\n\t loss: 40.96000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 2.0\n\t loss: 64.0\nLoss 28.160000000000004\n0.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.5\n\t loss: 2.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.0\n\t loss: 9.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 1.5\n\t loss: 20.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 2.0\n\t loss: 36.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 2.5\n\t loss: 56.25\nLoss 24.75\n0.6000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.6000000000000001\n\t loss: 1.9599999999999997\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.2000000000000002\n\t loss: 7.839999999999999\n\t X: 3.0\n\t Y: 6.0\n\t Output: 1.8000000000000003\n\t loss: 17.639999999999993\n\t X: 4.0\n\t Y: 8.0\n\t Output: 2.4000000000000004\n\t loss: 31.359999999999996\n\t X: 5.0\n\t Y: 10.0\n\t Output: 3.0000000000000004\n\t loss: 49.0\nLoss 21.559999999999995\n0.7000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.7000000000000001\n\t loss: 1.6899999999999995\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.4000000000000001\n\t loss: 6.759999999999998\n\t X: 3.0\n\t Y: 6.0\n\t Output: 2.1\n\t loss: 15.209999999999999\n\t X: 
4.0\n\t Y: 8.0\n\t Output: 2.8000000000000003\n\t loss: 27.039999999999992\n\t X: 5.0\n\t Y: 10.0\n\t Output: 3.5000000000000004\n\t loss: 42.25\nLoss 18.589999999999996\n0.8\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.8\n\t loss: 1.44\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.6\n\t loss: 5.76\n\t X: 3.0\n\t Y: 6.0\n\t Output: 2.4000000000000004\n\t loss: 12.959999999999997\n\t X: 4.0\n\t Y: 8.0\n\t Output: 3.2\n\t loss: 23.04\n\t X: 5.0\n\t Y: 10.0\n\t Output: 4.0\n\t loss: 36.0\nLoss 15.839999999999998\n0.9\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.9\n\t loss: 1.2100000000000002\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.8\n\t loss: 4.840000000000001\n\t X: 3.0\n\t Y: 6.0\n\t Output: 2.7\n\t loss: 10.889999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 3.6\n\t loss: 19.360000000000003\n\t X: 5.0\n\t Y: 10.0\n\t Output: 4.5\n\t loss: 30.25\nLoss 13.309999999999999\n1.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.0\n\t loss: 1.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.0\n\t loss: 4.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.0\n\t loss: 9.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 4.0\n\t loss: 16.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 5.0\n\t loss: 25.0\nLoss 11.0\n1.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.1\n\t loss: 0.8099999999999998\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.2\n\t loss: 3.2399999999999993\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.3000000000000003\n\t loss: 7.289999999999998\n\t X: 4.0\n\t Y: 8.0\n\t Output: 4.4\n\t loss: 12.959999999999997\n\t X: 5.0\n\t Y: 10.0\n\t Output: 5.5\n\t loss: 20.25\nLoss 8.91\n1.2000000000000002\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.2000000000000002\n\t loss: 0.6399999999999997\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.4000000000000004\n\t loss: 2.5599999999999987\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.6000000000000005\n\t loss: 5.759999999999997\n\t X: 4.0\n\t Y: 8.0\n\t Output: 4.800000000000001\n\t loss: 10.239999999999995\n\t X: 5.0\n\t Y: 10.0\n\t Output: 6.000000000000001\n\t loss: 15.999999999999993\nLoss 7.0399999999999965\n1.3\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.3\n\t loss: 0.48999999999999994\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.6\n\t loss: 1.9599999999999997\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.9000000000000004\n\t loss: 4.409999999999998\n\t X: 4.0\n\t Y: 8.0\n\t Output: 5.2\n\t loss: 7.839999999999999\n\t X: 5.0\n\t Y: 10.0\n\t Output: 6.5\n\t loss: 12.25\nLoss 5.389999999999999\n1.4000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.4000000000000001\n\t loss: 0.3599999999999998\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.8000000000000003\n\t loss: 1.4399999999999993\n\t X: 3.0\n\t Y: 6.0\n\t Output: 4.2\n\t loss: 3.2399999999999993\n\t X: 4.0\n\t Y: 8.0\n\t Output: 5.6000000000000005\n\t loss: 5.759999999999997\n\t X: 5.0\n\t Y: 10.0\n\t Output: 7.000000000000001\n\t loss: 8.999999999999995\nLoss 3.959999999999998\n1.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.5\n\t loss: 0.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.0\n\t loss: 1.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 4.5\n\t loss: 2.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 6.0\n\t loss: 4.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 7.5\n\t loss: 6.25\nLoss 2.75\n1.6\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.6\n\t loss: 0.15999999999999992\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.2\n\t loss: 0.6399999999999997\n\t X: 3.0\n\t Y: 6.0\n\t Output: 4.800000000000001\n\t loss: 1.4399999999999984\n\t X: 4.0\n\t Y: 8.0\n\t Output: 6.4\n\t loss: 2.5599999999999987\n\t X: 5.0\n\t Y: 10.0\n\t Output: 8.0\n\t loss: 4.0\nLoss 1.7599999999999993\n1.7000000000000002\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.7000000000000002\n\t loss: 0.0899999999999999\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.4000000000000004\n\t loss: 
0.3599999999999996\n\t X: 3.0\n\t Y: 6.0\n\t Output: 5.1000000000000005\n\t loss: 0.809999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 6.800000000000001\n\t loss: 1.4399999999999984\n\t X: 5.0\n\t Y: 10.0\n\t Output: 8.5\n\t loss: 2.25\nLoss 0.9899999999999993\n1.8\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.8\n\t loss: 0.03999999999999998\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.6\n\t loss: 0.15999999999999992\n\t X: 3.0\n\t Y: 6.0\n\t Output: 5.4\n\t loss: 0.3599999999999996\n\t X: 4.0\n\t Y: 8.0\n\t Output: 7.2\n\t loss: 0.6399999999999997\n\t X: 5.0\n\t Y: 10.0\n\t Output: 9.0\n\t loss: 1.0\nLoss 0.43999999999999984\n1.9000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.9000000000000001\n\t loss: 0.009999999999999974\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.8000000000000003\n\t loss: 0.0399999999999999\n\t X: 3.0\n\t Y: 6.0\n\t Output: 5.7\n\t loss: 0.0899999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 7.6000000000000005\n\t loss: 0.1599999999999996\n\t X: 5.0\n\t Y: 10.0\n\t Output: 9.5\n\t loss: 0.25\nLoss 0.10999999999999988\n2.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.0\n\t loss: 0.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.0\n\t loss: 0.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.0\n\t loss: 0.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 8.0\n\t loss: 0.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 10.0\n\t loss: 0.0\nLoss 0.0\n2.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.1\n\t loss: 0.010000000000000018\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.2\n\t loss: 0.04000000000000007\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.300000000000001\n\t loss: 0.09000000000000043\n\t X: 4.0\n\t Y: 8.0\n\t Output: 8.4\n\t loss: 0.16000000000000028\n\t X: 5.0\n\t Y: 10.0\n\t Output: 10.5\n\t loss: 0.25\nLoss 0.11000000000000017\n2.2\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.2\n\t loss: 0.04000000000000007\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.4\n\t loss: 0.16000000000000028\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.6000000000000005\n\t loss: 0.36000000000000065\n\t X: 4.0\n\t Y: 8.0\n\t Output: 8.8\n\t loss: 0.6400000000000011\n\t X: 5.0\n\t Y: 10.0\n\t Output: 11.0\n\t loss: 1.0\nLoss 0.4400000000000004\n2.3000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.3000000000000003\n\t loss: 0.09000000000000016\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.6000000000000005\n\t loss: 0.36000000000000065\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.9\n\t loss: 0.8100000000000006\n\t X: 4.0\n\t Y: 8.0\n\t Output: 9.200000000000001\n\t loss: 1.4400000000000026\n\t X: 5.0\n\t Y: 10.0\n\t Output: 11.500000000000002\n\t loss: 2.2500000000000053\nLoss 0.990000000000002\n2.4000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.4000000000000004\n\t loss: 0.16000000000000028\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.800000000000001\n\t loss: 0.6400000000000011\n\t X: 3.0\n\t Y: 6.0\n\t Output: 7.200000000000001\n\t loss: 1.4400000000000026\n\t X: 4.0\n\t Y: 8.0\n\t Output: 9.600000000000001\n\t loss: 2.5600000000000045\n\t X: 5.0\n\t Y: 10.0\n\t Output: 12.000000000000002\n\t loss: 4.000000000000007\nLoss 1.760000000000003\n2.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.5\n\t loss: 0.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.0\n\t loss: 1.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 7.5\n\t loss: 2.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 10.0\n\t loss: 4.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 12.5\n\t loss: 6.25\nLoss 2.75\n2.6\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.6\n\t loss: 0.3600000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.2\n\t loss: 1.4400000000000004\n\t X: 3.0\n\t Y: 6.0\n\t Output: 7.800000000000001\n\t loss: 3.2400000000000024\n\t X: 4.0\n\t Y: 8.0\n\t Output: 10.4\n\t loss: 5.760000000000002\n\t X: 5.0\n\t Y: 10.0\n\t Output: 13.0\n\t 
loss: 9.0\nLoss 3.960000000000001\n2.7\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.7\n\t loss: 0.49000000000000027\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.4\n\t loss: 1.960000000000001\n\t X: 3.0\n\t Y: 6.0\n\t Output: 8.100000000000001\n\t loss: 4.410000000000006\n\t X: 4.0\n\t Y: 8.0\n\t Output: 10.8\n\t loss: 7.840000000000004\n\t X: 5.0\n\t Y: 10.0\n\t Output: 13.5\n\t loss: 12.25\nLoss 5.390000000000002\n2.8000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.8000000000000003\n\t loss: 0.6400000000000005\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.6000000000000005\n\t loss: 2.560000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 8.4\n\t loss: 5.760000000000002\n\t X: 4.0\n\t Y: 8.0\n\t Output: 11.200000000000001\n\t loss: 10.240000000000007\n\t X: 5.0\n\t Y: 10.0\n\t Output: 14.000000000000002\n\t loss: 16.000000000000014\nLoss 7.0400000000000045\n2.9000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.9000000000000004\n\t loss: 0.8100000000000006\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.800000000000001\n\t loss: 3.2400000000000024\n\t X: 3.0\n\t Y: 6.0\n\t Output: 8.700000000000001\n\t loss: 7.290000000000005\n\t X: 4.0\n\t Y: 8.0\n\t Output: 11.600000000000001\n\t loss: 12.96000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 14.500000000000002\n\t loss: 20.250000000000018\nLoss 8.910000000000007\n3.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.0\n\t loss: 1.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.0\n\t loss: 4.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.0\n\t loss: 9.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 12.0\n\t loss: 16.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 15.0\n\t loss: 25.0\nLoss 11.0\n3.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.1\n\t loss: 1.2100000000000002\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.2\n\t loss: 4.840000000000001\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.3\n\t loss: 10.890000000000004\n\t X: 4.0\n\t Y: 8.0\n\t Output: 12.4\n\t loss: 19.360000000000003\n\t X: 5.0\n\t Y: 10.0\n\t Output: 15.5\n\t loss: 30.25\nLoss 13.310000000000002\n3.2\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.2\n\t loss: 1.4400000000000004\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.4\n\t loss: 5.760000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.600000000000001\n\t loss: 12.96000000000001\n\t X: 4.0\n\t Y: 8.0\n\t Output: 12.8\n\t loss: 23.040000000000006\n\t X: 5.0\n\t Y: 10.0\n\t Output: 16.0\n\t loss: 36.0\nLoss 15.840000000000003\n3.3000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.3000000000000003\n\t loss: 1.6900000000000006\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.6000000000000005\n\t loss: 6.7600000000000025\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.9\n\t loss: 15.210000000000003\n\t X: 4.0\n\t Y: 8.0\n\t Output: 13.200000000000001\n\t loss: 27.04000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 16.5\n\t loss: 42.25\nLoss 18.590000000000003\n3.4000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.4000000000000004\n\t loss: 1.960000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.800000000000001\n\t loss: 7.840000000000004\n\t X: 3.0\n\t Y: 6.0\n\t Output: 10.200000000000001\n\t loss: 17.640000000000008\n\t X: 4.0\n\t Y: 8.0\n\t Output: 13.600000000000001\n\t loss: 31.360000000000017\n\t X: 5.0\n\t Y: 10.0\n\t Output: 17.0\n\t loss: 49.0\nLoss 21.560000000000006\n3.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.5\n\t loss: 2.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.0\n\t loss: 9.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 10.5\n\t loss: 20.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 14.0\n\t loss: 36.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 17.5\n\t loss: 56.25\nLoss 24.75\n3.6\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.6\n\t loss: 2.5600000000000005\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.2\n\t loss: 
10.240000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 10.8\n\t loss: 23.040000000000006\n\t X: 4.0\n\t Y: 8.0\n\t Output: 14.4\n\t loss: 40.96000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 18.0\n\t loss: 64.0\nLoss 28.160000000000004\n3.7\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.7\n\t loss: 2.8900000000000006\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.4\n\t loss: 11.560000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 11.100000000000001\n\t loss: 26.010000000000016\n\t X: 4.0\n\t Y: 8.0\n\t Output: 14.8\n\t loss: 46.24000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 18.5\n\t loss: 72.25\nLoss 31.79000000000001\n3.8000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.8000000000000003\n\t loss: 3.240000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.6000000000000005\n\t loss: 12.960000000000004\n\t X: 3.0\n\t Y: 6.0\n\t Output: 11.4\n\t loss: 29.160000000000004\n\t X: 4.0\n\t Y: 8.0\n\t Output: 15.200000000000001\n\t loss: 51.84000000000002\n\t X: 5.0\n\t Y: 10.0\n\t Output: 19.0\n\t loss: 81.0\nLoss 35.64000000000001\n3.9000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.9000000000000004\n\t loss: 3.610000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.800000000000001\n\t loss: 14.440000000000005\n\t X: 3.0\n\t Y: 6.0\n\t Output: 11.700000000000001\n\t loss: 32.49000000000001\n\t X: 4.0\n\t Y: 8.0\n\t Output: 15.600000000000001\n\t loss: 57.76000000000002\n\t X: 5.0\n\t Y: 10.0\n\t Output: 19.5\n\t loss: 90.25\nLoss 39.71000000000001\n4.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 4.0\n\t loss: 4.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 8.0\n\t loss: 16.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 12.0\n\t loss: 36.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 16.0\n\t loss: 64.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 20.0\n\t loss: 100.0\nLoss 44.0\n" ], [ "w_list = []\nerror_list = []", "_____no_output_____" ], [ "for w in np.arange(0.0, 4.1, 0.1):\n print(w)\n l_sum = 0\n for x, y in zip(x_data, y_data):\n output = forward(x)\n l = loss(output, y)\n l_sum += l\n print(\"\\t X:\", x) \n print(\"\\t Y:\", y) \n print(\"\\t Output:\", output) \n print(\"\\t loss:\", l)\n \n print(\"Loss\", l_sum / 5)\n w_list.append(w)\n error_list.append(l_sum/5)", "0.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.0\n\t loss: 4.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.0\n\t loss: 16.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.0\n\t loss: 36.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 0.0\n\t loss: 64.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 0.0\n\t loss: 100.0\nLoss 44.0\n0.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.1\n\t loss: 3.61\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.2\n\t loss: 14.44\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.30000000000000004\n\t loss: 32.49\n\t X: 4.0\n\t Y: 8.0\n\t Output: 0.4\n\t loss: 57.76\n\t X: 5.0\n\t Y: 10.0\n\t Output: 0.5\n\t loss: 90.25\nLoss 39.71\n0.2\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.2\n\t loss: 3.24\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.4\n\t loss: 12.96\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.6000000000000001\n\t loss: 29.160000000000004\n\t X: 4.0\n\t Y: 8.0\n\t Output: 0.8\n\t loss: 51.84\n\t X: 5.0\n\t Y: 10.0\n\t Output: 1.0\n\t loss: 81.0\nLoss 35.64\n0.30000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.30000000000000004\n\t loss: 2.8899999999999997\n\t X: 2.0\n\t Y: 4.0\n\t Output: 0.6000000000000001\n\t loss: 11.559999999999999\n\t X: 3.0\n\t Y: 6.0\n\t Output: 0.9000000000000001\n\t loss: 26.009999999999998\n\t X: 4.0\n\t Y: 8.0\n\t Output: 1.2000000000000002\n\t loss: 46.239999999999995\n\t X: 5.0\n\t Y: 10.0\n\t Output: 1.5000000000000002\n\t loss: 72.25\nLoss 31.79\n0.4\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.4\n\t loss: 2.5600000000000005\n\t X: 2.0\n\t Y: 
4.0\n\t Output: 0.8\n\t loss: 10.240000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 1.2000000000000002\n\t loss: 23.04\n\t X: 4.0\n\t Y: 8.0\n\t Output: 1.6\n\t loss: 40.96000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 2.0\n\t loss: 64.0\nLoss 28.160000000000004\n0.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.5\n\t loss: 2.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.0\n\t loss: 9.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 1.5\n\t loss: 20.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 2.0\n\t loss: 36.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 2.5\n\t loss: 56.25\nLoss 24.75\n0.6000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.6000000000000001\n\t loss: 1.9599999999999997\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.2000000000000002\n\t loss: 7.839999999999999\n\t X: 3.0\n\t Y: 6.0\n\t Output: 1.8000000000000003\n\t loss: 17.639999999999993\n\t X: 4.0\n\t Y: 8.0\n\t Output: 2.4000000000000004\n\t loss: 31.359999999999996\n\t X: 5.0\n\t Y: 10.0\n\t Output: 3.0000000000000004\n\t loss: 49.0\nLoss 21.559999999999995\n0.7000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.7000000000000001\n\t loss: 1.6899999999999995\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.4000000000000001\n\t loss: 6.759999999999998\n\t X: 3.0\n\t Y: 6.0\n\t Output: 2.1\n\t loss: 15.209999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 2.8000000000000003\n\t loss: 27.039999999999992\n\t X: 5.0\n\t Y: 10.0\n\t Output: 3.5000000000000004\n\t loss: 42.25\nLoss 18.589999999999996\n0.8\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.8\n\t loss: 1.44\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.6\n\t loss: 5.76\n\t X: 3.0\n\t Y: 6.0\n\t Output: 2.4000000000000004\n\t loss: 12.959999999999997\n\t X: 4.0\n\t Y: 8.0\n\t Output: 3.2\n\t loss: 23.04\n\t X: 5.0\n\t Y: 10.0\n\t Output: 4.0\n\t loss: 36.0\nLoss 15.839999999999998\n0.9\n\t X: 1.0\n\t Y: 2.0\n\t Output: 0.9\n\t loss: 1.2100000000000002\n\t X: 2.0\n\t Y: 4.0\n\t Output: 1.8\n\t loss: 4.840000000000001\n\t X: 3.0\n\t Y: 6.0\n\t Output: 2.7\n\t loss: 10.889999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 3.6\n\t loss: 19.360000000000003\n\t X: 5.0\n\t Y: 10.0\n\t Output: 4.5\n\t loss: 30.25\nLoss 13.309999999999999\n1.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.0\n\t loss: 1.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.0\n\t loss: 4.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.0\n\t loss: 9.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 4.0\n\t loss: 16.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 5.0\n\t loss: 25.0\nLoss 11.0\n1.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.1\n\t loss: 0.8099999999999998\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.2\n\t loss: 3.2399999999999993\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.3000000000000003\n\t loss: 7.289999999999998\n\t X: 4.0\n\t Y: 8.0\n\t Output: 4.4\n\t loss: 12.959999999999997\n\t X: 5.0\n\t Y: 10.0\n\t Output: 5.5\n\t loss: 20.25\nLoss 8.91\n1.2000000000000002\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.2000000000000002\n\t loss: 0.6399999999999997\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.4000000000000004\n\t loss: 2.5599999999999987\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.6000000000000005\n\t loss: 5.759999999999997\n\t X: 4.0\n\t Y: 8.0\n\t Output: 4.800000000000001\n\t loss: 10.239999999999995\n\t X: 5.0\n\t Y: 10.0\n\t Output: 6.000000000000001\n\t loss: 15.999999999999993\nLoss 7.0399999999999965\n1.3\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.3\n\t loss: 0.48999999999999994\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.6\n\t loss: 1.9599999999999997\n\t X: 3.0\n\t Y: 6.0\n\t Output: 3.9000000000000004\n\t loss: 4.409999999999998\n\t X: 4.0\n\t Y: 8.0\n\t Output: 5.2\n\t loss: 7.839999999999999\n\t X: 5.0\n\t Y: 10.0\n\t Output: 6.5\n\t loss: 12.25\nLoss 
5.389999999999999\n1.4000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.4000000000000001\n\t loss: 0.3599999999999998\n\t X: 2.0\n\t Y: 4.0\n\t Output: 2.8000000000000003\n\t loss: 1.4399999999999993\n\t X: 3.0\n\t Y: 6.0\n\t Output: 4.2\n\t loss: 3.2399999999999993\n\t X: 4.0\n\t Y: 8.0\n\t Output: 5.6000000000000005\n\t loss: 5.759999999999997\n\t X: 5.0\n\t Y: 10.0\n\t Output: 7.000000000000001\n\t loss: 8.999999999999995\nLoss 3.959999999999998\n1.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.5\n\t loss: 0.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.0\n\t loss: 1.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 4.5\n\t loss: 2.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 6.0\n\t loss: 4.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 7.5\n\t loss: 6.25\nLoss 2.75\n1.6\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.6\n\t loss: 0.15999999999999992\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.2\n\t loss: 0.6399999999999997\n\t X: 3.0\n\t Y: 6.0\n\t Output: 4.800000000000001\n\t loss: 1.4399999999999984\n\t X: 4.0\n\t Y: 8.0\n\t Output: 6.4\n\t loss: 2.5599999999999987\n\t X: 5.0\n\t Y: 10.0\n\t Output: 8.0\n\t loss: 4.0\nLoss 1.7599999999999993\n1.7000000000000002\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.7000000000000002\n\t loss: 0.0899999999999999\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.4000000000000004\n\t loss: 0.3599999999999996\n\t X: 3.0\n\t Y: 6.0\n\t Output: 5.1000000000000005\n\t loss: 0.809999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 6.800000000000001\n\t loss: 1.4399999999999984\n\t X: 5.0\n\t Y: 10.0\n\t Output: 8.5\n\t loss: 2.25\nLoss 0.9899999999999993\n1.8\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.8\n\t loss: 0.03999999999999998\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.6\n\t loss: 0.15999999999999992\n\t X: 3.0\n\t Y: 6.0\n\t Output: 5.4\n\t loss: 0.3599999999999996\n\t X: 4.0\n\t Y: 8.0\n\t Output: 7.2\n\t loss: 0.6399999999999997\n\t X: 5.0\n\t Y: 10.0\n\t Output: 9.0\n\t loss: 1.0\nLoss 0.43999999999999984\n1.9000000000000001\n\t X: 1.0\n\t Y: 2.0\n\t Output: 1.9000000000000001\n\t loss: 0.009999999999999974\n\t X: 2.0\n\t Y: 4.0\n\t Output: 3.8000000000000003\n\t loss: 0.0399999999999999\n\t X: 3.0\n\t Y: 6.0\n\t Output: 5.7\n\t loss: 0.0899999999999999\n\t X: 4.0\n\t Y: 8.0\n\t Output: 7.6000000000000005\n\t loss: 0.1599999999999996\n\t X: 5.0\n\t Y: 10.0\n\t Output: 9.5\n\t loss: 0.25\nLoss 0.10999999999999988\n2.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.0\n\t loss: 0.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.0\n\t loss: 0.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.0\n\t loss: 0.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 8.0\n\t loss: 0.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 10.0\n\t loss: 0.0\nLoss 0.0\n2.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.1\n\t loss: 0.010000000000000018\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.2\n\t loss: 0.04000000000000007\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.300000000000001\n\t loss: 0.09000000000000043\n\t X: 4.0\n\t Y: 8.0\n\t Output: 8.4\n\t loss: 0.16000000000000028\n\t X: 5.0\n\t Y: 10.0\n\t Output: 10.5\n\t loss: 0.25\nLoss 0.11000000000000017\n2.2\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.2\n\t loss: 0.04000000000000007\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.4\n\t loss: 0.16000000000000028\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.6000000000000005\n\t loss: 0.36000000000000065\n\t X: 4.0\n\t Y: 8.0\n\t Output: 8.8\n\t loss: 0.6400000000000011\n\t X: 5.0\n\t Y: 10.0\n\t Output: 11.0\n\t loss: 1.0\nLoss 0.4400000000000004\n2.3000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.3000000000000003\n\t loss: 0.09000000000000016\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.6000000000000005\n\t loss: 0.36000000000000065\n\t X: 3.0\n\t Y: 6.0\n\t Output: 6.9\n\t loss: 
0.8100000000000006\n\t X: 4.0\n\t Y: 8.0\n\t Output: 9.200000000000001\n\t loss: 1.4400000000000026\n\t X: 5.0\n\t Y: 10.0\n\t Output: 11.500000000000002\n\t loss: 2.2500000000000053\nLoss 0.990000000000002\n2.4000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.4000000000000004\n\t loss: 0.16000000000000028\n\t X: 2.0\n\t Y: 4.0\n\t Output: 4.800000000000001\n\t loss: 0.6400000000000011\n\t X: 3.0\n\t Y: 6.0\n\t Output: 7.200000000000001\n\t loss: 1.4400000000000026\n\t X: 4.0\n\t Y: 8.0\n\t Output: 9.600000000000001\n\t loss: 2.5600000000000045\n\t X: 5.0\n\t Y: 10.0\n\t Output: 12.000000000000002\n\t loss: 4.000000000000007\nLoss 1.760000000000003\n2.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.5\n\t loss: 0.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.0\n\t loss: 1.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 7.5\n\t loss: 2.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 10.0\n\t loss: 4.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 12.5\n\t loss: 6.25\nLoss 2.75\n2.6\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.6\n\t loss: 0.3600000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.2\n\t loss: 1.4400000000000004\n\t X: 3.0\n\t Y: 6.0\n\t Output: 7.800000000000001\n\t loss: 3.2400000000000024\n\t X: 4.0\n\t Y: 8.0\n\t Output: 10.4\n\t loss: 5.760000000000002\n\t X: 5.0\n\t Y: 10.0\n\t Output: 13.0\n\t loss: 9.0\nLoss 3.960000000000001\n2.7\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.7\n\t loss: 0.49000000000000027\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.4\n\t loss: 1.960000000000001\n\t X: 3.0\n\t Y: 6.0\n\t Output: 8.100000000000001\n\t loss: 4.410000000000006\n\t X: 4.0\n\t Y: 8.0\n\t Output: 10.8\n\t loss: 7.840000000000004\n\t X: 5.0\n\t Y: 10.0\n\t Output: 13.5\n\t loss: 12.25\nLoss 5.390000000000002\n2.8000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.8000000000000003\n\t loss: 0.6400000000000005\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.6000000000000005\n\t loss: 2.560000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 8.4\n\t loss: 5.760000000000002\n\t X: 4.0\n\t Y: 8.0\n\t Output: 11.200000000000001\n\t loss: 10.240000000000007\n\t X: 5.0\n\t Y: 10.0\n\t Output: 14.000000000000002\n\t loss: 16.000000000000014\nLoss 7.0400000000000045\n2.9000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 2.9000000000000004\n\t loss: 0.8100000000000006\n\t X: 2.0\n\t Y: 4.0\n\t Output: 5.800000000000001\n\t loss: 3.2400000000000024\n\t X: 3.0\n\t Y: 6.0\n\t Output: 8.700000000000001\n\t loss: 7.290000000000005\n\t X: 4.0\n\t Y: 8.0\n\t Output: 11.600000000000001\n\t loss: 12.96000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 14.500000000000002\n\t loss: 20.250000000000018\nLoss 8.910000000000007\n3.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.0\n\t loss: 1.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.0\n\t loss: 4.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.0\n\t loss: 9.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 12.0\n\t loss: 16.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 15.0\n\t loss: 25.0\nLoss 11.0\n3.1\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.1\n\t loss: 1.2100000000000002\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.2\n\t loss: 4.840000000000001\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.3\n\t loss: 10.890000000000004\n\t X: 4.0\n\t Y: 8.0\n\t Output: 12.4\n\t loss: 19.360000000000003\n\t X: 5.0\n\t Y: 10.0\n\t Output: 15.5\n\t loss: 30.25\nLoss 13.310000000000002\n3.2\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.2\n\t loss: 1.4400000000000004\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.4\n\t loss: 5.760000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.600000000000001\n\t loss: 12.96000000000001\n\t X: 4.0\n\t Y: 8.0\n\t Output: 12.8\n\t loss: 23.040000000000006\n\t X: 5.0\n\t Y: 10.0\n\t Output: 16.0\n\t loss: 36.0\nLoss 
15.840000000000003\n3.3000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.3000000000000003\n\t loss: 1.6900000000000006\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.6000000000000005\n\t loss: 6.7600000000000025\n\t X: 3.0\n\t Y: 6.0\n\t Output: 9.9\n\t loss: 15.210000000000003\n\t X: 4.0\n\t Y: 8.0\n\t Output: 13.200000000000001\n\t loss: 27.04000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 16.5\n\t loss: 42.25\nLoss 18.590000000000003\n3.4000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.4000000000000004\n\t loss: 1.960000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 6.800000000000001\n\t loss: 7.840000000000004\n\t X: 3.0\n\t Y: 6.0\n\t Output: 10.200000000000001\n\t loss: 17.640000000000008\n\t X: 4.0\n\t Y: 8.0\n\t Output: 13.600000000000001\n\t loss: 31.360000000000017\n\t X: 5.0\n\t Y: 10.0\n\t Output: 17.0\n\t loss: 49.0\nLoss 21.560000000000006\n3.5\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.5\n\t loss: 2.25\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.0\n\t loss: 9.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 10.5\n\t loss: 20.25\n\t X: 4.0\n\t Y: 8.0\n\t Output: 14.0\n\t loss: 36.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 17.5\n\t loss: 56.25\nLoss 24.75\n3.6\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.6\n\t loss: 2.5600000000000005\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.2\n\t loss: 10.240000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 10.8\n\t loss: 23.040000000000006\n\t X: 4.0\n\t Y: 8.0\n\t Output: 14.4\n\t loss: 40.96000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 18.0\n\t loss: 64.0\nLoss 28.160000000000004\n3.7\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.7\n\t loss: 2.8900000000000006\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.4\n\t loss: 11.560000000000002\n\t X: 3.0\n\t Y: 6.0\n\t Output: 11.100000000000001\n\t loss: 26.010000000000016\n\t X: 4.0\n\t Y: 8.0\n\t Output: 14.8\n\t loss: 46.24000000000001\n\t X: 5.0\n\t Y: 10.0\n\t Output: 18.5\n\t loss: 72.25\nLoss 31.79000000000001\n3.8000000000000003\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.8000000000000003\n\t loss: 3.240000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.6000000000000005\n\t loss: 12.960000000000004\n\t X: 3.0\n\t Y: 6.0\n\t Output: 11.4\n\t loss: 29.160000000000004\n\t X: 4.0\n\t Y: 8.0\n\t Output: 15.200000000000001\n\t loss: 51.84000000000002\n\t X: 5.0\n\t Y: 10.0\n\t Output: 19.0\n\t loss: 81.0\nLoss 35.64000000000001\n3.9000000000000004\n\t X: 1.0\n\t Y: 2.0\n\t Output: 3.9000000000000004\n\t loss: 3.610000000000001\n\t X: 2.0\n\t Y: 4.0\n\t Output: 7.800000000000001\n\t loss: 14.440000000000005\n\t X: 3.0\n\t Y: 6.0\n\t Output: 11.700000000000001\n\t loss: 32.49000000000001\n\t X: 4.0\n\t Y: 8.0\n\t Output: 15.600000000000001\n\t loss: 57.76000000000002\n\t X: 5.0\n\t Y: 10.0\n\t Output: 19.5\n\t loss: 90.25\nLoss 39.71000000000001\n4.0\n\t X: 1.0\n\t Y: 2.0\n\t Output: 4.0\n\t loss: 4.0\n\t X: 2.0\n\t Y: 4.0\n\t Output: 8.0\n\t loss: 16.0\n\t X: 3.0\n\t Y: 6.0\n\t Output: 12.0\n\t loss: 36.0\n\t X: 4.0\n\t Y: 8.0\n\t Output: 16.0\n\t loss: 64.0\n\t X: 5.0\n\t Y: 10.0\n\t Output: 20.0\n\t loss: 100.0\nLoss 44.0\n" ], [ "import matplotlib.pyplot as plt\nplt.plot(w_list,error_list)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5cbc18398e14bc23c3812506521a5fd19aa0a7
6,499
ipynb
Jupyter Notebook
chapter2/homework/computer/3-29/201611680434.329-checkpoint.ipynb
hpishacker/python_tutorial
9005f0db9dae10bdc1d1c3e9e5cf2268036cd5bd
[ "MIT" ]
76
2017-09-26T01:07:26.000Z
2021-02-23T03:06:25.000Z
chapter2/homework/computer/3-29/201611680434.329-checkpoint.ipynb
hpishacker/python_tutorial
9005f0db9dae10bdc1d1c3e9e5cf2268036cd5bd
[ "MIT" ]
5
2017-12-10T08:40:11.000Z
2020-01-10T03:39:21.000Z
chapter2/homework/computer/3-29/201611680434.329-checkpoint.ipynb
hacker-14/python_tutorial
4a110b12aaab1313ded253f5207ff263d85e1b56
[ "MIT" ]
112
2017-09-26T01:07:30.000Z
2021-11-25T19:46:51.000Z
25.586614
103
0.460071
[ [ [ "def compute_sum(end):\n    # note: despite its name, this computes the factorial of `end`\n    i = 1\n    total_n = 1\n\n    while i < end:\n        i = i + 1\n        total_n = total_n * i\n\n    return total_n\n\nn = int(input('Please enter integer 1, press Enter to finish. '))\nm = int(input('Please enter integer 2, press Enter to finish. '))\nk = int(input('Please enter integer 3, press Enter to finish. '))\n\nprint('The final sum is:', compute_sum(m) + compute_sum(n) + compute_sum(k))\n", "Please enter integer 1, press Enter to finish. 3\nPlease enter integer 2, press Enter to finish. 4\nPlease enter integer 3, press Enter to finish. 5\nThe final sum is: 150\n" ], [ "def compute_add(num):\n    # partial sums of the Leibniz series; 4*total approximates pi\n    i = 0\n    total = 0\n    while i < num:\n        j = 2 * i + 1\n        i = i + 1\n        if i%2 != 0:\n            total = total + 1/j\n        else:\n            total = total - 1/j\n    return total\n\nm = int(input('Please enter the first integer, press Enter to finish '))\nprint ('4 times the sum: total1 =', 4*compute_add(m))\nn = int(input('Please enter the second integer, press Enter to finish '))\nprint ('4 times the sum: total2 =', 4*compute_add(n))\n", "Please enter the first integer, press Enter to finish 1000\n4 times the sum: total1 = 3.140592653839794\nPlease enter the second integer, press Enter to finish 10000\n4 times the sum: total2 = 3.1414926535900345\n" ], [ "def constellation(name,month,day):\n    if month > 12 or month < 1 or day < 1 or day > 31:\n        print ('The numbers you entered are invalid!')\n    if (month == 3 and 21<=day<=31) or (month == 4 and 1<=day<=19):\n        print (name,'you are a very distinctive Aries!',sep = ',')\n    elif (month == 4 and 20<=day<=30) or (month == 5 and 1<=day<=20):\n        print (name,'you are a very distinctive Taurus!',sep = ',')\n    elif (month == 5 and 21<=day<=31) or (month == 6 and 1<=day<=21):\n        print (name,'you are a very distinctive Gemini!',sep = ',')\n    elif (month == 6 and 22<=day<=30) or (month == 7 and 1<=day<=22):\n        print (name,'you are a very distinctive Cancer!',sep = ',')\n    elif (month == 7 and 23<=day<=31) or (month == 8 and 1<=day<=22):\n        print (name,'you are a very distinctive Leo!',sep = ',')\n    elif (month == 8 and 23<=day<=31) or (month == 9 and 1<=day<=22):\n        print (name,'you are a very distinctive Virgo!',sep = ',')\n    elif (month == 9 and 23<=day<=30) or (month == 10 and 1<=day<=23):\n        print (name,'you are a very distinctive Libra!',sep = ',')\n    elif (month == 10 and 24<=day<=31) or (month == 11 and 1<=day<=22):\n        print (name,'you are a very distinctive Scorpio!',sep = ',')\n    elif (month == 11 and 23<=day<=30) or (month == 12 and 1<=day<=21):\n        print (name,'you are a very distinctive Sagittarius!',sep = ',')\n    elif (month == 12 and 22<=day<=31) or (month == 1 and 1<=day<=19):\n        print (name,'you are a very distinctive Capricorn!',sep = ',')\n    elif (month == 1 and 20<=day<=31) or (month == 2 and 1<=day<=18):\n        print (name,'you are a very distinctive Aquarius!',sep = ',')\n    elif (month == 2 and 19<=day<=28) or (month == 3 and 1<=day<=20):\n        print (name,'you are a very distinctive Pisces!',sep = ',')\n\nname = input('Please enter your name, press Enter to finish ')\nmonth = int(input('Please enter your birth month, press Enter to finish '))\nday = int(input('Please enter your birth day, press Enter to finish '))\nconstellation(name,month,day)\n", "Please enter your name, press Enter to finish zxt\nPlease enter your birth month, press Enter to finish 12\nPlease enter your birth day, press Enter to finish 6\nzxt,you are a very distinctive Sagittarius!\n" ], [ "def change(word):\n    # naive English pluralization rules\n    if word.endswith('s') or word.endswith('x') or word.endswith('sh') or word.endswith('ch'):\n        print (word+'es')\n    elif word.endswith('y'):\n        print (word[0:len(word)-1]+'ies')\n    else:\n        print (word+'s')\n\nwhile True:\n    word = input(\"Please enter an English word to convert, press Enter; type 'quit' to stop \")\n    if word == 'quit':\n        break\n    change(word)\n    ", "Please enter an English word to convert, press Enter; type 'quit' to stop book\nbooks\nPlease enter an English word to convert, press Enter; type 'quit' to stop school\nschools\nPlease enter an English word to convert, press Enter; type 'quit' to stop quit\n" ], [ "def computer_sum(m,n,k):\n    # repeatedly add k to m until m exceeds n, then return m\n    while m <= n:\n        m += k\n    return m\n\nm = int(input('Please enter the first integer, press Enter to finish '))\nn = int(input('Please enter the second integer, press Enter to finish '))\nk = int(input('Please enter the third integer, press Enter to finish '))\nprint ('total =', computer_sum(m,n,k))", "Please enter the first integer, press Enter to finish 3\nPlease enter the second integer, press Enter to finish 6\nPlease enter the third integer, press Enter to finish 2\ntotal = 7\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec5cc99c948b6729c11cad03bf00468d1fd7781e
18,991
ipynb
Jupyter Notebook
appyters/ABA_Aging_Harmonizome_ETL/ABA Aging.ipynb
shui02/appyter-catalog
dfa15946d151daeb7d7b1bc9af9e48428474f012
[ "CC0-1.0" ]
null
null
null
appyters/ABA_Aging_Harmonizome_ETL/ABA Aging.ipynb
shui02/appyter-catalog
dfa15946d151daeb7d7b1bc9af9e48428474f012
[ "CC0-1.0" ]
null
null
null
appyters/ABA_Aging_Harmonizome_ETL/ABA Aging.ipynb
shui02/appyter-catalog
dfa15946d151daeb7d7b1bc9af9e48428474f012
[ "CC0-1.0" ]
null
null
null
21.386261
249
0.521036
[ [ [ "# Harmonizome ETL: Aging, Dementia and Traumatic Brain Injury Study", "_____no_output_____" ], [ "Created by: Charles Dai <br>\nCredit to: Moshe Silverstein\n\nData Source Home: http://www.brain-map.org/ <br>\nData Source Download: http://aging.brain-map.org/download/index", "_____no_output_____" ] ], [ [ "# appyter init\nfrom appyter import magic\nmagic.init(lambda _=globals: _())", "_____no_output_____" ], [ "import sys\nimport os\nimport zipfile\nfrom datetime import date\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport harmonizome.utility_functions as uf\nimport harmonizome.lookup as lookup", "_____no_output_____" ], [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "### Notebook Information", "_____no_output_____" ] ], [ [ "print('This notebook was run on:', date.today(), '\\nPython version:', sys.version)", "_____no_output_____" ] ], [ [ "# Initialization", "_____no_output_____" ] ], [ [ "%%appyter hide_code\n\n{% do SectionField(\n name='data',\n title='Upload Data',\n img='load_icon.png'\n) %}\n\n{% do SectionField(\n name='settings',\n title='Settings',\n img='setting_icon.png'\n) %}", "_____no_output_____" ], [ "%%appyter code_eval\n\n{% do DescriptionField(\n name='description',\n text='The examples below were sourced from <a href=\"http://aging.brain-map.org/download/index\" target=\"_blank\">aging.brain-map.org</a>. If clicking on the examples does not work, they should be downloaded directly from the source.',\n section='data'\n) %}\n\n{% set m_file = FileField(\n constraint='.*\\.zip$',\n name='expression_matrix', \n label='Expression Matrix', \n default='gene_expression_matrix_2016-03-03.zip',\n examples={\n 'gene_expression_matrix_2016-03-03.zip': 'http://aging.brain-map.org/api/v2/well_known_file_download/502999992'\n },\n section='data'\n) %}\n\n{% set donor_file = FileField(\n constraint='.*\\.csv$',\n name='donor_metadata', \n label='Donor Metadata', \n default='DonorInformation.csv',\n examples={\n 'DonorInformation.csv': 'http://aging.brain-map.org/api/v2/data/query.csv?criteria=model::ApiTbiDonorDetail,rma::options[num_rows$eqall]'\n },\n section='data'\n) %}", "_____no_output_____" ], [ "%%appyter code_eval\n\n{% set cohort = ChoiceField(\n name='cohort',\n label='Cohort',\n description='All: all patients. Dementia/TBI: patients with dementia or traumatic brain injury. 
No Disease: healthy patients.',\n choices={\n 'All': 'All',\n 'Dementia/TBI': 'Disease',\n 'No Disease': 'Healthy'\n },\n default='All',\n section='settings'\n) %}", "_____no_output_____" ] ], [ [ "### Load Mapping Dictionaries", "_____no_output_____" ] ], [ [ "symbol_lookup, geneid_lookup = lookup.get_lookups()", "_____no_output_____" ] ], [ [ "### Output Path", "_____no_output_____" ] ], [ [ "%%appyter code_exec\n\noutput_name = 'aba_aging_{{cohort}}'.lower()\n\npath = 'Output/ABA-Aging-{{cohort}}'\nif not os.path.exists(path):\n os.makedirs(path)", "_____no_output_____" ] ], [ [ "# Load Data", "_____no_output_____" ] ], [ [ "%%appyter code_exec\n\nwith zipfile.ZipFile({{m_file}}) as zipf:\n with zipf.open('fpkm_table_normalized.csv') as matrix_file:\n matrix = pd.read_csv(matrix_file, index_col=0)\n with zipf.open('columns-samples.csv') as sample_file:\n sample_meta = pd.read_csv(sample_file, index_col=0)\n with zipf.open('rows-genes.csv') as gene_file:\n gene_meta = pd.read_csv(gene_file, index_col=0)", "_____no_output_____" ], [ "matrix.head()", "_____no_output_____" ], [ "matrix.shape", "_____no_output_____" ], [ "sample_meta.head()", "_____no_output_____" ], [ "sample_meta.shape", "_____no_output_____" ], [ "gene_meta.head()", "_____no_output_____" ] ], [ [ "## Load Donor Metadata", "_____no_output_____" ] ], [ [ "%%appyter code_exec\n\ndonor_meta = pd.read_csv(\n {{donor_file}}, \n index_col=0\n)", "_____no_output_____" ], [ "donor_meta.head()", "_____no_output_____" ], [ "donor_meta.shape", "_____no_output_____" ] ], [ [ "# Pre-process Data", "_____no_output_____" ], [ "## Select Cohort", "_____no_output_____" ] ], [ [ "%%appyter code_exec\n\nif '{{cohort}}' == 'Healthy':\n cohort = np.logical_and(donor_meta['ever_tbi_w_loc'] == 'N', \n donor_meta['act_demented'] == 'No Dementia')\nif '{{cohort}}' == 'Disease':\n cohort = np.logical_or(donor_meta['ever_tbi_w_loc'] == 'Y', \n donor_meta['act_demented'] == 'Dementia')\nif '{{cohort}}' == 'All':\n cohort = donor_meta['ever_tbi_w_loc'].astype('bool')\n\ndonor_meta = donor_meta[cohort]\nsample_cohort = sample_meta['donor_id'].isin(donor_meta.index)\nsample_meta = sample_meta[sample_cohort]", "_____no_output_____" ] ], [ [ "## Map Sample Meta to Sample ID", "_____no_output_____" ] ], [ [ "matrix.columns = matrix.columns.astype('int')\nmatrix_cohort = matrix.columns.isin(sample_meta.index)\nmatrix = matrix[matrix.columns[matrix_cohort]]\nmatrix.head()", "_____no_output_____" ] ], [ [ "## Map Gene to Row", "_____no_output_____" ] ], [ [ "matrix.index = gene_meta['gene_symbol']\nmatrix.index.name = 'Gene Symbol'\nmatrix.columns.name = 'RNA-Seq Profile ID'\nmatrix.head()", "_____no_output_____" ] ], [ [ "## Save Unfiltered Matrix to file", "_____no_output_____" ] ], [ [ "uf.save_data(matrix, path, output_name + '_matrix_unfiltered', \n compression='npz', dtype=np.float32)", "_____no_output_____" ] ], [ [ "# Filter Data", "_____no_output_____" ], [ "## Map Gene Symbols to Up-to-date Approved Gene Symbols", "_____no_output_____" ] ], [ [ "matrix = uf.map_symbols(matrix, symbol_lookup)\nmatrix.shape", "_____no_output_____" ] ], [ [ "## Merge Duplicate Genes By Rows and Duplicate Columns", "_____no_output_____" ] ], [ [ "matrix = uf.merge(matrix, 'row')\nmatrix = uf.merge(matrix, 'column')\nmatrix.shape", "_____no_output_____" ] ], [ [ "## Remove Data that is More Than 95% Missing and Impute Missing Data", "_____no_output_____" ] ], [ [ "matrix = uf.remove_impute(matrix)\nmatrix.head()", "_____no_output_____" ], [ "matrix.shape", 
"_____no_output_____" ] ], [ [ "## Log2 Transform", "_____no_output_____" ] ], [ [ "matrix = uf.log2(matrix)\nmatrix.head()", "_____no_output_____" ] ], [ [ "## Normalize Matrix (Quantile Normalize the Matrix by Column)", "_____no_output_____" ] ], [ [ "matrix = uf.quantile_normalize(matrix)\nmatrix.head()", "_____no_output_____" ] ], [ [ "## Normalize Matrix (Z-Score the Rows)", "_____no_output_____" ] ], [ [ "matrix = uf.zscore(matrix)\nmatrix.head()", "_____no_output_____" ] ], [ [ "## Histogram of First Sample", "_____no_output_____" ] ], [ [ "matrix.iloc[:, 0].hist(bins=100)", "_____no_output_____" ] ], [ [ "## Histogram of First Gene", "_____no_output_____" ] ], [ [ "matrix.iloc[0, :].hist(bins=100)", "_____no_output_____" ] ], [ [ "## Save Filtered Matrix", "_____no_output_____" ] ], [ [ "uf.save_data(matrix, path, output_name + '_matrix_filtered', \n ext='tsv', compression='gzip')", "_____no_output_____" ] ], [ [ "# Analyze Data", "_____no_output_____" ], [ "## Create Gene List", "_____no_output_____" ] ], [ [ "gene_list = uf.gene_list(matrix, geneid_lookup)\ngene_list.head()", "_____no_output_____" ], [ "gene_list.shape", "_____no_output_____" ], [ "uf.save_data(gene_list, path, output_name + '_gene_list',\n ext='tsv', compression='gzip', index=False)", "_____no_output_____" ] ], [ [ "## Create Attribute List", "_____no_output_____" ] ], [ [ "attribute_list = uf.attribute_list(matrix, sample_meta)\nattribute_list.head()", "_____no_output_____" ], [ "attribute_list.shape", "_____no_output_____" ], [ "uf.save_data(attribute_list, path, output_name + '_attribute_list',\n ext='tsv', compression='gzip')", "_____no_output_____" ] ], [ [ "## Create matrix of Standardized values (values between -1, and 1)", "_____no_output_____" ] ], [ [ "standard_matrix = uf.standardized_matrix(matrix)\nstandard_matrix.head()", "_____no_output_____" ], [ "uf.save_data(standard_matrix, path, output_name + '_standard_matrix',\n ext='tsv', compression='gzip')", "_____no_output_____" ] ], [ [ "## Plot of A Single Celltype, Normalized Value vs. 
Standardized Value", "_____no_output_____" ] ], [ [ "plt.plot(matrix[matrix.columns[0]],\n standard_matrix[standard_matrix.columns[0]], 'bo')\nplt.xlabel('Normalized Values')\nplt.ylabel('Standardized Values')\nplt.title(standard_matrix.columns[0])\nplt.grid(True)", "_____no_output_____" ] ], [ [ "## Create Ternary Matrix", "_____no_output_____" ] ], [ [ "ternary_matrix = uf.ternary_matrix(standard_matrix)\nternary_matrix.head()", "_____no_output_____" ], [ "uf.save_data(ternary_matrix, path, output_name + '_ternary_matrix',\n ext='tsv', compression='gzip')", "_____no_output_____" ] ], [ [ "## Create Gene and Attribute Set Libraries", "_____no_output_____" ] ], [ [ "uf.save_setlib(ternary_matrix, 'gene', 'up', path, output_name + '_gene_up_set')", "_____no_output_____" ], [ "uf.save_setlib(ternary_matrix, 'gene', 'down', path, output_name + '_gene_down_set')", "_____no_output_____" ], [ "uf.save_setlib(ternary_matrix, 'attribute', 'up', path, \n output_name + '_attribute_up_set')", "_____no_output_____" ], [ "uf.save_setlib(ternary_matrix, 'attribute', 'down', path, \n output_name + '_attribute_down_set')", "_____no_output_____" ] ], [ [ "## Create Attribute Similarity Matrix", "_____no_output_____" ] ], [ [ "attribute_similarity_matrix = uf.similarity_matrix(standard_matrix.T, 'cosine')\nattribute_similarity_matrix.head()", "_____no_output_____" ], [ "uf.save_data(attribute_similarity_matrix, path,\n output_name + '_attribute_similarity_matrix', \n ext='tsv', compression='gzip')", "_____no_output_____" ] ], [ [ "## Create Gene Similarity Matrix", "_____no_output_____" ] ], [ [ "gene_similarity_matrix = uf.similarity_matrix(standard_matrix, 'cosine')\ngene_similarity_matrix.head()", "_____no_output_____" ], [ "uf.save_data(gene_similarity_matrix, path, \n output_name + '_gene_similarity_matrix',\n compression='npz', symmetric=True, dtype=np.float32)", "_____no_output_____" ] ], [ [ "## Create Gene-Attribute Edge List", "_____no_output_____" ] ], [ [ "edge_list = uf.edge_list(standard_matrix)\nuf.save_data(edge_list, path, output_name + '_edge_list', \n ext='tsv', compression='gzip')", "_____no_output_____" ] ], [ [ "# Create Downloadable Save File", "_____no_output_____" ] ], [ [ "uf.archive(path)", "_____no_output_____" ] ], [ [ "### Link to download output files: [click here](./output_archive.zip)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5ccac6482e1621d697dbbcc9479e5cc0d6f504
6,008
ipynb
Jupyter Notebook
Chapter5.ipynb
kaibadash/bert-book
816964d0eb942add6cde530e85780bc3a1465a14
[ "MIT" ]
null
null
null
Chapter5.ipynb
kaibadash/bert-book
816964d0eb942add6cde530e85780bc3a1465a14
[ "MIT" ]
null
null
null
Chapter5.ipynb
kaibadash/bert-book
816964d0eb942add6cde530e85780bc3a1465a14
[ "MIT" ]
null
null
null
6,008
6,008
0.653129
[ [ [ "# Chapter 5\n- The code below involves some stochastic processing, so its output can differ from the example output shown in the book.", "_____no_output_____" ] ], [ [ "# 5-1\n!pip install transformers==4.5.0 fugashi==1.1.0 ipadic==1.0.0", "_____no_output_____" ], [ "# 5-2\nimport numpy as np\nimport torch\nfrom transformers import BertJapaneseTokenizer, BertForMaskedLM", "_____no_output_____" ], [ "# 5-3\nmodel_name = 'cl-tohoku/bert-base-japanese-whole-word-masking'\ntokenizer = BertJapaneseTokenizer.from_pretrained(model_name)\nbert_mlm = BertForMaskedLM.from_pretrained(model_name)\nbert_mlm = bert_mlm.cuda()", "_____no_output_____" ], [ "# 5-4\ntext = '今日は[MASK]へ行く。'  # \"Today I am going to [MASK].\"\ntokens = tokenizer.tokenize(text)\nprint(tokens)", "_____no_output_____" ], [ "# 5-5\n# Encode the text and place it on the GPU.\ninput_ids = tokenizer.encode(text, return_tensors='pt')\ninput_ids = input_ids.cuda()\n\n# Feed it to BERT and obtain the classification scores.\n# There is no need to pad to a fixed sequence length, so we simply pass input_ids.\nwith torch.no_grad():\n    output = bert_mlm(input_ids=input_ids)\n    scores = output.logits", "_____no_output_____" ], [ "# 5-6\n# Find the position of '[MASK]' (ID 4) in the ID sequence.\nmask_position = input_ids[0].tolist().index(4)\n\n# Take the ID of the highest-scoring token and convert it to a token.\nid_best = scores[0, mask_position].argmax(-1).item()\ntoken_best = tokenizer.convert_ids_to_tokens(id_best)\ntoken_best = token_best.replace('##', '')\n\n# Replace [MASK] with the token found above.\ntext = text.replace('[MASK]',token_best)\n\nprint(text)", "_____no_output_____" ], [ "# 5-7\ndef predict_mask_topk(text, tokenizer, bert_mlm, num_topk):\n    \"\"\"\n    Replace the first [MASK] in the text with the top-scoring tokens.\n    num_topk specifies how many of the top candidates to use.\n    Returns a list of filled-in sentences and a list of the scores of the tokens used.\n    \"\"\"\n    # Encode the text and obtain classification scores from BERT.\n    input_ids = tokenizer.encode(text, return_tensors='pt')\n    input_ids = input_ids.cuda()\n    with torch.no_grad():\n        output = bert_mlm(input_ids=input_ids)\n        scores = output.logits\n\n    # Find the top-scoring tokens and their scores.\n    mask_position = input_ids[0].tolist().index(4)\n    topk = scores[0, mask_position].topk(num_topk)\n    ids_topk = topk.indices # token IDs\n    tokens_topk = tokenizer.convert_ids_to_tokens(ids_topk) # tokens\n    scores_topk = topk.values.cpu().numpy() # scores\n\n    # Replace the [MASK] in the text with the tokens found above.\n    text_topk = [] # collects the filled-in sentences\n    for token in tokens_topk:\n        token = token.replace('##', '')\n        text_topk.append(text.replace('[MASK]', token, 1))\n\n    return text_topk, scores_topk\n\ntext = '今日は[MASK]へ行く。'\ntext_topk, _ = predict_mask_topk(text, tokenizer, bert_mlm, 10)\nprint(*text_topk, sep='\\n')", "_____no_output_____" ], [ "# 5-8\ndef greedy_prediction(text, tokenizer, bert_mlm):\n    \"\"\"\n    Take a text containing [MASK] tokens and return it filled in greedily.\n    \"\"\"\n    # From left to right, replace each [MASK] one at a time with the highest-scoring token.\n    for _ in range(text.count('[MASK]')):\n        text = predict_mask_topk(text, tokenizer, bert_mlm, 1)[0][0]\n    return text\n\ntext = '今日は[MASK][MASK]へ行く。'\ngreedy_prediction(text, tokenizer, bert_mlm)", "_____no_output_____" ], [ "# 5-9\ntext = '今日は[MASK][MASK][MASK][MASK][MASK]'\ngreedy_prediction(text, tokenizer, bert_mlm)", "_____no_output_____" ], [ "# 5-10\ndef beam_search(text, tokenizer, bert_mlm, num_topk):\n    \"\"\"\n    Fill in the blanks of the text with beam search.\n    \"\"\"\n    num_mask = text.count('[MASK]')\n    text_topk = [text]\n    scores_topk = np.array([0])\n    for _ in range(num_mask):\n        # For each sentence obtained so far, fill its first [MASK]\n        # with the top-scoring tokens.\n        text_candidates = [] # collects the fill-in results for each sentence\n        score_candidates = [] # collects the scores of the tokens used\n        for text_mask, score in zip(text_topk, scores_topk):\n            text_topk_inner, scores_topk_inner = predict_mask_topk(\n                text_mask, tokenizer, bert_mlm, num_topk\n            )\n            text_candidates.extend(text_topk_inner)\n            score_candidates.append( score + scores_topk_inner )\n\n        # Among the candidates generated by filling in, keep those with the highest total scores.\n        score_candidates = np.hstack(score_candidates)\n        idx_list = score_candidates.argsort()[::-1][:num_topk]\n        text_topk = [ text_candidates[idx] for idx in idx_list ]\n        scores_topk = score_candidates[idx_list]\n\n    return text_topk\n\ntext = \"今日は[MASK][MASK]へ行く。\"\ntext_topk = beam_search(text, tokenizer, bert_mlm, 10)\nprint(*text_topk, sep='\\n')", "_____no_output_____" ], [ "# 5-11\ntext = '今日は[MASK][MASK][MASK][MASK][MASK]'\ntext_topk = beam_search(text, tokenizer, bert_mlm, 10)\nprint(*text_topk, sep='\\n')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5cd23d3951b1fc7d93d559455e3335ff7c2046
116,338
ipynb
Jupyter Notebook
09-Machine-Learning-II/DecisionTree.ipynb
peterbengkui/DataProgramming
90ee1181acaaae754b2f2fd028638e07c4c6cde7
[ "CC0-1.0" ]
1
2020-02-07T19:00:10.000Z
2020-02-07T19:00:10.000Z
09-Machine-Learning-II/DecisionTree.ipynb
peterbengkui/DataProgramming
90ee1181acaaae754b2f2fd028638e07c4c6cde7
[ "CC0-1.0" ]
null
null
null
09-Machine-Learning-II/DecisionTree.ipynb
peterbengkui/DataProgramming
90ee1181acaaae754b2f2fd028638e07c4c6cde7
[ "CC0-1.0" ]
1
2020-02-12T17:44:10.000Z
2020-02-12T17:44:10.000Z
35.984534
255
0.332351
[ [ [ "<script>\nvar css = '.container { width: 100% !important; padding-left: 1em; padding-right: 2em; } div.output_stderr { background: #FFA; }',\n    head = document.head || document.getElementsByTagName('head')[0],\n    style = document.createElement('style');\n\nstyle.type = 'text/css';\nif (style.styleSheet){\n    style.styleSheet.cssText = css;\n} else {\n    style.appendChild(document.createTextNode(css));\n}\n\nhead.appendChild(style);\n</script>", "_____no_output_____" ] ], [ [ "# %load nbinit.py\nfrom IPython.display import HTML\nHTML(\"<style>.container { width: 100% !important; padding-left: 1em; padding-right: 2em; } div.output_stderr { background: #FFA; }</style>\")", "_____no_output_____" ] ], [ [ "# Decision Tree\nLet's see how well a decision tree can classify the data. Here we need to consider\n1. the parameters to the classifier, and\n2. the features of the data set that will be used.\nWe may just explore the impact of the maximum depth of the decision tree. Two of the 16 features ('day' and 'month') may not be useful because they reflect a date, and we're not looking for seasonal effects. So, it's fairly safe to take them out.\n\nOnce the dataset is loaded we will convert the categorical data into numeric values.\n\nFinding the right parameters and features for the best performing classifier can be a challenge. The number of possible configurations grows quickly, and knowing how they perform requires training and testing with each of them.\n\nWe may also run the training and testing on a configuration multiple times with different random splits of the data set. The performance metrics will be averaged over the iterations.\n\nWe use precision, recall, and the F1 score to evaluate each configuration.\n", "_____no_output_____" ] ], [ [ "### Load Packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nimport sklearn.tree\nimport pydot_ng as pdot\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_recall_fscore_support\nimport itertools", "_____no_output_____" ] ], [ [ "## Reading Data", "_____no_output_____" ] ], [ [ "### Read data\nDATAFILE = '/home/data/archive.ics.uci.edu/BankMarketing/bank.csv'\ndf = pd.read_csv(DATAFILE, sep=';')", "_____no_output_____" ], [ "### use sets and '-' difference operation 'A-B'. 
Also there is a symmetric difference '^'\nall_features = set(df.columns)-set(['y'])\nnum_features = set(df.describe().columns)\ncat_features = all_features-num_features\nprint(\"All features: \", \", \".join(all_features), \"\\nNumerical features: \", \", \".join(num_features), \"\\nCategorical features: \", \", \".join(cat_features))", "All features:  balance, day, education, previous, loan, contact, pdays, marital, duration, job, campaign, month, poutcome, age, default, housing \nNumerical features:  balance, day, duration, previous, campaign, age, pdays \nCategorical features:  job, education, month, loan, contact, poutcome, default, marital, housing\n" ], [ "### convert the categorical variables to numeric ones\nlevel_substitution = {}\n\ndef levels2index(levels):\n    dct = {}\n    for i in range(len(levels)):\n        dct[levels[i]] = i\n    return dct\n\ndf_num = df.copy()\n\nfor c in cat_features:\n    level_substitution[c] = levels2index(df[c].unique())\n    df_num[c].replace(level_substitution[c], inplace=True)\n\n## same for target\ndf_num.y.replace({'no':0, 'yes':1}, inplace=True)\ndf_num", "_____no_output_____" ], [ "### create feature matrix and target vector\n# note: as_matrix() is deprecated in newer pandas; .to_numpy() is the modern equivalent\nX = df_num[list(all_features-set(['day', 'month']))].as_matrix()\ny = df_num.y.as_matrix()\nX, y", "_____no_output_____" ] ], [ [ "## Evaluation\nTest how Maximum Depth of tree impacts performance", "_____no_output_____" ] ], [ [ "for d in [3, 5, 7, 11, 13]:\n    clf = DecisionTreeClassifier(max_depth=d)\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)\n    clf.fit(X_train, y_train)\n    ŷ = clf.predict(X_test)\n    print('Depth %d' % d)\n    print(classification_report(y_test, ŷ))", "Depth 3\n             precision    recall  f1-score   support\n\n          0       0.93      0.97      0.95      1620\n          1       0.55      0.35      0.43       189\n\navg / total       0.89      0.90      0.89      1809\n\nDepth 5\n             precision    recall  f1-score   support\n\n          0       0.93      0.96      0.94      1620\n          1       0.51      0.34      0.41       189\n\navg / total       0.88      0.90      0.89      1809\n\nDepth 7\n             precision    recall  f1-score   support\n\n          0       0.93      0.96      0.94      1620\n          1       0.51      0.34      0.41       189\n\navg / total       0.88      0.90      0.89      1809\n\nDepth 11\n             precision    recall  f1-score   support\n\n          0       0.93      0.94      0.93      1620\n          1       0.41      0.38      0.39       189\n\navg / total       0.87      0.88      0.88      1809\n\nDepth 13\n             precision    recall  f1-score   support\n\n          0       0.93      0.92      0.92      1620\n          1       0.37      0.42      0.40       189\n\navg / total       0.87      0.86      0.87      1809\n\n" ] ], [ [ "Two methods from `sklearn.metrics` can be helpful:\n1. `confusion_matrix` produces a confusion matrix\n2. 
`precision_recall_fscore_support` returns a matrix with values for each of them across all target levels.", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_test, ŷ)\ncm", "_____no_output_____" ], [ "prf1s = precision_recall_fscore_support(y_test, ŷ)\nprf1s", "_____no_output_____" ], [ "perf = None\nfor i in range(100):\n if type(perf)!=type(None):\n perf = np.vstack((perf, np.array(prf1s).reshape(1,8)))\n else:\n perf = np.array(prf1s).reshape(1,8)\nperf_agg = perf.mean(axis=0)\npd.DataFrame(perf_agg.reshape(1,8), columns=[[b for a in ['Precision', 'Recall', 'F1_score', 'Support'] for b in [a, a]], ['no', 'yes']*4])\n##pd.DataFrame([5,5, 'a|b|c'] + list(perf.mean(axis=0)), columns=perf_df.columns)", "_____no_output_____" ], [ "performance_df = pd.DataFrame(columns=[\n ['Params']*3 + [b for a in ['Precision', 'Recall', 'F1_score', 'Support'] for b in [a, a]],\n ['MaxDepth', 'Nfeature', 'Features'] + ['no', 'yes']*4\n ])\ntempdf = pd.concat([\n pd.DataFrame({'a': [1], 'b': [2], 'c': ['Hello']}),\n pd.DataFrame(np.zeros((1,8)))\n ], axis=1, ignore_index=True)\n\ntempdf.columns=performance_df.columns\n#performance_df\ntempdf", "_____no_output_____" ], [ "pd.DataFrame(np.zeros(8).reshape(1,8))", "_____no_output_____" ] ], [ [ "## The Heavy Lifting\nNow, let's run the performance evaluation across a number of configurations. We'll collect the results for each configuration into a dataframe.", "_____no_output_____" ] ], [ [ "# creating a template (i.e. empty table)\nperformance_template_df = pd.DataFrame(columns= [\n ['Params']*3 + [b for a in ['Precision', 'Recall', 'F1_score', 'Support'] for b in [a, a]],\n ['MaxDepth', 'Nfeature', 'Features'] + ['no', 'yes']*4\n ])\nperformance_template_df", "_____no_output_____" ] ], [ [ "The following code implements nested loops for MaxDepth, number and permutation of features. In addition, we have an internal loop to\naggregate the performance metrics over a number of different random splits.\n\nThe outer two loops, however, only iterate over one value each. 
The commented code shows how they should run...", "_____no_output_____" ] ], [ [ "%%time\nperformance_df = performance_template_df.copy() #-- always start fresh\n\nfor MaxDepth in [5]: ###range(5,9):\n    for Nftr in [8]: ###[len(all_features) - k for k in range(len(all_features)-2)]:\n        for ftrs in itertools.combinations(all_features-set(['day', 'month']), Nftr):\n            X = df_num[list(ftrs)].as_matrix()\n            clf = DecisionTreeClassifier(max_depth=MaxDepth)\n\n            perf_arr = None #-- this array will hold results for different random samples\n            for i in range(10): ### running train and test on different random samples\n                X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=i)\n                clf.fit(X_train, y_train)\n                ŷ = clf.predict(X_test)\n                # Prec, Recall, F1, Supp\n                prf1s = precision_recall_fscore_support(y_test, ŷ)\n\n                ## stack this sample's metrics onto the array\n                if type(perf_arr)!=type(None):\n                    perf_arr = np.vstack((perf_arr, np.array(prf1s).reshape(1,8)))\n                else:\n                    perf_arr = np.array(prf1s).reshape(1,8)\n            perf_agg = perf_arr.mean(axis=0) #-- mean over rows, for each column\n            perf_df = pd.concat([ #-- creating a 1 row dataframe is a bit tricky because of the different data types\n                pd.DataFrame({'a': [MaxDepth], 'b': [Nftr], 'c': ['|'.join(list(ftrs))]}),\n                pd.DataFrame(perf_agg.reshape(1, 8))\n            ], axis=1, ignore_index=True)\n            perf_df.columns=performance_df.columns\n            performance_df = performance_df.append(perf_df, ignore_index=True)", "/usr/lib64/python3.4/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.\n  'precision', 'predicted', average, warn_for)\n" ], [ "performance_df", "_____no_output_____" ] ], [ [ "That took a while (about 2 minutes). Once computations take that long we should look at a different way to implement them ... **outside the notebook**.", "_____no_output_____" ], [ "Let's see what the best performing configuration with respect to the F1-score of 'yes' is:", "_____no_output_____" ] ], [ [ "best = performance_df.F1_score.yes.argmax()\nprint(performance_df.iloc[best])\nprint(\"\\nFeatures: \", ', '.join([ '\"%s\"'%f for f in performance_df.iloc[best].Params.Features.split('|') ], ))", "Params     MaxDepth                                                     5\n           Nfeature                                                     8\n           Features     balance|education|previous|loan|contact|pdays|...\nPrecision  no                                                     0.93322\n           yes                                                    0.46445\nRecall     no                                                    0.942833\n           yes                                                     0.42338\nF1_score   no                                                    0.937998\n           yes                                                   0.442813\nSupport    no                                                     1611.73\n           yes                                                    188.317\nName: 9, dtype: object\n\nFeatures:  \"balance\", \"education\", \"previous\", \"loan\", \"contact\", \"pdays\", \"duration\", \"poutcome\"\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec5ce53f722d5eddd69b11a428c9e1574f97a223
132,345
ipynb
Jupyter Notebook
CNN_PyTorch/4-2. Classify FashionMNIST, solution 1.ipynb
46gom/DLND_Exercises
eb4bc8600af7b23d05191c7156bc90ce0ed7af79
[ "MIT" ]
4
2020-03-06T06:05:58.000Z
2020-03-13T05:10:17.000Z
CNN_PyTorch/4-2. Classify FashionMNIST, solution 1.ipynb
46gom/DLND_Exercises
eb4bc8600af7b23d05191c7156bc90ce0ed7af79
[ "MIT" ]
8
2021-03-19T04:53:14.000Z
2022-03-12T00:03:52.000Z
CNN_PyTorch/4-2. Classify FashionMNIST, solution 1.ipynb
46gom/DLND_Exercises
eb4bc8600af7b23d05191c7156bc90ce0ed7af79
[ "MIT" ]
4
2019-11-19T00:08:13.000Z
2020-03-13T05:10:23.000Z
185.617111
57,424
0.875621
[ [ [ "# CNN for Classification\n---\nIn this and the next notebook, we define **and train** a CNN to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist). \n\nWe are providing two solutions to show you how different network structures and training strategies can affect the performance and accuracy of a CNN. This first solution will be a simple CNN with two convolutional layers. \n\nPlease note that this is just one possible solution out of many!", "_____no_output_____" ], [ "### Load the [data](https://pytorch.org/docs/stable/torchvision/datasets.html)\n\nIn this cell, we load in both **training and test** datasets from the FashionMNIST class.", "_____no_output_____" ] ], [ [ "# our basic libraries\nimport torch\nimport torchvision\n\n# data loading and transforming\nfrom torchvision.datasets import FashionMNIST\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\n# The output of torchvision datasets are PILImage images of range [0, 1]. \n# We transform them to Tensors for input into a CNN\n\n## Define a transform to read the data in as a tensor\ndata_transform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = FashionMNIST(root='./data', train=True,\n download=True, transform=data_transform)\n\ntest_data = FashionMNIST(root='./data', train=False,\n download=True, transform=data_transform)\n\n\n# Print out some stats about the training and test data\nprint('Train data, number of images: ', len(train_data))\nprint('Test data, number of images: ', len(test_data))", "Train data, number of images: 60000\nTest data, number of images: 10000\n" ], [ "# prepare data loaders, set the batch_size\n## TODO: you can try changing the batch_size to be larger or smaller\n## when you get to training your network, see how batch_size affects the loss\nbatch_size = 20\n\ntrain_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)\n\n# specify the image classes\nclasses = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', \n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']", "_____no_output_____" ] ], [ [ "### Visualize some training data\n\nThis cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(batch_size):\n ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(classes[labels[idx]])", "_____no_output_____" ] ], [ [ "### Define the network architecture\n\nThe various layers that make up any neural network are documented, [here](https://pytorch.org/docs/stable/nn.html). 
For a convolutional neural network, we'll use a simple series of layers:\n* Convolutional layers\n* Maxpooling layers\n* Fully-connected (linear) layers\n\nYou are also encouraged to look at adding [dropout layers](https://pytorch.org/docs/stable/nn.html#dropout) to avoid overfitting this data.\n\n---\n\nTo define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in.\n\nNote: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network.\n\n#### Define the Layers in ` __init__`\nAs a reminder, a conv/pool layer may be defined like this (in `__init__`):\n```\n# 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel\nself.conv1 = nn.Conv2d(1, 32, 3)\n\n# maxpool that uses a square window of kernel_size=2, stride=2\nself.pool = nn.MaxPool2d(2, 2) \n```\n\n#### Refer to Layers in `forward`\nThen referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied:\n```\nx = self.pool(F.relu(self.conv1(x)))\n```\n\nYou must place any layers with trainable weights, such as convolutional layers, in the `__init__` function and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, may appear *only* in the `forward` function. In practice, you'll often see conv/pool layers defined in `__init__` and activations defined in `forward`.\n\n#### Convolutional layer\nThe first convolution layer has been defined for you, it takes in a 1 channel (grayscale) image and outputs 10 feature maps as output, after convolving the image with 3x3 filters.\n\n#### Flattening\n\nRecall that to move from the output of a convolutional/pooling layer to a linear layer, you must first flatten your extracted features into a vector. If you've used the deep learning library, Keras, you may have seen this done by `Flatten()`, and in PyTorch you can flatten an input `x` with `x = x.view(x.size(0), -1)`.\n\n### TODO: Define the rest of the layers\n\nIt will be up to you to define the other layers in this network; we have some recommendations, but you may change the architecture and parameters as you see fit.\n\nRecommendations/tips:\n* Use at least two convolutional layers\n* Your output must be a linear layer with 10 outputs (for the 10 classes of clothing)\n* Use a dropout layer to avoid overfitting\n\n### A note on output size\n\nFor any convolutional layer, the output feature maps will have the specified depth (a depth of 10 for 10 filters in a convolutional layer) and the dimensions of the produced feature maps (width/height) can be computed as the _input image_ width/height, W, minus the filter size, F, divided by the stride, S, all + 1. The equation looks like: `output_dim = (W-F)/S + 1`, for an assumed padding size of 0. You can find a derivation of this formula, [here](http://cs231n.github.io/convolutional-networks/#conv).\n\nFor a pool layer with a size 2 and stride 2, the output dimension will be reduced by a factor of 2. 
Read the comments in the code below to see the output size for each layer.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n \n # 1 input image channel (grayscale), 10 output channels/feature maps\n # 3x3 square convolution kernel\n ## output size = (W-F)/S +1 = (28-3)/1 +1 = 26\n # the output Tensor for one image, will have the dimensions: (10, 26, 26)\n # after one pool layer, this becomes (10, 13, 13)\n self.conv1 = nn.Conv2d(1, 10, 3)\n \n # maxpool layer\n # pool with kernel_size=2, stride=2\n self.pool = nn.MaxPool2d(2, 2)\n \n # second conv layer: 10 inputs, 20 outputs, 3x3 conv\n ## output size = (W-F)/S +1 = (13-3)/1 +1 = 11\n # the output tensor will have dimensions: (20, 11, 11)\n # after another pool layer this becomes (20, 5, 5); 5.5 is rounded down\n self.conv2 = nn.Conv2d(10, 20, 3)\n \n # 20 outputs * the 5*5 filtered/pooled map size\n # 10 output channels (for the 10 classes)\n self.fc1 = nn.Linear(20*5*5, 10)\n \n\n # define the feedforward behavior\n def forward(self, x):\n # two conv/relu + pool layers\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n\n # prep for linear layer\n # flatten the inputs into a vector\n x = x.view(x.size(0), -1)\n \n # one linear layer\n x = F.relu(self.fc1(x))\n # a softmax layer to convert the 10 outputs into a distribution of class scores\n x = F.log_softmax(x, dim=1)\n \n # final output\n return x\n\n# instantiate and print your Net\nnet = Net()\nprint(net)", "Net(\n (conv1): Conv2d(1, 10, kernel_size=(3, 3), stride=(1, 1))\n (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (conv2): Conv2d(10, 20, kernel_size=(3, 3), stride=(1, 1))\n (fc1): Linear(in_features=500, out_features=10, bias=True)\n)\n" ] ], [ [ "### TODO: Specify the loss function and optimizer\n\nLearn more about [loss functions](https://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizers](https://pytorch.org/docs/stable/optim.html) in the online documentation.\n\nNote that for a classification problem like this, one typically uses cross entropy loss, which can be defined in code like: `criterion = nn.CrossEntropyLoss()`; cross entropy loss combines `softmax` and `NLL loss` so, alternatively (as in this example), you may see NLL Loss being used when the output of our Net is a distribution of class scores. \n\nPyTorch also includes some standard stochastic optimizers like stochastic gradient descent and Adam. You're encouraged to try different optimizers and see how your model responds to these choices as it trains.\n", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\n## TODO: specify loss function \n# cross entropy loss combines softmax and nn.NLLLoss() in one single class.\ncriterion = nn.NLLLoss()\n\n## TODO: specify optimizer \n# stochastic gradient descent with a small learning rate\noptimizer = optim.SGD(net.parameters(), lr=0.001)", "_____no_output_____" ] ], [ [ "### A note on accuracy\n\nIt's interesting to look at the accuracy of your network **before and after** training. This way you can really see that your network has learned something. 
In the next cell, let's see what the accuracy of an untrained network is (we expect it to be around 10% which is the same accuracy as just guessing for all 10 classes).", "_____no_output_____" ] ], [ [ "# Calculate accuracy before training\ncorrect = 0\ntotal = 0\n\n# Iterate through test dataset\nfor images, labels in test_loader:\n\n # forward pass to get outputs\n # the outputs are a series of class scores\n outputs = net(images)\n\n # get the predicted class from the maximum value in the output-list of class scores\n _, predicted = torch.max(outputs.data, 1)\n\n # count up total number of correct labels\n # for which the predicted and true labels are equal\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\n# calculate the accuracy\n# to convert `correct` from a Tensor into a scalar, use .item()\naccuracy = 100.0 * correct.item() / total\n\n# print it out!\nprint('Accuracy before training: ', accuracy)", "Accuracy before training: 10.52\n" ] ], [ [ "### Train the Network\n\nBelow, we've defined a `train` function that takes in a number of epochs to train for. \n* The number of epochs is how many times a network will cycle through the entire training dataset. \n* Inside the epoch loop, we loop over the training dataset in batches; recording the loss every 1000 batches.\n\nHere are the steps that this training function performs as it iterates over the training dataset:\n\n1. Zero's the gradients to prepare for a forward pass\n2. Passes the input through the network (forward pass)\n3. Computes the loss (how far is the predicted classes are from the correct labels)\n4. Propagates gradients back into the network’s parameters (backward pass)\n5. Updates the weights (parameter update)\n6. Prints out the calculated loss\n\n", "_____no_output_____" ] ], [ [ "def train(n_epochs):\n \n loss_over_time = [] # to track the loss as the network trains\n \n for epoch in range(n_epochs): # loop over the dataset multiple times\n \n running_loss = 0.0\n \n for batch_i, data in enumerate(train_loader):\n # get the input images and their corresponding labels\n inputs, labels = data\n\n # zero the parameter (weight) gradients\n optimizer.zero_grad()\n\n # forward pass to get outputs\n outputs = net(inputs)\n\n # calculate the loss\n loss = criterion(outputs, labels)\n\n # backward pass to calculate the parameter gradients\n loss.backward()\n\n # update the parameters\n optimizer.step()\n\n # print loss statistics\n # to convert loss into a scalar and add it to running_loss, we use .item()\n running_loss += loss.item()\n \n if batch_i % 1000 == 999: # print every 1000 batches\n avg_loss = running_loss/1000\n # record and print the avg loss over the 1000 batches\n loss_over_time.append(avg_loss)\n print('Epoch: {}, Batch: {}, Avg. Loss: {}'.format(epoch + 1, batch_i+1, avg_loss))\n running_loss = 0.0\n\n print('Finished Training')\n return loss_over_time\n", "_____no_output_____" ], [ "# define the number of epochs to train for\nn_epochs = 30 # start small to see if your model works, initially\n\n# call train and record the loss over time\ntraining_loss = train(n_epochs)", "Epoch: 1, Batch: 1000, Avg. Loss: 2.2868175230026244\nEpoch: 1, Batch: 2000, Avg. Loss: 2.2556393740177154\nEpoch: 1, Batch: 3000, Avg. Loss: 2.205124769091606\nEpoch: 2, Batch: 1000, Avg. Loss: 2.1203471163511276\nEpoch: 2, Batch: 2000, Avg. Loss: 2.0477432513237\nEpoch: 2, Batch: 3000, Avg. Loss: 1.9815359983444214\nEpoch: 3, Batch: 1000, Avg. Loss: 1.8996226536035539\nEpoch: 3, Batch: 2000, Avg. 
Loss: 1.8147404186725618\nEpoch: 3, Batch: 3000, Avg. Loss: 1.7321927509307862\nEpoch: 4, Batch: 1000, Avg. Loss: 1.578281832098961\nEpoch: 4, Batch: 2000, Avg. Loss: 1.5266655530929565\nEpoch: 4, Batch: 3000, Avg. Loss: 1.4980273706316949\nEpoch: 5, Batch: 1000, Avg. Loss: 1.4726707084178925\nEpoch: 5, Batch: 2000, Avg. Loss: 1.4684425346851349\nEpoch: 5, Batch: 3000, Avg. Loss: 1.4494574863910674\nEpoch: 6, Batch: 1000, Avg. Loss: 1.4456084757447243\nEpoch: 6, Batch: 2000, Avg. Loss: 1.4305420234799384\nEpoch: 6, Batch: 3000, Avg. Loss: 1.4167207903862\nEpoch: 7, Batch: 1000, Avg. Loss: 1.407468405842781\nEpoch: 7, Batch: 2000, Avg. Loss: 1.4014562340378762\nEpoch: 7, Batch: 3000, Avg. Loss: 1.4169175248742103\nEpoch: 8, Batch: 1000, Avg. Loss: 1.4001869242191314\nEpoch: 8, Batch: 2000, Avg. Loss: 1.3928540123105049\nEpoch: 8, Batch: 3000, Avg. Loss: 1.3802106212377547\nEpoch: 9, Batch: 1000, Avg. Loss: 1.3772892249822617\nEpoch: 9, Batch: 2000, Avg. Loss: 1.3825336514115334\nEpoch: 9, Batch: 3000, Avg. Loss: 1.3705663481354713\nEpoch: 10, Batch: 1000, Avg. Loss: 1.3694153184890747\nEpoch: 10, Batch: 2000, Avg. Loss: 1.3810064570605756\nEpoch: 10, Batch: 3000, Avg. Loss: 1.341630794942379\nEpoch: 11, Batch: 1000, Avg. Loss: 1.3677116389870643\nEpoch: 11, Batch: 2000, Avg. Loss: 1.3436600825190543\nEpoch: 11, Batch: 3000, Avg. Loss: 1.350110428392887\nEpoch: 12, Batch: 1000, Avg. Loss: 1.3445810881853104\nEpoch: 12, Batch: 2000, Avg. Loss: 1.3499431834816933\nEpoch: 12, Batch: 3000, Avg. Loss: 1.3393242843151092\nEpoch: 13, Batch: 1000, Avg. Loss: 1.3306362637281417\nEpoch: 13, Batch: 2000, Avg. Loss: 1.330379969418049\nEpoch: 13, Batch: 3000, Avg. Loss: 1.3463139534592627\nEpoch: 14, Batch: 1000, Avg. Loss: 1.3359011572301387\nEpoch: 14, Batch: 2000, Avg. Loss: 1.3317513466477393\nEpoch: 14, Batch: 3000, Avg. Loss: 1.3167364555597305\nEpoch: 15, Batch: 1000, Avg. Loss: 1.3136654596626758\nEpoch: 15, Batch: 2000, Avg. Loss: 1.3182315327227117\n" ] ], [ [ "## Visualizing the loss\n\nA good indication of how much your network is learning as it trains is the loss over time. In this example, we printed and recorded the average loss for each 1000 batches and for each epoch. Let's plot it and see how the loss decreases (or doesn't) over time.\n\nIn this case, you can see that it takes a little bit for a big initial loss decrease, and the loss is flattening out over time.", "_____no_output_____" ] ], [ [ "# visualize the loss as the network trained\nplt.plot(training_loss)\nplt.xlabel('1000\\'s of batches')\nplt.ylabel('loss')\nplt.ylim(0, 2.5) # consistent scale\nplt.show()", "_____no_output_____" ] ], [ [ "### Test the Trained Network\n\nOnce you are satisfied with how the loss of your model has decreased, there is one last step: test!\n\nYou must test your trained model on a previously unseen dataset to see if it generalizes well and can accurately classify this new dataset. For FashionMNIST, which contains many pre-processed training images, a good model should reach **greater than 85% accuracy** on this test dataset. If you are not reaching this value, try training for a larger number of epochs, tweaking your hyperparameters, or adding/subtracting layers from your CNN.", "_____no_output_____" ] ], [ [ "# initialize tensor and lists to monitor test loss and accuracy\ntest_loss = torch.zeros(1)\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. 
for i in range(10))\n\n# set the module to evaluation mode\nnet.eval()\n\nfor batch_i, data in enumerate(test_loader):\n \n # get the input images and their corresponding labels\n inputs, labels = data\n \n # forward pass to get outputs\n outputs = net(inputs)\n\n # calculate the loss\n loss = criterion(outputs, labels)\n \n # update average test loss \n test_loss = test_loss + ((torch.ones(1) / (batch_i + 1)) * (loss.data - test_loss))\n \n # get the predicted class from the maximum value in the output-list of class scores\n _, predicted = torch.max(outputs.data, 1)\n \n # compare predictions to true label\n # this creates a `correct` Tensor that holds the number of correctly classified images in a batch\n correct = np.squeeze(predicted.eq(labels.data.view_as(predicted)))\n \n # calculate test accuracy for *each* object class\n # we get the scalar value of correct items for a class, by calling `correct[i].item()`\n for i in range(batch_size):\n label = labels.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\nprint('Test Loss: {:.6f}\\n'.format(test_loss.numpy()[0]))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n classes[i], 100 * class_correct[i] / class_total[i],\n np.sum(class_correct[i]), np.sum(class_total[i])))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\n \nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))", "Test Loss: 0.784023\n\nTest Accuracy of T-shirt/top: 92% (925/1000)\nTest Accuracy of Trouser: 96% (967/1000)\nTest Accuracy of Pullover: 0% ( 0/1000)\nTest Accuracy of Dress: 87% (873/1000)\nTest Accuracy of Coat: 91% (911/1000)\nTest Accuracy of Sandal: 94% (945/1000)\nTest Accuracy of Shirt: 0% ( 0/1000)\nTest Accuracy of Sneaker: 93% (935/1000)\nTest Accuracy of Bag: 96% (967/1000)\nTest Accuracy of Ankle boot: 93% (938/1000)\n\nTest Accuracy (Overall): 74% (7461/10000)\n" ] ], [ [ "### Visualize sample test results\n\nFormat: predicted class (true class)", "_____no_output_____" ] ], [ [ "# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n# get predictions\npreds = np.squeeze(net(images).data.max(1, keepdim=True)[1].numpy())\nimages = images.numpy()\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(batch_size):\n ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(\"{} ({})\".format(classes[preds[idx]], classes[labels[idx]]),\n color=(\"green\" if preds[idx]==labels[idx] else \"red\"))", "_____no_output_____" ] ], [ [ "### Question: What are some weaknesses of your model? (And how might you improve these in future iterations.)", "_____no_output_____" ], [ "**Answer**: This model performs well on everything but shirts and pullovers (0% accuracy); it looks like this incorrectly classifies most of those as a coat which has a similar overall shape. Because it performs well on everything but these two classes, I suspect this model is overfitting certain classes at the cost of generalization. 
I suspect that this accuracy could be improved by adding some dropout layers to avoid overfitting.", "_____no_output_____" ] ], [ [ "# Saving the model\nmodel_dir = 'saved_models/'\nmodel_name = 'fashion_net_simple.pt'\n\n# after training, save your model parameters in the dir 'saved_models'\n# when you're ready, un-comment the line below\ntorch.save(net.state_dict(), model_dir+model_name)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec5ced0d57940de75a56f9cea14d232741805e72
1,438
ipynb
Jupyter Notebook
docs/auto_examples/plot_UMAP.ipynb
timgates42/hypertools
9ac3dc11123419f2f00d596ac5920db2486cc0a1
[ "MIT" ]
1,681
2017-01-28T00:28:02.000Z
2022-03-11T00:57:13.000Z
docs/auto_examples/plot_UMAP.ipynb
timgates42/hypertools
9ac3dc11123419f2f00d596ac5920db2486cc0a1
[ "MIT" ]
170
2017-01-27T22:59:09.000Z
2022-02-12T03:47:46.000Z
docs/auto_examples/plot_UMAP.ipynb
timgates42/hypertools
9ac3dc11123419f2f00d596ac5920db2486cc0a1
[ "MIT" ]
180
2017-02-01T04:34:42.000Z
2022-02-22T15:46:23.000Z
26.62963
287
0.520862
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Visualizing the digits dataset using UMAP\n\n\nThis example loads in some data from the scikit-learn digits dataset and plots\nit using UMAP.\n\n", "_____no_output_____" ] ], [ [ "# Code source: Andrew Heusser and Leland McInnes\n# License: MIT\n\nfrom sklearn import datasets\nimport hypertools as hyp\n\ndigits = datasets.load_digits(n_class=6)\ndata = digits.data\nhue = digits.target.astype('str')\n\nhyp.plot(data, '.', reduce='UMAP', hue=hue, ndims=2)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
ec5cf005cb0f43bd34c0fb349064868da9e32eba
12,337
ipynb
Jupyter Notebook
notebooks/00_quick_start/sar_movieratings_with_azureml_designer.ipynb
danyang-liu/recommenders
558733f366983d576953a407ab7180b1642dbc5b
[ "MIT" ]
28
2021-11-12T08:26:40.000Z
2022-03-27T07:21:24.000Z
examples/00_quick_start/sar_movieratings_with_azureml_designer.ipynb
Chow1996/recommenders
302bfcce72d75f090362cb609875de7e258ed99e
[ "MIT" ]
5
2021-11-10T02:58:32.000Z
2022-03-21T16:13:11.000Z
examples/00_quick_start/sar_movieratings_with_azureml_designer.ipynb
Chow1996/recommenders
302bfcce72d75f090362cb609875de7e258ed99e
[ "MIT" ]
9
2021-11-03T07:14:47.000Z
2022-02-22T13:42:04.000Z
41.962585
474
0.627219
[ [ [ "# Quickstart to integrate Recommenders in AzureML Designer\n\nThis notebook shows how to integrate any algorithm in Recommenders library into AzureML Designer. \n\n[AzureML Designer](https://docs.microsoft.com/en-us/azure/machine-learning/concept-designer) lets you visually connect datasets and modules on an interactive canvas to create machine learning models. \n\n![img](https://recodatasets.blob.core.windows.net/images/designer-drag-and-drop.gif)\n\nOne of the features of AzureML Designer is that it is possible for developers to integrate any python library to make it available as a module. In this notebook are are going to show how to integrate [SAR](sar_movielens.ipynb) and several other modules in Designer\n\n\n## Installation\n\nThe first step is to install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) and Module CLI extension. Assuming that you have installed the Recommenders environment `reco_base` as explained in the [SETUP.md](../../SETUP.md), you need to install:\n```bash\nconda activate reco_base\npip install azure-cli\n# Uninstall azure-cli-ml (the `az ml` commands)\naz extension remove -n azure-cli-ml\n# Install local version of azure-cli-ml (which includes `az ml module` commands)\naz extension add --source https://azuremlsdktestpypi.azureedge.net/CLI-SDK-Runners-Validation/13082891/azure_cli_ml-0.1.0.13082891-py3-none-any.whl --pip-extra-index-urls https://azuremlsdktestpypi.azureedge.net/CLI-SDK-Runners-Validation/13082891 --yes\n```\n\n## Module implementation\n\nThe scenario that we are going to reproduce in Designer, as a reference example, is the content of the [SAR quickstart notebook](sar_movielens.ipynb). In it, we load a dataset, split it into train and test sets, train SAR algorithm, predict using the test set and compute several ranking metrics (precision at k, recall at k, MAP and nDCG).\n\nFor the pipeline that we want to create in Designer, we need to build the following modules:\n\n- Stratified splitter\n- SAR training\n- SAR prediction\n- Precision at k\n- Recall at k\n- MAP\n- nDCG\n\nThe python code is defined with a python entry and a yaml file. All the python entries and yaml files for this pipeline can be found in [reco_utils/azureml/azureml_designer_modules](../../reco_utils/azureml/azureml_designer_modules).\n\n\n### Define python entry\n\nTo illustrate how a python entry is defined we are going to explain the [precision at k entry](../../reco_utils/azureml/azureml_designer_modules/entries/precision_at_k_entry.py). A simplified version of the code is shown next:\n\n```python\n# Dependencies\nfrom azureml.studio.core.data_frame_schema import DataFrameSchema\nfrom azureml.studio.core.io.data_frame_directory import (\n load_data_frame_from_directory,\n save_data_frame_to_directory,\n)\nfrom reco_utils.evaluation.python_evaluation import precision_at_k\n\n# First, the input variables of precision_at_k are defined as argparse arguments\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--rating-true\", help=\"True DataFrame.\")\n parser.add_argument(\"--rating-pred\", help=\"Predicted DataFrame.\")\n parser.add_argument(\n \"--col-user\", type=str, help=\"A string parameter with column name for user.\"\n )\n # ... 
more arguments\n args, _ = parser.parse_known_args()\n\n # This module has two main inputs from the canvas, the true and predicted labels\n # they are loaded into the runtime as a pandas DataFrame\n rating_true = load_data_frame_from_directory(args.rating_true).data\n rating_pred = load_data_frame_from_directory(args.rating_pred).data\n\n # The python function is instantiated and the computation is performed\n eval_precision = precision_at_k(rating_true, rating_pred)\n \n # To output the result to Designer, we write it as a DataFrame\n score_result = pd.DataFrame({\"precision_at_k\": [eval_precision]})\n save_data_frame_to_directory(\n args.score_result,\n score_result,\n schema=DataFrameSchema.data_frame_to_dict(score_result),\n )\n```\n\n\n### Define module specification yaml\n\nOnce we have the python entry, we need to create the yaml file that will interact with Designer, [precision_at_k.yaml](../../reco_utils/azureml/azureml_designer_modules/module_specs/precision_at_k.yaml).\n\n```yaml\nmoduleIdentifier: \n namespace: microsoft.com/cat\n moduleName: Precision at K\n moduleVersion: 1.1.0\ndescription: \"Precision at K metric from Recommenders repo: https://github.com/Microsoft/Recommenders.\"\nmetadata:\n annotations:\n tags: [\"Recommenders\", \"Metrics\"]\ninputs:\n- name: Rating true\n type: DataFrameDirectory\n description: True DataFrame.\n- name: Rating pred\n type: DataFrameDirectory\n description: Predicted DataFrame.\n- name: User column\n type: String\n default: UserId\n description: Column name of user IDs.\n- name: Item column\n type: String\n default: MovieId\n description: Column name of item IDs.\n- name: Rating column\n type: String\n default: Rating\n description: Column name of ratings.\n- name: Prediction column\n type: String\n default: prediction\n description: Column name of predictions.\n- name: Relevancy method\n type: String\n default: top_k\n description: method for determining relevancy ['top_k', 'by_threshold'].\n- name: Top k\n type: Integer\n default: 10\n description: Number of top k items per user.\n- name: Threshold\n type: Float\n default: 10.0\n description: Threshold of top items per user.\noutputs:\n- name: Score\n type: DataFrameDirectory\n description: Precision at k (min=0, max=1).\nimplementation:\n container:\n amlEnvironment:\n python:\n condaDependenciesFile: sar_conda.yaml\n additionalIncludes:\n - ../../../\n command: [python, reco_utils/azureml/azureml_designer_modules/entries/precision_at_k_entry.py]\n args:\n - --rating-true\n - inputPath: Rating true\n - --rating-pred\n - inputPath: Rating pred\n - --col-user\n - inputValue: User column\n - --col-item\n - inputValue: Item column\n - --col-rating\n - inputValue: Rating column\n - --col-prediction\n - inputValue: Prediction column\n - --relevancy-method\n - inputValue: Relevancy method\n - --k\n - inputValue: Top k\n - --threshold\n - inputValue: Threshold\n - --score-result\n - outputPath: Score\n```\n\nIn the yaml file we can see a number of sections. The heading defines attributes like name, version or description. In the section inputs, all inputs are defined. The two main dataframes have ports, which can be connected to other modules. The inputs without port appear in a canvas menu. The output is defined as a DataFrame as well. The last section, implementation, defines the conda environment, the associated python entry and the arguments to the python file.\n\n\n## Module Registration\n\nOnce the code is implemented, we need to register it as an AzureML Designer custom module. 
The registration can be performed following these simple steps:", "_____no_output_____" ] ], [ [ "!az login", "_____no_output_____" ], [ "!az account set -s \"Your subscription name\"\n!az ml folder attach -w \"Your workspace name\" -g \"Your resource group name\"", "_____no_output_____" ], [ "import os\nimport tempfile\nimport shutil\nimport subprocess", "_____no_output_____" ], [ "# Register modules with spec via Azure CLI\nroot_path = os.path.abspath(os.path.join(os.getcwd(), \"../../\"))\nspecs_folder = os.path.join(root_path, \"reco_utils/azureml/azureml_designer_modules/module_specs\")\ngithub_prefix = 'https://github.com/microsoft/recommenders/blob/master/reco_utils/azureml/azureml_designer_modules/module_specs/'\nspecs = os.listdir(specs_folder)\nfor spec in specs:\n spec_path = github_prefix + spec\n print(f\"Start to register module spec: {spec} ...\")\n subprocess.run(f\"az ml module register --spec-file {spec_path}\", shell=True)\n print(f\"Done.\")", "Start to register module spec: map.yaml ...\nDone.\nStart to register module spec: ndcg.yaml ...\nDone.\nStart to register module spec: precision_at_k.yaml ...\nDone.\nStart to register module spec: recall_at_k.yaml ...\nDone.\nStart to register module spec: sar_conda.yaml ...\nDone.\nStart to register module spec: sar_score.yaml ...\nDone.\nStart to register module spec: sar_train.yaml ...\nDone.\nStart to register module spec: stratified_splitter.yaml ...\nDone.\n" ] ], [ [ "## Running Recommenders in AzureML Designer\n\nOnce the modules are registered, they will appear in the canvas as the module `Recommenders`. There you will be able to create a pipeline like this:\n\n![img](https://recodatasets.blob.core.windows.net/images/azureml_designer_sar_precisionatk.png)\n\nNow, thanks to AzureML Designer, users can compute the latest state-of-the-art algorithms in recommendation systems without writing a line of python code.\n\n## References\n\n1. [AzureML Designer documentation](https://docs.microsoft.com/en-us/azure/machine-learning/concept-designer)\n1. [Tutorial: Prediction of automobile price](https://docs.microsoft.com/en-us/azure/machine-learning/tutorial-designer-automobile-price-train-score)\n1. [Tutorial: Classification of time flight delays](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-designer-sample-classification-flight-delay)\n1. [Tutorial: Text classification of company categories](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-designer-sample-text-classification)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
ec5cf151ce9e6ac9b4f2b606853d1a738c7e2774
6,439
ipynb
Jupyter Notebook
notebooks/step-4-automate-ml-experiments.ipynb
mlrepa/automate-ml-with-dvc
b54a2e4818a991362d304890828df70359bab84a
[ "MIT" ]
4
2021-04-11T17:30:14.000Z
2021-07-27T10:09:53.000Z
notebooks/step-4-automate-ml-experiments.ipynb
mlrepa/automate-ml-with-dvc
b54a2e4818a991362d304890828df70359bab84a
[ "MIT" ]
null
null
null
notebooks/step-4-automate-ml-experiments.ipynb
mlrepa/automate-ml-with-dvc
b54a2e4818a991362d304890828df70359bab84a
[ "MIT" ]
1
2021-09-05T04:15:07.000Z
2021-09-05T04:15:07.000Z
22.833333
77
0.497593
[ [ [ "%load_ext autoreload\n%autoreload 2\n\nimport os\nimport sys\nimport yaml", "_____no_output_____" ], [ "# # Set the repository root as a working directory \n\n# %cd ..", "_____no_output_____" ] ], [ [ "# Init DVC repository\n\n\n1. Checkout to new branch\n\n```bash\ngit checkout -b experiments\n```\n\n2. Init DVC repository and setup DVC remote storage\n\n\n```bash\ndvc init\n```\n\n3. Add DVC repository under git control\n\n```bash\ngit add .\ngit commit -m \"Init and configure DVC\"\n```\n\n4. Add `local` as a defult DVC remote storage \n\n```bash\nmkdir -p /tmp/dvc/dvc-5-demo-project-iris\ndvc remote add -d local /tmp/dvc/dvc-5-demo-project-iris\ngit add .\ngit commit -m \"add dvc remote - local\"\n```", "_____no_output_____" ], [ "# Create and run stages for a DVC pipeline\n\n**Add data_load stage**\n\n```bash\ndvc stage add -n data_load \\\n -d src/pipelines/data_load.py \\\n -o data/raw/iris.csv \\\n -p base,data_load \\\n python src/pipelines/data_load.py \\\n --config=params.yaml\n```\n\n**Add Featurization stage**\n\n```bash\ndvc stage add -n featurize \\\n -d src/pipelines/featurize.py \\\n -d data/raw/iris.csv \\\n -o data/processed/featured_iris.csv \\\n -p base,data_load,featurize \\\n python src/pipelines/featurize.py \\\n --config=params.yaml\n```\n\n**Add Split dataset into train/test stage**\n\n```bash\ndvc stage add -n data_split \\\n -d src/pipelines/data_split.py \\\n -d data/processed/featured_iris.csv \\\n -o data/processed/train_iris.csv \\\n -o data/processed/test_iris.csv \\\n -p base,featurize,data_split \\\n python src/pipelines/data_split.py \\\n --config=params.yaml\n```\n\n\n**Add Train stage**\n\n```bash\ndvc stage add -n train \\\n -d src/pipelines/train.py \\\n -d data/processed/train_iris.csv \\\n -o models/model.joblib \\\n -p base,featurize.target_column,data_split.train_path,train \\\n python src/pipelines/train.py \\\n --config=params.yaml\n\n```\n\n**Add Evaluate stage**\n\n```bash\ndvc stage add -n evaluate \\\n -d src/pipelines/evaluate.py \\\n -d data/processed/test_iris.csv \\\n -d models/model.joblib \\\n -o reports/confusion_matrix.png \\\n -m reports/metrics.json \\\n --plots reports/classess.csv \\\n -p base,featurize.target_column,data_split.test_path,evaluate \\\n python src/pipelines/evaluate.py \\\n --config=params.yaml\n```\n\n**Run pipeline**\n\n```bash\ndvc repro\n```", "_____no_output_____" ], [ "# Reproduce pipeline\n\na) Pipeline is up to date. Nothing to reproduce.\n\n```bash\ndvc repro\n```\n\nb) Use `-f` to forced reproducing of pipeline\n\n```bash\ndvc repro -f\n```\n", "_____no_output_____" ], [ "# Commit code changes & store artifacts\n\n```bash\ngit add .\ngit commit -m \"Create DVC pipeline\"\n```\n", "_____no_output_____" ], [ "# Push data to a remote DVC storage\n\nPush data to the remote storage\n\n```bash\ndvc push\n```\n", "_____no_output_____" ], [ "# Push `experiments` branch to GitLab remote repository \n\n\n```bash \n\ngit push origin experiments\n```", "_____no_output_____" ] ] ]
[ "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ec5d03f0ef1a510e706733cdbc509b59b7aadcb8
344,166
ipynb
Jupyter Notebook
pytorch_forecasting_hello.ipynb
PatrikDurdevic/timeseries-notebooks
18020d69a90fb0abe956b9c1bd80a6d2a9a6b6d8
[ "MIT" ]
28
2021-01-03T20:01:27.000Z
2022-03-10T12:28:22.000Z
pytorch_forecasting_hello.ipynb
PatrikDurdevic/timeseries-notebooks
18020d69a90fb0abe956b9c1bd80a6d2a9a6b6d8
[ "MIT" ]
1
2021-01-05T19:53:29.000Z
2021-01-05T19:53:29.000Z
pytorch_forecasting_hello.ipynb
PatrikDurdevic/timeseries-notebooks
18020d69a90fb0abe956b9c1bd80a6d2a9a6b6d8
[ "MIT" ]
14
2021-01-04T20:24:49.000Z
2022-01-17T04:58:59.000Z
39.127558
15,202
0.516983
[ [ [ "<a href=\"https://colab.research.google.com/github/microprediction/timeseries-notebooks/blob/main/pytorch_forecasting_hello.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!pip install pytorch-forecasting", "_____no_output_____" ] ], [ [ "# NOT quite done yet\n", "_____no_output_____" ] ], [ [ "import pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor\nfrom pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer\nfrom pytorch_forecasting.data.examples import get_stallion_data\nfrom pytorch_forecasting import Baseline, TemporalFusionTransformer, TimeSeriesDataSet\nfrom pytorch_forecasting.data import GroupNormalizer\nfrom pytorch_forecasting.metrics import SMAPE, PoissonLoss, QuantileLoss\nfrom pytorch_forecasting.models.temporal_fusion_transformer.tuning import optimize_hyperparameters\nfrom pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor\nfrom pytorch_lightning.loggers import TensorBoardLogger", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning:\n\npandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n\n" ], [ "!pip install microprediction", "_____no_output_____" ] ], [ [ "# Hello world example\nSee https://www.microprediction.com/blog/popular-timeseries-packages for more packages", "_____no_output_____" ] ], [ [ "from microprediction import MicroReader\nmr = MicroReader()\nYS = mr.get_lagged_values(name='emojitracker-twitter-face_with_medical_mask.json')[:200]", "_____no_output_____" ], [ "import pytorch_lightning as pl\nimport pandas as pd\nfrom pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor\nfrom pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer\n\ndef run(ys):\n \"\"\" Apply model to univariate time series\n # Not really the strength of this library but why not?\n :param ys: Vector of observations\n :param theta: Unused (at present) parameters or hyper-params\n :return: Vector of predictions\n \"\"\"\n burnin = len(ys)\n data = pd.DataFrame(columns=['y'],data=ys[:burnin])\n data[\"time_idx\"] = list(range(burnin))\n data[\"group_id\"] = [\"same\" for _ in range(burnin)]\n max_prediction_length = 1\n max_encoder_length = 24\n training_cutoff = data[\"time_idx\"].max() - max_prediction_length\n training = TimeSeriesDataSet(\n data[lambda x: x.time_idx <= training_cutoff],\n time_idx=\"time_idx\",\n target=\"y\",\n min_encoder_length=max_encoder_length // 2, # keep encoder length long (as it is in the validation set)\n max_encoder_length=max_encoder_length,\n min_prediction_length=1,\n max_prediction_length=max_prediction_length,\n add_relative_time_idx=True,\n add_target_scales=True,\n add_encoder_length=True,\n group_ids=[\"group_id\"]\n )\n validation = TimeSeriesDataSet.from_dataset(training, data, predict=True, stop_randomization=True)\n batch_size = 128 # set this between 32 to 128\n train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\n val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size * 10, num_workers=0)\n\n pl.seed_everything(42)\n trainer = pl.Trainer(\n gpus=0,\n # clipping gradients is a hyperparameter and important to prevent divergance\n # of the gradient for recurrent neural networks\n gradient_clip_val=0.1,\n )\n\n tft = TemporalFusionTransformer.from_dataset(\n training,\n # not 
meaningful for finding the learning rate but otherwise very important\n learning_rate=0.03,\n hidden_size=16, # most important hyperparameter apart from learning rate\n # number of attention heads. Set to up to 4 for large datasets\n attention_head_size=1,\n dropout=0.1, # between 0.1 and 0.3 are good values\n hidden_continuous_size=8, # set to <= hidden_size\n output_size=7, # 7 quantiles by default\n loss=QuantileLoss(),\n # reduce learning rate if no improvement in validation loss after x epochs\n reduce_on_plateau_patience=4,\n )\n print(f\"Number of parameters in network: {tft.size() / 1e3:.1f}k\")\n # find optimal learning rate\n res = trainer.tuner.lr_find(\n tft,\n train_dataloader=train_dataloader,\n val_dataloaders=val_dataloader,\n max_lr=10.0,\n min_lr=1e-6,\n )\n\n # configure network and trainer\n early_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=10, verbose=False, mode=\"min\")\n lr_logger = LearningRateMonitor() # log the learning rate\n\n trainer = pl.Trainer(\n max_epochs=30,\n gpus=0,\n weights_summary=\"top\",\n gradient_clip_val=0.1,\n limit_train_batches=30, # comment in for training, running validation every 30 batches\n # fast_dev_run=True, # comment in to check that network or dataset has no serious bugs\n callbacks=[lr_logger, early_stop_callback],\n logger=TensorBoardLogger(\"lightning_logs\"),\n )\n\n tft = TemporalFusionTransformer.from_dataset(\n training,\n learning_rate=0.03,\n hidden_size=16,\n attention_head_size=1,\n dropout=0.1,\n hidden_continuous_size=8,\n output_size=7, # 7 quantiles by default\n loss=QuantileLoss(),\n log_interval=10,\n # uncomment for learning rate finder and otherwise, e.g. to 10 for logging every 10 batches\n reduce_on_plateau_patience=4,\n )\n print(f\"Number of parameters in network: {tft.size() / 1e3:.1f}k\")\n trainer.fit(\n tft,\n train_dataloader=train_dataloader,\n val_dataloaders=val_dataloader,\n )\n\n # load the best model according to the validation loss\n # (given that we use early stopping, this is not necessarily the last epoch)\n best_model_path = trainer.checkpoint_callback.best_model_path\n best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)\n\n # ----------------------------------------------------\n # Out of sample ... (this is wrong ... need to fix it)\n encoder_data = data[lambda x: x.time_idx > x.time_idx.max() - max_encoder_length]\n last_data = data[lambda x: x.time_idx == x.time_idx.max()]\n decoder_data = pd.concat(\n [last_data for i in range(1, max_prediction_length + 1)],\n ignore_index=True,\n )\n num_decoder = len(decoder_data)\n decoder_data[\"time_idx\"] = list(range(num_decoder))\n # combine encoder and decoder data\n new_prediction_data = pd.concat([encoder_data, decoder_data], ignore_index=True)\n new_raw_predictions, new_x = best_tft.predict(new_prediction_data, mode=\"raw\", return_x=True)\n\n return ys\n\n\nXS = run(YS)\nlen(XS)\n", "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/utilities/distributed.py:49: UserWarning:\n\nYou have set progress_bar_refresh_rate < 20 on Google Colab. This may crash. 
Consider using progress_bar_refresh_rate >= 20 in Trainer.\n\nGPU available: False, used: False\nTPU available: None, using: 0 TPU cores\n\n | Name | Type | Params\n----------------------------------------------------------------------------------------\n0 | loss | QuantileLoss | 0 \n1 | logging_metrics | ModuleList | 0 \n2 | input_embeddings | MultiEmbedding | 0 \n3 | prescalers | ModuleDict | 64 \n4 | static_variable_selection | VariableSelectionNetwork | 1.7 K \n5 | encoder_variable_selection | VariableSelectionNetwork | 528 \n6 | decoder_variable_selection | VariableSelectionNetwork | 528 \n7 | static_context_variable_selection | GatedResidualNetwork | 1.1 K \n8 | static_context_initial_hidden_lstm | GatedResidualNetwork | 1.1 K \n9 | static_context_initial_cell_lstm | GatedResidualNetwork | 1.1 K \n10 | static_context_enrichment | GatedResidualNetwork | 1.1 K \n11 | lstm_encoder | LSTM | 2.2 K \n12 | lstm_decoder | LSTM | 2.2 K \n13 | post_lstm_gate_encoder | GatedLinearUnit | 544 \n14 | post_lstm_add_norm_encoder | AddNorm | 32 \n15 | static_enrichment | GatedResidualNetwork | 1.4 K \n16 | multihead_attn | InterpretableMultiHeadAttention | 1.1 K \n17 | post_attn_gate_norm | GateAddNorm | 576 \n18 | pos_wise_ff | GatedResidualNetwork | 1.1 K \n19 | pre_output_gate_norm | GateAddNorm | 576 \n20 | output_layer | Linear | 119 \n----------------------------------------------------------------------------------------\n17.0 K Trainable params\n0 Non-trainable params\n17.0 K Total params\n" ], [ "", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.plot(YS[125:150],'*b')\nplt.plot(XS[125:150],'g')\nplt.legend(['data','prior'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ec5d0c7690b617450cf6ec1e44106605d3fd2787
224,372
ipynb
Jupyter Notebook
script_TAN/preproc/PreProcessingWorkflowJuly05onCluster.ipynb
GanshengT/INSERM_EEG_Enrico_Proc
343edc32e5c9705213189a088855c635b31ca22b
[ "CNRI-Python" ]
1
2020-07-28T16:09:54.000Z
2020-07-28T16:09:54.000Z
script_TAN/preproc/PreProcessingWorkflowJuly05onCluster.ipynb
GanshengT/INSERM_EEG_Enrico_Proc
343edc32e5c9705213189a088855c635b31ca22b
[ "CNRI-Python" ]
1
2019-08-16T13:59:53.000Z
2019-08-19T16:37:35.000Z
script_TAN/preproc/PreProcessingWorkflowJuly05onCluster.ipynb
GanshengT/INSERM_EEG_Enrico_Proc
343edc32e5c9705213189a088855c635b31ca22b
[ "CNRI-Python" ]
null
null
null
165.222386
75,004
0.818449
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec5d0e4193399e896c63318d36bacd4c95e589ca
1,159
ipynb
Jupyter Notebook
building-chatbots-in-python/atis-fold.ipynb
Shawn-Ng/natural-language-processing
fac6eaea9ff632e967d616a1e7510848fcc5b8ee
[ "BSD-2-Clause" ]
1
2018-06-29T08:31:16.000Z
2018-06-29T08:31:16.000Z
building-chatbots-in-python/atis-fold.ipynb
Shawn-Ng/natural-language-processing
fac6eaea9ff632e967d616a1e7510848fcc5b8ee
[ "BSD-2-Clause" ]
null
null
null
building-chatbots-in-python/atis-fold.ipynb
Shawn-Ng/natural-language-processing
fac6eaea9ff632e967d616a1e7510848fcc5b8ee
[ "BSD-2-Clause" ]
null
null
null
17.298507
59
0.486626
[ [ [ "import gzip\nimport numpy as np\nimport pickle", "_____no_output_____" ], [ "with gzip.open(\"data/atis.pkl.gz\", \"rb\") as f:\n data = pickle.load(f, encoding='ISO-8859-1')", "_____no_output_____" ], [ "np.shape(data)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
ec5d1e0738d1f3bfcf32827ae9910bf5b0f05e4c
15,931
ipynb
Jupyter Notebook
docs/_downloads/f41f6eb36b8dff464b9cd21b8ea30765/super_resolution_with_onnxruntime.ipynb
leejh1230/PyTorch-tutorials-kr
ebbf44b863ff96c597631e28fc194eafa590c9eb
[ "BSD-3-Clause" ]
1
2019-12-05T05:16:44.000Z
2019-12-05T05:16:44.000Z
docs/_downloads/f41f6eb36b8dff464b9cd21b8ea30765/super_resolution_with_onnxruntime.ipynb
leejh1230/PyTorch-tutorials-kr
ebbf44b863ff96c597631e28fc194eafa590c9eb
[ "BSD-3-Clause" ]
null
null
null
docs/_downloads/f41f6eb36b8dff464b9cd21b8ea30765/super_resolution_with_onnxruntime.ipynb
leejh1230/PyTorch-tutorials-kr
ebbf44b863ff96c597631e28fc194eafa590c9eb
[ "BSD-3-Clause" ]
null
null
null
68.373391
1,320
0.655075
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\nExporting a Model from PyTorch to ONNX and Running it using ONNXRuntime\n=======================================================================\n\nIn this tutorial, we describe how to convert a model defined\nin PyTorch into the ONNX format and then run it with ONNXRuntime. \n\nONNXRuntime is a performance-focused engine for ONNX models,\nwhich inferences efficiently across multiple platforms and hardware\n(Windows, Linux, and Mac and on both CPUs and GPUs).\nONNXRuntime has proved to considerably increase performance over\nmultiple models as explained `here\n<https://cloudblogs.microsoft.com/opensource/2019/05/22/onnx-runtime-machine-learning-inferencing-0-4-release>`__\n\nFor this tutorial, you will need to install `onnx <https://github.com/onnx/onnx>`__\nand `onnxruntime <https://github.com/microsoft/onnxruntime>`__.\nYou can get binary builds of onnx and onnxrunimte with\n``pip install onnx onnxruntime``.\nNote that ONNXRuntime is compatible with Python versions 3.5 to 3.7.\n\n``NOTE``: This tutorial needs PyTorch master branch which can be installed by following\nthe instructions `here <https://github.com/pytorch/pytorch#from-source>`__\n", "_____no_output_____" ] ], [ [ "# Some standard imports\nimport io\nimport numpy as np\n\nfrom torch import nn\nimport torch.utils.model_zoo as model_zoo\nimport torch.onnx", "_____no_output_____" ] ], [ [ "Super-resolution is a way of increasing the resolution of images, videos\nand is widely used in image processing or video editing. For this\ntutorial, we will use a small super-resolution model.\n\nFirst, let's create a SuperResolution model in PyTorch. \nThis model uses the efficient sub-pixel convolution layer described in\n`\"Real-Time Single Image and Video Super-Resolution Using an Efficient\nSub-Pixel Convolutional Neural Network\" - Shi et al <https://arxiv.org/abs/1609.05158>`__\nfor increasing the resolution of an image by an upscale factor.\nThe model expects the Y component of the YCbCr of an image as an input, and\noutputs the upscaled Y component in super resolution. 
\n\n`The\nmodel <https://github.com/pytorch/examples/blob/master/super_resolution/model.py>`__\ncomes directly from PyTorch's examples without modification:\n\n\n", "_____no_output_____" ] ], [ [ "# Super Resolution model definition in PyTorch\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\nclass SuperResolutionNet(nn.Module):\n def __init__(self, upscale_factor, inplace=False):\n super(SuperResolutionNet, self).__init__()\n\n self.relu = nn.ReLU(inplace=inplace)\n self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))\n self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))\n self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))\n self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))\n self.pixel_shuffle = nn.PixelShuffle(upscale_factor)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n x = self.relu(self.conv3(x))\n x = self.pixel_shuffle(self.conv4(x))\n return x\n\n def _initialize_weights(self):\n init.orthogonal_(self.conv1.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv2.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv3.weight, init.calculate_gain('relu'))\n init.orthogonal_(self.conv4.weight)\n\n# Create the super-resolution model by using the above model definition.\ntorch_model = SuperResolutionNet(upscale_factor=3)", "_____no_output_____" ] ], [ [ "Ordinarily, you would now train this model; however, for this tutorial,\nwe will instead download some pre-trained weights. Note that this model\nwas not trained fully for good accuracy and is used here for\ndemonstration purposes only.\n\nIt is important to call ``torch_model.eval()`` or ``torch_model.train(False)``\nbefore exporting the model, to turn the model to inference mode.\nThis is required since operators like dropout or batchnorm behave\ndifferently in inference and training mode. \n\n\n", "_____no_output_____" ] ], [ [ "# Load pretrained model weights\nmodel_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'\nbatch_size = 1 # just a random number\n\n# Initialize model with the pretrained weights\nmap_location = lambda storage, loc: storage\nif torch.cuda.is_available():\n map_location = None\ntorch_model.load_state_dict(model_zoo.load_url(model_url, map_location=map_location))\n\n# set the model to inference mode\ntorch_model.eval()", "_____no_output_____" ] ], [ [ "Exporting a model in PyTorch works via tracing or scripting. This\ntutorial will use as an example a model exported by tracing. \nTo export a model, we call the ``torch.onnx.export()`` function.\nThis will execute the model, recording a trace of what operators\nare used to compute the outputs.\nBecause ``export`` runs the model, we need to provide an input\ntensor ``x``. The values in this can be random as long as it is the\nright type and size.\nNote that the input size will be fixed in the exported ONNX graph for\nall the input's dimensions, unless specified as a dynamic axes.\nIn this example we export the model with an input of batch_size 1,\nbut then specify the first dimension as dynamic in the ``dynamic_axes``\nparameter in ``torch.onnx.export()``. \nThe exported model will thus accept inputs of size [batch_size, 1, 224, 224]\nwhere batch_size can be variable. 
\n\nTo learn more details about PyTorch's export interface, check out the\n`torch.onnx documentation <https://pytorch.org/docs/master/onnx.html>`__.\n\n\n", "_____no_output_____" ] ], [ [ "# Input to the model\nx = torch.randn(batch_size, 1, 224, 224, requires_grad=True)\ntorch_out = torch_model(x)\n\n# Export the model\ntorch.onnx.export(torch_model, # model being run\n x, # model input (or a tuple for multiple inputs)\n \"super_resolution.onnx\", # where to save the model (can be a file or file-like object)\n export_params=True, # store the trained parameter weights inside the model file\n opset_version=10, # the onnx version to export the model to\n do_constant_folding=True, # whether to execute constant folding for optimization\n input_names = ['input'], # the model's input names\n output_names = ['output'], # the model's output names\n dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n 'output' : {0 : 'batch_size'}})", "_____no_output_____" ] ], [ [ "We also computed ``torch_out``, the output of the model,\nwhich we will use to verify that the model we exported computes\nthe same values when run in onnxruntime.\n\nBut before verifying the model's output with onnxruntime, we will check\nthe onnx model with onnx's API.\nFirst, ``onnx.load(\"super_resolution.onnx\")`` will load the saved model and\nwill output an onnx.ModelProto structure (a top-level file/container format for bundling a ML model.\nFor more information `onnx.proto documentation <https://github.com/onnx/onnx/blob/master/onnx/onnx.proto>`__.).\nThen, ``onnx.checker.check_model(onnx_model)`` will verify the model's structure\nand confirm that the model has a valid schema.\nThe validity of the ONNX graph is verified by checking the model's\nversion, the graph's structure, as well as the nodes and their inputs\nand outputs.\n\n\n", "_____no_output_____" ] ], [ [ "import onnx\n\nonnx_model = onnx.load(\"super_resolution.onnx\")\nonnx.checker.check_model(onnx_model)", "_____no_output_____" ] ], [ [ "Now let's compute the output using ONNXRuntime's Python APIs.\nThis part can normally be done in a separate process or on another\nmachine, but we will continue in the same process so that we can\nverify that onnxruntime and PyTorch are computing the same value\nfor the network.\n\nIn order to run the model with ONNXRuntime, we need to create an\ninference session for the model with the chosen configuration\nparameters (here we use the default config).\nOnce the session is created, we evaluate the model using the run() api.\nThe output of this call is a list containing the outputs of the model\ncomputed by ONNXRuntime. 
\n\n\n", "_____no_output_____" ] ], [ [ "import onnxruntime\n\nort_session = onnxruntime.InferenceSession(\"super_resolution.onnx\")\n\ndef to_numpy(tensor):\n return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()\n\n# compute onnxruntime output prediction\nort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}\nort_outs = ort_session.run(None, ort_inputs)\n\n# compare onnxruntime and PyTorch results\nnp.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)\n\nprint(\"Exported model has been tested with ONNXRuntime, and the result looks good!\")", "_____no_output_____" ] ], [ [ "We should see that the output of PyTorch and onnxruntime runs match\nnumerically with the given precision (rtol=1e-03 and atol=1e-05).\nAs a side-note, if they do not match then there is an issue in the\nonnx exporter, so please contact us in that case.\n\n\n", "_____no_output_____" ], [ "Running the model on an image using ONNXRuntime\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n", "_____no_output_____" ], [ "So far we have exported a model from PyTorch and shown how to load it\nand run it in onnxruntime with a dummy tensor as an input.\n\n", "_____no_output_____" ], [ "For this tutorial, we will use a famous cat image used widely which\nlooks like below\n\n.. figure:: /_static/img/cat_224x224.jpg\n :alt: cat\n\n\n", "_____no_output_____" ], [ "First, let's load the image, pre-process it using standard PIL\npython library. Note that this preprocessing is the standard practice of\nprocessing data for training/testing neural networks.\n\nWe first resize the image to fit the size of the model's input (224x224).\nThen we split the image into its Y, Cb, and Cr components.\nThese components represent a greyscale image (Y), and\nthe blue-difference (Cb) and red-difference (Cr) chroma components.\nThe Y component being more sensitive to the human eye, we are \ninterested in this component which we will be transforming.\nAfter extracting the Y component, we convert it to a tensor which\nwill be the input of our model.\n\n\n", "_____no_output_____" ] ], [ [ "from PIL import Image\nimport torchvision.transforms as transforms\n\nimg = Image.open(\"./_static/img/cat.jpg\")\n\nresize = transforms.Resize([224, 224])\nimg = resize(img)\n\nimg_ycbcr = img.convert('YCbCr')\nimg_y, img_cb, img_cr = img_ycbcr.split()\n\nto_tensor = transforms.ToTensor()\nimg_y = to_tensor(img_y)\nimg_y.unsqueeze_(0)", "_____no_output_____" ] ], [ [ "Now, as a next step, let's take the tensor representing the\ngreyscale resized cat image and run the super-resolution model in\nONNXRuntime as explained previously.\n\n\n", "_____no_output_____" ] ], [ [ "ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(img_y)}\nort_outs = ort_session.run(None, ort_inputs)\nimg_out_y = ort_outs[0]", "_____no_output_____" ] ], [ [ "At this point, the output of the model is a tensor.\nNow, we'll process the output of the model to construct back the\nfinal output image from the output tensor, and save the image.\nThe post-processing steps have been adopted from PyTorch\nimplementation of super-resolution model\n`here <https://github.com/pytorch/examples/blob/master/super_resolution/super_resolve.py>`__.\n\n\n", "_____no_output_____" ] ], [ [ "img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')\n\n# get the output image follow post-processing step from PyTorch implementation\nfinal_img = Image.merge(\n \"YCbCr\", [\n img_out_y,\n 
img_cb.resize(img_out_y.size, Image.BICUBIC),\n img_cr.resize(img_out_y.size, Image.BICUBIC),\n ]).convert(\"RGB\")\n\n# Save the image, we will compare this with the output image from mobile device\nfinal_img.save(\"./_static/img/cat_superres_with_ort.jpg\")", "_____no_output_____" ] ], [ [ ".. figure:: /_static/img/cat_superres_with_ort.jpg\n :alt: output\\_cat\n\n\nONNXRuntime being a cross platform engine, you can run it across\nmultiple platforms and on both CPUs and GPUs.\n\nONNXRuntime can also be deployed to the cloud for model inferencing\nusing Azure Machine Learning Services. More information `here <https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-onnx>`__.\n\nMore information about ONNXRuntime's performance `here <https://github.com/microsoft/onnxruntime#high-performance>`__.\n\n\nFor more information about ONNXRuntime `here <https://github.com/microsoft/onnxruntime>`__.\n\n\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5d3282647aafd569d74a328b35c5a6d3fcbde9
7,880
ipynb
Jupyter Notebook
glove.py-master/demo/glove.py exploration.ipynb
AngusMonroe/CS224n
5fdb4bf38beccb63be1d5a1d6ed12dce7e8fbad7
[ "MIT" ]
6
2019-04-29T08:32:50.000Z
2021-02-15T11:09:33.000Z
glove.py-master/demo/glove.py exploration.ipynb
AngusMonroe/CS224n
5fdb4bf38beccb63be1d5a1d6ed12dce7e8fbad7
[ "MIT" ]
null
null
null
glove.py-master/demo/glove.py exploration.ipynb
AngusMonroe/CS224n
5fdb4bf38beccb63be1d5a1d6ed12dce7e8fbad7
[ "MIT" ]
2
2019-05-07T04:42:48.000Z
2019-09-14T07:23:02.000Z
33.109244
99
0.524873
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ec5d4237c543152da4d91b6e5367016d63bb9971
50,806
ipynb
Jupyter Notebook
transformer_translation.ipynb
dayangai/paper_implementation
aff83b4182c4c1a06d31e7a0cf88ee20a7192081
[ "MIT" ]
1
2022-03-20T17:27:05.000Z
2022-03-20T17:27:05.000Z
transformer_translation.ipynb
dayangai/paper_implementation
aff83b4182c4c1a06d31e7a0cf88ee20a7192081
[ "MIT" ]
null
null
null
transformer_translation.ipynb
dayangai/paper_implementation
aff83b4182c4c1a06d31e7a0cf88ee20a7192081
[ "MIT" ]
null
null
null
49.135397
1,100
0.580345
[ [ [ "import torch\nimport torch.nn as nn\n\n\nclass SelfAttention(nn.Module):\n def __init__(self, embed_size, heads):\n super(SelfAttention, self).__init__()\n self.embed_size = embed_size\n self.heads = heads\n self.head_dim = embed_size // heads\n\n assert (\n self.head_dim * heads == embed_size\n ), \"Embedding size needs to be divisible by heads\"\n\n self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)\n self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)\n self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)\n self.fc_out = nn.Linear(heads * self.head_dim, embed_size)\n\n def forward(self, values, keys, query, mask):\n # Get number of training examples\n N = query.shape[0]\n\n value_len, key_len, query_len = values.shape[1], keys.shape[1], query.shape[1]\n\n # Split the embedding into self.heads different pieces\n values = values.reshape(N, value_len, self.heads, self.head_dim)\n keys = keys.reshape(N, key_len, self.heads, self.head_dim)\n query = query.reshape(N, query_len, self.heads, self.head_dim)\n\n values = self.values(values) # (N, value_len, heads, head_dim)\n keys = self.keys(keys) # (N, key_len, heads, head_dim)\n queries = self.queries(query) # (N, query_len, heads, heads_dim)\n\n # Einsum does matrix mult. for query*keys for each training example\n # with every other training example, don't be confused by einsum\n # it's just how I like doing matrix multiplication & bmm\n\n energy = torch.einsum(\"nqhd,nkhd->nhqk\", [queries, keys])\n # queries shape: (N, query_len, heads, heads_dim),\n # keys shape: (N, key_len, heads, heads_dim)\n # energy: (N, heads, query_len, key_len)\n\n # Mask padded indices so their weights become 0\n if mask is not None:\n energy = energy.masked_fill(mask == 0, float(\"-1e20\"))\n\n # Normalize energy values similarly to seq2seq + attention\n # so that they sum to 1. 
Also divide by scaling factor for\n # better stability\n attention = torch.softmax(energy / (self.embed_size ** (1 / 2)), dim=3)\n # attention shape: (N, heads, query_len, key_len)\n\n out = torch.einsum(\"nhql,nlhd->nqhd\", [attention, values]).reshape(\n N, query_len, self.heads * self.head_dim\n )\n # attention shape: (N, heads, query_len, key_len)\n # values shape: (N, value_len, heads, heads_dim)\n # out after matrix multiply: (N, query_len, heads, head_dim), then\n # we reshape and flatten the last two dimensions.\n\n out = self.fc_out(out)\n # Linear layer doesn't modify the shape, final shape will be\n # (N, query_len, embed_size)\n\n return out", "_____no_output_____" ], [ "class TransformerBlock(nn.Module):\n def __init__(self, embed_size, heads, dropout, forward_expansion):\n super(TransformerBlock, self).__init__()\n self.attention = SelfAttention(embed_size, heads)\n self.norm1 = nn.LayerNorm(embed_size)\n self.norm2 = nn.LayerNorm(embed_size)\n\n self.feed_forward = nn.Sequential(\n nn.Linear(embed_size, forward_expansion * embed_size),\n nn.ReLU(),\n nn.Linear(forward_expansion * embed_size, embed_size),\n )\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, value, key, query, mask):\n attention = self.attention(value, key, query, mask)\n\n # Add skip connection, run through normalization and finally dropout\n x = self.dropout(self.norm1(attention + query))\n forward = self.feed_forward(x)\n out = self.dropout(self.norm2(forward + x))\n return out", "_____no_output_____" ], [ "class Encoder(nn.Module):\n def __init__(\n self,\n src_vocab_size,\n embed_size,\n num_layers,\n heads,\n device,\n forward_expansion,\n dropout,\n max_length,\n ):\n\n super(Encoder, self).__init__()\n self.embed_size = embed_size\n self.device = device\n self.word_embedding = nn.Embedding(src_vocab_size, embed_size)\n self.position_embedding = nn.Embedding(max_length, embed_size)\n\n self.layers = nn.ModuleList(\n [\n TransformerBlock(\n embed_size,\n heads,\n dropout=dropout,\n forward_expansion=forward_expansion,\n )\n for _ in range(num_layers)\n ]\n )\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, mask):\n N, seq_length = x.shape\n positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)\n out = self.dropout(\n (self.word_embedding(x) + self.position_embedding(positions))\n )\n\n # In the Encoder the query, key, value are all the same, it's in the\n # decoder this will change. 
This might look a bit odd in this case.\n for layer in self.layers:\n out = layer(out, out, out, mask)\n\n return out", "_____no_output_____" ], [ "class DecoderBlock(nn.Module):\n def __init__(self, embed_size, heads, forward_expansion, dropout, device):\n super(DecoderBlock, self).__init__()\n self.norm = nn.LayerNorm(embed_size)\n self.attention = SelfAttention(embed_size, heads=heads)\n self.transformer_block = TransformerBlock(\n embed_size, heads, dropout, forward_expansion\n )\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, value, key, src_mask, trg_mask):\n attention = self.attention(x, x, x, trg_mask)\n query = self.dropout(self.norm(attention + x))\n out = self.transformer_block(value, key, query, src_mask)\n return out", "_____no_output_____" ], [ "class Decoder(nn.Module):\n def __init__(\n self,\n trg_vocab_size,\n embed_size,\n num_layers,\n heads,\n forward_expansion,\n dropout,\n device,\n max_length,\n ):\n super(Decoder, self).__init__()\n self.device = device\n self.word_embedding = nn.Embedding(trg_vocab_size, embed_size)\n self.position_embedding = nn.Embedding(max_length, embed_size)\n\n self.layers = nn.ModuleList(\n [\n DecoderBlock(embed_size, heads, forward_expansion, dropout, device)\n for _ in range(num_layers)\n ]\n )\n self.fc_out = nn.Linear(embed_size, trg_vocab_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, enc_out, src_mask, trg_mask):\n N, seq_length = x.shape\n positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)\n x = self.dropout((self.word_embedding(x) + self.position_embedding(positions)))\n\n for layer in self.layers:\n x = layer(x, enc_out, enc_out, src_mask, trg_mask)\n\n out = self.fc_out(x)\n\n return out\n ", "_____no_output_____" ], [ "class Transformer(nn.Module):\n def __init__(\n self,\n embed_size,\n src_vocab_size,\n trg_vocab_size,\n src_pad_idx,\n heads,\n num_encoder_layers,\n num_decoder_layers,\n forward_expansion,\n dropout,\n max_length=100,\n device=\"cpu\",\n ):\n\n super(Transformer, self).__init__()\n\n self.encoder = Encoder(\n src_vocab_size,\n embed_size,\n num_encoder_layers,\n heads,\n device,\n forward_expansion,\n dropout,\n max_length,\n )\n\n self.decoder = Decoder(\n trg_vocab_size,\n embed_size,\n num_decoder_layers,\n heads,\n forward_expansion,\n dropout,\n device,\n max_length,\n )\n\n self.src_pad_idx = src_pad_idx\n self.trg_pad_idx = 1\n self.device = device\n\n def make_src_mask(self, src):\n src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)\n # (N, 1, 1, src_len)\n return src_mask.to(self.device)\n\n def make_trg_mask(self, trg):\n N, trg_len = trg.shape\n trg_mask = torch.tril(torch.ones((trg_len, trg_len))).expand(\n N, 1, trg_len, trg_len\n )\n\n return trg_mask.to(self.device)\n\n def forward(self, src, trg):\n src_mask = self.make_src_mask(src)\n trg_mask = self.make_trg_mask(trg)\n enc_src = self.encoder(src, src_mask)\n out = self.decoder(trg, enc_src, src_mask, trg_mask)\n return out", "_____no_output_____" ], [ "!sudo pip install spacy==3.0.6\n!sudo pip install torchtext==0.6.0\n!sudo python -m spacy download en_core_web_sm\n!sudo python -m spacy download de_core_news_sm", "Collecting spacy==3.0.6\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/1b/d8/0361bbaf7a1ff56b44dca04dace54c82d63dad7475b7d25ea1baefafafb2/spacy-3.0.6-cp37-cp37m-manylinux2014_x86_64.whl (12.8MB)\n\u001b[K |████████████████████████████████| 12.8MB 2.9MB/s \n\u001b[?25hCollecting pathy>=0.3.5\n\u001b[?25l Downloading 
https://files.pythonhosted.org/packages/13/87/5991d87be8ed60beb172b4062dbafef18b32fa559635a8e2b633c2974f85/pathy-0.5.2-py3-none-any.whl (42kB)\n\u001b[K |████████████████████████████████| 51kB 8.1MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (1.19.5)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (2.23.0)\nRequirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (4.41.1)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (20.9)\nRequirement already satisfied: typing-extensions<4.0.0.0,>=3.7.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (3.7.4.3)\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (3.0.5)\nRequirement already satisfied: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (0.4.1)\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (2.0.5)\nCollecting srsly<3.0.0,>=2.4.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/c3/84/dfdfc9f6f04f6b88207d96d9520b911e5fec0c67ff47a0dea31ab5429a1e/srsly-2.4.1-cp37-cp37m-manylinux2014_x86_64.whl (456kB)\n\u001b[K |████████████████████████████████| 460kB 51.3MB/s \n\u001b[?25hCollecting typer<0.4.0,>=0.3.0\n Downloading https://files.pythonhosted.org/packages/90/34/d138832f6945432c638f32137e6c79a3b682f06a63c488dcfaca6b166c64/typer-0.3.2-py3-none-any.whl\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (1.0.5)\nCollecting pydantic<1.8.0,>=1.7.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ca/fa/d43f31874e1f2a9633e4c025be310f2ce7a8350017579e9e837a62630a7e/pydantic-1.7.4-cp37-cp37m-manylinux2014_x86_64.whl (9.1MB)\n\u001b[K |████████████████████████████████| 9.1MB 24.0MB/s \n\u001b[?25hCollecting thinc<8.1.0,>=8.0.3\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/61/87/decceba68a0c6ca356ddcb6aea8b2500e71d9bc187f148aae19b747b7d3c/thinc-8.0.3-cp37-cp37m-manylinux2014_x86_64.whl (1.1MB)\n\u001b[K |████████████████████████████████| 1.1MB 53.5MB/s \n\u001b[?25hCollecting spacy-legacy<3.1.0,>=3.0.4\n Downloading https://files.pythonhosted.org/packages/8d/67/d4002a18e26bf29b17ab563ddb55232b445ab6a02f97bf17d1345ff34d3f/spacy_legacy-3.0.5-py2.py3-none-any.whl\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (2.11.3)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (57.0.0)\nCollecting catalogue<2.1.0,>=2.0.3\n Downloading https://files.pythonhosted.org/packages/9c/10/dbc1203a4b1367c7b02fddf08cb2981d9aa3e688d398f587cea0ab9e3bec/catalogue-2.0.4-py3-none-any.whl\nRequirement already satisfied: wasabi<1.1.0,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from spacy==3.0.6) (0.8.2)\nCollecting smart-open<4.0.0,>=2.2.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/11/9a/ba2d5f67f25e8d5bbf2fcec7a99b1e38428e83cb715f64dd179ca43a11bb/smart_open-3.0.0.tar.gz (113kB)\n\u001b[K |████████████████████████████████| 122kB 56.3MB/s \n\u001b[?25hRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from 
requests<3.0.0,>=2.13.0->spacy==3.0.6) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0.6) (2020.12.5)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0.6) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==3.0.6) (3.0.4)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->spacy==3.0.6) (2.4.7)\nRequirement already satisfied: click<7.2.0,>=7.1.1 in /usr/local/lib/python3.7/dist-packages (from typer<0.4.0,>=0.3.0->spacy==3.0.6) (7.1.2)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->spacy==3.0.6) (2.0.1)\nRequirement already satisfied: zipp>=0.5; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<2.1.0,>=2.0.3->spacy==3.0.6) (3.4.1)\nBuilding wheels for collected packages: smart-open\n Building wheel for smart-open (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for smart-open: filename=smart_open-3.0.0-cp37-none-any.whl size=107107 sha256=af837c16c37f2fe25c9715d57719a767fc4048f7c536d06d9209bf79950f4973\n Stored in directory: /root/.cache/pip/wheels/18/88/7c/f06dabd5e9cabe02d2269167bcacbbf9b47d0c0ff7d6ebcb78\nSuccessfully built smart-open\nInstalling collected packages: smart-open, typer, pathy, catalogue, srsly, pydantic, thinc, spacy-legacy, spacy\n Found existing installation: smart-open 5.0.0\n Uninstalling smart-open-5.0.0:\n Successfully uninstalled smart-open-5.0.0\n Found existing installation: catalogue 1.0.0\n Uninstalling catalogue-1.0.0:\n Successfully uninstalled catalogue-1.0.0\n Found existing installation: srsly 1.0.5\n Uninstalling srsly-1.0.5:\n Successfully uninstalled srsly-1.0.5\n Found existing installation: thinc 7.4.0\n Uninstalling thinc-7.4.0:\n Successfully uninstalled thinc-7.4.0\n Found existing installation: spacy 2.2.4\n Uninstalling spacy-2.2.4:\n Successfully uninstalled spacy-2.2.4\nSuccessfully installed catalogue-2.0.4 pathy-0.5.2 pydantic-1.7.4 smart-open-3.0.0 spacy-3.0.6 spacy-legacy-3.0.5 srsly-2.4.1 thinc-8.0.3 typer-0.3.2\nCollecting torchtext==0.6.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f2/17/e7c588245aece7aa93f360894179374830daf60d7ed0bbb59332de3b3b61/torchtext-0.6.0-py3-none-any.whl (64kB)\n\u001b[K |████████████████████████████████| 71kB 2.1MB/s \n\u001b[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from torchtext==0.6.0) (4.41.1)\nRequirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from torchtext==0.6.0) (2.23.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torchtext==0.6.0) (1.19.5)\nCollecting sentencepiece\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f5/99/e0808cb947ba10f575839c43e8fafc9cc44e4a7a2c8f79c60db48220a577/sentencepiece-0.1.95-cp37-cp37m-manylinux2014_x86_64.whl (1.2MB)\n\u001b[K |████████████████████████████████| 1.2MB 4.1MB/s \n\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from torchtext==0.6.0) (1.15.0)\nRequirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from torchtext==0.6.0) (1.8.1+cu101)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in 
/usr/local/lib/python3.7/dist-packages (from requests->torchtext==0.6.0) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->torchtext==0.6.0) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->torchtext==0.6.0) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->torchtext==0.6.0) (2020.12.5)\nRequirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch->torchtext==0.6.0) (3.7.4.3)\nInstalling collected packages: sentencepiece, torchtext\n Found existing installation: torchtext 0.9.1\n Uninstalling torchtext-0.9.1:\n Successfully uninstalled torchtext-0.9.1\nSuccessfully installed sentencepiece-0.1.95 torchtext-0.6.0\n2021-06-03 14:29:16.499811: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\nCollecting en-core-web-sm==3.0.0\n\u001b[?25l Downloading https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0-py3-none-any.whl (13.7MB)\n\u001b[K |████████████████████████████████| 13.7MB 449kB/s \n\u001b[?25hRequirement already satisfied: spacy<3.1.0,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from en-core-web-sm==3.0.0) (3.0.6)\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.0.5)\nRequirement already satisfied: thinc<8.1.0,>=8.0.3 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (8.0.3)\nRequirement already satisfied: srsly<3.0.0,>=2.4.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.4.1)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.23.0)\nRequirement already satisfied: typing-extensions<4.0.0.0,>=3.7.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (3.7.4.3)\nRequirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (1.19.5)\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (3.0.5)\nRequirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (4.41.1)\nRequirement already satisfied: pathy>=0.3.5 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (0.5.2)\nRequirement already satisfied: spacy-legacy<3.1.0,>=3.0.4 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (3.0.5)\nRequirement already satisfied: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (0.4.1)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (57.0.0)\nRequirement already satisfied: pydantic<1.8.0,>=1.7.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (1.7.4)\nRequirement already satisfied: catalogue<2.1.0,>=2.0.3 in /usr/local/lib/python3.7/dist-packages (from 
spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.0.4)\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (1.0.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.11.3)\nRequirement already satisfied: wasabi<1.1.0,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (0.8.2)\nRequirement already satisfied: typer<0.4.0,>=0.3.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (0.3.2)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (20.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2020.12.5)\nRequirement already satisfied: smart-open<4.0.0,>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from pathy>=0.3.5->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (3.0.0)\nRequirement already satisfied: zipp>=0.5; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<2.1.0,>=2.0.3->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (3.4.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.0.1)\nRequirement already satisfied: click<7.2.0,>=7.1.1 in /usr/local/lib/python3.7/dist-packages (from typer<0.4.0,>=0.3.0->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (7.1.2)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->spacy<3.1.0,>=3.0.0->en-core-web-sm==3.0.0) (2.4.7)\nInstalling collected packages: en-core-web-sm\n Found existing installation: en-core-web-sm 2.2.5\n Uninstalling en-core-web-sm-2.2.5:\n Successfully uninstalled en-core-web-sm-2.2.5\nSuccessfully installed en-core-web-sm-3.0.0\n\u001b[38;5;2m✔ Download and installation successful\u001b[0m\nYou can now load the package via spacy.load('en_core_web_sm')\n2021-06-03 14:29:24.529764: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\nCollecting de-core-news-sm==3.0.0\n\u001b[?25l Downloading https://github.com/explosion/spacy-models/releases/download/de_core_news_sm-3.0.0/de_core_news_sm-3.0.0-py3-none-any.whl (19.3MB)\n\u001b[K |████████████████████████████████| 19.3MB 1.2MB/s \n\u001b[?25hRequirement already satisfied: spacy<3.1.0,>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from de-core-news-sm==3.0.0) (3.0.6)\nRequirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (1.0.5)\nRequirement already satisfied: pydantic<1.8.0,>=1.7.1 in /usr/local/lib/python3.7/dist-packages (from 
spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (1.7.4)\nRequirement already satisfied: typer<0.4.0,>=0.3.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (0.3.2)\nRequirement already satisfied: catalogue<2.1.0,>=2.0.3 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.0.4)\nRequirement already satisfied: spacy-legacy<3.1.0,>=3.0.4 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (3.0.5)\nRequirement already satisfied: wasabi<1.1.0,>=0.8.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (0.8.2)\nRequirement already satisfied: pathy>=0.3.5 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (0.5.2)\nRequirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.0.5)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (20.9)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (57.0.0)\nRequirement already satisfied: srsly<3.0.0,>=2.4.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.4.1)\nRequirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.23.0)\nRequirement already satisfied: blis<0.8.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (0.4.1)\nRequirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (3.0.5)\nRequirement already satisfied: thinc<8.1.0,>=8.0.3 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (8.0.3)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.11.3)\nRequirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (4.41.1)\nRequirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (1.19.5)\nRequirement already satisfied: typing-extensions<4.0.0.0,>=3.7.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (3.7.4.3)\nRequirement already satisfied: click<7.2.0,>=7.1.1 in /usr/local/lib/python3.7/dist-packages (from typer<0.4.0,>=0.3.0->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (7.1.2)\nRequirement already satisfied: zipp>=0.5; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<2.1.0,>=2.0.3->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (3.4.1)\nRequirement already satisfied: smart-open<4.0.0,>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from pathy>=0.3.5->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (3.0.0)\nRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.4.7)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) 
(2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2020.12.5)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (3.0.4)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->spacy<3.1.0,>=3.0.0->de-core-news-sm==3.0.0) (2.0.1)\nInstalling collected packages: de-core-news-sm\nSuccessfully installed de-core-news-sm-3.0.0\n\u001b[38;5;2m✔ Download and installation successful\u001b[0m\nYou can now load the package via spacy.load('de_core_news_sm')\n" ], [ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport spacy\nfrom utils_scratch import translate_sentence, bleu, save_checkpoint, load_checkpoint\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchtext.datasets import Multi30k\nfrom torchtext.data import Field, BucketIterator\n\n\"\"\"\nTo install spacy languages do:\npython -m spacy download en\npython -m spacy download de\n\"\"\"\nspacy_ger = spacy.load(\"de_core_news_sm\")\nspacy_eng = spacy.load(\"en_core_web_sm\")", "_____no_output_____" ], [ "def tokenize_ger(text):\n return [tok.text for tok in spacy_ger.tokenizer(text)]\ndef tokenize_eng(text):\n return [tok.text for tok in spacy_eng.tokenizer(text)]", "_____no_output_____" ], [ "german=Field(tokenize=tokenize_ger,lower=True,init_token=\"<sos>\",eos_token=\"<eos>\")\nenglish=Field(tokenize=tokenize_eng,lower=True,init_token=\"<sos>\",eos_token=\"<eos>\")", "_____no_output_____" ], [ "train_data,valid_data,test_data=Multi30k.splits(\n exts=(\".de\",\".en\"),fields=(german,english)\n)", "downloading training.tar.gz\n" ], [ "german.build_vocab(train_data,max_size=10000,min_freq=2)\nenglish.build_vocab(train_data,max_size=10000,min_freq=2)", "_____no_output_____" ], [ "def translate_sentence(model, sentence, german, english, device, max_length=50):\n # Load german tokenizer\n spacy_ger = spacy.load(\"de_core_news_sm\")\n\n # Create tokens using spacy and everything in lower case (which is what our vocab is)\n if type(sentence) == str:\n tokens = [token.text.lower() for token in spacy_ger(sentence)]\n else:\n tokens = [token.lower() for token in sentence]\n\n # Add <SOS> and <EOS> in beginning and end respectively\n tokens.insert(0, german.init_token)\n tokens.append(german.eos_token)\n\n # Go through each german token and convert to an index\n text_to_indices = [german.vocab.stoi[token] for token in tokens]\n\n # Convert to Tensor\n sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(0).to(device)\n\n outputs = [english.vocab.stoi[\"<sos>\"]]\n for i in range(max_length):\n trg_tensor = torch.LongTensor(outputs).unsqueeze(0).to(device)\n \n with torch.no_grad():\n output = model(sentence_tensor, trg_tensor)\n \n best_guess = output.argmax(2)[:, -1].item()\n outputs.append(best_guess)\n \n if best_guess == english.vocab.stoi[\"<eos>\"]:\n break\n \n translated_sentence = [english.vocab.itos[idx] for idx in outputs]\n # remove start token\n return translated_sentence[1:]", "_____no_output_____" ], [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else 
\"cpu\")\nload_model=False\nsave_model=True\n\nnum_epochs=20\nlearning_rate=1e-4\nbatch_size=32\n\nsrc_vocab_size=len(german.vocab)\ntrg_vocab_size=len(english.vocab)\nembedding_size=512\nnum_heads=8\nnum_encoder_layers=3\nnum_decoder_layers=3\ndropout=0.10\nmax_len=100\n\nforward_expansion=4\nsrc_pad_idx=english.vocab.stoi[\"<pad>\"]\nwriter=SummaryWriter(\"runs/loss_plot\")\nstep=0\n\ntrain_iterator,valid_iterator,test_iterator=BucketIterator.splits(\n (train_data,valid_data,test_data),\n batch_size=batch_size,\n sort_within_batch=True,\n sort_key=lambda x: len(x.src),\n device=device\n)", "_____no_output_____" ], [ "model=Transformer(\n embedding_size,\n src_vocab_size,\n trg_vocab_size,\n src_pad_idx,\n num_heads,\n num_encoder_layers,\n num_decoder_layers,\n forward_expansion,\n dropout,\n max_len,\n device\n ).to(device)\noptimizer=optim.Adam(model.parameters(),lr=learning_rate)\npad_idx=english.vocab.stoi[\"<pad>\"]\ncriterion=nn.CrossEntropyLoss(ignore_index=pad_idx)\nsentence=\"ein pferd geht unter einer brücke neben einem boot durch\"\n\nif load_model:\n load_checkpoint(torch.load(\"my_checkpoint.pth.ptar\"),model.optimzier)\n\nfor epoch in range(num_epochs):\n print(f\"[Epoch{epoch+1}/{num_epochs}]\")\n model.eval()\n translated_sentence=translate_sentence(\n model,sentence,german,english,device,max_length=100\n )\n print(f\"Translated example sentence: \\n {translated_sentence}\")\n model.train()\n \n for batch_idx,batch in enumerate(train_iterator):\n inp_data=batch.src.transpose(0,1).to(device)\n target=batch.trg.transpose(0,1).to(device)\n \n output=model(inp_data,target[:,:-1])\n \n output=output.reshape(-1,output.shape[2])\n target=target[:,1:].reshape(-1)\n optimizer.zero_grad()\n loss=criterion(output,target)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(),max_norm=1)\n optimizer.step()\n #writer.add_scalar(\"training loss\",loss,global_step=step)\n step+=1", "[Epoch1/20]\nTranslated example sentence: \n ['doorway', 'shielding', 'parasail', 'comforts', 'marked', 'ad', 'cup', 'shepherd', 'headband', 'geometric', 'interacting', 'stockings', 'preschool', 'hikers', 'metropolitan', 'pen', 'took', 'makeup', 'klein', 'shoe', 'dries', 'swimming', 'vegetation', 'multitasking', 'boys', 'rescue', 'spoon', 'structures', 'who', 'converses', 'better', 'seattle', 'shovel', 'hidden', 'driver', 'dark', 'mock', 'tambourine', 'hazard', 'they', 'object', 'league', 'pacifier', 'sections', 'upwards', 'blank', 'claus', 'whisking', 'cigarette', 'cakes', 'member', 'squat', 'participates', 'attempts', 'raises', 'famous', 'sandy', 'nature', 'feeding', 'lots', 'suspenders', 'teammates', 'foods', 'licking', 'gentlemen', 'necktie', 'couples', 'insect', 'electronics', 'science', 'idle', 'provide', 'created', 'picks', 'beads', 'removing', 'camps', 'butchering', 'clapping', 'tilts', 'workman', 'end', 'snowbank', 'artists', 'elephant', 'these', 'rugby', 'sundress', 'giant', 'ribbons', 'beneath', 'projection', 'attendant', 'gambling', 'couch', 'instructors', 'sequence', 'expressions', 'instructor', 'cash']\n[Epoch2/20]\nTranslated example sentence: \n ['a', 'person', 'is', 'walking', 'down', 'a', 'large', 'bike', '.', '<eos>']\n[Epoch3/20]\nTranslated example sentence: \n ['a', 'black', 'walking', 'down', 'a', 'building', 'is', 'walking', 'by', 'a', 'building', '.', '<eos>']\n[Epoch4/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'through', 'a', 'bridge', 'near', 'a', 'mountain', '.', '<eos>']\n[Epoch5/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'by', 
'a', 'bridge', 'near', 'a', 'boat', '.', '<eos>']\n[Epoch6/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'by', 'a', 'bridge', 'next', 'to', 'a', 'bridge', '.', '<eos>']\n[Epoch7/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'by', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch8/20]\nTranslated example sentence: \n ['a', 'horse', 'walking', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch9/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch10/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch11/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'beside', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch12/20]\nTranslated example sentence: \n ['a', 'horse', 'is', 'walking', 'beside', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch13/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'bridge', '.', '<eos>']\n[Epoch14/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch15/20]\nTranslated example sentence: \n ['a', 'horse', 'is', 'walking', 'beside', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch16/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'beside', 'a', 'bridge', 'near', 'a', 'bridge', '.', '<eos>']\n[Epoch17/20]\nTranslated example sentence: \n ['a', 'horse', 'is', 'walking', 'under', 'a', 'bridge', 'next', 'to', 'a', 'boat', '.', '<eos>']\n[Epoch18/20]\nTranslated example sentence: \n ['a', 'horse', 'is', 'walking', 'beside', 'a', 'bridge', 'next', 'to', 'a', 'bridge', '.', '<eos>']\n[Epoch19/20]\nTranslated example sentence: \n ['a', 'horse', 'walks', 'by', 'a', 'bridge', 'near', 'a', 'boat', '.', '<eos>']\n[Epoch20/20]\nTranslated example sentence: \n ['a', 'horse', 'is', 'walking', 'beside', 'a', 'bridge', 'near', 'a', 'boat', '.', '<eos>']\n" ] ] ]
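The notebook imports a `bleu` helper from `utils_scratch` but never calls it. A hedged sketch (an addition, not from the original notebook) of scoring the trained model on the test split with torchtext 0.6's built-in `bleu_score`; it reuses `translate_sentence`, `test_data`, `german`, `english`, and `device` defined above, since the exact signature of the imported helper is unknown:

```python
# Sketch: corpus-level BLEU on the Multi30k test split.
from torchtext.data.metrics import bleu_score

model.eval()
candidates, references = [], []
for example in test_data:
    prediction = translate_sentence(model, example.src, german, english, device)
    candidates.append(prediction[:-1])   # drop the trailing <eos> token
    references.append([example.trg])     # one reference translation per sentence
print(f"BLEU score: {bleu_score(candidates, references) * 100:.2f}")
```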
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5d51501c661a530730a5bfdc48d0189184cb58
18,175
ipynb
Jupyter Notebook
Initial Stages.ipynb
abegpatel/Self-Driving-Autonomous-Cars
8623a9a4f3b9ca8412d330bdc1c94b2993d3e8e8
[ "MIT" ]
null
null
null
Initial Stages.ipynb
abegpatel/Self-Driving-Autonomous-Cars
8623a9a4f3b9ca8412d330bdc1c94b2993d3e8e8
[ "MIT" ]
null
null
null
Initial Stages.ipynb
abegpatel/Self-Driving-Autonomous-Cars
8623a9a4f3b9ca8412d330bdc1c94b2993d3e8e8
[ "MIT" ]
null
null
null
147.764228
14,624
0.881981
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nn_pts=100\nnp.random.seed(0)\ntop_region=np.array([np.random.normal(10,2,n_pts), np.random.normal(12,2,n_pts)]).T\nbottom_region= np.array([np.random.normal(5,2, n_pts), np.random.normal(6,2, n_pts)]).T\n_, ax= plt.subplots(figsize=(4,4))\nax.scatter(top_region[:,0], top_region[:,1], color='r')\nax.scatter(bottom_region[:,0], bottom_region[:,1], color='b')\n\nplt.show()", "_____no_output_____" ], [ "#gradient descent\n#minimize the error\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\ndef draw(x1,x2):\n ln=plt.plot(x1,x2,'-')\n plt.pause(0.0001)\n ln[0].remove()\ndef sigmoid(score):\n return 1/(1+np.exp(-score))\ndef calculate_error(line_parameters, points , y):\n n=points.shape[0]\n p= sigmoid(points*line_parameters)\n cross_entropy=-(1/n)*(np.log(p).T*y + np.log(1-p).T*(1-y))\n return cross_entropy\ndef gradient_descent(line_parameters, points, y , alpha):\n n=points.shape[0]\n for i in range(2000):\n p=sigmoid(points*line_parameters)\n gradient= points.T*(p-y)*(alpha/n)\n line_parameters = line_parameters - gradient\n \n w1=line_parameters.item(0)\n w2=line_parameters.item(1)\n b=line_parameters.item(2)\n \n x1=np.array([points[:,0].min(), points[:,0].max()])\n x2= -b/w2 + (x1*(-w1/w2))\n draw(x1,x2) \n \nn_pts=100\nnp.random.seed(0)\nbias= np.ones(n_pts)\ntop_region=np.array([np.random.normal(10,2,n_pts), np.random.normal(12,2,n_pts), bias]).T\nbottom_region= np.array([np.random.normal(5,2, n_pts), np.random.normal(6,2, n_pts), bias]).T\nall_points=np.vstack((top_region, bottom_region))\n \nline_parameters = np.matrix([np.zeros(3)]).T\n# x1=np.array([bottom_region[:,0].min(), top_region[:,0].max()])\n# x2= -b/w2 + (x1*(-w1/w2))\ny=np.array([np.zeros(n_pts), np.ones(n_pts)]).reshape(n_pts*2, 1)\n \n_, ax= plt.subplots(figsize=(4,4))\nax.scatter(top_region[:,0], top_region[:,1], color='r')\nax.scatter(bottom_region[:,0], bottom_region[:,1], color='b')\ngradient_descent(line_parameters, all_points, y , 0.06)\nplt.show()\n", "_____no_output_____" ] ] ]
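The notebook defines `calculate_error` but never calls it. A small addition (assumed, not in the original) that reports the cross-entropy before and after the 2000 update steps; it repeats the update rule from `gradient_descent`, since that function does not return the fitted parameters:

```python
# Sketch: track the cross-entropy with the notebook's own calculate_error.
line_parameters = np.matrix([np.zeros(3)]).T
print('initial error:', calculate_error(line_parameters, all_points, y))

n = all_points.shape[0]
for i in range(2000):
    p = sigmoid(all_points * line_parameters)                       # same update as gradient_descent
    line_parameters = line_parameters - all_points.T * (p - y) * (0.06 / n)

print('final error:', calculate_error(line_parameters, all_points, y))
```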
[ "code" ]
[ [ "code", "code" ] ]
ec5d546df626b7cdcd79a9d122fad2c68d4f0eb2
2,929
ipynb
Jupyter Notebook
29ResNet.ipynb
chrzc/-
052586833bc769efa43b4ff167af1a7536a847f8
[ "MIT" ]
null
null
null
29ResNet.ipynb
chrzc/-
052586833bc769efa43b4ff167af1a7536a847f8
[ "MIT" ]
null
null
null
29ResNet.ipynb
chrzc/-
052586833bc769efa43b4ff167af1a7536a847f8
[ "MIT" ]
null
null
null
22.189394
110
0.488221
[ [ [ "# ResNet adds the input back onto the output of the original block; this guarantees that extra layers can always fall back to the identity mapping", "_____no_output_____" ], [ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom d2l import torch as d2l", "_____no_output_____" ], [ "class Residual(nn.Module):\n def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):\n super().__init__()\n self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)\n self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)\n if use_1x1conv:\n self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)\n \n else:\n self.conv3 = None\n \n self.bn1 = nn.BatchNorm2d(num_channels)\n self.bn2 = nn.BatchNorm2d(num_channels)\n self.relu = nn.ReLU(inplace=True)\n \n def forward(self, X):\n Y = F.relu(self.bn1(self.conv1(X)))\n Y = self.bn2(self.conv2(Y))\n if self.conv3:\n X = self.conv3(X)\n Y += X\n return F.relu(Y)\n ", "_____no_output_____" ], [ "# Input and output have the same shape\nblk = Residual(3, 3)\nX = torch.rand(4, 3, 6, 6)\nY = blk(X)\nY.shape\n", "_____no_output_____" ], [ "# Except for the first block, the others halve the height and width", "_____no_output_____" ] ] ]
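The last comment is left without code; a short sketch (not in the original notebook) demonstrating the halving behaviour with the `Residual` block defined above:

```python
# With use_1x1conv=True and strides=2 the block halves height and width
# while changing the channel count; X is the (4, 3, 6, 6) tensor from above.
blk2 = Residual(3, 6, use_1x1conv=True, strides=2)
print(blk2(X).shape)  # expected: torch.Size([4, 6, 3, 3])
```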
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ec5d56a4c6fbc8cac6cfb80f9628bbd3a8cb38c2
50,671
ipynb
Jupyter Notebook
.ipynb_checkpoints/test-checkpoint.ipynb
ryanmdavis/BioTechTopics
b8a3da6d1ceef12fedcc32a4ff07520de7cf6d34
[ "MIT" ]
1
2018-04-13T03:42:38.000Z
2018-04-13T03:42:38.000Z
.ipynb_checkpoints/test-checkpoint.ipynb
ryanmdavis/BioTechTopics
b8a3da6d1ceef12fedcc32a4ff07520de7cf6d34
[ "MIT" ]
null
null
null
.ipynb_checkpoints/test-checkpoint.ipynb
ryanmdavis/BioTechTopics
b8a3da6d1ceef12fedcc32a4ff07520de7cf6d34
[ "MIT" ]
null
null
null
83.615512
16,117
0.618223
[ [ [ "<table style=\"float:left; border:none\">\n <tr style=\"border:none; background-color: #ffffff\">\n <td style=\"border:none\">\n <a href=\"http://bokeh.pydata.org/\"> \n <img \n src=\"assets/bokeh-transparent.png\" \n style=\"width:50px\"\n >\n </a> \n </td>\n <td style=\"border:none\">\n <h1>Bokeh Tutorial</h1>\n </td>\n </tr>\n</table>\n\n<div style=\"float:right;\"><h2>04. Data Sources and Transformations</h2></div>", "_____no_output_____" ] ], [ [ "from bokeh.io import output_notebook, show\nfrom bokeh.plotting import figure", "_____no_output_____" ], [ "output_notebook()", "_____no_output_____" ] ], [ [ "# Overview\n\nWe've seen how Bokeh can work well with Python lists, NumPy arrays, Pandas series, etc. At lower levels, these inputs are converted to a Bokeh `ColumnDataSource`. This data type is the central data source object used throughout Bokeh. Although Bokeh often creates them for us transparently, there are times when it is useful to create them explicitly.\n\nIn later sections we will see features like hover tooltips, computed transforms, and CustomJS interactions that make use of the `ColumnDataSource`, so let's take a quick look now. ", "_____no_output_____" ], [ "## Creating with Python Dicts\n\nThe `ColumnDataSource` can be imported from `bokeh.models`:", "_____no_output_____" ] ], [ [ "from bokeh.models import ColumnDataSource", "_____no_output_____" ] ], [ [ "The `ColumnDataSource` is a mapping of column names (strings) to sequences of values. Here is a simple example. The mapping is provided by passing a Python `dict` with string keys and simple Python lists as values. The values could also be NumPy arrays or Pandas sequences.\n\n***NOTE: ALL the columns in a `ColumnDataSource` must always be the SAME length.***\n", "_____no_output_____" ] ], [ [ "source = ColumnDataSource(data={\n 'x' : [1, 2, 3, 4, 5],\n 'y' : [3, 7, 8, 5, 1],\n})", "_____no_output_____" ] ], [ [ "Up until now we have called functions like `p.circle` by passing in literal lists or arrays of data directly. When we do this, Bokeh creates a `ColumnDataSource` for us, automatically. But it is possible to specify a `ColumnDataSource` explicitly by passing it as the `source` argument to a glyph method. Whenever we do this, if we want a property (like `\"x\"` or `\"y\"` or `\"fill_color\"`) to have a sequence of values, we pass the ***name of the column*** that we would like to use for a property:", "_____no_output_____" ] ], [ [ "p = figure(plot_width=400, plot_height=400)\np.circle('x', 'y', size=20, source=source)\nshow(p)", "_____no_output_____" ], [ "# Exercise: create a column data source with NumPy arrays as column values and plot it\n", "_____no_output_____" ] ], [ [ "## Creating with Pandas DataFrames\n\nIt's also simple to create `ColumnDataSource` objects directly from Pandas data frames. To do this, just pass the data frame to `ColumnDataSource` when you create it:", "_____no_output_____" ] ], [ [ "from bokeh.sampledata.iris import flowers as df\n\nsource = ColumnDataSource(df)", "_____no_output_____" ] ], [ [ "Now we can use it as we did above by passing the column names to glyph methods:", "_____no_output_____" ] ], [ [ "p = figure(plot_width=400, plot_height=400)\np.circle('petal_length', 'petal_width', source=source)\nshow(p)", "_____no_output_____" ], [ "# Exercise: create a column data source with the autompg sample data frame and plot it\n\nfrom bokeh.sampledata.autompg import autompg_clean as df\n", "_____no_output_____" ] ] ]
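One possible solution to the last exercise, hedged as a sketch: the `'weight'` and `'mpg'` column names are assumed from the autompg sample data, and `ColumnDataSource`, `figure`, and `show` are the ones already imported in the notebook.

```python
# Sketch: build a ColumnDataSource from the autompg data frame and plot it.
source = ColumnDataSource(df)
p = figure(plot_width=400, plot_height=400)
p.circle('weight', 'mpg', source=source)  # assumed column names
show(p)
```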
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ec5d64605a43faa6b430cd93bf6e22d97c3d5651
30,211
ipynb
Jupyter Notebook
Tutorial.ipynb
abishek21/eda4nlp
07e94ea826ac6ab5be864ca688c07d31df66ef78
[ "MIT" ]
3
2020-05-20T16:38:16.000Z
2020-05-29T10:01:45.000Z
Tutorial.ipynb
abishek21/eda4nlp
07e94ea826ac6ab5be864ca688c07d31df66ef78
[ "MIT" ]
null
null
null
Tutorial.ipynb
abishek21/eda4nlp
07e94ea826ac6ab5be864ca688c07d31df66ef78
[ "MIT" ]
null
null
null
89.91369
21,240
0.806858
[ [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "## import ngram from the eda4nlp package", "_____no_output_____" ] ], [ [ "from eda4nlp.utils import ngram", "_____no_output_____" ], [ "df=pd.read_csv(\"movies_metadata.csv\")\nuse_col=['overview']\ndf=df[use_col]\ndf=df.astype(str)", "C:\\Users\\abishek\\PycharmProjects\\vision\\venv\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3063: DtypeWarning: Columns (10) have mixed types.Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ] ], [ [ "## Calculate unigram, bigram, and trigram counts on this column", "_____no_output_____" ] ], [ [ "df.head(10)", "_____no_output_____" ] ], [ [ "## ngram takes 5 arguments\n#### ngram(corpus,ngram,stop_words,plot,topk)\n#### corpus--> the pandas column to analyze\n#### ngram--> 1 for unigrams, 2 for bigrams, 3 for trigrams\n#### stop_words--> default is None. To use stop_words pass 'english'\n#### plot--> True to plot the ngram vs. count frequency distribution. default=False\n#### topk--> to visualize the top-k ngram counts in the plot\n#### returns a list of (ngram, count) pairs", "_____no_output_____" ] ], [ [ "common_words=ngram(df['overview'], ngram=2,stop_words='english',plot=True,topk=20)", "_____no_output_____" ], [ "df1=pd.DataFrame(common_words,columns=['ngrams','count'])", "_____no_output_____" ], [ "df1.head(10)", "_____no_output_____" ] ] ]
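For reference, a hedged sketch of how such bigram counting is typically implemented with scikit-learn's `CountVectorizer`; this illustrates the underlying idea only and is not the actual `eda4nlp` implementation:

```python
# Sketch: count the top-20 bigrams in the overview column with scikit-learn.
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer(ngram_range=(2, 2), stop_words='english')
bag = vec.fit_transform(df['overview'])          # document-term matrix of bigrams
counts = bag.sum(axis=0)                         # total count per bigram
freqs = [(word, counts[0, idx]) for word, idx in vec.vocabulary_.items()]
top20 = sorted(freqs, key=lambda x: x[1], reverse=True)[:20]
print(top20)
```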
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ec5d73dfc451e527d1107fdfadee34f1516b8c8e
116,701
ipynb
Jupyter Notebook
DataScienceProgramming/09-Machine-Learning-II/DecisionTree.ipynb
cartermin/MSA8090
c8d95fb7c71682b2197a391995b76f6043a905cc
[ "CC0-1.0" ]
7
2017-08-21T23:23:59.000Z
2020-12-16T23:57:11.000Z
DataScienceProgramming/09-Machine-Learning-II/DecisionTree.ipynb
cartermin/MSA8090
c8d95fb7c71682b2197a391995b76f6043a905cc
[ "CC0-1.0" ]
null
null
null
DataScienceProgramming/09-Machine-Learning-II/DecisionTree.ipynb
cartermin/MSA8090
c8d95fb7c71682b2197a391995b76f6043a905cc
[ "CC0-1.0" ]
7
2017-08-22T00:39:01.000Z
2019-09-09T01:46:29.000Z
35.84183
255
0.332996
[ [ [ "<script>\nvar css = '.container { width: 100% !important; padding-left: 1em; padding-right: 2em; } div.output_stderr { background: #FFA; }',\n head = document.head || document.getElementsByTagName('head')[0],\n style = document.createElement('style');\n\nstyle.type = 'text/css';\nif (style.styleSheet){\n style.styleSheet.cssText = css;\n} else {\n style.appendChild(document.createTextNode(css));\n}\n\nhead.appendChild(style);\n</script>", "_____no_output_____" ] ], [ [ "# %load nbinit.py\nfrom IPython.display import HTML\nHTML(\"<style>.container { width: 100% !important; padding-left: 1em; padding-right: 2em; } div.output_stderr { background: #FFA; }</style>\")", "_____no_output_____" ] ], [ [ "# Decision Tree\nLet's see how well a decision tree can classify the data. In doing so, we need to consider\n1. the parameters to the classifier, and\n2. the features of the data set that will be used.\n\nWe may just explore the impact of the maximum depth of the decision tree. Two of the 16 features ('day' and 'month') may not be useful because they reflect a date, and we're not looking for seasonal effects. So, it's fairly safe to take them out.\n\nOnce the dataset is loaded we will convert the categorical data into numeric values.\n\nFinding the right parameters and features for the best performing classifier can be a challenge. The number of possible configurations grows quickly, and knowing how they perform requires training and testing with each of them.\n\nWe may also run the training and testing on a configuration multiple times with different random splits of the data set. The performance metrics will be averaged over the iterations.\n\nWe use precision, recall, and the F1 score to evaluate each configuration.\n", "_____no_output_____" ] ], [ [ "### Load Packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nimport sklearn.tree\nimport pydot_ng as pdot\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_recall_fscore_support\nimport itertools", "_____no_output_____" ] ], [ [ "## Reading Data", "_____no_output_____" ] ], [ [ "### Read data\nDATAFILE = '/home/data/archive.ics.uci.edu/BankMarketing/bank.csv'\ndf = pd.read_csv(DATAFILE, sep=';')", "_____no_output_____" ], [ "### use sets and '-' difference operation 'A-B'. 
Also there is a symmetric difference '^'\nall_features = set(df.columns)-set(['y'])\nnum_features = set(df.describe().columns)\ncat_features = all_features-num_features\nprint(\"All features: \", \", \".join(all_features), \"\\nNumerical features: \", \", \".join(num_features), \"\\nCategorical features: \", \", \".join(cat_features))", "All features:  balance, day, education, previous, loan, contact, pdays, marital, duration, job, campaign, month, poutcome, age, default, housing \nNumerical features:  balance, day, duration, previous, campaign, age, pdays \nCategorical features:  job, education, month, loan, contact, poutcome, default, marital, housing\n" ], [ "### convert categorical variables to numeric ones\nlevel_substitution = {}\n\ndef levels2index(levels):\n dct = {}\n for i in range(len(levels)):\n dct[levels[i]] = i\n return dct\n\ndf_num = df.copy()\n\nfor c in cat_features:\n level_substitution[c] = levels2index(df[c].unique())\n df_num[c].replace(level_substitution[c], inplace=True)\n\n## same for target\ndf_num.y.replace({'no':0, 'yes':1}, inplace=True)\ndf_num", "_____no_output_____" ], [ "### create feature matrix and target vector\nX = df_num[list(all_features-set(['day', 'month']))].as_matrix()\ny = df_num.y.as_matrix()\nX, y", "_____no_output_____" ] ], [ [ "## Evaluation\nTest how the maximum depth of the tree impacts performance", "_____no_output_____" ] ], [ [ "for d in [3, 5, 7, 11, 13]:\n clf = DecisionTreeClassifier(max_depth=d)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)\n clf.fit(X_train, y_train)\n ŷ = clf.predict(X_test)\n print('Depth %d' % d)\n print(classification_report(y_test, ŷ))", "Depth 3\n precision recall f1-score support\n\n 0 0.93 0.97 0.95 1620\n 1 0.55 0.35 0.43 189\n\navg / total 0.89 0.90 0.89 1809\n\nDepth 5\n precision recall f1-score support\n\n 0 0.93 0.96 0.94 1620\n 1 0.51 0.34 0.41 189\n\navg / total 0.88 0.90 0.89 1809\n\nDepth 7\n precision recall f1-score support\n\n 0 0.93 0.96 0.94 1620\n 1 0.51 0.34 0.41 189\n\navg / total 0.88 0.90 0.89 1809\n\nDepth 11\n precision recall f1-score support\n\n 0 0.93 0.94 0.93 1620\n 1 0.41 0.38 0.39 189\n\navg / total 0.87 0.88 0.88 1809\n\nDepth 13\n precision recall f1-score support\n\n 0 0.93 0.92 0.92 1620\n 1 0.37 0.42 0.40 189\n\navg / total 0.87 0.86 0.87 1809\n\n" ] ], [ [ "Two methods from `sklearn.metrics` can be helpful:\n1. `confusion_matrix` produces a confusion matrix\n2. 
`precision_recall_fscore_support` returns a matrix with values for each metric across all target levels.", "_____no_output_____" ] ], [ [ "cm = confusion_matrix(y_test, ŷ)\ncm", "_____no_output_____" ], [ "prf1s = precision_recall_fscore_support(y_test, ŷ)\nprf1s", "_____no_output_____" ], [ "perf = None\nfor i in range(100):\n if type(perf)!=type(None):\n perf = np.vstack((perf, np.array(prf1s).reshape(1,8)))\n else:\n perf = np.array(prf1s).reshape(1,8)\nperf_agg = perf.mean(axis=0)\npd.DataFrame(perf_agg.reshape(1,8), columns=[[b for a in ['Precision', 'Recall', 'F1_score', 'Support'] for b in [a, a]], ['no', 'yes']*4])\n##pd.DataFrame([5,5, 'a|b|c'] + list(perf.mean(axis=0)), columns=perf_df.columns)", "_____no_output_____" ], [ "performance_df = pd.DataFrame(columns=[\n ['Params']*3 + [b for a in ['Precision', 'Recall', 'F1_score', 'Support'] for b in [a, a]],\n ['MaxDepth', 'Nfeature', 'Features'] + ['no', 'yes']*4\n ])\ntempdf = pd.concat([\n pd.DataFrame({'a': [1], 'b': [2], 'c': ['Hello']}),\n pd.DataFrame(np.zeros((1,8)))\n ], axis=1, ignore_index=True)\n\ntempdf.columns=performance_df.columns\n#performance_df\ntempdf", "_____no_output_____" ], [ "pd.DataFrame(np.zeros(8).reshape(1,8))", "_____no_output_____" ] ], [ [ "## The Heavy Lifting\nNow, let's run the performance evaluation across a number of configurations. We'll collect the results for each configuration into a dataframe.", "_____no_output_____" ] ], [ [ "# creating a template (i.e. empty table)\nperformance_template_df = pd.DataFrame(columns= [\n ['Params']*3 + [b for a in ['Precision', 'Recall', 'F1_score', 'Support'] for b in [a, a]],\n ['MaxDepth', 'Nfeature', 'Features'] + ['no', 'yes']*4\n ])\nperformance_template_df", "_____no_output_____" ] ], [ [ "The following code implements nested loops for MaxDepth, number and combination of features. In addition, we have an internal loop to\naggregate the performance metrics over a number of different random splits.\n\nThe outer two loops, however, only iterate over one value each. 
The commented code shows how they should run...", "_____no_output_____" ] ], [ [ "%%time\nperformance_df = performance_template_df.copy() #-- always start fresh\n\nfor MaxDepth in [5]: ###range(5,9):\n for Nftr in [8]: ###[len(all_features) - k for k in range(len(all_features)-2)]:\n for ftrs in itertools.combinations(all_features-set(['day', 'month']), Nftr):\n X = df_num[list(ftrs)].as_matrix()\n clf = DecisionTreeClassifier(max_depth=MaxDepth)\n\n perf_arr = None #-- this array will hold results for different random samples\n for i in range(10): ### running train and test on different random samples\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=i)\n clf.fit(X_train, y_train)\n ŷ = clf.predict(X_test)\n #Prec, Recall, F1, Supp \n prf1s = precision_recall_fscore_support(y_test, ŷ)\n\n ## \n if type(perf_arr)!=type(None):\n perf_arr = np.vstack((perf_arr, np.array(prf1s).reshape(1,8)))\n else:\n perf_arr = np.array(prf1s).reshape(1,8)\n perf_agg = perf_arr.mean(axis=0) #-- mean over rows, for each column\n perf_df = pd.concat([ #-- creating a 1 row dataframe is a bit tricky because of the different data types\n pd.DataFrame({'a': [MaxDepth], 'b': [Nftr], 'c': ['|'.join(list(ftrs))]}),\n pd.DataFrame(perf_agg.reshape(1, 8))\n ], axis=1, ignore_index=True)\n perf_df.columns=performance_df.columns\n performance_df = performance_df.append(perf_df, ignore_index=True)", "/usr/lib64/python3.4/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.\n 'precision', 'predicted', average, warn_for)\n" ], [ "performance_df", "_____no_output_____" ] ], [ [ "That took a while (about 2 minutes). Once computations take that long we should look at a different way to implement them ... ** outside the notebook **.", "_____no_output_____" ], [ "Let's see what the best performing configuration with respect to the F1-score of 'yes' is:", "_____no_output_____" ] ], [ [ "best = performance_df.F1_score.yes.argmax()\nprint(performance_df.iloc[best])\nprint(\"\\nFeatures: \", ', '.join([ '\"%s\"'%f for f in performance_df.iloc[best].Params.Features.split('|') ], ))", "Params MaxDepth 5\n Nfeature 8\n Features balance|education|previous|loan|contact|pdays|...\nPrecision no 0.93322\n yes 0.46445\nRecall no 0.942833\n yes 0.42338\nF1_score no 0.937998\n yes 0.442813\nSupport no 1611.73\n yes 188.317\nName: 9, dtype: object\n\nFeatures: \"balance\", \"education\", \"previous\", \"loan\", \"contact\", \"pdays\", \"duration\", \"poutcome\"\n" ] ] ]
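One way to move this work "outside the notebook", as suggested above, is to hand the parameter sweep to scikit-learn. A hedged sketch (not part of the original notebook) of the same `max_depth` search with cross-validated grid search; `n_jobs=-1` parallelizes across cores, and the feature-subset loop would still need to be handled separately:

```python
# Sketch: cross-validated search over max_depth with GridSearchCV.
from sklearn.model_selection import GridSearchCV

grid = GridSearchCV(DecisionTreeClassifier(),
                    param_grid={'max_depth': list(range(3, 14))},
                    scoring='f1', cv=5, n_jobs=-1)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)
```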
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
ec5d7d06b040712043d56165b66f51384eee16eb
17,976
ipynb
Jupyter Notebook
Chapter06/6.07 Building Capsule Networks in TensorFlow.ipynb
bharlow058/Packt-Deep-Learning
181bbd57c264c7bea635cb558d22c8b164167c8d
[ "MIT" ]
86
2018-12-29T15:36:20.000Z
2022-03-03T02:55:39.000Z
Chapter06/6.07 Building Capsule Networks in TensorFlow.ipynb
bharlow058/Packt-Deep-Learning
181bbd57c264c7bea635cb558d22c8b164167c8d
[ "MIT" ]
null
null
null
Chapter06/6.07 Building Capsule Networks in TensorFlow.ipynb
bharlow058/Packt-Deep-Learning
181bbd57c264c7bea635cb558d22c8b164167c8d
[ "MIT" ]
57
2018-12-19T14:03:32.000Z
2022-02-25T09:04:13.000Z
33.662921
487
0.554016
[ [ [ "## Building Capsule Networks in TensorFlow", "_____no_output_____" ], [ "Now, we will learn how to implement capsule networks in TensorFlow. We will use our favorite MNIST dataset to learn how capsule networks recognize handwritten digits. ", "_____no_output_____" ], [ "## Import the libraries\n\nImport the required libraries:", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\ntf.logging.set_verbosity(tf.logging.ERROR)", "_____no_output_____" ] ], [ [ "## Load the dataset\n\nLoad the MNIST dataset:", "_____no_output_____" ] ], [ [ "mnist = input_data.read_data_sets(\"data/mnist\",one_hot=True)", "Extracting data/mnist/train-images-idx3-ubyte.gz\nExtracting data/mnist/train-labels-idx1-ubyte.gz\nExtracting data/mnist/t10k-images-idx3-ubyte.gz\nExtracting data/mnist/t10k-labels-idx1-ubyte.gz\n" ] ], [ [ "Set the batch size and epsilon:", "_____no_output_____" ] ], [ [ "batch_size = 50\nepsilon = 1e-9", "_____no_output_____" ] ], [ [ "## Defining Squash function\n\nWe learned that the squash function converts the length of the vector into a probability, and it is given as:\n\n$$\\vec{v}_{j}=\\frac{\\left\\|\\vec{s}_{j}\\right\\|^{2}}{1+\\left\\|\\vec{s}_{j}\\right\\|^{2}} \\frac{\\vec{s}_{j}}{\\left\\|\\vec{s}_{j}\\right\\|} \\tag{1}$$\n\n\nThe above squash function can be defined as follows:", "_____no_output_____" ] ], [ [ "def squash(sj):\n \n sj_norm = tf.reduce_sum(tf.square(sj), -2, keep_dims=True)\n scalar_factor = sj_norm / (1 + sj_norm) / tf.sqrt(sj_norm + epsilon)\n\n vj = scalar_factor * sj \n\n return vj", "_____no_output_____" ] ], [ [ "## Defining Dynamic Routing Algorithm\n\n\nNow we will see how the dynamic routing algorithm is implemented. We use the same variable names as the notation of the dynamic routing algorithm so that we can easily follow the steps. 
You can also check the comments on each line of code for better understanding.\n\nFirst, define a function, dynamic_routing, which takes ui (the previous capsules), bij (the coupling coefficients), and num_routing (the number of routing iterations) as inputs and returns the activity vector vj as output.", "_____no_output_____" ] ], [ [ "def dynamic_routing(ui, bij, num_routing=10):\n \n #initialize weights wij by drawing from a random normal distribution\n wij = tf.get_variable('Weight', shape=(1, 1152, 160, 8, 1), dtype=tf.float32, \n initializer=tf.random_normal_initializer(0.01))\n\n #initialize biases with a constant value\n biases = tf.get_variable('bias', shape=(1, 1, 10, 16, 1))\n \n #define the primary capsules: (tf.tile replicates the tensor n times)\n ui = tf.tile(ui, [1, 1, 160, 1, 1])\n\n #compute the prediction vector\n u_hat = tf.reduce_sum(wij * ui, axis=3, keep_dims=True)\n \n #reshape the prediction vector\n u_hat = tf.reshape(u_hat, shape=[-1, 1152, 10, 16, 1])\n\n #stop gradient computation in the prediction vector\n u_hat_stopped = tf.stop_gradient(u_hat, name='stop_gradient')\n\n #perform dynamic routing for number of routing iterations\n for r in range(num_routing):\n \n #refer dynamic routing algorithm in the book for the detailed explanation on the following steps\n with tf.variable_scope('iter_' + str(r)):\n \n #step 1\n cij = tf.nn.softmax(bij, dim=2)\n \n #step 2\n if r == num_routing - 1:\n\n sj = tf.multiply(cij, u_hat)\n\n sj = tf.reduce_sum(sj, axis=1, keep_dims=True) + biases\n\n vj = squash(sj)\n\n elif r < num_routing - 1: \n\n sj = tf.multiply(cij, u_hat_stopped)\n\n sj = tf.reduce_sum(sj, axis=1, keep_dims=True) + biases\n\n vj = squash(sj)\n\n vj_tiled = tf.tile(vj, [1, 1152, 1, 1, 1])\n\n coupling_coeff = tf.reduce_sum(u_hat_stopped * vj_tiled, axis=3, keep_dims=True)\n\n #step 3\n bij += coupling_coeff\n\n return vj", "_____no_output_____" ] ], [ [ "## Compute Primary capsules and Digit capsules\n\n\nCompute the primary capsules, which extract the basic features, and the digit capsules, which recognize the digits:", "_____no_output_____" ] ], [ [ "graph = tf.Graph()\n\nwith graph.as_default() as g:\n \n #placeholders for the input and output\n x = tf.placeholder(tf.float32, [batch_size, 784])\n y = tf.placeholder(tf.float32, [batch_size,10])\n \n #reshape the input x\n x_image = tf.reshape(x, [-1,28,28,1])\n\n #perform the convolutional operation and get the convolutional input,\n with tf.name_scope('convolutional_input'):\n input_data = tf.contrib.layers.conv2d(inputs=x_image, num_outputs=256, \n kernel_size=9, padding='valid')\n \n \n #compute the primary capsules which extract the basic features such as edges. 
\n #first, compute the capsules using convolution operation:\n capsules = []\n\n for i in range(8):\n\n with tf.name_scope('capsules_' + str(i)):\n \n #convolution operation\n output = tf.contrib.layers.conv2d(inputs=input_data, num_outputs=32,kernel_size=9,\n stride=2, padding='valid')\n \n #reshape the output\n output = tf.reshape(output, [batch_size, -1, 1, 1])\n \n #store the output, which is a capsule, in the capsules list\n capsules.append(output)\n \n #concatenate all the capsules and form the primary capsule \n primary_capsule = tf.concat(capsules, axis=2)\n \n #apply the squash function to the primary capsule to obtain the probabilities\n primary_capsule = squash(primary_capsule)\n \n\n #compute digit capsules using dynamic routing\n with tf.name_scope('dynamic_routing'):\n \n #reshape the primary capsule\n outputs = tf.reshape(primary_capsule, shape=(batch_size, -1, 1, primary_capsule.shape[-2].value, 1))\n \n #initialize bij with 0s\n bij = tf.constant(np.zeros([1, primary_capsule.shape[1].value, 10, 1, 1], dtype=np.float32))\n \n #compute the digit capsules using the dynamic routing algorithm, which takes \n #the reshaped primary capsules and bij as inputs and returns the activity vector \n digit_capsules = dynamic_routing(outputs, bij)\n \n digit_capsules = tf.squeeze(digit_capsules, axis=1)", "_____no_output_____" ] ], [ [ "## Masking the Digit Capsule\n\nWhy do we need to mask the digit capsule? We learned that, in order to make sure that the network has learned the important features, we use a three-layer network called the decoder network, which tries to reconstruct the original image from the digit capsules. If the decoder is able to reconstruct the image successfully from the digit capsules, it means the network has learned the important features of the image; otherwise, it has not learned the correct features of the image.\n\nThe digit capsules contain the activity vectors for all the digits. But the decoder wants to reconstruct only the given input digit (input image). So, we mask out the activity vectors of all the digits except for the correct digit. Then we use this masked digit capsule to reconstruct the given input image. ", "_____no_output_____" ] ], [ [ "with graph.as_default() as g:\n with tf.variable_scope('Masking'):\n \n # select the activity vector of the given input image using the actual label y and mask out the others\n masked_v = tf.multiply(tf.squeeze(digit_capsules), tf.reshape(y, (-1, 10, 1)))", "_____no_output_____" ] ], [ [ "## Defining the Decoder\n\n\nDefine the decoder network for reconstructing the image. It consists of three fully connected layers. 
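As a quick shape check (with batch_size = 50): the masked digit capsule flattens from (50, 10, 16) to (50, 160), the first layer maps it to (50, 512), the second to (50, 1024), and the final sigmoid layer outputs (50, 784) values, one per pixel of the 28 x 28 MNIST image. 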
", "_____no_output_____" ] ], [ [ "with graph.as_default() as g:\n \n with tf.name_scope('Decoder'):\n \n #masked digit capsule\n v_j = tf.reshape(masked_v, shape=(batch_size, -1))\n \n #first fully connected layer \n fc1 = tf.contrib.layers.fully_connected(v_j, num_outputs=512)\n \n #second fully connected layer\n fc2 = tf.contrib.layers.fully_connected(fc1, num_outputs=1024)\n\n #reconstructed image\n reconstructed_image = tf.contrib.layers.fully_connected(fc2, num_outputs=784, activation_fn=tf.sigmoid)", "_____no_output_____" ] ], [ [ "## Compute Accuracy\n\nNow, we compute the accuracy of our model.", "_____no_output_____" ] ], [ [ "with graph.as_default() as g:\n with tf.variable_scope('accuracy'):\n \n #compute the length of each activity vector in the digit capsule \n v_length = tf.sqrt(tf.reduce_sum(tf.square(digit_capsules), axis=2, keep_dims=True) + epsilon)\n \n #apply softmax to the length and get the probabilities\n softmax_v = tf.nn.softmax(v_length, dim=1)\n \n #select the index which got the highest probability this will give us the predicted digit \n argmax_idx = tf.to_int32(tf.argmax(softmax_v, axis=1)) \n predicted_digit = tf.reshape(argmax_idx, shape=(batch_size, ))\n \n #compute the accuracy\n actual_digit = tf.to_int32(tf.argmax(y, axis=1))\n \n correct_pred = tf.equal(predicted_digit,actual_digit)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))", "_____no_output_____" ] ], [ [ "## Calculating Loss\n\nAs we now, we compute two types of loss - Margin loss and reconstruction loss\n\nMargin loss $L_k$ for a digit (class) $k$ is given as: \n\n$$ L_{k}=T_{k} \\max \\left(0, m^{+}-\\left\\|v_{k}\\right\\|\\right)^{2}+\\lambda\\left(1-T_{k}\\right) \\max \\left(0,\\left\\|v_{k}\\right\\|-m^{-}\\right)^{2} \\tag{2} $$\n\nThe total margin loss is the sum of the loss of all classes and is given as:\n\n$$ \\operatorname{Margin Loss} =\\sum_{k} L_{k} \\tag{3} $$\n\nReconstruction loss is given as a squared difference between the reconstructed and original\nimage: \n\n$$ \\operatorname{Reconstruction Loss} = (\\text { Reconstructed Image }-\\text { Original Image })^{2} \\tag{4}$$\n\nThus the final loss is given as:\n\n$$ \\operatorname{Loss} = \\operatorname{Margin Loss} + \\operatorname{alpha} * \\operatorname{Reconstruction Loss} \\tag{5} $$", "_____no_output_____" ] ], [ [ "lambda_ = 0.5\nalpha = 0.0005\n\nwith graph.as_default() as g:\n\n #margin loss\n max_left = tf.square(tf.maximum(0.,0.9 - v_length))\n max_right = tf.square(tf.maximum(0., v_length - 0.1))\n\n T_k = y\n \n #compute margin loss L_k for class k as given in (2)\n L_k = T_k * max_left + lambda_ * (1 - T_k) * max_right\n \n #compute total margin as given in refer equation (3)\n margin_loss = tf.reduce_mean(tf.reduce_sum(L_k, axis=1))\n \n #reshape and get the original image\n original_image = tf.reshape(x, shape=(batch_size, -1))\n \n #compute reconstruction loss as shown in (4)\n squared = tf.square(reconstructed_image - original_image)\n reconstruction_loss = tf.reduce_mean(squared)\n\n #compute total loss which is the weighted sum of margin and reconstructed loss as shown in (5)\n total_loss = margin_loss + alpha * reconstruction_loss", "_____no_output_____" ] ], [ [ "## Optimize loss\n\nMinimize the loss using Adam Optimizer,", "_____no_output_____" ] ], [ [ "with graph.as_default() as g:\n optimizer = tf.train.AdamOptimizer(0.0001).minimize(total_loss)", "_____no_output_____" ] ], [ [ "## Start Training the Capsule Network", "_____no_output_____" ], [ "Set the number of epochs and number of 
steps:", "_____no_output_____" ] ], [ [ "num_epochs = 100\nnum_steps = int(len(mnist.train.images)/batch_size)", "_____no_output_____" ] ], [ [ "Now start the tensorflow session and perform training:", "_____no_output_____" ] ], [ [ "with tf.Session(graph=graph) as sess:\n\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n\n\n for epoch in range(num_epochs):\n for iteration in range(num_steps):\n batch_data, batch_labels = mnist.train.next_batch(batch_size)\n feed_dict = {x : batch_data, y : batch_labels}\n \n _, loss, acc = sess.run([optimizer, total_loss, accuracy], feed_dict=feed_dict)\n\n if iteration%10 == 0:\n print('Epoch: {}, iteration:{}, Loss:{} Accuracy: {}'.format(epoch,iteration,loss,acc))", "Epoch: 0, iteration:0, Loss:0.553201019764 Accuracy: 0.140000000596\nEpoch: 0, iteration:10, Loss:0.543244898319 Accuracy: 0.119999997318\nEpoch: 0, iteration:20, Loss:0.531144499779 Accuracy: 0.119999997318\nEpoch: 0, iteration:30, Loss:0.526307284832 Accuracy: 0.119999997318\nEpoch: 0, iteration:40, Loss:0.526460289955 Accuracy: 0.140000000596\n" ] ], [ [ "We just learned how capsule network works step by step and how to build capsule network in tensorflow, in the next chapter we will study in detail about the various algorithms that are used for learning the text representations.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5d85c97f86fb951e0327330a86e3ca06622770
6,868
ipynb
Jupyter Notebook
01_utils.ipynb
sso0090/garden2
4ecfe4b679cd17e3c9f9536cd48c8baebfb2cd5f
[ "Apache-2.0" ]
null
null
null
01_utils.ipynb
sso0090/garden2
4ecfe4b679cd17e3c9f9536cd48c8baebfb2cd5f
[ "Apache-2.0" ]
2
2021-09-28T01:20:50.000Z
2022-02-26T06:55:14.000Z
01_utils.ipynb
sso0090/garden2
4ecfe4b679cd17e3c9f9536cd48c8baebfb2cd5f
[ "Apache-2.0" ]
null
null
null
22.666667
243
0.524752
[ [ [ "## Exploring the data", "_____no_output_____" ], [ "So let's learn how we set up the data. ", "_____no_output_____" ] ], [ [ "# default_exp utils", "_____no_output_____" ], [ "#hide\nimport os,sys", "_____no_output_____" ], [ "#export\nroot = \"D:/data_sets/24_garden\"", "_____no_output_____" ], [ "#export\nimport tensorflow as tf", "_____no_output_____" ] ], [ [ "In order to parse the data, we need to \"unpack\" each sample to bring it into a `Dataset`. `tfrecord` has two special functions we can use, `unpack_int_64_list` and `unpack_bytes_list` (our classes are integers and our images are bytes)", "_____no_output_____" ] ], [ [ "#export\ndef unpack_int64_list(feature):\n return feature.int64_list.value", "_____no_output_____" ], [ "#export\ndef unpack_bytes_list(feature):\n return feature.bytes_list.value", "_____no_output_____" ], [ "#export\ndef unpack_sample(feats):\n return {\n 'class' : unpack_int64_list(feats['class']),\n 'image' : unpack_bytes_list(feats['image'])\n }", "_____no_output_____" ], [ "#export\nclass Reader:\n def __init__(self, fname, unpack_sample, compression=None):\n self._engine = iter(tf.compat.v1.io.tf_record_iterator(\n fname, compression_code(compression)))\n self._unpack_sample = unpack_sample\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n def __iter__(self):\n return self\n\n def __next__(self):\n buffer = next(self._engine)\n example = tf.train.Example()\n example.ParseFromString(buffer)\n return self._unpack_sample(example.features.feature)\n\n def read_sample(self):\n try:\n return __next__(self)\n except StopIteration:\n return None", "_____no_output_____" ], [ "#export\ndef compression_code(compression):\n if compression is None:\n return None\n code = _tf_compression_revmap.get(compression)\n if code is None:\n raise ValueError(\n 'Unknown or unsupported compression type: ' + compression)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
ec5d86a32a66fb5e6c0ded7dead0d1c1b90efd1d
38,724
ipynb
Jupyter Notebook
src/1_Preliminaries.ipynb
hogansung/udacity-computer-vision-nanodegree-program-project-2
3c9cfb42532f5149003017acb9c950c0d2d7499b
[ "MIT" ]
null
null
null
src/1_Preliminaries.ipynb
hogansung/udacity-computer-vision-nanodegree-program-project-2
3c9cfb42532f5149003017acb9c950c0d2d7499b
[ "MIT" ]
null
null
null
src/1_Preliminaries.ipynb
hogansung/udacity-computer-vision-nanodegree-program-project-2
3c9cfb42532f5149003017acb9c950c0d2d7499b
[ "MIT" ]
null
null
null
42.836283
704
0.630927
[ [ [ "# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.\n\nNote that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Explore the Data Loader\n- [Step 2](#step2): Use the Data Loader to Obtain Batches\n- [Step 3](#step3): Experiment with the CNN Encoder\n- [Step 4](#step4): Implement the RNN Decoder", "_____no_output_____" ], [ "<a id='step1'></a>\n## Step 1: Explore the Data Loader\n\nWe have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. \n\nIn the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. \n\n> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.\n\nThe `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:\n1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.\n2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.\n3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.\n4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. \n5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. \n\nWe will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. 
Be patient - it may take a couple of minutes to run!", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\n!pip install nltk\nimport nltk\nnltk.download('punkt')\nfrom data_loader_wrapper import DataLoaderWrapper\nfrom torchvision import transforms\n\n# Define a transform to pre-process the training images.\ntransform_train = transforms.Compose(\n [\n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize(\n (0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225),\n ),\n ]\n)\n\n# Set the minimum word count threshold.\nvocab_threshold = 5\n\n# Specify the batch size.\nbatch_size = 10\n\n# Obtain the data loader.\ndata_loader_wrapper = DataLoaderWrapper(\n transform=transform_train,\n batch_size_for_training=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=True,\n)", "Requirement already satisfied: nltk in /home/hogan/anaconda3/lib/python3.8/site-packages (3.6.1)\nRequirement already satisfied: joblib in /home/hogan/anaconda3/lib/python3.8/site-packages (from nltk) (1.0.1)\nRequirement already satisfied: regex in /home/hogan/anaconda3/lib/python3.8/site-packages (from nltk) (2021.4.4)\nRequirement already satisfied: click in /home/hogan/anaconda3/lib/python3.8/site-packages (from nltk) (7.1.2)\nRequirement already satisfied: tqdm in /home/hogan/anaconda3/lib/python3.8/site-packages (from nltk) (4.59.0)\n" ] ], [ [ "When you ran the code cell above, the data loader was stored in the variable `data_loader`. \n\nYou can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n### Exploring the `__getitem__` Method\n\nThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). \n\nWhen the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).\n\n#### Image Pre-Processing \n\nImage pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):\n```python\n# Convert image to tensor and pre-process using transform\nimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\nimage = self.transform(image)\n```\nAfter loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. \n\n#### Caption Pre-Processing \n\nThe captions also need to be pre-processed and prepped for training. 
In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.\n\nTo understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:\n```python\ndef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n ...\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n ...\n```\nFrom the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. \n\nWe use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):\n\n```python\n# Convert caption to tensor of word ids.\ntokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1\ncaption = [] # line 2\ncaption.append(self.vocab(self.vocab.start_word)) # line 3\ncaption.extend([self.vocab(token) for token in tokens]) # line 4\ncaption.append(self.vocab(self.vocab.end_word)) # line 5\ncaption = torch.Tensor(caption).long() # line 6\n```\n\nAs you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.", "_____no_output_____" ] ], [ [ "sample_caption = 'A person doing a trick on a rail while riding a skateboard.'", "_____no_output_____" ] ], [ [ "In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. Run the next code cell to visualize the effect on `sample_caption`.", "_____no_output_____" ] ], [ [ "import nltk\n\nsample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())\nprint(sample_tokens)", "['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']\n" ] ], [ [ "In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.\n\nThis special start word (`\"<start>\"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). 
You are **required** to keep this parameter at its default value (`start_word=\"<start>\"`).\n\nAs you will see below, the integer `0` is always used to mark the start of a caption.", "_____no_output_____" ] ], [ [ "sample_caption = []\n\nstart_word = data_loader_wrapper.dataset_for_training.vocab.start_word\nprint('Special start word:', start_word)\nsample_caption.append(data_loader_wrapper.dataset_for_training.vocab(start_word))\nprint(sample_caption)", "Special start word: <start>\n[0]\n" ] ], [ [ "In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.", "_____no_output_____" ] ], [ [ "sample_caption.extend([data_loader_wrapper.dataset_for_training.vocab(token) for token in sample_tokens])\nprint(sample_caption)", "[0, 3, 98, 754, 3, 396, 39, 3, 1010, 207, 139, 3, 753, 18]\n" ] ], [ [ "In **`line 5`**, we append a final integer to mark the end of the caption. \n\nIdentical to the case of the special start word (above), the special end word (`\"<end>\"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word=\"<end>\"`).\n\nAs you will see below, the integer `1` is always used to mark the end of a caption.", "_____no_output_____" ] ], [ [ "end_word = data_loader_wrapper.dataset_for_training.vocab.end_word\nprint('Special end word:', end_word)\n\nsample_caption.append(data_loader_wrapper.dataset_for_training.vocab(end_word))\nprint(sample_caption)", "Special end word: <end>\n[0, 3, 98, 754, 3, 396, 39, 3, 1010, 207, 139, 3, 753, 18, 1]\n" ] ], [ [ "Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).", "_____no_output_____" ] ], [ [ "import torch\n\nsample_caption = torch.Tensor(sample_caption).long()\nprint(sample_caption)", "tensor([ 0, 3, 98, 754, 3, 396, 39, 3, 1010, 207, 139, 3,\n 753, 18, 1])\n" ] ], [ [ "And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:\n```\n[<start>, 'a', 'person', 'doing', 'a', 'trick', 'while', 'riding', 'a', 'skateboard', '.', <end>]\n```\nThis list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:\n```\n[0, 3, 98, 754, 3, 396, 207, 139, 3, 753, 18, 1]\n```\nFinally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above. \n\nAs you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**. \n\n```python\ndef __call__(self, word):\n if not word in self.word2idx:\n return self.word2idx[self.unk_word]\n return self.word2idx[word]\n```\n\nThe `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). 
For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.\n\nUse the code cell below to view a subset of this dictionary.", "_____no_output_____" ] ], [ [ "# Preview the word2idx dictionary.\ndict(list(data_loader_wrapper.dataset_for_training.vocab.word2idx.items())[:10])", "_____no_output_____" ] ], [ [ "We also print the total number of keys.", "_____no_output_____" ] ], [ [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader_wrapper.dataset_for_training.vocab))", "Total number of tokens in vocabulary: 8852\n" ] ], [ [ "As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader. ", "_____no_output_____" ] ], [ [ "# # Modify the minimum word count threshold.\n# vocab_threshold = 4\n\n# # Obtain the data loader.\n# data_loader_wrapper = DataLoaderWrapper(\n# transform=transform_train,\n# batch_size_for_training=batch_size,\n# vocab_threshold=vocab_threshold,\n# vocab_from_file=False,\n# )", "_____no_output_____" ], [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader_wrapper.dataset_for_training.vocab))", "Total number of tokens in vocabulary: 8852\n" ] ], [ [ "There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`\"<start>\"`) and special end word (`\"<end>\"`). There is one more special token, corresponding to unknown words (`\"<unk>\"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.", "_____no_output_____" ] ], [ [ "unk_word = data_loader_wrapper.dataset_for_training.vocab.unk_word\nprint('Special unknown word:', unk_word)\n\nprint('All unknown words are mapped to this integer:', data_loader_wrapper.dataset_for_training.vocab(unk_word))", "Special unknown word: <unk>\nAll unknown words are mapped to this integer: 2\n" ] ], [ [ "Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions. ", "_____no_output_____" ] ], [ [ "print(data_loader_wrapper.dataset_for_training.vocab('jfkafejw'))\nprint(data_loader_wrapper.dataset_for_training.vocab('ieowoqjf'))", "2\n2\n" ] ], [ [ "The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.\n\nIf you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect. 
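For example (the threshold value 6 here is purely illustrative):\n\n```python\n# Rebuild the vocabulary with a new threshold; slow, but required after changes.\ndata_loader_wrapper = DataLoaderWrapper(\n    transform=transform_train,\n    batch_size_for_training=batch_size,\n    vocab_threshold=6,\n    vocab_from_file=False,\n)\n```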
\n\nBut once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.\n\nNote that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.", "_____no_output_____" ] ], [ [ "# # Obtain the data loader (from file). Note that it runs much faster than before!\n# data_loader_wrapper = DataLoaderWrapper(\n# transform=transform_train,\n# batch_size_for_training=batch_size,\n# vocab_threshold=None,\n# vocab_from_file=True,\n# )", "_____no_output_____" ] ], [ [ "In the next section, you will learn how to use the data loader to obtain batches of training data.", "_____no_output_____" ], [ "<a id='step2'></a>\n## Step 2: Use the Data Loader to Obtain Batches\n\nThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). \n\nIn the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare. ", "_____no_output_____" ] ], [ [ "from collections import Counter\n\n# Tally the total number of training captions with each length.\ncounter = Counter(data_loader_wrapper.dataset_for_training.caption_lengths)\nlengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\nfor value, count in lengths:\n print('value: %2d --- count: %5d' % (value, count))", "value: 13 --- count: 3\nvalue: 11 --- count: 3\nvalue: 10 --- count: 1\nvalue: 14 --- count: 1\nvalue: 18 --- count: 1\nvalue: 12 --- count: 1\n" ] ], [ [ "To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.\n\nRun the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.\n\nThese indices are supplied to the data loader, which then is used to retrieve the corresponding data points. 
The pre-processed images and captions in the batch are stored in `images` and `captions`.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch.utils.data as data\n\n# Randomly sample a caption length, and sample indices with that length.\nindices = data_loader_wrapper.dataset_for_training.get_train_indices()\nprint('sampled indices:', indices)\n    \n# Obtain the batch.\nimages, captions = next(iter(data_loader_wrapper.get_data_loader_for_training()))\n    \nprint('images.shape:', images.shape)\nprint('captions.shape:', captions.shape)\n\n# (Optional) Uncomment the lines of code below to print the pre-processed images and captions.\n# print('images:', images)\n# print('captions:', captions)", "sampled indices: [272, 272, 255, 148, 272, 272, 272, 272, 148, 255]\nimages.shape: torch.Size([10, 3, 224, 224])\ncaptions.shape: torch.Size([10, 13])\n" ] ], [ [ "Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!\n\nYou will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.\n\n> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__\n\nIn the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.", "_____no_output_____" ], [ "<a id='step3'></a>\n## Step 3: Experiment with the CNN Encoder\n\nRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**. ", "_____no_output_____" ] ], [ [ "# Watch for any changes in model.py, and re-load it automatically.\n%load_ext autoreload\n%autoreload 2\n\n# Import EncoderCNN and DecoderRNN. \nfrom model import EncoderCNN, DecoderRNN", "_____no_output_____" ] ], [ [ "In the next code cell, we define a `device` that you will use to move PyTorch tensors to GPU (if CUDA is available). Run this code cell before continuing.", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ] ], [ [ "Run the code cell below to instantiate the CNN encoder in `encoder`. \n\nThe pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.", "_____no_output_____" ] ], [ [ "# Specify the dimensionality of the image embedding.\nembed_size = 256\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Initialize the encoder. (Optional: Add additional arguments if necessary.)\nencoder = EncoderCNN(embed_size)\n\n# Move the encoder to GPU if CUDA is available.\nencoder.to(device)\n    \n# Move last batch of images (from Step 2) to GPU if CUDA is available.   \nimages = images.to(device)\n\n# Pass the images through the encoder.\nfeatures = encoder(images)\n\nprint('type(features):', type(features))\nprint('features.shape:', features.shape)\n\n# Check that your encoder satisfies some requirements of the project! 
:D\nassert type(features)==torch.Tensor, \"Encoder output needs to be a PyTorch Tensor.\" \nassert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), \"The shape of the encoder output is incorrect.\"", "Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /home/hogan/.cache/torch/hub/checkpoints/resnet50-19c8e357.pth\n" ] ], [ [ "The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.\n\n![Encoder](images/encoder.png)\n\nYou are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers). \n\n> You are **not** required to change anything about the encoder.\n\nFor this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.\n\nIf you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.", "_____no_output_____" ], [ "<a id='step4'></a>\n## Step 4: Implement the RNN Decoder\n\nBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)\n\n> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.\n\nYour decoder will be an instance of the `DecoderRNN` class and must accept as input:\n- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with\n- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.\n\nNote that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. \n> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. 
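\n\nAs one illustrative sketch (not a required design - the single LSTM layer and the choice to feed the image feature as the first time step are assumptions, not project requirements):\n\n```python\nimport torch\nimport torch.nn as nn\n\nclass DecoderRNN(nn.Module):\n    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):\n        super(DecoderRNN, self).__init__()\n        self.embed = nn.Embedding(vocab_size, embed_size)\n        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n        self.linear = nn.Linear(hidden_size, vocab_size)\n\n    def forward(self, features, captions):\n        # drop the <end> token so the output length matches captions.shape[1]\n        embeddings = self.embed(captions[:, :-1])\n        # prepend the embedded image feature as the first input to the LSTM\n        inputs = torch.cat((features.unsqueeze(1), embeddings), dim=1)\n        hiddens, _ = self.lstm(inputs)\n        # scores with shape [batch_size, captions.shape[1], vocab_size]\n        return self.linear(hiddens)\n```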
\n\nAlthough you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. \n\n![Decoder](images/decoder.png)\n\nIn the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) optimizer in PyTorch.", "_____no_output_____" ] ], [ [ "# Specify the number of features in the hidden state of the RNN decoder.\nhidden_size = 512\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Store the size of the vocabulary.\nvocab_size = len(data_loader_wrapper.dataset_for_training.vocab)\n\n# Initialize the decoder.\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move the decoder to GPU if CUDA is available.\ndecoder.to(device)\n \n# Move last batch of captions (from Step 1) to GPU if CUDA is available \ncaptions = captions.to(device)\n\n# Pass the encoder output and captions through the decoder.\noutputs = decoder(features, captions)\n\nprint('type(outputs):', type(outputs))\nprint('outputs.shape:', outputs.shape)\n\n# Check that your decoder satisfies some requirements of the project! :D\nassert type(outputs)==torch.Tensor, \"Decoder output needs to be a PyTorch Tensor.\"\nassert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), \"The shape of the decoder output is incorrect.\"", "type(outputs): <class 'torch.Tensor'>\noutputs.shape: torch.Size([10, 13, 8852])\n" ] ], [ [ "When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ec5d9d32b9ecc6b002f0aabb9e270193643c2650
30,469
ipynb
Jupyter Notebook
cnn/motif_discovery.ipynb
solislemuslab/dna-nn-theory
9e996d5f453e1d620dadca0c276cb4a68e2b68e5
[ "MIT" ]
1
2021-06-02T22:27:46.000Z
2021-06-02T22:27:46.000Z
cnn/motif_discovery.ipynb
solislemuslab/dna-nn-theory
9e996d5f453e1d620dadca0c276cb4a68e2b68e5
[ "MIT" ]
null
null
null
cnn/motif_discovery.ipynb
solislemuslab/dna-nn-theory
9e996d5f453e1d620dadca0c276cb4a68e2b68e5
[ "MIT" ]
1
2020-07-08T19:53:30.000Z
2020-07-08T19:53:30.000Z
66.671772
16,588
0.396206
[ [ [ "!pip install -q biopython\n\n%load_ext autoreload\n%autoreload 2\n\nfrom google.colab import drive\ndrive.mount('/content/drive')", "\u001b[?25l\r\u001b[K |▏ | 10kB 23.6MB/s eta 0:00:01\r\u001b[K |▎ | 20kB 16.4MB/s eta 0:00:01\r\u001b[K |▍ | 30kB 14.0MB/s eta 0:00:01\r\u001b[K |▋ | 40kB 12.9MB/s eta 0:00:01\r\u001b[K |▊ | 51kB 8.9MB/s eta 0:00:01\r\u001b[K |▉ | 61kB 9.3MB/s eta 0:00:01\r\u001b[K |█ | 71kB 9.5MB/s eta 0:00:01\r\u001b[K |█▏ | 81kB 10.5MB/s eta 0:00:01\r\u001b[K |█▎ | 92kB 9.4MB/s eta 0:00:01\r\u001b[K |█▌ | 102kB 8.6MB/s eta 0:00:01\r\u001b[K |█▋ | 112kB 8.6MB/s eta 0:00:01\r\u001b[K |█▊ | 122kB 8.6MB/s eta 0:00:01\r\u001b[K |██ | 133kB 8.6MB/s eta 0:00:01\r\u001b[K |██ | 143kB 8.6MB/s eta 0:00:01\r\u001b[K |██▏ | 153kB 8.6MB/s eta 0:00:01\r\u001b[K |██▎ | 163kB 8.6MB/s eta 0:00:01\r\u001b[K |██▌ | 174kB 8.6MB/s eta 0:00:01\r\u001b[K |██▋ | 184kB 8.6MB/s eta 0:00:01\r\u001b[K |██▊ | 194kB 8.6MB/s eta 0:00:01\r\u001b[K |███ | 204kB 8.6MB/s eta 0:00:01\r\u001b[K |███ | 215kB 8.6MB/s eta 0:00:01\r\u001b[K |███▏ | 225kB 8.6MB/s eta 0:00:01\r\u001b[K |███▍ | 235kB 8.6MB/s eta 0:00:01\r\u001b[K |███▌ | 245kB 8.6MB/s eta 0:00:01\r\u001b[K |███▋ | 256kB 8.6MB/s eta 0:00:01\r\u001b[K |███▉ | 266kB 8.6MB/s eta 0:00:01\r\u001b[K |████ | 276kB 8.6MB/s eta 0:00:01\r\u001b[K |████ | 286kB 8.6MB/s eta 0:00:01\r\u001b[K |████▏ | 296kB 8.6MB/s eta 0:00:01\r\u001b[K |████▍ | 307kB 8.6MB/s eta 0:00:01\r\u001b[K |████▌ | 317kB 8.6MB/s eta 0:00:01\r\u001b[K |████▋ | 327kB 8.6MB/s eta 0:00:01\r\u001b[K |████▉ | 337kB 8.6MB/s eta 0:00:01\r\u001b[K |█████ | 348kB 8.6MB/s eta 0:00:01\r\u001b[K |█████ | 358kB 8.6MB/s eta 0:00:01\r\u001b[K |█████▎ | 368kB 8.6MB/s eta 0:00:01\r\u001b[K |█████▍ | 378kB 8.6MB/s eta 0:00:01\r\u001b[K |█████▌ | 389kB 8.6MB/s eta 0:00:01\r\u001b[K |█████▊ | 399kB 8.6MB/s eta 0:00:01\r\u001b[K |█████▉ | 409kB 8.6MB/s eta 0:00:01\r\u001b[K |██████ | 419kB 8.6MB/s eta 0:00:01\r\u001b[K |██████ | 430kB 8.6MB/s eta 0:00:01\r\u001b[K |██████▎ | 440kB 8.6MB/s eta 0:00:01\r\u001b[K |██████▍ | 450kB 8.6MB/s eta 0:00:01\r\u001b[K |██████▌ | 460kB 8.6MB/s eta 0:00:01\r\u001b[K |██████▊ | 471kB 8.6MB/s eta 0:00:01\r\u001b[K |██████▉ | 481kB 8.6MB/s eta 0:00:01\r\u001b[K |███████ | 491kB 8.6MB/s eta 0:00:01\r\u001b[K |███████▏ | 501kB 8.6MB/s eta 0:00:01\r\u001b[K |███████▎ | 512kB 8.6MB/s eta 0:00:01\r\u001b[K |███████▍ | 522kB 8.6MB/s eta 0:00:01\r\u001b[K |███████▋ | 532kB 8.6MB/s eta 0:00:01\r\u001b[K |███████▊ | 542kB 8.6MB/s eta 0:00:01\r\u001b[K |███████▉ | 552kB 8.6MB/s eta 0:00:01\r\u001b[K |████████ | 563kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▏ | 573kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▎ | 583kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▍ | 593kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▋ | 604kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▊ | 614kB 8.6MB/s eta 0:00:01\r\u001b[K |████████▉ | 624kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████ | 634kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████▏ | 645kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████▎ | 655kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████▌ | 665kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████▋ | 675kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████▊ | 686kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████▉ | 696kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████ | 706kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████▏ | 716kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████▎ | 727kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████▌ | 737kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████▋ | 747kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████▊ | 757kB 8.6MB/s eta 0:00:01\r\u001b[K 
|███████████ | 768kB 8.6MB/s eta 0:00:01\r\u001b[K |███████████ | 778kB 8.6MB/s eta 0:00:01\r\u001b[K |███████████▏ | 788kB 8.6MB/s eta 0:00:01\r\u001b[K |███████████▍ | 798kB 8.6MB/s eta 0:00:01\r\u001b[K |███████████▌ | 808kB 8.6MB/s eta 0:00:01\r\u001b[K |███████████▋ | 819kB 8.6MB/s eta 0:00:01\r\u001b[K |███████████▉ | 829kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████ | 839kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████ | 849kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████▏ | 860kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████▍ | 870kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████▌ | 880kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████▋ | 890kB 8.6MB/s eta 0:00:01\r\u001b[K |████████████▉ | 901kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████ | 911kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████ | 921kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████▎ | 931kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████▍ | 942kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████▌ | 952kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 962kB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 972kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████ | 983kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████ | 993kB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████▎ | 1.0MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████▍ | 1.0MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████▌ | 1.0MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████▊ | 1.0MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 1.0MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████▎ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████▋ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████▉ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████▏ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████▎ | 1.1MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████▍ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████▋ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████▊ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████▉ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████▏ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████▎ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████▋ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████▊ | 1.2MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████▉ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████▏ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████▎ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████▌ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████▊ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████ | 1.3MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████▏ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████▍ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████▌ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████▋ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████▊ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████ | 1.4MB 8.6MB/s 
eta 0:00:01\r\u001b[K |████████████████████▏ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████▍ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████▌ | 1.4MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████▋ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████▉ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▍ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▌ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▊ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▉ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 1.5MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████▎ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████▍ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████▌ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████▊ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████▉ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████▏ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████▎ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 1.6MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████▋ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████▊ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████▉ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████▏ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████▍ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████▋ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████▊ | 1.7MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████▏ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████▎ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████▌ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████▋ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████▊ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████▉ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████▏ | 1.8MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████▎ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████▋ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████▊ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████▏ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████▌ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████▋ | 1.9MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████▊ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 2.0MB 8.6MB/s eta 
0:00:01\r\u001b[K |████████████████████████████▏ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████▍ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████▌ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████▋ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 2.0MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▎ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▍ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▌ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▉ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▎ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▍ | 2.1MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▌ | 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▉ | 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████ | 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▎| 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▍| 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▋| 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▊| 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 2.2MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 2.3MB 8.6MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 2.3MB 8.6MB/s \n\u001b[?25hDrive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "DATA_DIR = '/content/drive/MyDrive/data/chip-seq/'\nLOG_DIR = '/content/drive/MyDrive/dna-nn/results/'", "_____no_output_____" ], [ "!cp -r '/content/drive/MyDrive/dna-nn/dna_nn' .\n!ls dna_nn", "dataset.py download.py load.py model.py\n" ], [ "import gc\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom tensorflow import keras\n\nfrom dna_nn.dataset import motif_discovery, motif_discovery_raw\nfrom dna_nn.model import models, evaluate", "_____no_output_____" ] ], [ [ "# 2D features", "_____no_output_____" ] ], [ [ "x_shape, train_ds, validation_data, test_ds = motif_discovery(DATA_DIR + 'motif_discovery_train.fasta', \n DATA_DIR + 'motif_discovery_test.fasta')", "_____no_output_____" ], [ "models", "_____no_output_____" ], [ "key = 'cnn_zeng_4_conv2d_l2'\ndataset = 'motif_discovery'\n\nkeras.backend.clear_session()\nmodel = models[key](x_shape)\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy')\nmodel.summary()", "_____no_output_____" ], [ "csv_path = LOG_DIR + f'{key}-{dataset}-dynamics.csv'\nmodel_path = LOG_DIR + f'{key}-{dataset}.h5'\n\ncallbacks = [\n keras.callbacks.CSVLogger(csv_path),\n keras.callbacks.LambdaCallback(\n on_epoch_end=lambda epoch, logs: gc.collect(),\n # on_train_end=lambda logs: model.save(model_path)\n ),\n keras.callbacks.ModelCheckpoint(model_path, save_best_only=True)\n]\n\nhistory = model.fit(train_ds, epochs=50, 
validation_data=validation_data,\n callbacks=callbacks, verbose=3)", "_____no_output_____" ], [ "model = keras.models.load_model(model_path)\ntest_loss, test_accuracy = model.evaluate(test_ds, verbose=3)\n\ny_score = model.predict(test_ds)\ny_true = [int(y) for x, y in test_ds.unbatch()]\nevaluate(model, history, test_accuracy, y_score, y_true, LOG_DIR, key, dataset)", "_____no_output_____" ], [ "models", "_____no_output_____" ], [ "from sklearn.metrics import roc_curve, precision_recall_curve\nimport numpy as np\n\ndataset = 'motif_discovery'\nkey = 'cnn_zeng_4_conv2d'\nmodel_path = LOG_DIR + f'{key}-motif_discovery.h5'\n\nmodel = keras.models.load_model(model_path)\ntest_loss, test_accuracy = model.evaluate(test_ds, verbose=3)\n\ny_score = model.predict(test_ds)\ny_true = [int(y) for x, y in test_ds.unbatch()]\n\nprecision, recall, thresholds = precision_recall_curve(y_true, y_score)\npr = pd.DataFrame({\n 'precision': precision,\n 'recall': recall,\n 'thresholds': np.append(thresholds, np.nan)\n})\npr.to_csv(LOG_DIR + f'{key}-{dataset}-pr.csv', index=False)", "_____no_output_____" ] ], [ [ "# 1D features", "_____no_output_____" ] ], [ [ "x_shape, train_ds, validation_data, test_ds = motif_discovery(DATA_DIR + 'motif_discovery_train.fasta', \n DATA_DIR + 'motif_discovery_test.fasta',\n 3, 2, False)", "_____no_output_____" ], [ "models", "_____no_output_____" ], [ "key = 'deepram_recurrent_onehot'\ndataset = 'motif_discovery'\n\nkeras.backend.clear_session()\nmodel = models[key](x_shape)\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy')\nmodel.summary()", "_____no_output_____" ], [ "csv_path = LOG_DIR + f'{key}-{dataset}-dynamics.csv'\nmodel_path = LOG_DIR + f'{key}-{dataset}.h5'\n\ncallbacks = [\n keras.callbacks.CSVLogger(csv_path),\n keras.callbacks.LambdaCallback(\n on_epoch_end=lambda epoch, logs: gc.collect(),\n # on_train_end=lambda logs: model.save(model_path)\n ),\n keras.callbacks.ModelCheckpoint(model_path, save_best_only=True)\n]\n\nhistory = model.fit(train_ds, epochs=50, validation_data=validation_data,\n callbacks=callbacks, verbose=3)", "_____no_output_____" ], [ "model = keras.models.load_model(model_path)\ntest_loss, test_accuracy = model.evaluate(test_ds, verbose=3)\n\ny_score = model.predict(test_ds)\ny_true = [int(y) for x, y in test_ds.unbatch()]\nevaluate(model, history, test_accuracy, y_score, y_true, LOG_DIR, key, dataset)", "_____no_output_____" ] ], [ [ "# Raw features", "_____no_output_____" ] ], [ [ "x_shape, train_ds, validation_data, test_ds = motif_discovery_raw(DATA_DIR + 'motif_discovery_train.fasta', \n DATA_DIR + 'motif_discovery_test.fasta')", "_____no_output_____" ], [ "models", "_____no_output_____" ], [ "key = 'deepram_recurrent_embed'\ndataset = 'motif_discovery'\n\nkeras.backend.clear_session()\nmodel = models[key](x_shape)\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy')\nmodel.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding (Embedding) (None, 101, 256) 25856 \n_________________________________________________________________\ndropout (Dropout) (None, 101, 256) 0 \n_________________________________________________________________\nlstm (LSTM) (None, 101, 64) 82176 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 128) 98816 
\n_________________________________________________________________\ndense (Dense) (None, 128) 16512 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 129 \n=================================================================\nTotal params: 223,489\nTrainable params: 223,489\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "csv_path = LOG_DIR + f'{key}-{dataset}-dynamics.csv'\nmodel_path = LOG_DIR + f'{key}-{dataset}.h5'\n\ncallbacks = [\n keras.callbacks.CSVLogger(csv_path),\n keras.callbacks.LambdaCallback(\n on_epoch_end=lambda epoch, logs: gc.collect(),\n # on_train_end=lambda logs: model.save(model_path)\n ),\n keras.callbacks.ModelCheckpoint(model_path, save_best_only=True)\n]\n\nhistory = model.fit(train_ds, epochs=50, validation_data=validation_data,\n callbacks=callbacks, verbose=3)", "_____no_output_____" ], [ "model = keras.models.load_model(model_path)\ntest_loss, test_accuracy = model.evaluate(test_ds, verbose=3)\n\ny_score = model.predict(test_ds)\ny_true = [int(y) for x, y in test_ds.unbatch()]\nevaluate(model, history, test_accuracy, y_score, y_true, LOG_DIR, key, dataset)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
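The cells above build every network through `models[key](x_shape)`, but the `dna_nn.model` module itself is not included in this dump. As a rough illustration only, the registry is presumably a dict mapping model names to builder functions that take the input shape; a minimal sketch under that assumption (layer choices and the helper name are guesses, not the real `dna_nn` code):

```python
from tensorflow import keras

# Hypothetical sketch of the `models` registry used by the notebook above;
# dna_nn.model is not shown here, so names and layer sizes are assumptions.
def _cnn_zeng_4_conv2d(x_shape):
    # A small Conv2D binary classifier over one-hot-encoded DNA windows.
    return keras.Sequential([
        keras.layers.Conv2D(64, (4, 4), activation='relu', input_shape=x_shape),
        keras.layers.GlobalMaxPooling2D(),
        keras.layers.Dense(32, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid'),  # binary motif label
    ])

models = {'cnn_zeng_4_conv2d': _cnn_zeng_4_conv2d}

# Usage then mirrors the notebook: model = models[key](x_shape)
```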
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
ec5da04a40ffc41496670495cc88af86457e9866
180,899
ipynb
Jupyter Notebook
dlnd_image_classification.ipynb
jorcus/DLND-Image-Classification
becfc44b509d56abfb1f0d2188eb5d75c3deed66
[ "MIT" ]
null
null
null
dlnd_image_classification.ipynb
jorcus/DLND-Image-Classification
becfc44b509d56abfb1f0d2188eb5d75c3deed66
[ "MIT" ]
null
null
null
dlnd_image_classification.ipynb
jorcus/DLND-Image-Classification
becfc44b509d56abfb1f0d2188eb5d75c3deed66
[ "MIT" ]
null
null
null
111.666049
64,710
0.79614
[ [ [ "# Image Classification\nIn this project, you'll classify images from the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images.\n## Get the Data\nRun the following cell to download the [CIFAR-10 dataset for python](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz).", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nfrom urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport problem_unittests as tests\nimport tarfile\n\ncifar10_dataset_folder_path = 'cifar-10-batches-py'\n\n# Use Floyd's cifar-10 dataset if present\nfloyd_cifar10_location = '/input/cifar-10/python.tar.gz'\nif isfile(floyd_cifar10_location):\n tar_gz_path = floyd_cifar10_location\nelse:\n tar_gz_path = 'cifar-10-python.tar.gz'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(tar_gz_path):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:\n urlretrieve(\n 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tar_gz_path,\n pbar.hook)\n\nif not isdir(cifar10_dataset_folder_path):\n with tarfile.open(tar_gz_path) as tar:\n tar.extractall()\n tar.close()\n\n\ntests.test_folder_path(cifar10_dataset_folder_path)", "All files found!\n" ] ], [ [ "## Explore the Data\nThe dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc.. Each batch contains the labels and images that are one of the following:\n* airplane\n* automobile\n* bird\n* cat\n* deer\n* dog\n* frog\n* horse\n* ship\n* truck\n\nUnderstanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). The `sample_id` is the id for a image and label pair in the batch.\n\nAsk yourself \"What are all possible labels?\", \"What is the range of values for the image data?\", \"Are the labels in order or random?\". 
Answers to questions like these will help you preprocess the data and end up with better predictions.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\nimport numpy as np\n\n# Explore the dataset\nbatch_id = 5\nsample_id = 3\nhelper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)", "\nStats of batch 5:\nSamples: 10000\nLabel Counts: {0: 1014, 1: 1014, 2: 952, 3: 1016, 4: 997, 5: 1025, 6: 980, 7: 977, 8: 1003, 9: 1022}\nFirst 20 Labels: [1, 8, 5, 1, 5, 7, 4, 3, 8, 2, 7, 2, 0, 1, 5, 9, 6, 2, 0, 8]\n\nExample of Image 3:\nImage - Min Value: 0 Max Value: 252\nImage - Shape: (32, 32, 3)\nLabel - Label Id: 1 Name: automobile\n" ] ], [ [ "## Implement Preprocess Functions\n### Normalize\nIn the cell below, implement the `normalize` function to take in image data, `x`, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as `x`.", "_____no_output_____" ] ], [ [ "def normalize(image_data):\n    \"\"\"\n    Normalize the image data with Min-Max scaling to a range of [a, b]\n    :param image_data: The image data to be normalized\n    :return: Normalized image data\n    \"\"\"\n    a = -0.5\n    b = 0.5\n    min_val = 0\n    max_val = 255\n    return a + ( ( (image_data - min_val)*(b - a) )/( max_val - min_val ) )\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_normalize(normalize)", "Tests Passed\n" ] ], [ [ "### One-hot encode\nJust like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the `one_hot_encode` function. The input, `x`, is a list of labels. Implement the function to return the list of labels as a one-hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to `one_hot_encode`. Make sure to save the map of encodings outside the function.\n\nHint: Don't reinvent the wheel.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import LabelBinarizer\ndef one_hot_encode(x):\n    \"\"\"\n    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.\n    : x: List of sample Labels\n    : return: Numpy array of one-hot encoded labels\n    \"\"\"\n    # TODO: Implement Function\n    lb = LabelBinarizer()\n    lb.fit(np.arange(0, 10))\n    return lb.transform(x)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_one_hot_encode(one_hot_encode)", "Tests Passed\n" ] ], [ [ "### Randomize Data\nAs you saw from exploring the data above, the order of the samples is randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset.", "_____no_output_____", "## Preprocess all the data and save it\nRunning the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)", "_____no_output_____" ] ],
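One thing worth noticing about the `normalize` implementation above: the spec asks for a [0, 1] range, while this code scales to [-0.5, 0.5] — a zero-centered range that the unit test evidently accepts ("Tests Passed"). A quick endpoint check of the Min-Max formula, as a self-contained sanity sketch:

```python
import numpy as np

# Endpoint check for Min-Max scaling with a = -0.5, b = 0.5 over [0, 255]:
# a + (x - min_val) * (b - a) / (max_val - min_val) maps
#   0 -> -0.5, 127.5 -> 0.0, 255 -> 0.5.
a, b, min_val, max_val = -0.5, 0.5, 0, 255
x = np.array([0, 127.5, 255])
print(a + (x - min_val) * (b - a) / (max_val - min_val))  # [-0.5  0.   0.5]
```

[ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. 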
The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport pickle\nimport problem_unittests as tests\nimport helper\n\n# Load the Preprocessed Validation data\nvalid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))", "_____no_output_____" ] ], [ [ "## Build the network\nFor the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.\n\n>**Note:** If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages to build each layer, except the layers you build in the \"Convolutional and Max Pooling Layer\" section. TF Layers is similar to Keras's and TFLearn's abstraction of layers, so it's easy to pick up.\n\n>However, if you would like to get the most out of this course, try to solve all the problems _without_ using anything from the TF Layers packages. You **can** still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the `conv2d` class, [tf.layers.conv2d](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d), you would want to use the TF Neural Network version of `conv2d`, [tf.nn.conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d). 
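To make the contrast between the two APIs concrete, here is the same convolution written both ways — a sketch using the TF 1.x signatures referenced above:

```python
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3])

# TF Layers shortcut: the filter variable is created for you.
shortcut = tf.layers.conv2d(images, filters=16, kernel_size=(3, 3),
                            strides=(1, 1), padding='same')

# tf.nn route: you create (and shape) the filter variable yourself.
weights = tf.Variable(tf.truncated_normal([3, 3, 3, 16], stddev=0.1))
manual = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
```

\n\nLet's begin!\n\n### Input\nThe neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. 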
Implement the following functions\n* Implement `neural_net_image_input`\n * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)\n * Set the shape using `image_shape` with batch size set to `None`.\n * Name the TensorFlow placeholder \"x\" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).\n* Implement `neural_net_label_input`\n * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)\n * Set the shape using `n_classes` with batch size set to `None`.\n * Name the TensorFlow placeholder \"y\" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).\n* Implement `neural_net_keep_prob_input`\n * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) for dropout keep probability.\n * Name the TensorFlow placeholder \"keep_prob\" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).\n\nThese names will be used at the end of the project to load your saved model.\n\nNote: `None` for shapes in TensorFlow allow for a dynamic size.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\ndef neural_net_image_input(image_shape):\n \"\"\"\n Return a Tensor for a batch of image input\n : image_shape: Shape of the images\n : return: Tensor for image input.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder(tf.float32, shape=[None, *image_shape], name='x')\n\n\ndef neural_net_label_input(n_classes):\n \"\"\"\n Return a Tensor for a batch of label input\n : n_classes: Number of classes\n : return: Tensor for label input.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder(tf.float32, shape=[None, n_classes], name='y')\n\n\ndef neural_net_keep_prob_input():\n \"\"\"\n Return a Tensor for keep probability\n : return: Tensor for keep probability.\n \"\"\"\n # TODO: Implement Function\n return tf.placeholder(tf.float32, name='keep_prob')\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntf.reset_default_graph()\ntests.test_nn_image_inputs(neural_net_image_input)\ntests.test_nn_label_inputs(neural_net_label_input)\ntests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)", "Image Input Tests Passed.\nLabel Input Tests Passed.\nKeep Prob Tests Passed.\n" ] ], [ [ "### Convolution and Max Pooling Layer\nConvolution layers have a lot of success with images. For this code cell, you should implement the function `conv2d_maxpool` to apply convolution then max pooling:\n* Create the weight and bias using `conv_ksize`, `conv_num_outputs` and the shape of `x_tensor`.\n* Apply a convolution to `x_tensor` using weight and `conv_strides`.\n * We recommend you use same padding, but you're welcome to use any padding.\n* Add bias\n* Add a nonlinear activation to the convolution.\n* Apply Max Pooling using `pool_ksize` and `pool_strides`.\n * We recommend you use same padding, but you're welcome to use any padding.\n\n**Note:** You **can't** use [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) for **this** layer, but you can still use TensorFlow's [Neural Network](https://www.tensorflow.org/api_docs/python/tf/nn) package. 
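One sizing detail that helps while implementing this layer: with 'SAME' padding, the spatial output of both the convolution and the max pool is ceil(input / stride). A quick check of how a 32x32 input shrinks:

```python
import math

# 'SAME' padding: output size = ceil(input size / stride),
# for the convolution and for the max pool alike.
def same_out(size, stride):
    return math.ceil(size / stride)

h = same_out(32, 1)   # conv, stride 1 -> 32
h = same_out(h, 2)    # 2x2 max pool, stride 2 -> 16
print(h)              # 16
```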
You may still use the shortcut option for all the **other** layers.", "_____no_output_____" ] ], [ [ "def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):\n    \"\"\"\n    Apply convolution then max pooling to x_tensor\n    :param x_tensor: TensorFlow Tensor\n    :param conv_num_outputs: Number of outputs for the convolutional layer\n    :param conv_ksize: kernel size 2-D Tuple for the convolutional layer\n    :param conv_strides: Stride 2-D Tuple for convolution\n    :param pool_ksize: kernel size 2-D Tuple for pool\n    :param pool_strides: Stride 2-D Tuple for pool\n    : return: A tensor that represents convolution and max pooling of x_tensor\n    \"\"\"\n    input_shape = x_tensor.get_shape().as_list()\n\n    filter_weights = tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], input_shape[3], conv_num_outputs], mean=0, stddev=0.1))\n    filter_bias = tf.Variable(tf.zeros([conv_num_outputs]))\n    strides = [1, conv_strides[0], conv_strides[1], 1]\n    padding = 'SAME'\n    conv = tf.nn.conv2d(x_tensor, filter_weights, strides, padding)\n    conv = tf.nn.bias_add(conv, filter_bias)\n    conv = tf.nn.relu(conv)\n    \n    ksize = [1, pool_ksize[0], pool_ksize[1], 1]\n    strides = [1, pool_strides[0], pool_strides[1], 1]\n    padding = 'SAME'\n    return tf.nn.max_pool(conv, ksize, strides, padding=padding)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_con_pool(conv2d_maxpool)", "Tests Passed\n" ] ], [ [ "### Flatten Layer\nImplement the `flatten` function to change the dimension of `x_tensor` from a 4-D tensor to a 2-D tensor. The output should be the shape (*Batch Size*, *Flattened Image Size*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.", "_____no_output_____" ] ], [ [ "def flatten(x_tensor):\n    \"\"\"\n    Flatten x_tensor to (Batch Size, Flattened Image Size)\n    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.\n    : return: A tensor of size (Batch Size, Flattened Image Size).\n    \"\"\"\n    # TODO: Implement Function\n    return tf.contrib.layers.flatten(x_tensor)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_flatten(flatten)", "Tests Passed\n" ] ],
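For the challenge route mentioned above, `flatten` can also be written without contrib, using only the static shape and `tf.reshape` — a sketch (the function name is illustrative):

```python
import numpy as np
import tensorflow as tf

# Contrib-free flatten: compute the flattened size from the static shape
# and reshape, leaving the batch dimension (-1) dynamic.
def flatten_manual(x_tensor):
    dims = x_tensor.get_shape().as_list()[1:]            # e.g. [4, 4, 16]
    return tf.reshape(x_tensor, [-1, int(np.prod(dims))])
```

[ [ "### Fully-Connected Layer\nImplement the `fully_conn` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. 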
For more of a challenge, only use other TensorFlow packages.", "_____no_output_____" ] ], [ [ "def fully_conn(x_tensor, num_outputs):\n    \"\"\"\n    Apply a fully connected layer to x_tensor using weight and bias\n    : x_tensor: A 2-D tensor where the first dimension is batch size.\n    : num_outputs: The number of outputs that the new tensor should have.\n    : return: A 2-D tensor where the second dimension is num_outputs.\n    \"\"\"\n    # TODO: Implement Function\n    # tf.layers.dense(x_tensor, num_outputs, activation=None)\n    return tf.contrib.layers.fully_connected(x_tensor, num_outputs, activation_fn=None)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_fully_conn(fully_conn)", "Tests Passed\n" ] ], [ [ "### Output Layer\nImplement the `output` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.\n\n**Note:** Activation, softmax, or cross entropy should **not** be applied to this.", "_____no_output_____" ] ], [ [ "def output(x_tensor, num_outputs):\n    \"\"\"\n    Apply an output layer to x_tensor using weight and bias\n    : x_tensor: A 2-D tensor where the first dimension is batch size.\n    : num_outputs: The number of outputs that the new tensor should have.\n    : return: A 2-D tensor where the second dimension is num_outputs.\n    \"\"\"\n    # TODO: Implement Function\n    # tf.layers.dense(x_tensor, num_outputs, activation=None)\n    return tf.contrib.layers.fully_connected(x_tensor, num_outputs, activation_fn=None)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_output(output)", "Tests Passed\n" ] ],
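Likewise, the fully-connected and output layers above have a straightforward contrib-free form — a matmul plus a bias — sketched here (the name is illustrative; drop the ReLU for the output layer, which must stay linear):

```python
import tensorflow as tf

# Contrib-free fully connected layer: explicit weight and bias variables.
def fully_conn_manual(x_tensor, num_outputs):
    n_inputs = x_tensor.get_shape().as_list()[1]
    weights = tf.Variable(tf.truncated_normal([n_inputs, num_outputs], stddev=0.1))
    bias = tf.Variable(tf.zeros([num_outputs]))
    logits = tf.add(tf.matmul(x_tensor, weights), bias)
    return tf.nn.relu(logits)   # omit this activation for the output layer
```

[ [ "### Create Convolutional Model\nImplement the function `conv_net` to create a convolutional neural network model. The function takes in a batch of images, `x`, and outputs logits. Use the layers you created above to create this model:\n\n* Apply 1, 2, or 3 Convolution and Max Pool layers\n* Apply a Flatten Layer\n* Apply 1, 2, or 3 Fully Connected Layers\n* Apply an Output Layer\n* Return the output\n* Apply [TensorFlow's Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout) to one or more layers in the model using `keep_prob`. 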
", "_____no_output_____" ] ], [ [ "def conv_net(x, keep_prob):\n \"\"\"\n Create a convolutional neural network model\n : x: Placeholder tensor that holds image data.\n : keep_prob: Placeholder tensor that hold dropout keep probability.\n : return: Tensor that represents logits\n \"\"\"\n \n num_outputs = 10 # [airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck]\n conv_ksize, conv_strides = [3,3], [1,1]\n pool_ksize, pool_strides = [2,2], [2,2]\n \n conv1 = conv2d_maxpool(x, 32, conv_ksize, conv_strides, pool_ksize, pool_strides)\n conv2 = conv2d_maxpool(conv1, 24, conv_ksize, conv_strides, pool_ksize, pool_strides)\n conv3 = conv2d_maxpool(conv2, 16, conv_ksize, conv_strides, pool_ksize, pool_strides)\n\n # TODO: Apply a Flatten Layer\n # Function Definition from Above:\n # flatten(x_tensor)\n f1 = flatten(conv3)\n \n # TODO: Apply 1, 2, or 3 Fully Connected Layers\n # Play around with different number of outputs\n # Function Definition from Above:\n # fully_conn(x_tensor, num_outputs)\n fc1 = fully_conn(f1, num_outputs)\n fc1 = tf.nn.dropout(fc1, keep_prob)\n\n # TODO: Apply an Output Layer\n # Set this to the number of classes\n # Function Definition from Above:\n # output(x_tensor, num_outputs)\n out = output(fc1, num_outputs)\n \n # TODO: return output\n return out\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\n##############################\n## Build the Neural Network ##\n##############################\n\n# Remove previous weights, bias, inputs, etc..\ntf.reset_default_graph()\n\n# Inputs\nx = neural_net_image_input((32, 32, 3))\ny = neural_net_label_input(10)\nkeep_prob = neural_net_keep_prob_input()\n\n# Model\nlogits = conv_net(x, keep_prob)\n\n# Name logits Tensor, so that is can be loaded from disk after training\nlogits = tf.identity(logits, name='logits')\n\n# Loss and Optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\noptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n# Accuracy\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\ntests.test_conv_net(conv_net)", "Neural Network Built!\n" ] ], [ [ "## Train the Neural Network\n### Single Optimization\nImplement the function `train_neural_network` to do a single optimization. The optimization should use `optimizer` to optimize in `session` with a `feed_dict` of the following:\n* `x` for image input\n* `y` for labels\n* `keep_prob` for keep probability for dropout\n\nThis function will be called for each batch, so `tf.global_variables_initializer()` has already been called.\n\nNote: Nothing needs to be returned. 
This function is only optimizing the neural network.", "_____no_output_____" ] ], [ [ "def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n \"\"\"\n Optimize the session on a batch of images and labels\n : session: Current TensorFlow session\n : optimizer: TensorFlow optimizer function\n : keep_probability: keep probability\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n \"\"\"\n # TODO: Implement Function \n session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_train_nn(train_neural_network)", "Tests Passed\n" ] ], [ [ "### Show Stats\nImplement the function `print_stats` to print loss and validation accuracy. Use the global variables `valid_features` and `valid_labels` to calculate validation accuracy. Use a keep probability of `1.0` to calculate the loss and validation accuracy.", "_____no_output_____" ] ], [ [ "def print_stats(session, feature_batch, label_batch, cost, accuracy):\n \"\"\"\n Print information about loss and validation accuracy\n : session: Current TensorFlow session\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n : cost: TensorFlow cost function\n : accuracy: TensorFlow accuracy function\n \"\"\"\n # TODO: Implement Function\n loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob:1.0})\n validation_accuracy = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob:1.0})\n \n print('Current loss : {} Validation Accuracy: {}'.format(loss, validation_accuracy))", "_____no_output_____" ] ], [ [ "### Hyperparameters\nTune the following parameters:\n* Set `epochs` to the number of iterations until the network stops learning or start overfitting\n* Set `batch_size` to the highest number that your machine has memory for. Most people set them to common sizes of memory:\n * 64\n * 128\n * 256\n * ...\n* Set `keep_probability` to the probability of keeping a node using dropout", "_____no_output_____" ] ], [ [ "# TODO: Tune Parameters\nepochs = 100\nbatch_size = 200\nkeep_probability = 0.8", "_____no_output_____" ] ], [ [ "### Train on a Single CIFAR-10 Batch\nInstead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. 
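For a rough sense of scale before running it: assuming `helper` holds out 10% of each 10,000-sample batch file for validation (as the preprocessing section states), one epoch over a single batch file is only about 45 weight updates at this batch size — a back-of-the-envelope sketch:

```python
# Loop arithmetic (assumes a 10% validation split per 10,000-sample
# batch file, per the preprocessing section above).
samples_per_file = 10000 - 10000 // 10   # 9000 training samples remain
batch_size = 200                         # as set in the hyperparameter cell
print(samples_per_file // batch_size)    # 45 weight updates per epoch
```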
Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nprint('Checking the Training on a Single Batch...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n batch_i = 1\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)", "Checking the Training on a Single Batch...\nEpoch 1, CIFAR-10 Batch 1: Current loss : 2.1404201984405518 Validation Accuracy: 0.23019999265670776\nEpoch 2, CIFAR-10 Batch 1: Current loss : 1.960931420326233 Validation Accuracy: 0.3001999855041504\nEpoch 3, CIFAR-10 Batch 1: Current loss : 1.8160004615783691 Validation Accuracy: 0.36419999599456787\nEpoch 4, CIFAR-10 Batch 1: Current loss : 1.7508413791656494 Validation Accuracy: 0.39159998297691345\nEpoch 5, CIFAR-10 Batch 1: Current loss : 1.6565430164337158 Validation Accuracy: 0.4187999963760376\nEpoch 6, CIFAR-10 Batch 1: Current loss : 1.6212316751480103 Validation Accuracy: 0.41999995708465576\nEpoch 7, CIFAR-10 Batch 1: Current loss : 1.5533783435821533 Validation Accuracy: 0.44099998474121094\nEpoch 8, CIFAR-10 Batch 1: Current loss : 1.5518786907196045 Validation Accuracy: 0.44179999828338623\nEpoch 9, CIFAR-10 Batch 1: Current loss : 1.49874746799469 Validation Accuracy: 0.46459996700286865\nEpoch 10, CIFAR-10 Batch 1: Current loss : 1.4666521549224854 Validation Accuracy: 0.47759994864463806\nEpoch 11, CIFAR-10 Batch 1: Current loss : 1.4292752742767334 Validation Accuracy: 0.49139994382858276\nEpoch 12, CIFAR-10 Batch 1: Current loss : 1.4235910177230835 Validation Accuracy: 0.4923999309539795\nEpoch 13, CIFAR-10 Batch 1: Current loss : 1.4200773239135742 Validation Accuracy: 0.494799941778183\nEpoch 14, CIFAR-10 Batch 1: Current loss : 1.379530906677246 Validation Accuracy: 0.5097999572753906\nEpoch 15, CIFAR-10 Batch 1: Current loss : 1.3886100053787231 Validation Accuracy: 0.49679994583129883\nEpoch 16, CIFAR-10 Batch 1: Current loss : 1.3300666809082031 Validation Accuracy: 0.5203999280929565\nEpoch 17, CIFAR-10 Batch 1: Current loss : 1.3581687211990356 Validation Accuracy: 0.49619996547698975\nEpoch 18, CIFAR-10 Batch 1: Current loss : 1.2945761680603027 Validation Accuracy: 0.5189999341964722\nEpoch 19, CIFAR-10 Batch 1: Current loss : 1.2913408279418945 Validation Accuracy: 0.5221999287605286\nEpoch 20, CIFAR-10 Batch 1: Current loss : 1.283439040184021 Validation Accuracy: 0.5203999280929565\nEpoch 21, CIFAR-10 Batch 1: Current loss : 1.2524608373641968 Validation Accuracy: 0.5287999510765076\nEpoch 22, CIFAR-10 Batch 1: Current loss : 1.2282676696777344 Validation Accuracy: 0.5323999524116516\nEpoch 23, CIFAR-10 Batch 1: Current loss : 1.212867021560669 Validation Accuracy: 0.5371999144554138\nEpoch 24, CIFAR-10 Batch 1: Current loss : 1.2062358856201172 Validation Accuracy: 0.5343999862670898\nEpoch 25, CIFAR-10 Batch 1: Current loss : 1.1782071590423584 Validation Accuracy: 0.5371999740600586\nEpoch 26, CIFAR-10 Batch 1: Current loss : 1.1675879955291748 Validation Accuracy: 0.5399999618530273\nEpoch 27, CIFAR-10 Batch 1: Current loss : 1.1652848720550537 Validation Accuracy: 
0.5425999760627747\nEpoch 28, CIFAR-10 Batch 1: Current loss : 1.1356130838394165 Validation Accuracy: 0.5501999258995056\nEpoch 29, CIFAR-10 Batch 1: Current loss : 1.138736605644226 Validation Accuracy: 0.5411999225616455\nEpoch 30, CIFAR-10 Batch 1: Current loss : 1.1113590002059937 Validation Accuracy: 0.5517999529838562\nEpoch 31, CIFAR-10 Batch 1: Current loss : 1.1093512773513794 Validation Accuracy: 0.5431999564170837\nEpoch 32, CIFAR-10 Batch 1: Current loss : 1.108507513999939 Validation Accuracy: 0.5541999340057373\nEpoch 33, CIFAR-10 Batch 1: Current loss : 1.0875743627548218 Validation Accuracy: 0.5619999766349792\nEpoch 34, CIFAR-10 Batch 1: Current loss : 1.0910513401031494 Validation Accuracy: 0.5547999143600464\nEpoch 35, CIFAR-10 Batch 1: Current loss : 1.0885463953018188 Validation Accuracy: 0.5525999665260315\nEpoch 36, CIFAR-10 Batch 1: Current loss : 1.0715484619140625 Validation Accuracy: 0.5609999895095825\nEpoch 37, CIFAR-10 Batch 1: Current loss : 1.0523797273635864 Validation Accuracy: 0.567799985408783\nEpoch 38, CIFAR-10 Batch 1: Current loss : 1.0767592191696167 Validation Accuracy: 0.5559998750686646\nEpoch 39, CIFAR-10 Batch 1: Current loss : 1.0331004858016968 Validation Accuracy: 0.5625999569892883\nEpoch 40, CIFAR-10 Batch 1: Current loss : 1.0291157960891724 Validation Accuracy: 0.5589998960494995\nEpoch 41, CIFAR-10 Batch 1: Current loss : 1.016866683959961 Validation Accuracy: 0.5603999495506287\nEpoch 42, CIFAR-10 Batch 1: Current loss : 1.0134137868881226 Validation Accuracy: 0.5695998668670654\nEpoch 43, CIFAR-10 Batch 1: Current loss : 1.0087881088256836 Validation Accuracy: 0.5615999102592468\nEpoch 44, CIFAR-10 Batch 1: Current loss : 1.0104384422302246 Validation Accuracy: 0.5627999305725098\nEpoch 45, CIFAR-10 Batch 1: Current loss : 0.9976009130477905 Validation Accuracy: 0.5637999176979065\nEpoch 46, CIFAR-10 Batch 1: Current loss : 0.9994834661483765 Validation Accuracy: 0.5661999583244324\nEpoch 47, CIFAR-10 Batch 1: Current loss : 0.9938300251960754 Validation Accuracy: 0.5637998580932617\nEpoch 48, CIFAR-10 Batch 1: Current loss : 0.9644387364387512 Validation Accuracy: 0.5761999487876892\nEpoch 49, CIFAR-10 Batch 1: Current loss : 0.9731469750404358 Validation Accuracy: 0.5607999563217163\nEpoch 50, CIFAR-10 Batch 1: Current loss : 0.9632340669631958 Validation Accuracy: 0.571199893951416\nEpoch 51, CIFAR-10 Batch 1: Current loss : 0.9288572669029236 Validation Accuracy: 0.5753999352455139\nEpoch 52, CIFAR-10 Batch 1: Current loss : 0.9197142124176025 Validation Accuracy: 0.579800009727478\nEpoch 53, CIFAR-10 Batch 1: Current loss : 0.9209819436073303 Validation Accuracy: 0.572399914264679\nEpoch 54, CIFAR-10 Batch 1: Current loss : 0.9483346343040466 Validation Accuracy: 0.5641999244689941\nEpoch 55, CIFAR-10 Batch 1: Current loss : 0.9200437068939209 Validation Accuracy: 0.5735999345779419\nEpoch 56, CIFAR-10 Batch 1: Current loss : 0.9261587858200073 Validation Accuracy: 0.577799916267395\nEpoch 57, CIFAR-10 Batch 1: Current loss : 0.9097819328308105 Validation Accuracy: 0.5761999487876892\nEpoch 58, CIFAR-10 Batch 1: Current loss : 0.9143059253692627 Validation Accuracy: 0.5829999446868896\nEpoch 59, CIFAR-10 Batch 1: Current loss : 0.8903466463088989 Validation Accuracy: 0.5735999345779419\nEpoch 60, CIFAR-10 Batch 1: Current loss : 0.9270176887512207 Validation Accuracy: 0.5713999271392822\nEpoch 61, CIFAR-10 Batch 1: Current loss : 0.9870089292526245 Validation Accuracy: 0.5547999739646912\nEpoch 62, CIFAR-10 Batch 1: Current loss 
: 0.9077752828598022 Validation Accuracy: 0.5765999555587769\nEpoch 63, CIFAR-10 Batch 1: Current loss : 0.8759788870811462 Validation Accuracy: 0.5845999121665955\nEpoch 64, CIFAR-10 Batch 1: Current loss : 0.8640506267547607 Validation Accuracy: 0.5783999562263489\nEpoch 65, CIFAR-10 Batch 1: Current loss : 0.8669037818908691 Validation Accuracy: 0.5825998783111572\nEpoch 66, CIFAR-10 Batch 1: Current loss : 0.8763057589530945 Validation Accuracy: 0.5819999575614929\nEpoch 67, CIFAR-10 Batch 1: Current loss : 0.8505159616470337 Validation Accuracy: 0.5859999060630798\nEpoch 68, CIFAR-10 Batch 1: Current loss : 0.8544848561286926 Validation Accuracy: 0.586199939250946\nEpoch 69, CIFAR-10 Batch 1: Current loss : 0.8386510610580444 Validation Accuracy: 0.590399980545044\nEpoch 70, CIFAR-10 Batch 1: Current loss : 0.831778883934021 Validation Accuracy: 0.593799889087677\nEpoch 71, CIFAR-10 Batch 1: Current loss : 0.8406695127487183 Validation Accuracy: 0.5905998945236206\nEpoch 72, CIFAR-10 Batch 1: Current loss : 0.8190510272979736 Validation Accuracy: 0.5889999270439148\nEpoch 73, CIFAR-10 Batch 1: Current loss : 0.8077617883682251 Validation Accuracy: 0.5959998965263367\nEpoch 74, CIFAR-10 Batch 1: Current loss : 0.8057792782783508 Validation Accuracy: 0.595599889755249\nEpoch 75, CIFAR-10 Batch 1: Current loss : 0.7848161458969116 Validation Accuracy: 0.5959999561309814\nEpoch 76, CIFAR-10 Batch 1: Current loss : 0.7816042304039001 Validation Accuracy: 0.5945999026298523\nEpoch 77, CIFAR-10 Batch 1: Current loss : 0.7582575082778931 Validation Accuracy: 0.5923998951911926\nEpoch 78, CIFAR-10 Batch 1: Current loss : 0.7665430307388306 Validation Accuracy: 0.5893999338150024\nEpoch 79, CIFAR-10 Batch 1: Current loss : 0.7552751898765564 Validation Accuracy: 0.5957999229431152\nEpoch 80, CIFAR-10 Batch 1: Current loss : 0.7595005035400391 Validation Accuracy: 0.5945999026298523\nEpoch 81, CIFAR-10 Batch 1: Current loss : 0.7554182410240173 Validation Accuracy: 0.5939999222755432\nEpoch 82, CIFAR-10 Batch 1: Current loss : 0.7542269229888916 Validation Accuracy: 0.5915999412536621\nEpoch 83, CIFAR-10 Batch 1: Current loss : 0.7540221214294434 Validation Accuracy: 0.5881999135017395\nEpoch 84, CIFAR-10 Batch 1: Current loss : 0.7932620644569397 Validation Accuracy: 0.5833998918533325\nEpoch 85, CIFAR-10 Batch 1: Current loss : 0.7508374452590942 Validation Accuracy: 0.5899999737739563\nEpoch 86, CIFAR-10 Batch 1: Current loss : 0.7381057739257812 Validation Accuracy: 0.5971999168395996\nEpoch 87, CIFAR-10 Batch 1: Current loss : 0.7560104131698608 Validation Accuracy: 0.5945998430252075\nEpoch 88, CIFAR-10 Batch 1: Current loss : 0.7472323179244995 Validation Accuracy: 0.5941998958587646\nEpoch 89, CIFAR-10 Batch 1: Current loss : 0.7386046051979065 Validation Accuracy: 0.5959999561309814\nEpoch 90, CIFAR-10 Batch 1: Current loss : 0.7453267574310303 Validation Accuracy: 0.5897999405860901\nEpoch 91, CIFAR-10 Batch 1: Current loss : 0.735518217086792 Validation Accuracy: 0.5957998633384705\nEpoch 92, CIFAR-10 Batch 1: Current loss : 0.7449897527694702 Validation Accuracy: 0.5915999412536621\nEpoch 93, CIFAR-10 Batch 1: Current loss : 0.7266314625740051 Validation Accuracy: 0.5945999026298523\nEpoch 94, CIFAR-10 Batch 1: Current loss : 0.7178974747657776 Validation Accuracy: 0.5993998646736145\nEpoch 95, CIFAR-10 Batch 1: Current loss : 0.7117376327514648 Validation Accuracy: 0.5911999940872192\nEpoch 96, CIFAR-10 Batch 1: Current loss : 0.7212687730789185 Validation Accuracy: 
0.5935999155044556\nEpoch 97, CIFAR-10 Batch 1: Current loss : 0.7136944532394409 Validation Accuracy: 0.5949999094009399\nEpoch 98, CIFAR-10 Batch 1: Current loss : 0.7308631539344788 Validation Accuracy: 0.5957999229431152\nEpoch 99, CIFAR-10 Batch 1: Current loss : 0.7057619690895081 Validation Accuracy: 0.5995998978614807\nEpoch 100, CIFAR-10 Batch 1: Current loss : 0.7057435512542725 Validation Accuracy: 0.5965998768806458\n" ] ], [ [ "### Fully Train the Model\nNow that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_model_path = './image_classification'\n\nprint('Training...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n # Loop over all batches\n n_batches = 5\n for batch_i in range(1, n_batches + 1):\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)\n \n # Save Model\n saver = tf.train.Saver()\n save_path = saver.save(sess, save_model_path)", "Training...\nEpoch 1, CIFAR-10 Batch 1: Current loss : 2.1939005851745605 Validation Accuracy: 0.18039998412132263\nEpoch 1, CIFAR-10 Batch 2: Current loss : 1.873802661895752 Validation Accuracy: 0.321399986743927\nEpoch 1, CIFAR-10 Batch 3: Current loss : 1.7009248733520508 Validation Accuracy: 0.35319995880126953\nEpoch 1, CIFAR-10 Batch 4: Current loss : 1.617295503616333 Validation Accuracy: 0.40779998898506165\nEpoch 1, CIFAR-10 Batch 5: Current loss : 1.6494498252868652 Validation Accuracy: 0.41419997811317444\nEpoch 2, CIFAR-10 Batch 1: Current loss : 1.6784964799880981 Validation Accuracy: 0.4447999596595764\nEpoch 2, CIFAR-10 Batch 2: Current loss : 1.560352087020874 Validation Accuracy: 0.43839994072914124\nEpoch 2, CIFAR-10 Batch 3: Current loss : 1.3611189126968384 Validation Accuracy: 0.4395999610424042\nEpoch 2, CIFAR-10 Batch 4: Current loss : 1.385043740272522 Validation Accuracy: 0.4691999554634094\nEpoch 2, CIFAR-10 Batch 5: Current loss : 1.4706279039382935 Validation Accuracy: 0.46959996223449707\nEpoch 3, CIFAR-10 Batch 1: Current loss : 1.5209804773330688 Validation Accuracy: 0.48579996824264526\nEpoch 3, CIFAR-10 Batch 2: Current loss : 1.3918681144714355 Validation Accuracy: 0.48719996213912964\nEpoch 3, CIFAR-10 Batch 3: Current loss : 1.22645902633667 Validation Accuracy: 0.4931999742984772\nEpoch 3, CIFAR-10 Batch 4: Current loss : 1.2797505855560303 Validation Accuracy: 0.5065999627113342\nEpoch 3, CIFAR-10 Batch 5: Current loss : 1.3027738332748413 Validation Accuracy: 0.5179999470710754\nEpoch 4, CIFAR-10 Batch 1: Current loss : 1.4026306867599487 Validation Accuracy: 0.5295999646186829\nEpoch 4, CIFAR-10 Batch 2: Current loss : 1.287933111190796 Validation Accuracy: 0.5327999591827393\nEpoch 4, CIFAR-10 Batch 3: Current loss : 1.1408357620239258 Validation Accuracy: 0.5207999348640442\nEpoch 4, CIFAR-10 Batch 4: Current loss : 1.1746201515197754 Validation Accuracy: 0.5335999727249146\nEpoch 4, CIFAR-10 Batch 5: Current loss : 1.2507318258285522 Validation Accuracy: 0.5235999226570129\nEpoch 5, CIFAR-10 Batch 1: Current loss : 1.3393816947937012 Validation Accuracy: 0.5471999645233154\nEpoch 5, 
CIFAR-10 Batch 2: Current loss : 1.2439855337142944 Validation Accuracy: 0.5471999049186707\nEpoch 5, CIFAR-10 Batch 3: Current loss : 1.064825177192688 Validation Accuracy: 0.5447999238967896\nEpoch 5, CIFAR-10 Batch 4: Current loss : 1.138387680053711 Validation Accuracy: 0.5471999645233154\nEpoch 5, CIFAR-10 Batch 5: Current loss : 1.2171167135238647 Validation Accuracy: 0.5401999354362488\nEpoch 6, CIFAR-10 Batch 1: Current loss : 1.2818371057510376 Validation Accuracy: 0.558199942111969\nEpoch 6, CIFAR-10 Batch 2: Current loss : 1.1968698501586914 Validation Accuracy: 0.5633999109268188\nEpoch 6, CIFAR-10 Batch 3: Current loss : 1.0301936864852905 Validation Accuracy: 0.5591999292373657\nEpoch 6, CIFAR-10 Batch 4: Current loss : 1.0846534967422485 Validation Accuracy: 0.5647999048233032\nEpoch 6, CIFAR-10 Batch 5: Current loss : 1.1721278429031372 Validation Accuracy: 0.5567999482154846\nEpoch 7, CIFAR-10 Batch 1: Current loss : 1.2570619583129883 Validation Accuracy: 0.5771999359130859\nEpoch 7, CIFAR-10 Batch 2: Current loss : 1.186401128768921 Validation Accuracy: 0.5697999000549316\nEpoch 7, CIFAR-10 Batch 3: Current loss : 1.0192232131958008 Validation Accuracy: 0.5725999474525452\nEpoch 7, CIFAR-10 Batch 4: Current loss : 1.0451502799987793 Validation Accuracy: 0.5765998959541321\nEpoch 7, CIFAR-10 Batch 5: Current loss : 1.1231029033660889 Validation Accuracy: 0.5725999474525452\nEpoch 8, CIFAR-10 Batch 1: Current loss : 1.2337552309036255 Validation Accuracy: 0.5755999088287354\nEpoch 8, CIFAR-10 Batch 2: Current loss : 1.1486127376556396 Validation Accuracy: 0.5837999582290649\nEpoch 8, CIFAR-10 Batch 3: Current loss : 0.9726188778877258 Validation Accuracy: 0.5797998905181885\nEpoch 8, CIFAR-10 Batch 4: Current loss : 1.0263440608978271 Validation Accuracy: 0.5783998966217041\nEpoch 8, CIFAR-10 Batch 5: Current loss : 1.1139333248138428 Validation Accuracy: 0.5717999339103699\nEpoch 9, CIFAR-10 Batch 1: Current loss : 1.172505259513855 Validation Accuracy: 0.5915999412536621\nEpoch 9, CIFAR-10 Batch 2: Current loss : 1.1148194074630737 Validation Accuracy: 0.5923999547958374\nEpoch 9, CIFAR-10 Batch 3: Current loss : 0.9389104247093201 Validation Accuracy: 0.5885999202728271\nEpoch 9, CIFAR-10 Batch 4: Current loss : 1.0035417079925537 Validation Accuracy: 0.5873998999595642\nEpoch 9, CIFAR-10 Batch 5: Current loss : 1.0693283081054688 Validation Accuracy: 0.5887998938560486\nEpoch 10, CIFAR-10 Batch 1: Current loss : 1.1510024070739746 Validation Accuracy: 0.5951998829841614\nEpoch 10, CIFAR-10 Batch 2: Current loss : 1.0737136602401733 Validation Accuracy: 0.6027998924255371\nEpoch 10, CIFAR-10 Batch 3: Current loss : 0.9074586629867554 Validation Accuracy: 0.6065998673439026\nEpoch 10, CIFAR-10 Batch 4: Current loss : 0.9826431274414062 Validation Accuracy: 0.5979998707771301\nEpoch 10, CIFAR-10 Batch 5: Current loss : 1.045066237449646 Validation Accuracy: 0.5957999229431152\nEpoch 11, CIFAR-10 Batch 1: Current loss : 1.1246505975723267 Validation Accuracy: 0.6089999079704285\nEpoch 11, CIFAR-10 Batch 2: Current loss : 1.0417002439498901 Validation Accuracy: 0.6157999038696289\nEpoch 11, CIFAR-10 Batch 3: Current loss : 0.8726595044136047 Validation Accuracy: 0.6101999282836914\nEpoch 11, CIFAR-10 Batch 4: Current loss : 0.9451372623443604 Validation Accuracy: 0.6127999424934387\nEpoch 11, CIFAR-10 Batch 5: Current loss : 1.036030650138855 Validation Accuracy: 0.5957999229431152\nEpoch 12, CIFAR-10 Batch 1: Current loss : 1.098718523979187 Validation Accuracy: 
0.5873998999595642\nEpoch 12, CIFAR-10 Batch 2: Current loss : 1.0605512857437134 Validation Accuracy: 0.6061998605728149\nEpoch 12, CIFAR-10 Batch 3: Current loss : 0.8612033724784851 Validation Accuracy: 0.6195999383926392\nEpoch 12, CIFAR-10 Batch 4: Current loss : 0.947963535785675 Validation Accuracy: 0.6117998957633972\nEpoch 12, CIFAR-10 Batch 5: Current loss : 1.02720308303833 Validation Accuracy: 0.6043999195098877\nEpoch 13, CIFAR-10 Batch 1: Current loss : 1.0881388187408447 Validation Accuracy: 0.6123998761177063\nEpoch 13, CIFAR-10 Batch 2: Current loss : 1.0243065357208252 Validation Accuracy: 0.6181999444961548\nEpoch 13, CIFAR-10 Batch 3: Current loss : 0.8374801278114319 Validation Accuracy: 0.6285999417304993\nEpoch 13, CIFAR-10 Batch 4: Current loss : 0.9158819317817688 Validation Accuracy: 0.6257998943328857\nEpoch 13, CIFAR-10 Batch 5: Current loss : 0.9998889565467834 Validation Accuracy: 0.6167998909950256\nEpoch 14, CIFAR-10 Batch 1: Current loss : 1.0434759855270386 Validation Accuracy: 0.6203998923301697\nEpoch 14, CIFAR-10 Batch 2: Current loss : 1.0101757049560547 Validation Accuracy: 0.6267999410629272\nEpoch 14, CIFAR-10 Batch 3: Current loss : 0.8239379525184631 Validation Accuracy: 0.6289998292922974\nEpoch 14, CIFAR-10 Batch 4: Current loss : 0.9186958074569702 Validation Accuracy: 0.6279999017715454\nEpoch 14, CIFAR-10 Batch 5: Current loss : 0.9668319821357727 Validation Accuracy: 0.6261999011039734\nEpoch 15, CIFAR-10 Batch 1: Current loss : 1.0323407649993896 Validation Accuracy: 0.6261999011039734\nEpoch 15, CIFAR-10 Batch 2: Current loss : 0.9864116907119751 Validation Accuracy: 0.6261999607086182\nEpoch 15, CIFAR-10 Batch 3: Current loss : 0.8129538297653198 Validation Accuracy: 0.631399929523468\nEpoch 15, CIFAR-10 Batch 4: Current loss : 0.9079692363739014 Validation Accuracy: 0.6283999085426331\nEpoch 15, CIFAR-10 Batch 5: Current loss : 0.9595471620559692 Validation Accuracy: 0.6263998746871948\nEpoch 16, CIFAR-10 Batch 1: Current loss : 1.026469111442566 Validation Accuracy: 0.6285999417304993\nEpoch 16, CIFAR-10 Batch 2: Current loss : 0.9742425084114075 Validation Accuracy: 0.6353999376296997\nEpoch 16, CIFAR-10 Batch 3: Current loss : 0.7895388007164001 Validation Accuracy: 0.6385998725891113\nEpoch 16, CIFAR-10 Batch 4: Current loss : 0.8729797601699829 Validation Accuracy: 0.6375998854637146\nEpoch 16, CIFAR-10 Batch 5: Current loss : 0.9481512308120728 Validation Accuracy: 0.63319993019104\nEpoch 17, CIFAR-10 Batch 1: Current loss : 0.9828776717185974 Validation Accuracy: 0.6317999362945557\nEpoch 17, CIFAR-10 Batch 2: Current loss : 0.9464256763458252 Validation Accuracy: 0.6403998732566833\nEpoch 17, CIFAR-10 Batch 3: Current loss : 0.7919307351112366 Validation Accuracy: 0.6383998990058899\nEpoch 17, CIFAR-10 Batch 4: Current loss : 0.8646535873413086 Validation Accuracy: 0.6385998725891113\nEpoch 17, CIFAR-10 Batch 5: Current loss : 0.9175827503204346 Validation Accuracy: 0.6427999138832092\nEpoch 18, CIFAR-10 Batch 1: Current loss : 0.9762493371963501 Validation Accuracy: 0.6423999071121216\nEpoch 18, CIFAR-10 Batch 2: Current loss : 0.9426829814910889 Validation Accuracy: 0.6433998942375183\nEpoch 18, CIFAR-10 Batch 3: Current loss : 0.771439254283905 Validation Accuracy: 0.6463999152183533\nEpoch 18, CIFAR-10 Batch 4: Current loss : 0.8708946704864502 Validation Accuracy: 0.6397998332977295\nEpoch 18, CIFAR-10 Batch 5: Current loss : 0.9141246676445007 Validation Accuracy: 0.6439998745918274\nEpoch 19, CIFAR-10 Batch 1: Current loss 
: 0.9483576416969299 Validation Accuracy: 0.6441999077796936\nEpoch 19, CIFAR-10 Batch 2: Current loss : 0.9158825874328613 Validation Accuracy: 0.6501998901367188\nEpoch 19, CIFAR-10 Batch 3: Current loss : 0.7679075598716736 Validation Accuracy: 0.6511998772621155\nEpoch 19, CIFAR-10 Batch 4: Current loss : 0.8370040655136108 Validation Accuracy: 0.6469998955726624\nEpoch 19, CIFAR-10 Batch 5: Current loss : 0.8995217084884644 Validation Accuracy: 0.6517998576164246\nEpoch 20, CIFAR-10 Batch 1: Current loss : 0.9257192611694336 Validation Accuracy: 0.6525998711585999\nEpoch 20, CIFAR-10 Batch 2: Current loss : 0.8903281688690186 Validation Accuracy: 0.6557998657226562\nEpoch 20, CIFAR-10 Batch 3: Current loss : 0.7649483680725098 Validation Accuracy: 0.6501998901367188\nEpoch 20, CIFAR-10 Batch 4: Current loss : 0.8276218771934509 Validation Accuracy: 0.6525998711585999\nEpoch 20, CIFAR-10 Batch 5: Current loss : 0.8780385255813599 Validation Accuracy: 0.6555998921394348\nEpoch 21, CIFAR-10 Batch 1: Current loss : 0.9118562936782837 Validation Accuracy: 0.6507998704910278\nEpoch 21, CIFAR-10 Batch 2: Current loss : 0.9149813652038574 Validation Accuracy: 0.645599901676178\nEpoch 21, CIFAR-10 Batch 3: Current loss : 0.7410898208618164 Validation Accuracy: 0.6579998731613159\nEpoch 21, CIFAR-10 Batch 4: Current loss : 0.8148171305656433 Validation Accuracy: 0.6543998718261719\nEpoch 21, CIFAR-10 Batch 5: Current loss : 0.8858017921447754 Validation Accuracy: 0.6513999104499817\nEpoch 22, CIFAR-10 Batch 1: Current loss : 0.9149585366249084 Validation Accuracy: 0.650999903678894\nEpoch 22, CIFAR-10 Batch 2: Current loss : 0.8560728430747986 Validation Accuracy: 0.6475999355316162\nEpoch 22, CIFAR-10 Batch 3: Current loss : 0.7179722785949707 Validation Accuracy: 0.6599999070167542\nEpoch 22, CIFAR-10 Batch 4: Current loss : 0.8024967908859253 Validation Accuracy: 0.6579998731613159\nEpoch 22, CIFAR-10 Batch 5: Current loss : 0.8614816665649414 Validation Accuracy: 0.662199854850769\nEpoch 23, CIFAR-10 Batch 1: Current loss : 0.9014730453491211 Validation Accuracy: 0.6561998724937439\nEpoch 23, CIFAR-10 Batch 2: Current loss : 0.8695048093795776 Validation Accuracy: 0.6511998176574707\nEpoch 23, CIFAR-10 Batch 3: Current loss : 0.7389315366744995 Validation Accuracy: 0.6625999212265015\nEpoch 23, CIFAR-10 Batch 4: Current loss : 0.8204813003540039 Validation Accuracy: 0.645599901676178\nEpoch 23, CIFAR-10 Batch 5: Current loss : 0.8669534921646118 Validation Accuracy: 0.6629998683929443\nEpoch 24, CIFAR-10 Batch 1: Current loss : 0.8843370079994202 Validation Accuracy: 0.653999924659729\nEpoch 24, CIFAR-10 Batch 2: Current loss : 0.8353283405303955 Validation Accuracy: 0.6641998291015625\nEpoch 24, CIFAR-10 Batch 3: Current loss : 0.7050259113311768 Validation Accuracy: 0.6661998629570007\nEpoch 24, CIFAR-10 Batch 4: Current loss : 0.8037465214729309 Validation Accuracy: 0.6591998934745789\nEpoch 24, CIFAR-10 Batch 5: Current loss : 0.8906100988388062 Validation Accuracy: 0.6609998941421509\nEpoch 25, CIFAR-10 Batch 1: Current loss : 0.8725862503051758 Validation Accuracy: 0.6627998352050781\nEpoch 25, CIFAR-10 Batch 2: Current loss : 0.832291841506958 Validation Accuracy: 0.6673998832702637\nEpoch 25, CIFAR-10 Batch 3: Current loss : 0.7259007096290588 Validation Accuracy: 0.6693998575210571\nEpoch 25, CIFAR-10 Batch 4: Current loss : 0.8088729977607727 Validation Accuracy: 0.6533998250961304\nEpoch 25, CIFAR-10 Batch 5: Current loss : 0.8355013728141785 Validation Accuracy: 
0.6639998555183411\nEpoch 26, CIFAR-10 Batch 1: Current loss : 0.8672313690185547 Validation Accuracy: 0.6645998954772949\nEpoch 26, CIFAR-10 Batch 2: Current loss : 0.8240985870361328 Validation Accuracy: 0.6639998555183411\nEpoch 26, CIFAR-10 Batch 3: Current loss : 0.704670786857605 Validation Accuracy: 0.6715998649597168\nEpoch 26, CIFAR-10 Batch 4: Current loss : 0.7986750602722168 Validation Accuracy: 0.6531998515129089\nEpoch 26, CIFAR-10 Batch 5: Current loss : 0.8388141393661499 Validation Accuracy: 0.6665998697280884\nEpoch 27, CIFAR-10 Batch 1: Current loss : 0.8458704948425293 Validation Accuracy: 0.6531999111175537\nEpoch 27, CIFAR-10 Batch 2: Current loss : 0.8186399936676025 Validation Accuracy: 0.6617998480796814\nEpoch 27, CIFAR-10 Batch 3: Current loss : 0.68885338306427 Validation Accuracy: 0.6775998473167419\nEpoch 27, CIFAR-10 Batch 4: Current loss : 0.7714119553565979 Validation Accuracy: 0.6643998622894287\nEpoch 27, CIFAR-10 Batch 5: Current loss : 0.8166139721870422 Validation Accuracy: 0.6681998372077942\nEpoch 28, CIFAR-10 Batch 1: Current loss : 0.8304986357688904 Validation Accuracy: 0.6529998779296875\nEpoch 28, CIFAR-10 Batch 2: Current loss : 0.8096197843551636 Validation Accuracy: 0.6673998832702637\nEpoch 28, CIFAR-10 Batch 3: Current loss : 0.6778653860092163 Validation Accuracy: 0.6755998730659485\nEpoch 28, CIFAR-10 Batch 4: Current loss : 0.7705238461494446 Validation Accuracy: 0.6713998913764954\nEpoch 28, CIFAR-10 Batch 5: Current loss : 0.836593747138977 Validation Accuracy: 0.6711998581886292\nEpoch 29, CIFAR-10 Batch 1: Current loss : 0.8346940279006958 Validation Accuracy: 0.6641998291015625\nEpoch 29, CIFAR-10 Batch 2: Current loss : 0.8094237446784973 Validation Accuracy: 0.6657999157905579\nEpoch 29, CIFAR-10 Batch 3: Current loss : 0.6846739053726196 Validation Accuracy: 0.669999897480011\nEpoch 29, CIFAR-10 Batch 4: Current loss : 0.7631700038909912 Validation Accuracy: 0.6697998642921448\nEpoch 29, CIFAR-10 Batch 5: Current loss : 0.8149406909942627 Validation Accuracy: 0.6705998778343201\nEpoch 30, CIFAR-10 Batch 1: Current loss : 0.8241127729415894 Validation Accuracy: 0.6575998067855835\nEpoch 30, CIFAR-10 Batch 2: Current loss : 0.7976753115653992 Validation Accuracy: 0.6733998656272888\nEpoch 30, CIFAR-10 Batch 3: Current loss : 0.6649386882781982 Validation Accuracy: 0.679399847984314\nEpoch 30, CIFAR-10 Batch 4: Current loss : 0.7908545136451721 Validation Accuracy: 0.6611998677253723\nEpoch 30, CIFAR-10 Batch 5: Current loss : 0.812059760093689 Validation Accuracy: 0.6703999042510986\nEpoch 31, CIFAR-10 Batch 1: Current loss : 0.8134020566940308 Validation Accuracy: 0.6601999402046204\nEpoch 31, CIFAR-10 Batch 2: Current loss : 0.7930998206138611 Validation Accuracy: 0.6777998805046082\nEpoch 31, CIFAR-10 Batch 3: Current loss : 0.6777948141098022 Validation Accuracy: 0.679399847984314\nEpoch 31, CIFAR-10 Batch 4: Current loss : 0.7561708688735962 Validation Accuracy: 0.6713998913764954\nEpoch 31, CIFAR-10 Batch 5: Current loss : 0.7950618267059326 Validation Accuracy: 0.6731998324394226\nEpoch 32, CIFAR-10 Batch 1: Current loss : 0.8149684071540833 Validation Accuracy: 0.6675999164581299\nEpoch 32, CIFAR-10 Batch 2: Current loss : 0.7818489074707031 Validation Accuracy: 0.6759999394416809\nEpoch 32, CIFAR-10 Batch 3: Current loss : 0.6512144207954407 Validation Accuracy: 0.6793999075889587\nEpoch 32, CIFAR-10 Batch 4: Current loss : 0.7601133584976196 Validation Accuracy: 0.6705998778343201\nEpoch 32, CIFAR-10 Batch 5: Current loss 
: 0.7955799102783203 Validation Accuracy: 0.6809998750686646\nEpoch 33, CIFAR-10 Batch 1: Current loss : 0.8050636053085327 Validation Accuracy: 0.6607999205589294\nEpoch 33, CIFAR-10 Batch 2: Current loss : 0.7675769329071045 Validation Accuracy: 0.6727998852729797\nEpoch 33, CIFAR-10 Batch 3: Current loss : 0.672049880027771 Validation Accuracy: 0.6843998432159424\nEpoch 33, CIFAR-10 Batch 4: Current loss : 0.7673715949058533 Validation Accuracy: 0.666999876499176\nEpoch 33, CIFAR-10 Batch 5: Current loss : 0.8030023574829102 Validation Accuracy: 0.6755999326705933\nEpoch 34, CIFAR-10 Batch 1: Current loss : 0.7992761731147766 Validation Accuracy: 0.6637998819351196\nEpoch 34, CIFAR-10 Batch 2: Current loss : 0.7676618099212646 Validation Accuracy: 0.6749998927116394\nEpoch 34, CIFAR-10 Batch 3: Current loss : 0.6439557075500488 Validation Accuracy: 0.6799998879432678\nEpoch 34, CIFAR-10 Batch 4: Current loss : 0.7534141540527344 Validation Accuracy: 0.6731998920440674\nEpoch 34, CIFAR-10 Batch 5: Current loss : 0.7751612663269043 Validation Accuracy: 0.6809998154640198\nEpoch 35, CIFAR-10 Batch 1: Current loss : 0.8062970042228699 Validation Accuracy: 0.6663998365402222\nEpoch 35, CIFAR-10 Batch 2: Current loss : 0.7735908627510071 Validation Accuracy: 0.679399847984314\nEpoch 35, CIFAR-10 Batch 3: Current loss : 0.6557350754737854 Validation Accuracy: 0.6785998940467834\nEpoch 35, CIFAR-10 Batch 4: Current loss : 0.7524763345718384 Validation Accuracy: 0.6737999320030212\nEpoch 35, CIFAR-10 Batch 5: Current loss : 0.7849115133285522 Validation Accuracy: 0.681199848651886\nEpoch 36, CIFAR-10 Batch 1: Current loss : 0.7998217344284058 Validation Accuracy: 0.6625999212265015\nEpoch 36, CIFAR-10 Batch 2: Current loss : 0.7586580514907837 Validation Accuracy: 0.6773998737335205\nEpoch 36, CIFAR-10 Batch 3: Current loss : 0.6311531066894531 Validation Accuracy: 0.6805999279022217\nEpoch 36, CIFAR-10 Batch 4: Current loss : 0.7502814531326294 Validation Accuracy: 0.674599826335907\nEpoch 36, CIFAR-10 Batch 5: Current loss : 0.7837042212486267 Validation Accuracy: 0.6803998947143555\nEpoch 37, CIFAR-10 Batch 1: Current loss : 0.7998322248458862 Validation Accuracy: 0.6777998208999634\nEpoch 37, CIFAR-10 Batch 2: Current loss : 0.741359531879425 Validation Accuracy: 0.6779998540878296\nEpoch 37, CIFAR-10 Batch 3: Current loss : 0.6216884255409241 Validation Accuracy: 0.6839998364448547\nEpoch 37, CIFAR-10 Batch 4: Current loss : 0.7362862825393677 Validation Accuracy: 0.6781998872756958\nEpoch 37, CIFAR-10 Batch 5: Current loss : 0.7695767879486084 Validation Accuracy: 0.6807999014854431\nEpoch 38, CIFAR-10 Batch 1: Current loss : 0.7792635560035706 Validation Accuracy: 0.6757998466491699\nEpoch 38, CIFAR-10 Batch 2: Current loss : 0.7454570531845093 Validation Accuracy: 0.6821998357772827\nEpoch 38, CIFAR-10 Batch 3: Current loss : 0.6364250183105469 Validation Accuracy: 0.6861997842788696\nEpoch 38, CIFAR-10 Batch 4: Current loss : 0.7249089479446411 Validation Accuracy: 0.6797998547554016\nEpoch 38, CIFAR-10 Batch 5: Current loss : 0.7480088472366333 Validation Accuracy: 0.6909998059272766\nEpoch 39, CIFAR-10 Batch 1: Current loss : 0.7798313498497009 Validation Accuracy: 0.679399847984314\nEpoch 39, CIFAR-10 Batch 2: Current loss : 0.738990068435669 Validation Accuracy: 0.6765998005867004\nEpoch 39, CIFAR-10 Batch 3: Current loss : 0.6261419653892517 Validation Accuracy: 0.6885998845100403\nEpoch 39, CIFAR-10 Batch 4: Current loss : 0.7549045085906982 Validation Accuracy: 
0.6719998717308044\nEpoch 39, CIFAR-10 Batch 5: Current loss : 0.7476931810379028 Validation Accuracy: 0.6869998574256897\nEpoch 40, CIFAR-10 Batch 1: Current loss : 0.7726240754127502 Validation Accuracy: 0.6811999082565308\nEpoch 40, CIFAR-10 Batch 2: Current loss : 0.7350998520851135 Validation Accuracy: 0.6871997714042664\nEpoch 40, CIFAR-10 Batch 3: Current loss : 0.6196874380111694 Validation Accuracy: 0.6847999095916748\nEpoch 40, CIFAR-10 Batch 4: Current loss : 0.7287153601646423 Validation Accuracy: 0.6823998093605042\nEpoch 40, CIFAR-10 Batch 5: Current loss : 0.7641022205352783 Validation Accuracy: 0.6817998290061951\n
[per-batch log for epochs 41-99 elided: losses keep drifting down (batch-3 loss falls from ~0.61 to ~0.48) while validation accuracy climbs from about 0.68 to a peak of 0.7099998593330383 in epoch 99]\n
Epoch 100, CIFAR-10 Batch 1: Current loss : 0.7252565622329712 Validation Accuracy: 0.6995998024940491\nEpoch 100, CIFAR-10 Batch 2: Current loss : 0.5763905644416809 Validation Accuracy: 0.7065998315811157\nEpoch 100, CIFAR-10 Batch 3: Current loss : 0.49475258588790894 Validation Accuracy: 0.6989998817443848\nEpoch 100, CIFAR-10 Batch 4: Current loss : 0.6062710285186768 Validation Accuracy: 0.7045998573303223\nEpoch 100, CIFAR-10 Batch 5: Current loss : 0.6478626728057861 Validation Accuracy: 0.6951999068260193\n" ] ], [ [ "# Checkpoint\nThe model has been saved to disk.\n## Test Model\nTest your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. 
If you don't, keep tweaking the model architecture and parameters.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport tensorflow as tf\nimport pickle\nimport helper\nimport random\n\n# Set batch size if not already set\ntry:\n if batch_size:\n pass\nexcept NameError:\n batch_size = 64\n\nsave_model_path = './image_classification'\nn_samples = 4\ntop_n_predictions = 3\n\ndef test_model():\n \"\"\"\n Test the saved model against the test dataset\n \"\"\"\n\n test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))\n loaded_graph = tf.Graph()\n\n with tf.Session(graph=loaded_graph) as sess:\n # Load model\n loader = tf.train.import_meta_graph(save_model_path + '.meta')\n loader.restore(sess, save_model_path)\n\n # Get Tensors from loaded model\n loaded_x = loaded_graph.get_tensor_by_name('x:0')\n loaded_y = loaded_graph.get_tensor_by_name('y:0')\n loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n loaded_logits = loaded_graph.get_tensor_by_name('logits:0')\n loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')\n \n # Get accuracy in batches for memory limitations\n test_batch_acc_total = 0\n test_batch_count = 0\n \n for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):\n test_batch_acc_total += sess.run(\n loaded_acc,\n feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})\n test_batch_count += 1\n\n print('Testing Accuracy: {}\\n'.format(test_batch_acc_total/test_batch_count))\n\n # Print Random Samples\n random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))\n random_test_predictions = sess.run(\n tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),\n feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})\n helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)\n\n\ntest_model()", "Testing Accuracy: 0.6905999898910522\n\n" ] ], [ [ "## Why 50-80% Accuracy?\nYou might be wondering why you can't get an accuracy any higher. First things first, 50% isn't bad for a simple CNN. Pure guessing would get you 10% accuracy. However, you might notice people are getting scores [well above 80%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130). That's because we haven't taught you all there is to know about neural networks. We still need to cover a few more techniques.\n## Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_image_classification.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ec5daf4a998ee622a736aab34ec768bac51e6954
14,660
ipynb
Jupyter Notebook
Case Study - Fraud Detection.ipynb
Abhishekjha111/Case-studies
1d929c9f93de4f93080a0e084d1a4659a6748734
[ "Apache-2.0" ]
null
null
null
Case Study - Fraud Detection.ipynb
Abhishekjha111/Case-studies
1d929c9f93de4f93080a0e084d1a4659a6748734
[ "Apache-2.0" ]
null
null
null
Case Study - Fraud Detection.ipynb
Abhishekjha111/Case-studies
1d929c9f93de4f93080a0e084d1a4659a6748734
[ "Apache-2.0" ]
null
null
null
25.232358
104
0.423397
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "credit_card = pd.read_csv('data/creditcard.csv')", "_____no_output_____" ], [ "credit_card.head()", "_____no_output_____" ], [ "credit_card.drop('Time',axis=1,inplace=True)", "_____no_output_____" ], [ "credit_card.Class.value_counts()", "_____no_output_____" ], [ "492/284315*100", "_____no_output_____" ], [ "X,y = credit_card.drop('Class',axis=1), credit_card.Class", "_____no_output_____" ], [ "from imblearn.over_sampling import SMOTE", "_____no_output_____" ], [ "smote = SMOTE()", "_____no_output_____" ], [ "X_tf,y_tf = smote.fit_resample(X,y)", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "data = np.hstack([X_tf,y_tf.reshape(-1,1)])", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ], [ "rf = RandomForestClassifier()", "_____no_output_____" ], [ "rf", "_____no_output_____" ], [ "params = {'n_estimators':[10,20,30]}", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split, GridSearchCV", "_____no_output_____" ], [ "trainX,testX, trainY,testY = train_test_split(X_tf, y_tf)", "_____no_output_____" ], [ "trainX,testX, trainY,testY = train_test_split(X, y)", "_____no_output_____" ], [ "gs = GridSearchCV(rf, param_grid=params, cv=5, n_jobs=-1)", "_____no_output_____" ], [ "gs.fit(trainX,trainY)", "_____no_output_____" ], [ "gs.score(testX,testY)", "_____no_output_____" ], [ "pred = gs.predict(testX)", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix", "_____no_output_____" ], [ "confusion_matrix(y_pred=pred, y_true=testY)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ec5dda541471a747fbd18a6387051f90634a57eb
1,552
ipynb
Jupyter Notebook
online/WMC-week10-3.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
null
null
null
online/WMC-week10-3.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
null
null
null
online/WMC-week10-3.ipynb
IST256-classroom/fall2018-learn-python-mafudge
173a3c00baaf501a5006ff2a8058bdf97b23f37e
[ "MIT" ]
5
2018-09-17T03:54:06.000Z
2019-10-17T02:47:20.000Z
20.693333
102
0.522552
[ [ [ "import requests\nlat = \"43.0481\"\nlng = \"-76.1474\"\nurl = 'https://api.darksky.net/forecast/3428d875996cccbf7b713a175d3fd7a5/%s,%s' % (lat, lng)\nresponse = requests.get(url)\nprint(response.url)\nif response.ok:\n data = response.json()\ntemp = data['currently']['temperature']\nprint(\"The Current Temperature in Syracuse %.1f\" % (temp))\n", "https://api.darksky.net/forecast/3428d875996cccbf7b713a175d3fd7a5/43.0481,-76.1474\nThe Current Temperature in Syracuse 36.6\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
ec5df5b30b739088c7b04d951148d826c33c01b7
128,397
ipynb
Jupyter Notebook
ML India AI Challenge.ipynb
mrityu-jha/ML-DL-Challenges
5ccf1c28d2db631aae577732c9bb6f095322e99c
[ "MIT" ]
null
null
null
ML India AI Challenge.ipynb
mrityu-jha/ML-DL-Challenges
5ccf1c28d2db631aae577732c9bb6f095322e99c
[ "MIT" ]
null
null
null
ML India AI Challenge.ipynb
mrityu-jha/ML-DL-Challenges
5ccf1c28d2db631aae577732c9bb6f095322e99c
[ "MIT" ]
null
null
null
76.064573
28,664
0.707524
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom imblearn.under_sampling import NearMiss, RandomUnderSampler\nfrom imblearn.combine import SMOTETomek\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.over_sampling import RandomOverSampler\nfrom imblearn.pipeline import Pipeline\nfrom collections import Counter\nfrom pycaret.classification import * ", "_____no_output_____" ], [ "train = pd.read_csv( 'Downloads/MLI/TRAIN.csv', header = [0], index_col = [0] )\ntest = pd.read_csv( 'Downloads/MLI/TEST.csv', header = [0], index_col = [0] )\ncombined = [ train, test ]", "_____no_output_____" ], [ "count = pd.value_counts( train['Class'], sort = True )\ncount.plot( kind = 'bar', rot = 0 )\nplt.xlabel( 'Class' )\nplt.ylabel( 'Count' )\nplt.title( 'Class Distribution' )", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "test.head()", "_____no_output_____" ], [ "train.describe()", "_____no_output_____" ], [ "print('Original dataset shape {}'.format(Counter(train['Class'])))", "Original dataset shape Counter({0: 284015, 1: 440})\n" ], [ "from sklearn.preprocessing import Normalizer\nfrom sklearn.model_selection import train_test_split\nX = train.values[ :, :-1].copy()\nY = train.values[:,-1].copy().reshape( ( -1, 1 ) )", "_____no_output_____" ], [ "print( X.shape, Y.shape )", "(284455, 29) (284455, 1)\n" ], [ "X, Y = SMOTE().fit_resample( X, Y )", "_____no_output_____" ], [ "# nm = NearMiss()\n# X, Y = nm.fit_sample( X, Y )", "_____no_output_____" ], [ "print('Resampled dataset shape {}'.format(Counter(Y)))", "Resampled dataset shape Counter({0.0: 284015, 1.0: 284015})\n" ], [ "X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size = 0.3, random_state = 69 )\nprint( 'X_train: ', X_train.shape,'\\nX_test: ', X_test.shape, '\\nY_train: ', Y_train.shape, '\\nY_test: ',Y_test.shape )", "X_train: (397621, 29) \nX_test: (170409, 29) \nY_train: (397621,) \nY_test: (170409,)\n" ], [ "print('Train Set {}'.format(Counter(Y_train)))\nprint('Test Set {}'.format(Counter(Y_test)))", "Train Set Counter({1.0: 198856, 0.0: 198765})\nTest Set Counter({0.0: 85250, 1.0: 85159})\n" ], [ "normalizer = Normalizer()\nX_train = normalizer.fit_transform( X_train )\nX_test = normalizer.transform( X_test )", "_____no_output_____" ], [ "import tensorflow as tf\nimport keras\nfrom keras import backend as K\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau", "Using TensorFlow backend.\n" ], [ "def focal_loss(y_true, y_pred):\n g = 2.0\n alpha = 0.25\n pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))\n pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))\n return -K.sum(alpha * K.pow(1. - pt_1, g) * K.log(pt_1))-K.sum((1-alpha) * K.pow( pt_0, g) * K.log(1. 
- pt_0))", "_____no_output_____" ], [ "#class_weights = { 1 : 0.95, 0 : 0.1 }", "_____no_output_____" ], [ "mc = ModelCheckpoint( 'Downloads/MLI/model.h5', monitor = 'val_loss', mode = 'min', verbose = 1, save_best_only = True )\nreduce_lr = ReduceLROnPlateau( monitor = 'val_loss', factor = 0.5, patience = 5, verbose = 1 )\nmodel = Sequential()\nmodel.add( Dense( units = 16, activation = 'relu', input_shape = ( 29, ) ) )\nmodel.add( Dropout( 0.5 ) )\nmodel.add( Dense( units = 32, activation = 'relu' ) )\nmodel.add( Dropout( 0.5 ) )\nmodel.add( Dense( units = 32, activation = 'relu' ) )\nmodel.add( Dropout( 0.5 ) )\nmodel.add( Dense( units = 24, activation = 'relu' ) )\nmodel.add( Dropout( 0.5 ) )\nmodel.add( Dense( units = 16, activation = 'relu' ) )\nmodel.add( Dense( units = 1, activation = 'sigmoid' ) )\nopt = tf.keras.optimizers.Adam( learning_rate = 0.01 )\nmodel.compile( optimizer = opt, loss = 'binary_crossentropy', metrics = ['accuracy'] )\nhistory = model.fit( X_train, Y_train, validation_data = ( X_test, Y_test ), epochs = 100, batch_size = 64, callbacks = [ mc ] )", "Train on 397621 samples, validate on 170409 samples\nEpoch 1/100\n397621/397621 [==============================] - 10s 24us/step - loss: 0.1836 - accuracy: 0.9285 - val_loss: 0.1261 - val_accuracy: 0.9535\n\nEpoch 00001: val_loss improved from inf to 0.12611, saving model to Downloads/MLI/model.h5\nEpoch 2/100\n397621/397621 [==============================] - 9s 22us/step - loss: 0.1488 - accuracy: 0.9462 - val_loss: 0.0824 - val_accuracy: 0.9776\n\nEpoch 00002: val_loss improved from 0.12611 to 0.08236, saving model to Downloads/MLI/model.h5\nEpoch 3/100\n397621/397621 [==============================] - 9s 23us/step - loss: 0.1379 - accuracy: 0.9502 - val_loss: 0.0650 - val_accuracy: 0.9782\n\nEpoch 00003: val_loss improved from 0.08236 to 0.06505, saving model to Downloads/MLI/model.h5\nEpoch 4/100\n397621/397621 [==============================] - 10s 26us/step - loss: 0.1316 - accuracy: 0.9534 - val_loss: 0.0655 - val_accuracy: 0.9793\n\nEpoch 00004: val_loss did not improve from 0.06505\nEpoch 5/100\n397621/397621 [==============================] - 11s 27us/step - loss: 0.1272 - accuracy: 0.9572 - val_loss: 0.0557 - val_accuracy: 0.9842\n\nEpoch 00005: val_loss improved from 0.06505 to 0.05568, saving model to Downloads/MLI/model.h5\nEpoch 6/100\n397621/397621 [==============================] - 11s 28us/step - loss: 0.1215 - accuracy: 0.9581 - val_loss: 0.0453 - val_accuracy: 0.9845\n\nEpoch 00006: val_loss improved from 0.05568 to 0.04528, saving model to Downloads/MLI/model.h5\nEpoch 7/100\n397621/397621 [==============================] - 10s 26us/step - loss: 0.1224 - accuracy: 0.9567 - val_loss: 0.0588 - val_accuracy: 0.9844\n\nEpoch 00007: val_loss did not improve from 0.04528\nEpoch 8/100\n397621/397621 [==============================] - 11s 27us/step - loss: 0.1283 - accuracy: 0.9546 - val_loss: 0.0593 - val_accuracy: 0.9773\n\nEpoch 00008: val_loss did not improve from 0.04528\nEpoch 9/100\n307008/397621 [======================>.......] 
- ETA: 1s - loss: 0.1368 - accuracy: 0.9492" ], [ "model = load_model( 'Downloads/MLI/model.h5' )", "_____no_output_____" ], [ "from sklearn.metrics import roc_curve, confusion_matrix, roc_auc_score, classification_report", "_____no_output_____" ], [ "cm = confusion_matrix( Y_test, model.predict_classes( X_test ) )\nsns.heatmap( cm, annot = True, fmt = '.1f' )", "_____no_output_____" ], [ "plt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ], [ "plt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ], [ "print( classification_report( Y_test, model.predict_classes( X_test ) ) )", " precision recall f1-score support\n\n 0.0 1.00 1.00 1.00 85250\n 1.0 1.00 1.00 1.00 85159\n\n accuracy 1.00 170409\n macro avg 1.00 1.00 1.00 170409\nweighted avg 1.00 1.00 1.00 170409\n\n" ], [ "print( \"AUC Score: \", roc_auc_score( Y_test , model.predict_proba( X_test ) ) )", "AUC Score: 0.9998156760345379\n" ], [ "test = test.values", "_____no_output_____" ], [ "test = normalizer.transform( test )", "_____no_output_____" ], [ "temp = pd.read_csv( 'Downloads/MLI/TEST.csv', header = [0] )\nsubmission = pd.DataFrame( model.predict_classes( test ) )\nsubmission = pd.concat( [temp.iloc[:,0], submission ], axis = 1 )", "_____no_output_____" ], [ "pd.DataFrame( model.predict_classes( X_test ) )", "_____no_output_____" ], [ "submission.to_csv( 'Downloads/MLI/Submission.csv', index_label = None, index = False, header = ['Index', 'Class'] )", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]