hexsha (stringlengths 40-40) | size (int64 6-14.9M) | ext (stringclasses 1 value) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 6-260) | max_stars_repo_name (stringlengths 6-119) | max_stars_repo_head_hexsha (stringlengths 40-41) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 6-260) | max_issues_repo_name (stringlengths 6-119) | max_issues_repo_head_hexsha (stringlengths 40-41) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 6-260) | max_forks_repo_name (stringlengths 6-119) | max_forks_repo_head_hexsha (stringlengths 40-41) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | avg_line_length (float64 2-1.04M) | max_line_length (int64 2-11.2M) | alphanum_fraction (float64 0-1) | cells (sequence) | cell_types (sequence) | cell_type_groups (sequence) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e74436bf94a619af763c51ca77604ede080e8cbd | 237,296 | ipynb | Jupyter Notebook | Constraints_diagram.ipynb | dev10110/AVD_Initial_Sizing | e25a9fc2f829b3e73e9933da78b41ff1d55e610b | [
"MIT"
] | 1 | 2019-11-05T00:14:15.000Z | 2019-11-05T00:14:15.000Z | Constraints_diagram.ipynb | dev10110/AVD_Initial_Sizing | e25a9fc2f829b3e73e9933da78b41ff1d55e610b | [
"MIT"
] | null | null | null | Constraints_diagram.ipynb | dev10110/AVD_Initial_Sizing | e25a9fc2f829b3e73e9933da78b41ff1d55e610b | [
"MIT"
] | 1 | 2019-11-04T21:38:12.000Z | 2019-11-04T21:38:12.000Z | 214.166065 | 168,584 | 0.896163 | [
[
[
"# Constraints Diagram\n### AVD Group 16\nAero Year 4 2019-2020\nLast updated: 2019.10.17",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport array as arr\nimport pandas as pd\nimport math as math\nimport matplotlib.pyplot as plt\nimport fluids",
"_____no_output_____"
]
],
[
[
"## Function returns CLmax, Cd, e based on configuration and gear",
"_____no_output_____"
]
],
[
[
"def flap(config=None, gear=False):\n \"\"\"function returns tuple of cl, cd, e with input configuration and gear option\n Args:\n config (str): flap configuration (basically comes from flaps angles)\n If set to None, clean config. is returned\n gear (bool): gear option\n Returns:\n (tuple): tuple of cl, cd, e\n \"\"\"\n # Airfoil choice: NACA23015\n # >> factor of 0.9 is multiplied into cl to account for 3D airfoil correction\n \n # clean configuration\n sweep = 15*np.pi/180\n cL = 0.9*1.825*np.cos(sweep)\n cD = 0.02311\n e = 0.7\n \n \n \n cratio = 1.25\n #dcl = 1.3 #slotted/fowler\n dcl = 1.3 #double slotted\n \n \n Sflapped_Sref = 0.6\n dcL = 0.9*dcl*np.cos(sweep)\n \n \n if config == 'takeoff':\n # 10 degree\n cL = 1.9344\n #cL += dcL\n #cL = 0.8*cL\n cD = 0.07982-0.03565\n #e -= 0.05\n \n if config == 'approach':\n # 20\n cL = 1.9707\n cD = 0.05989\n #e -= 0.05\n \n if config == 'landing':\n # 45\n cL = 2.115783\n cD = 0.18868 - 0.03565\n #e -= 0.07\n \n # append if gear is down\n if gear:\n cD += 0.03565\n #e -= 0.1\n \n return cL, cD, e\n\n# print all configurations\ncl, cd, e = flap(config='clean', gear=False)\nprint(f'clean cl, cd, e: {cl, cd, e}')\n\ncl, cd, e = flap(config='takeoff', gear=False)\nprint(f'takeoff cl, cd, e: {cl, cd, e}')\n\ncl, cd, e = flap(config='takeoff', gear=True)\nprint(f'takeoff with gear cl, cd, e: {cl, cd, e}')\n\ncl, cd, e = flap(config='approach', gear=False)\nprint(f'approach cl, cd, e: {cl, cd, e}')\n\ncl, cd, e = flap(config='landing', gear=False)\nprint(f'landing cl, cd, e: {cl, cd, e}')\n\ncl, cd, e = flap(config='landing', gear=True)\nprint(f'landing with gear cl, cd, e: {cl, cd, e}')",
"clean cl, cd, e: (1.5865331696797949, 0.02311, 0.7)\ntakeoff cl, cd, e: (1.9344, 0.04417, 0.7)\ntakeoff with gear cl, cd, e: (1.9344, 0.07982, 0.7)\napproach cl, cd, e: (1.9707, 0.05989, 0.7)\nlanding cl, cd, e: (2.115783, 0.15303, 0.7)\nlanding with gear cl, cd, e: (2.115783, 0.18868000000000001, 0.7)\n"
]
],
[
[
"## Inputs related to aircraft parameters",
"_____no_output_____"
]
],
[
[
"# Reference data from CRJ700\n#Sref = 70.6 # m2\n#b = 23.2 # m wingspan\nKld = 11 # stand. value for retractable prop aircraft\nSwetSref = 5.7 # Estimated value\n\n#AR = b**2/Sref\nAR = 8 # FIXME!!! - Weight sizing uses 8!!!\nBPR = 5.7\n\nprint('Aspect ratio: {}'.format(AR))\n\n# conversion ratio\nft2m = 0.3048\nms2knots = 1.94384449\n\n# physical constants\ng = 9.81\n\n# Initialize array of WSo\nWSo = np.linspace(1,8100,100+1)\n",
"Aspect ratio: 8\n"
]
],
[
[
"## List of $\\alpha$ and functions to calculate $\\beta$",
"_____no_output_____"
]
],
[
[
"# List of alphas from GPKit notebook\n# this is [M_0, M_1, M_2, ..., M_8, M_9, M_dry\n# M_9 is the mass at the end of landing and taxi, M_dry is different due to assumed 6% ullage\n# ------------------------- #\n# M_0 = alpha_list[0] - taxi and takeoff\n# M_1 = alpha_list[1] - climb and accelerate\n# M_2 = alpha_list[2] - cruise for 2,000 km at Mach 0.75\n# M_3 = alpha_list[3] - descent to land\n# M_4 = alpha_list[4] - missed approach climb\n# M_5 = alpha_list[5] - cruise to alternate dest.\n# M_6 = alpha_list[6] - loiter at 1,500 m for 45 min\n# M_7 = alpha_list[7] - descent to land\n# M_8 = alpha_list[8] - landing & taxi\n# M_9 = alpha_list[9] - end of landing & taxi (with fuel left over)\n# M_dry = alpha_list[10] - dry mass\n# ------------------------- #\nalpha_list = [1.0,\n 0.9700000000217274,\n 0.9554500000424797,\n 0.8662240940895669,\n 0.8618929736379425,\n 0.8489645790521038,\n 0.8346511478768818,\n 0.8137373076948168,\n 0.8096686211740269,\n 0.8056202780857528,\n 0.7600191302870707]\n",
"_____no_output_____"
]
],
[
[
"$ \\alpha $",
"_____no_output_____"
]
],
[
[
"# Function calculates betas\ndef calc_beta(z, M=0.0, BPR=0.0):\n \"\"\"\n Function calculates beta's for different altitudes # FIXME - should also have dependencies on speed/Mach?\n Args:\n z (float): altitude in meters\n Returns:\n (float): value of beta\n \"\"\"\n #Z is assumed to be in meters\n atm = fluids.atmosphere.ATMOSPHERE_1976(z)\n atm0 = fluids.atmosphere.ATMOSPHERE_1976(0)\n P = atm.P\n P0 = atm0.P\n sigma = P/P0\n\n alt_lapse = (sigma ** 0.7) if z < 11000 else (1.439 * sigma)\n\n if(M == 0 and BPR == 0):\n return alt_lapse\n\n # Mach lapse terms\n K1t = 0\n K2t = 0\n K3t = 0\n K4t = 0\n\n # Dry conditions only. No afterburners.\n # Denis Howe, Aircraft Design Synthesis\n # Table 3.2, page 67\n if(BPR < 1):\n if (M < 0.4):\n K1t = 1.0\n K2t = 0\n K3t = -0.2\n K4t = 0.07\n elif (M < 0.9):\n K1t = 0.856\n K2t = 0.062\n K3t = 0.16\n K4t = -0.23\n elif(BPR > 1 and BPR < 6):\n if(M < 0.4):\n K1t = 1\n K2t = 0\n K3t = -0.6\n K4t = -0.04\n elif(M < 0.9):\n K1t = 0.88\n K2t = -0.016\n K3t = -0.3\n K4t = 0\n elif(BPR > 6 and BPR <= 7): \n if(M < 0.4):\n K1t = 1\n K2t = 0\n K3t = -0.595\n K4t = -0.03\n elif(M < 0.9):\n K1t = 0.89\n K2t = -0.014\n K3t = -0.3\n K4t = +0.005\n\n mach_lapse = (K1t + K2t * BPR + (K3t + K4t * BPR) * (M))\n return mach_lapse * alt_lapse",
"_____no_output_____"
]
],
[
[
"## Function to calculate T/W_0 = f( W_0/S )\nUse same equation (full version for twin jet), re-assign values to parameters for each scenario\n\n$ \\left( \\dfrac{T}{W} \\right)_0 = \\dfrac{\\alpha}{\\beta} \\left[ \\dfrac{1}{V_{inf}}\\dfrac{dh}{dt} + \\dfrac{1}{g}\\dfrac{dV_{inf}}{dt} + \\dfrac{\\tfrac{1}{2}\\rho V_{inf}^2 C_{D_0}}{\\alpha \\tfrac{W_0}{S_{ref}}} + \\dfrac{\\alpha n^2 \\tfrac{W_0}{S_{ref}}}{\\tfrac{1}{2} \\rho V_{inf}^2 \\pi AR e} \\right] $",
"_____no_output_____"
]
],
[
[
"# Define function (T/W)_0 = fn(S/W0, etc.)\ndef TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e,split=False):\n \"\"\"\n Function calculates T/W for given S/W\n Args:\n WSo (array): list of WSo at which T/W is to be computed\n alpha (float): W/W0 \n beta (float): T/T0\n dhdt (float): climb rate [m/s]\n dvdt (float): acceleration rate [m/s^2]\n rho (float): air density at altitude [kg/m^3]\n Vinf (float): true airspeed [m/s]\n Cdo (float): zero-lift drag\n n (float): load factor\n AR (float): aspect ratio\n e (float): oswald defficiency\n split (bool): if set to True, return functions output element-wise\n Returns:\n (array): values of function output (i.e. (T/W)o) evaluated for each element in WSo\n \"\"\"\n g = 9.81\n \n # calculate term-by-term\n term_climb = (1/Vinf)*dhdt\n term_accel = (1/g)*dvdt\n term_cdo = (.5*rho*Vinf**2*Cdo)/(alpha*WSo)\n term_cdi = (alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)\n \n # sum all terms of the equation\n TW = (alpha/beta)*(term_climb + term_accel + term_cdo + term_cdi)\n \n if split == True:\n return term_climb,term_accel,term_cdo,term_cdi\n \n # else return sum\n return TW\n",
"_____no_output_____"
]
],
[
[
"### Take off",
"_____no_output_____"
]
],
[
[
"CLmax, Cdo, e = flap(config='takeoff', gear=True)\nsigma = 1\nTODA = 1500\n\nNe = 2\nTW_BFL = (1/TODA)*(0.297 - 0.019*Ne)*WSo/(sigma*CLmax)\n\nTW_AEO = (1/TODA)*0.144*WSo/(sigma*CLmax)\n",
"_____no_output_____"
]
],
[
[
"### Landing distance\n\n$ ALD = 0.51 \\frac{W/S}{\\sigma C_{L, max}} KR + Sa $\n\n$W/S = \\sigma C_{L,max} \\frac{ALD - SA}{0.51 K_R}$\n\n",
"_____no_output_____"
]
],
[
[
"# ===================================================================== #\n# Landing distance line\n# >> Plotted as vertical line\nCLmax, Cdo, e = flap(config='landing', gear=True)\n\nsigma = 1\n\nALD = 1500/(5/3)\nSA = 305 #FIXME, from Errikos slides\nKr = 0.66\n\nWS_landing = sigma*CLmax*(ALD-SA)/(0.51*Kr)\n\nWS_landing",
"_____no_output_____"
],
[
"# ===================================================================== #\n# Max Speed at Cruise Altitude\n# >> Mach 0.8 at cruise altitude\n#TW_serviceceil = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*pi*AR*e)))\natmosphere = fluids.atmosphere.ATMOSPHERE_1976(35000*ft2m)\nrho = atmosphere.rho\n\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='clean', gear=False)\nMinf = 0.8\nainf = 296.535 # FIXME: introduce standard atmosphere \nVinf = Minf*ainf\ndhdt = 0\ndvdt = 0\nalpha = alpha_list[2]\nbeta = calc_beta(35000*ft2m, Minf, BPR)\n\nTW_cruise_max_speed = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \n#TW_cruise_max_speed = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n",
"_____no_output_____"
],
[
"# ===================================================================== #\n# Absolute ceiling 42,000 ft\n# >> Absolute steady-level flight\natmosphere = fluids.atmosphere.ATMOSPHERE_1976(42000*ft2m)\nrho = atmosphere.rho\n\nn = 1\nCLmax, Cdo, e = flap(config='clean', gear=False)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nVinf = Vstall\ndhdt = 0\ndvdt = 0\nalpha = alpha_list[2]\nbeta = calc_beta(42000*ft2m) \nTW_absceil = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e)\n#tmp = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n#print(TW_absceil-tmp) # should be 0",
"_____no_output_____"
]
],
[
[
"### Climb segments (1 ~ 4)\n",
"_____no_output_____"
]
],
[
[
"# ===================================================================== #\n# Climb segments are given in: \n# https://aviation.stackexchange.com/questions/3310/how-are-take-off-segments-defined\nrho = 1.225 # assumed air-density is approx. constant\n\n# 1st segment - TAKEOFF \n# >> right after rotate, take-off configuration, flaps, gear up, until 35 ft)\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='takeoff', gear=True)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\ndhdt = 0\nobstacle = 35*0.3048\nVLOF = 1.1*Vstall\nV2 = 1.13*Vstall # FAR25 - Part 25.107 Takeoff speeds.\nVinf = (VLOF + V2)/2 # average between LOF flight and V2\ndvdt = (V2**2 - VLOF**2)*(0.024/(2*obstacle))\nalpha = alpha_list[1]\nbeta = calc_beta(0*ft2m)\nTW_climb1 = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \nTW_climb1_EO = 2*TW_climb1\n#TW_climb1 = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n#TW_climb1_EO = 2*alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\nprint(f'1st climb segment alpha: {alpha:.3}, beta: {beta:.3}, V/Vstall: {set(Vinf/Vstall)}, Cdo: {Cdo:.3}, CLmax:{CLmax:.3}')\n\n# 2nd segment - climb\n# >> steady climb, constant v (dvdt = 0)\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='takeoff', gear=False)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nVinf = V2\ndhdt = 0.024*Vinf\ndvdt = 0\nalpha = alpha_list[1]\nbeta = calc_beta(35*ft2m)\nTW_climb2 = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \nTW_climb2_EO = 2*TW_climb2\n#TW_climb2 = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n#TW_climb2_EO = 2*alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\nprint(f'2nd climb segment alpha: {alpha:.3}, beta: {beta:.3}, V/Vstall: {set(Vinf/Vstall)}, Cdo: {Cdo:.3}, CLmax:{CLmax:.3}')\n\n# 3rd segment - acceleration\n# >> \"during this segment, the airplane is considered to be maintaining the 400 ft above the ground and \n# acelerating from the V2 speed to the VFS speed before the climb proile is continued. The flaps are \n# raised at the beginning of the acceleratio segment and power is maintained at the takeoff settign \n# as long as possible (5 minutesmaximum)\"\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='clean', gear=False)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nV3 = 1.25*Vstall\nVinf = (V3 + V2)/2 # average of initial and final velocities in acceleration segment\ndhdt = 0\ntime_to_accelerate = 5*60 # 5 minutes... 
worst case ratio [sec]\ndvdt = (V3 - V2)/time_to_accelerate\nalpha = alpha_list[1]\nbeta = calc_beta(400*ft2m)\nTW_climb3 = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \nTW_climb3_EO = 2*TW_climb3\n#TW_climb3 = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n#TW_climb3_EO = 2*alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\nprint(f'3rd climb segment alpha: {alpha:.3}, beta: {beta:.3}, V/Vstall: {set(Vinf/Vstall)}, Cdo: {Cdo:.3}, CLmax:{CLmax:.3}')\n\n# 4th segment - climb\n# >> climb at minimum of 1.2%\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='clean', gear=False)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nVinf = V3\ndhdt = 0.012*Vinf\ndvdt = 0\nalpha = alpha_list[1]\nbeta = calc_beta(1500*ft2m)\nTW_climb4 = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \nTW_climb4_EO = 2*TW_climb4\n#TW_climb4 = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n#TW_climb4_EO = 2*alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\nprint(f'4th climb segment alpha: {alpha:.3}, beta: {beta:.3}, V/Vstall: {set(Vinf/Vstall)}, Cdo: {Cdo:.3}, CLmax:{CLmax:.3}')\n",
"1st climb segment alpha: 0.97, beta: 1.0, V/Vstall: {1.115, 1.1150000000000002, 1.1149999999999998}, Cdo: 0.0798, CLmax:1.93\n2nd climb segment alpha: 0.97, beta: 0.999, V/Vstall: {1.13, 1.1299999999999997}, Cdo: 0.0442, CLmax:1.93\n3rd climb segment alpha: 0.97, beta: 0.99, V/Vstall: {1.136681699139212, 1.1366816991392124, 1.1366816991392121}, Cdo: 0.0231, CLmax:1.59\n4th climb segment alpha: 0.97, beta: 0.963, V/Vstall: {1.25, 1.2499999999999998, 1.2500000000000002}, Cdo: 0.0231, CLmax:1.59\n"
],
[
"# ===================================================================== #\n# Approach climb\n# >> 1 EOP, approach flaps, no gear\natmosphere = fluids.atmosphere.ATMOSPHERE_1976(1500*ft2m)\nrho = atmosphere.rho\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='approach', gear=False)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nVinf = 1.5*Vstall\ndhdt = 0.021*Vinf\ndvdt = 0\nalpha = alpha_list[4]\nbeta = calc_beta(1500*ft2m)\nTW_approachclimb = 2*TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e)\n\nVinf = Vstall\ndhdt = 0.021*Vinf\nTW_approachclimb_vstall = 2*TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e)\n#TW_approachclimb = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*pi*AR*e)))\n",
"_____no_output_____"
],
[
"# ===================================================================== #\n# landing climb\n# >> with landing gears, landing flaps, climb\natmosphere = fluids.atmosphere.ATMOSPHERE_1976(30*ft2m)\nrho = atmosphere.rho\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='approach', gear=True)\nVstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nVinf = 1.3*Vstall\ndhdt = 0.032*Vinf\ndvdt = 0\nalpha = alpha_list[4]\nbeta = calc_beta(30*ft2m)\nTW_landingclimb = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e)\n\nVinf = Vstall\ndhdt = 0.032*Vinf\nTW_landingclimb_vstall = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e)",
"_____no_output_____"
],
[
"# plot approach climb and landing climb\nfig = plt.figure(figsize=(7,5))\n\n# approach climb with 1.5*Vstall\nplt.plot(WSo, TW_approachclimb, label=\"approach climb with 1.5*Vstall\")\n# approach climb with Vstall\nplt.plot(WSo, TW_approachclimb_vstall, label=\"approach climb with Vstall\")\n\n# landing climb with 1.5*Vstall\nplt.plot(WSo, TW_landingclimb, label=\"landing climb with 1.5*Vstall\")\n# landing climb with Vstall\nplt.plot(WSo, TW_landingclimb_vstall, label=\"landing climb with Vstall\")\n\n\n# PLOT SETTINGS\nax = plt.gca()\nax.set(xlim=(0, 8000), ylim=(0, 0.3)) \nplt.xlabel('Wo/S [N/m^2]')\nplt.ylabel('(T/W)o')\nplt.title('Constraint Diagram')\nplt.legend(loc='lower right')\nplt.grid()\nplt.show()\n",
"_____no_output_____"
],
[
"# ===================================================================== #\n# cruise\natmosphere = fluids.atmosphere.ATMOSPHERE_1976(35000*ft2m)\nrho = atmosphere.rho\nn = 1 # approx.\nCLmax, Cdo, e = flap(config='clean', gear=False)\n#Vstall = np.sqrt(2*WSo/(rho*CLmax)) # Vstall = fn(WSo)\nMinf = 0.75\nainf = 296.535\nVinf = Minf*ainf\ndhdt = 0\ndvdt = 0\nalpha = alpha_list[2]\nbeta = calc_beta(35000*ft2m, Minf, BPR) \nTW_cruise = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \n#tmp = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))\n",
"_____no_output_____"
]
],
[
[
"### Loiter\nAssume load factor $n$, then\n$n = \\dfrac{L}{mg} = \\dfrac{1}{\\cos(\\theta)}$\n\nalso from horizontal equilibrium\n\n$m\\omega^2 R = L\\sin(\\theta)$\n\n$R\\omega^2 = \\dfrac{L}{mg}g \\sin(\\theta)$\n\n$R = \\dfrac{n g \\sin(\\theta)}{\\omega^2}$\n",
"_____no_output_____"
]
],
[
[
"# ===================================================================== #\n# loiter\n# >> 3 degrees per second turn\natmosphere = fluids.atmosphere.ATMOSPHERE_1976(1500)\nrho = atmosphere.rho\na = atmosphere.v_sonic\nn = 1.2 # FIXME - guessed load factor for loiter\nprint(f'prescribed load factor: {n}')\ntheta = np.arccos(1/n) # [rad]\nprint(f'bank angle: {theta*180/np.pi:.3f} [deg]')\nomega = 3 * np.pi/180 #prescribed 3 degrees per second turn [rad/sec]\nR = n*g*np.sin(theta)/(omega**2)\nprint(f'turn radius: {R/1000:.3f} [km]')\n\nCLmax, Cdo, e = flap(config='clean', gear=False)\nVinf = omega*R\nMinf = Vinf/a\nprint(f'Vinf during loiter: {Vinf:.3f} [m/s] or {Vinf*ms2knots:.3f} [knots]')\ndhdt = 0\ndvdt = 0\nalpha = alpha_list[6]\nbeta = calc_beta(1500, Minf, BPR)\nTW_loiter = TWvsWS(WSo,alpha,beta,dhdt,dvdt,rho,Vinf,Cdo,n,AR,e) \n#tmp = alpha/beta * (((1/Vinf)*dhdt) + ((1/g)*(dvdt)) + ((.5*rho*Vinf**2*Cdo)/(alpha*WSo)) + ((alpha*n**2*WSo)/(.5*rho*Vinf**2*np.pi*AR*e)))",
"prescribed load factor: 1.2\nbank angle: 33.557 [deg]\nturn radius: 2.374 [km]\nVinf during loiter: 124.279 [m/s] or 241.578 [knots]\n"
],
[
"# other aircrafts design points\n# >> plot other aircraft's design points for reference\n\naircrafts=['ERJ-145', 'CRJ-550','CRJ-200']\nTW_others = [0.3274215818,0.4238795116,0.3293881286]\nWS_others = [4023.288394, 4096.786615,4877.811996]\n",
"_____no_output_____"
]
],
[
[
"### fuel consumption contours\n\nFor the cruise section, the range equation says\n\n$$ R = \\ln\\left(\\frac{W_{i}}{W_{i+1}}\\right) V \\frac{L/D}{SFC} $$\n\nWe can use a balance of forces to find \n$$ V^2 =\\frac{W/S}{1/2 \\rho C_L S} $$\n\nand \n\n$$ L/D = \\frac{W}{T} = \\frac{1}{T/W} $$\n\nwhich gives\n\n$$ R = \\ln\\left(\\frac{W_i}{W_{i+1}}\\right) \\sqrt{\\frac{W/S}{1/2 \\rho C_L S}} \\frac{1}{T/W} \\frac{1}{SFC} $$\n",
"_____no_output_____"
],
[
"\n\n\nTherefore, the weight fraction is proportional to\n$$ \\ln \\frac{W_i}{W_{i+1}} = R SFC \\sqrt{1/2 \\rho C_L S} \\left(\\frac{T/W}{\\sqrt{W/S}}\\right)$$\n\nwhere $k$ is some proportionality constant. Therefore, the fuel needed is \n$$ \\frac{ W_{fuel}}{W_i} = 1 - \\exp \\left(- k \\frac{T/W}{\\sqrt{W/S}}\\right)$$\n\n\n\n\n",
"_____no_output_____"
]
],
[
[
"WS_mesh, TW_mesh = np.meshgrid(np.linspace(0,8000, 100), np.logspace(-8,-0.1,100))\n# the 1/10 factor is a estimate but plugging in all the parameter\nfuel_consumption = 1-np.exp(-(1/10)*TW_mesh/np.sqrt(WS_mesh/4000))\nplt.contourf(WS_mesh, TW_mesh, fuel_consumption,levels=np.linspace(0,0.1, 10),cmap='Reds',alpha=1)\nplt.colorbar()",
"/Users/Devansh/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:3: RuntimeWarning: divide by zero encountered in true_divide\n This is separate from the ipykernel package so we can avoid doing imports until\n"
]
],
[
[
"## Constraint Diagram",
"_____no_output_____"
]
],
[
[
"# ========== CONSTRAINT DIAGRAM ========== \nfig = plt.figure(figsize=(15,15))\n\n# climb segment 1\n#plt.plot(WSo, TW_climb1, 'b-', label=\"1st climb segment\")\n# climb segment 2\n#plt.plot(WSo, TW_climb2, 'g-', label=\"2nd climb segment\")\n# climb segment 3\n#plt.plot(WSo, TW_climb3, 'r-', label=\"3rd climb segment\")\n# climb segment 4\n#plt.plot(WSo, TW_climb4, 'c-', label=\"4th climb segment\")\n\nplt.plot(WSo, TW_BFL, label='BFL')\nplt.plot(WSo, TW_AEO, label='AEO')\n\n# climb segment 1, EO\nplt.plot(WSo, TW_climb1_EO, 'b--', label=\"1st climb segment, EO\")\n# climb segment 2 EO\nplt.plot(WSo, TW_climb2_EO, 'g--', label=\"2nd climb segment, EO\")\n# climb segment 3, EO\nplt.plot(WSo, TW_climb3_EO, 'r--', label=\"3rd climb segment, EO\")\n# climb segment 4, EO\nplt.plot(WSo, TW_climb4_EO, 'c--', label=\"4th climb segment, EO\")\n\n# Cruise\nplt.plot(WSo, TW_cruise, 'k-', label=\"Cruise\")\n\n# approach climb\nplt.plot(WSo, TW_approachclimb, 'm-.', label=\"Approach climb, EO\")\n\n# landing climb (gear down)\nplt.plot(WSo, TW_landingclimb, 'm:', label=\"Landing climb\")\n\n# loiter 1\nplt.plot(WSo, TW_loiter, 'k--', label=\"loiter\")\n\n# landing req\nplt.axvline(x = WS_landing, label='Landing constraint on runway length')\n\n# max cruise speed\nplt.plot(WSo, TW_cruise_max_speed, 'k-.', label=\"Max cruise speed\")\n\n# Abs ceiling\nplt.plot(WSo, TW_absceil, 'y:', label=\"Absolute ceiling\")\n\n\n# DESIGN POINT - plot picked point:\nTtakeoff = 29.8*1000 # take-off thrust per engine - Lycoming ALF502 R-3 [N]\nThrustpoint = 2*Ttakeoff\nWtakeoff = 17987*9.81 # max. take-off weight [N]\n\nWSo_picked = [4000]\nTW_picked = [Thrustpoint/Wtakeoff]\n#TW_picked = \nprint(f'Picked T/W: {TW_picked}')\n\nplt.plot(WSo_picked, TW_picked, '*', label='Picked Point', markersize=20)\n\n# plot others\n\nplt.plot(WS_others, TW_others, 'bs')\nfor i in range(len(aircrafts)):\n plt.text(WS_others[i], TW_others[i], aircrafts[i])\n\n# plot fuel consumption contours\n\nplt.contourf(WS_mesh, TW_mesh, fuel_consumption,levels=np.linspace(0,0.1, 10),cmap='CMRmap',alpha=0.3, label='Fuel Fraction')\n\n\n# PLOT SETTINGS\nax = plt.gca()\nax.set(xlim=(0, 6000), ylim=(0, 0.6)) \nplt.xlabel('Wo/S [N/m^2]')\nplt.ylabel('(T/W)o')\nplt.title('Constraint Diagram')\nplt.legend(loc='upper right')\nplt.grid()\n\n# save constraint diagram as .eps file\nplt.savefig('constraint_diagram_MachLapse_NACA23015.eps', format='eps') # save as eps\n\nplt.show()\n\n\n\n",
"/Users/Devansh/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:67: UserWarning: The following kwargs were not used by contour: 'label'\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\nThe PostScript backend does not support transparency; partially transparent artists will be rendered opaque.\n"
],
[
"#ENGINE SELECTION\nprint(f'Minimum T/W: {np.mean(TW_absceil)}')\nprint(f'Takeoff weight: {Wtakeoff} N')\nminThrust = (TW_absceil * Wtakeoff)/2 # per engine\nprint(f'Min thrust: {np.mean(minThrust)}')\n\n",
"Minimum T/W: 0.39886888789725733\nTakeoff weight: 176452.47 N\nMin thrust: 35190.70023781207\n"
],
[
"#TW_climb1_EO",
"_____no_output_____"
],
[
"#TW_climb2_EO",
"_____no_output_____"
],
[
"# calculation of aircraft parameters\nSref = Wtakeoff/WSo_picked[0]\nB = np.sqrt(Sref*AR) # wingspan\nMac = Sref/B\nprint(f'Sref: {Sref} [m^2]')\nprint(f'Wingspan: {B} [m]')\nprint(f'Mean aerodynamic chord: {Mac} [m]')\n",
"Sref: 44.1131175 [m^2]\nWingspan: 18.785764291079563 [m]\nMean aerodynamic chord: 2.3482205363849453 [m]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
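
The `Constraints_diagram.ipynb` row above derives every performance constraint from the single thrust-to-weight relation quoted in its markdown (climb, acceleration, zero-lift drag, and induced drag terms, scaled by alpha/beta). The sketch below restates that relation as a standalone function so it can be read apart from the notebook; the numbers in the example call (density, speed, C_D0, alpha, beta) are illustrative assumptions, not the notebook's design case.

```python
import numpy as np

def thrust_to_weight(ws, alpha, beta, dh_dt, dv_dt, rho, v_inf, cd0, n, ar, e, g=9.81):
    """Sea-level T/W required at wing loading ws = W0/S [N/m^2] for one flight condition."""
    term_climb = dh_dt / v_inf                                   # climb-gradient term
    term_accel = dv_dt / g                                       # acceleration term
    term_cd0 = 0.5 * rho * v_inf**2 * cd0 / (alpha * ws)         # zero-lift drag term
    term_cdi = alpha * n**2 * ws / (0.5 * rho * v_inf**2 * np.pi * ar * e)  # induced drag term
    return (alpha / beta) * (term_climb + term_accel + term_cd0 + term_cdi)

# Illustrative steady-level cruise check (assumed values, not the notebook's design numbers)
ws = np.linspace(1.0, 8000.0, 200)
tw = thrust_to_weight(ws, alpha=0.955, beta=0.5, dh_dt=0.0, dv_dt=0.0,
                      rho=0.38, v_inf=220.0, cd0=0.023, n=1.0, ar=8, e=0.7)
print(f"minimum required T/W ~ {tw.min():.3f} near W0/S ~ {ws[tw.argmin()]:.0f} N/m^2")
```

The two wing-loading-dependent terms pull in opposite directions (parasite drag falls with W0/S, induced drag rises), which is why each constraint curve in the notebook has a minimum and why the constraint diagram is read as the envelope of all such curves.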
e7444718e467f120ba36fdd1b3266c21bb799589 | 14,929 | ipynb | Jupyter Notebook | sentence_suggestion/train_nn_models/Train_seq2seq_multilayer_GRU.ipynb | LuoDingo/Langauge_model | f10e18ac2c9f31b187f77bc8f927ffb6b8d77d7e | [
"MIT"
] | null | null | null | sentence_suggestion/train_nn_models/Train_seq2seq_multilayer_GRU.ipynb | LuoDingo/Langauge_model | f10e18ac2c9f31b187f77bc8f927ffb6b8d77d7e | [
"MIT"
] | 2 | 2020-03-24T15:15:23.000Z | 2020-04-16T01:41:17.000Z | sentence_suggestion/train_nn_models/Train_seq2seq_multilayer_GRU.ipynb | LuoDingo/Langauge_model | f10e18ac2c9f31b187f77bc8f927ffb6b8d77d7e | [
"MIT"
] | null | null | null | 43.398256 | 1,650 | 0.585505 | [
[
[
"# Initialization Cell\n# path to folder that data exists\nPATH_DATA = 'Masked Corpus'",
"_____no_output_____"
],
[
"import torch \nimport torch.optim as optim\n\nimport seq2seq_multilayer_gru_with_pad\nfrom sequence_model_trainer import TrainModel\n\nfrom torchtext.data import Field, LabelField\nfrom torchtext.data import TabularDataset\nfrom torchtext.data import Iterator, BucketIterator\n\n%load_ext autoreload\n%autoreload 2",
"/content/gdrive/My Drive/Colab Notebooks/Notebooks/Luodingo\n"
],
[
"MASKED_TEXT = Field(\n sequential=True,\n tokenize=lambda x: x.split(), \n init_token = '<sos>', \n eos_token = '<eos>', \n lower = True, \n include_lengths = True\n )\n\nTARGET_TEXT = Field(\n sequential=True,\n tokenize=lambda x: x.split(), \n init_token = '<sos>', \n eos_token = '<eos>', \n lower = True\n )\n\nfields = [('id', None), ('keywords', MASKED_TEXT), ('target', TARGET_TEXT)]",
"_____no_output_____"
],
[
"train, val, test = TabularDataset.splits(\n path=PATH_DATA,\n train='train.csv',\n validation='val.csv',\n test='test.csv',\n format='csv',\n skip_header=True,\n fields=fields\n )",
"/content/gdrive/My Drive/Colab Notebooks/Datasets\n"
],
[
"MASKED_TEXT.build_vocab(train)\nTARGET_TEXT.build_vocab(train)",
"_____no_output_____"
],
[
"BATCH_SIZE = 32\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ntrain_iter, val_iter, test_iter = BucketIterator.splits(\n (train, val, test),\n batch_size=BATCH_SIZE,\n sort_within_batch = True,\n sort_key = lambda x : len(x.keywords),\n device = device\n )",
"_____no_output_____"
],
[
"EMB_DIM=256\nENC_INPUT_DIM=len(MASKED_TEXT.vocab)\nDEC_INPUT_DIM=len(TARGET_TEXT.vocab)\nOUTPUT_DIM=DEC_INPUT_DIM\nN_LAYER=4\nHID_DIM=1024\nDROPOUT=0.3\nTRG_PAD_IDX = TARGET_TEXT.vocab.stoi[TARGET_TEXT.pad_token]\n\nmodel = seq2seq_multilayer_gru_with_pad.Seq2Seq(\n enc_input_dim=ENC_INPUT_DIM,\n dec_input_dim=DEC_INPUT_DIM,\n emb_dim=EMB_DIM,\n enc_hid_dim=HID_DIM,\n dec_hid_dim=HID_DIM,\n n_layers=N_LAYER,\n output_dim=OUTPUT_DIM, \n device=device,\n dropout=DROPOUT\n ).to(device)",
"_____no_output_____"
],
[
"LEARNING_RATE = 0.0001\nadam = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\ncross_e = torch.nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)",
"_____no_output_____"
],
[
"trainer = TrainModel(\n model=model,\n train_iterator=train_iter,\n val_iterator=val_iter,\n optimizer=adam,\n criterion=cross_e,\n output_dim=OUTPUT_DIM\n )",
"_____no_output_____"
],
[
"%cd /content/gdrive/My\\ Drive/Colab\\ Notebooks/Notebooks/Luodingo\nN_EPOCHS = 200\nCLIP = 1\ntrainer.epoch(n_epochs=N_EPOCHS, clip=CLIP, model_name='seq2seq-multilayer-gru.pt')",
"/content/gdrive/My Drive/Colab Notebooks/Notebooks/Luodingo\nEpoch: 01 | Time: 0m 59s\n\tTrain Loss: 5.035 | Train PPL: 153.761\n\t Val. Loss: 4.743 | Val. PPL: 114.774\n"
],
[
"test_loss = trainer.test(iterator=test_iter,\n model_name='seq2seq-multilayer-gru.pt')",
"_____no_output_____"
],
[
"import math\n\nprint(f'| Test Loss: {round(test_loss, 4)} | Test PPL: {round(math.exp(test_loss),4)} |')",
"| Test Loss: 4.743 | Test PPL: 114.7747 |\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
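
In the seq2seq training row above, the source Field is declared with include_lengths=True and the BucketIterator sorts each batch by source length (sort_within_batch=True, sort_key on keywords); that combination exists so the encoder can pack away `<pad>` positions. The model itself is imported from seq2seq_multilayer_gru_with_pad, which is not part of this row, so the following is only a minimal sketch of the usual pack/pad encoder pattern those settings imply; the class name PackedGRUEncoder and its constructor arguments are illustrative, not the repository's API.

```python
import torch
import torch.nn as nn

class PackedGRUEncoder(nn.Module):
    """Minimal multi-layer GRU encoder that skips <pad> positions via sequence packing."""
    def __init__(self, vocab_size, emb_dim, hid_dim, n_layers, dropout, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=pad_idx)
        self.rnn = nn.GRU(emb_dim, hid_dim, num_layers=n_layers, dropout=dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, src_len):
        # src: [src_len, batch], src_len: [batch] -- the (tokens, lengths) pair that
        # Field(include_lengths=True) yields for each batch
        embedded = self.dropout(self.embedding(src))
        # pack_padded_sequence expects lengths sorted in descending order by default,
        # which is exactly what sort_within_batch=True guarantees
        packed = nn.utils.rnn.pack_padded_sequence(embedded, src_len.cpu())
        packed_outputs, hidden = self.rnn(packed)
        # pad back to a dense tensor; padded positions come back as zeros
        outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs)
        return outputs, hidden
```

Packing keeps the GRU from wasting computation on padding tokens and keeps the final hidden state aligned with each sequence's true last token, which pairs naturally with the pad-ignoring CrossEntropyLoss used on the decoder side.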
e7444de2395d423a5a62bb35b12f9a34b49b4135 | 78,842 | ipynb | Jupyter Notebook | tutorial/t5_02.ipynb | hyungjun010/transformer-evolution | a6fc2dba169bc014638197a40f08721049a90e3b | [
"Apache-2.0"
] | 105 | 2019-11-20T04:28:12.000Z | 2022-03-21T15:36:30.000Z | tutorial/t5_02.ipynb | hyungjun010/transformer-evolution | a6fc2dba169bc014638197a40f08721049a90e3b | [
"Apache-2.0"
] | 1 | 2021-11-30T14:47:56.000Z | 2021-11-30T14:47:56.000Z | tutorial/t5_02.ipynb | hyungjun010/transformer-evolution | a6fc2dba169bc014638197a40f08721049a90e3b | [
"Apache-2.0"
] | 47 | 2019-11-24T16:15:28.000Z | 2022-03-26T15:47:01.000Z | 63.684976 | 23,584 | 0.600695 | [
[
[
"## T5 구현 과정 (2/2)\nT5 모델 구현에 대한 설명 입니다.\n\n이 내용을 확인하기 전 아래 내용을 확인하시기 바랍니다.\n- [Sentencepiece를 활용해 Vocab 만들기](https://paul-hyun.github.io/vocab-with-sentencepiece/)\n- [Naver 영화리뷰 감정분석 데이터 전처리 하기](https://paul-hyun.github.io/preprocess-nsmc/)\n- [Transformer (Attention Is All You Need) 구현하기 (1/3)](https://paul-hyun.github.io/transformer-01/)\n- [Transformer (Attention Is All You Need) 구현하기 (2/3)](https://paul-hyun.github.io/transformer-02/)\n- [Transformer (Attention Is All You Need) 구현하기 (3/3)](https://paul-hyun.github.io/transformer-03/)\n\n\n[Colab](https://colab.research.google.com/)에서 실행 했습니다.",
"_____no_output_____"
],
[
"#### 0. Pip Install\n필요한 패키지를 pip를 이용해서 설치합니다.",
"_____no_output_____"
]
],
[
[
"!pip install sentencepiece\n!pip install wget",
"Collecting sentencepiece\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/74/f4/2d5214cbf13d06e7cb2c20d84115ca25b53ea76fa1f0ade0e3c9749de214/sentencepiece-0.1.85-cp36-cp36m-manylinux1_x86_64.whl (1.0MB)\n\r\u001b[K |▎ | 10kB 12.9MB/s eta 0:00:01\r\u001b[K |▋ | 20kB 5.2MB/s eta 0:00:01\r\u001b[K |█ | 30kB 7.0MB/s eta 0:00:01\r\u001b[K |█▎ | 40kB 6.8MB/s eta 0:00:01\r\u001b[K |█▋ | 51kB 5.7MB/s eta 0:00:01\r\u001b[K |██ | 61kB 5.9MB/s eta 0:00:01\r\u001b[K |██▏ | 71kB 6.4MB/s eta 0:00:01\r\u001b[K |██▌ | 81kB 7.2MB/s eta 0:00:01\r\u001b[K |██▉ | 92kB 7.6MB/s eta 0:00:01\r\u001b[K |███▏ | 102kB 7.0MB/s eta 0:00:01\r\u001b[K |███▌ | 112kB 7.0MB/s eta 0:00:01\r\u001b[K |███▉ | 122kB 7.0MB/s eta 0:00:01\r\u001b[K |████ | 133kB 7.0MB/s eta 0:00:01\r\u001b[K |████▍ | 143kB 7.0MB/s eta 0:00:01\r\u001b[K |████▊ | 153kB 7.0MB/s eta 0:00:01\r\u001b[K |█████ | 163kB 7.0MB/s eta 0:00:01\r\u001b[K |█████▍ | 174kB 7.0MB/s eta 0:00:01\r\u001b[K |█████▊ | 184kB 7.0MB/s eta 0:00:01\r\u001b[K |██████ | 194kB 7.0MB/s eta 0:00:01\r\u001b[K |██████▎ | 204kB 7.0MB/s eta 0:00:01\r\u001b[K |██████▋ | 215kB 7.0MB/s eta 0:00:01\r\u001b[K |███████ | 225kB 7.0MB/s eta 0:00:01\r\u001b[K |███████▎ | 235kB 7.0MB/s eta 0:00:01\r\u001b[K |███████▋ | 245kB 7.0MB/s eta 0:00:01\r\u001b[K |███████▉ | 256kB 7.0MB/s eta 0:00:01\r\u001b[K |████████▏ | 266kB 7.0MB/s eta 0:00:01\r\u001b[K |████████▌ | 276kB 7.0MB/s eta 0:00:01\r\u001b[K |████████▉ | 286kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████▏ | 296kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████▌ | 307kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████▊ | 317kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████ | 327kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████▍ | 337kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████▊ | 348kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████ | 358kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████▍ | 368kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████▋ | 378kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████ | 389kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████▎ | 399kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████▋ | 409kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████ | 419kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████▎ | 430kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████▌ | 440kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 450kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████▏ | 460kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████▌ | 471kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████▉ | 481kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 491kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 501kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████▊ | 512kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████ | 522kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████▍ | 532kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████▊ | 542kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████ | 552kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████▎ | 563kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████▋ | 573kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████ | 583kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████▎ | 593kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████▋ | 604kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████ | 614kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████▏ | 624kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████▌ | 634kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████▉ | 645kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████▏ | 655kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████▌ | 665kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████▉ | 
675kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████▏ | 686kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████▍ | 696kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████▊ | 706kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 716kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████▍ | 727kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████▊ | 737kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████ | 747kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████▎ | 757kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████▋ | 768kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████ | 778kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 788kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████▋ | 798kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 808kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████▏ | 819kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████▌ | 829kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████▉ | 839kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████▏ | 849kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████▌ | 860kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████▉ | 870kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 880kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 890kB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████▊ | 901kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 911kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████████▍ | 921kB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████████▊ | 931kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████████ | 942kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▎ | 952kB 7.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 962kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 972kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▎ | 983kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▋ | 993kB 7.0MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▉ | 1.0MB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 1.0MB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▌| 1.0MB 7.0MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▉| 1.0MB 7.0MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 1.0MB 7.0MB/s \n\u001b[?25hInstalling collected packages: sentencepiece\nSuccessfully installed sentencepiece-0.1.85\nCollecting wget\n Downloading https://files.pythonhosted.org/packages/47/6a/62e288da7bcda82b935ff0c6cfe542970f04e29c756b0e147251b2fb251f/wget-3.2.zip\nBuilding wheels for collected packages: wget\n Building wheel for wget (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for wget: filename=wget-3.2-cp36-none-any.whl size=9682 sha256=e6a25da026f33947edcc73153385c40ae8e7758d46d8f60b791be74527c547d7\n Stored in directory: /root/.cache/pip/wheels/40/15/30/7d8f7cea2902b4db79e3fea550d7d7b85ecb27ef992b618f3f\nSuccessfully built wget\nInstalling collected packages: wget\nSuccessfully installed wget-3.2\n"
]
],
[
[
"#### 1. Google Drive Mount\nColab에서는 컴퓨터에 자원에 접근이 불가능 하므로 Google Drive에 파일을 올려 놓은 후 Google Drive를 mount 에서 로컬 디스크처럼 사용 합니다.\n1. 아래 블럭을 실행하면 나타나는 링크를 클릭하세요.\n2. Google 계정을 선택 하시고 허용을 누르면 나타나는 코드를 복사하여 아래 박스에 입력한 후 Enter 키를 입력하면 됩니다.\n\n학습관련 [데이터 및 결과 파일](https://drive.google.com/open?id=15XGr-L-W6DSoR5TbniPMJASPsA0IDTiN)을 참고 하세요.",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')\n# data를 저장할 폴더 입니다. 환경에 맞게 수정 하세요.\ndata_dir = \"/content/drive/My Drive/Data/transformer-evolution\"",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
]
],
[
[
"#### 2. Imports",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport json\nimport pandas as pd\nfrom IPython.display import display\nfrom tqdm import tqdm, tqdm_notebook, trange\nimport sentencepiece as spm\nimport wget\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F",
"_____no_output_____"
]
],
[
[
"#### 3. 폴더의 목록을 확인\nGoogle Drive mount가 잘 되었는지 확인하기 위해 data_dir 목록을 확인 합니다.",
"_____no_output_____"
]
],
[
[
"for f in os.listdir(data_dir):\n print(f)",
"kowiki.csv.gz\nkowiki.model\nkowiki.vocab\nratings_train.txt\nratings_test.txt\nratings_train.json\nratings_test.json\nkowiki.txt\nkowiki_gpt.json\nsave_gpt_pretrain.pth\nkowiki_bert_0.json\nsave_bert_pretrain.pth\nkowiki_t5.model\nkowiki_t5.vocab\nkowiki_t5_0.json\nsave_t5_pretrain.pth\n"
]
],
[
[
"#### 4. Vocab 및 입력\n\n[Sentencepiece를 활용해 Vocab 만들기](https://paul-hyun.github.io/vocab-with-sentencepiece/)를 통해 만들어 놓은 vocab을 로딩 합니다.",
"_____no_output_____"
]
],
[
[
"# vocab loading\nvocab_file = f\"{data_dir}/kowiki_t5.model\"\nvocab = spm.SentencePieceProcessor()\nvocab.load(vocab_file)",
"_____no_output_____"
]
],
[
[
"#### 5. Config\n\n모델에 설정 값을 전달하기 위한 config를 만듭니다.",
"_____no_output_____"
]
],
[
[
"\"\"\" configuration json을 읽어들이는 class \"\"\"\nclass Config(dict): \n __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n\n @classmethod\n def load(cls, file):\n with open(file, 'r') as f:\n config = json.loads(f.read())\n return Config(config)",
"_____no_output_____"
],
[
"config = Config({\n \"n_vocab\": len(vocab),\n \"n_seq\": 256,\n \"n_layer\": 6,\n \"d_hidn\": 256,\n \"i_pad\": 0,\n \"d_ff\": 1024,\n \"n_head\": 4,\n \"d_head\": 64,\n \"dropout\": 0.1,\n \"layer_norm_epsilon\": 1e-12\n})\nprint(config)",
"{'n_vocab': 8033, 'n_seq': 256, 'n_layer': 6, 'd_hidn': 256, 'i_pad': 0, 'd_ff': 1024, 'n_head': 4, 'd_head': 64, 'dropout': 0.1, 'layer_norm_epsilon': 1e-12}\n"
]
],
[
[
"#### 6. T5\n\nT5 Class 및 함수 입니다.",
"_____no_output_____"
]
],
[
[
"\"\"\" attention pad mask \"\"\"\ndef get_attn_pad_mask(seq_q, seq_k, i_pad):\n batch_size, len_q = seq_q.size()\n batch_size, len_k = seq_k.size()\n pad_attn_mask = seq_k.data.eq(i_pad).unsqueeze(1).expand(batch_size, len_q, len_k) # <pad>\n return pad_attn_mask\n\n\n\"\"\" attention decoder mask \"\"\"\ndef get_attn_decoder_mask(seq):\n subsequent_mask = torch.ones_like(seq).unsqueeze(-1).expand(seq.size(0), seq.size(1), seq.size(1))\n subsequent_mask = subsequent_mask.triu(diagonal=1) # upper triangular part of a matrix(2-D)\n return subsequent_mask\n\n\n\"\"\" scale dot product attention \"\"\"\nclass ScaledDotProductAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.dropout = nn.Dropout(config.dropout)\n self.scale = 1 / (self.config.d_head ** 0.5)\n self.num_buckets = 32\n self.relative_attention_bias = torch.nn.Embedding(self.num_buckets, self.config.n_head)\n \n def forward(self, Q, K, V, attn_mask, bidirectional=True):\n qlen, klen = Q.size(-2), K.size(-2)\n # (bs, n_head, n_q_seq, n_k_seq)\n scores = torch.matmul(Q, K.transpose(-1, -2)).mul_(self.scale)\n # (1, n_head, n_q_seq, n_k_seq)\n position_bias = self.compute_bias(qlen, klen, bidirectional=bidirectional)\n scores += position_bias\n scores.masked_fill_(attn_mask, -1e9)\n # (bs, n_head, n_q_seq, n_k_seq)\n attn_prob = nn.Softmax(dim=-1)(scores)\n attn_prob = self.dropout(attn_prob)\n # (bs, n_head, n_q_seq, d_v)\n context = torch.matmul(attn_prob, V)\n # (bs, n_head, n_q_seq, d_v), (bs, n_head, n_q_seq, n_v_seq)\n return context, attn_prob\n \n def compute_bias(self, qlen, klen, bidirectional=True):\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n # (qlen, klen)\n relative_position = memory_position - context_position\n # (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position, # shape (qlen, klen)\n num_buckets=self.num_buckets,\n bidirectional=bidirectional\n )\n # (qlen, klen)\n rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)\n # (qlen, klen, n_head)\n values = self.relative_attention_bias(rp_bucket)\n # (1, n_head, qlen, klen)\n values = values.permute([2, 0, 1]).unsqueeze(0)\n return values\n\n def _relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):\n ret = 0\n n = -relative_position\n if bidirectional:\n num_buckets //= 2\n ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets\n n = torch.abs(n)\n else:\n n = torch.max(n, torch.zeros_like(n))\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = n < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n val_if_large = max_exact + (\n torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)\n ).to(torch.long)\n val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))\n\n ret += torch.where(is_small, n, val_if_large)\n return ret\n\n\n\"\"\" multi head attention \"\"\"\nclass MultiHeadAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.W_Q = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)\n self.W_K = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)\n self.W_V = nn.Linear(self.config.d_hidn, self.config.n_head * 
self.config.d_head)\n self.scaled_dot_attn = ScaledDotProductAttention(self.config)\n self.linear = nn.Linear(self.config.n_head * self.config.d_head, self.config.d_hidn)\n self.dropout = nn.Dropout(config.dropout)\n \n def forward(self, Q, K, V, attn_mask, bidirectional=False):\n batch_size = Q.size(0)\n # (bs, n_head, n_q_seq, d_head)\n q_s = self.W_Q(Q).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)\n # (bs, n_head, n_k_seq, d_head)\n k_s = self.W_K(K).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)\n # (bs, n_head, n_v_seq, d_head)\n v_s = self.W_V(V).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)\n\n # (bs, n_head, n_q_seq, n_k_seq)\n attn_mask = attn_mask.unsqueeze(1).repeat(1, self.config.n_head, 1, 1)\n\n # (bs, n_head, n_q_seq, d_head), (bs, n_head, n_q_seq, n_k_seq)\n context, attn_prob = self.scaled_dot_attn(q_s, k_s, v_s, attn_mask, bidirectional=bidirectional)\n # (bs, n_head, n_q_seq, h_head * d_head)\n context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.config.n_head * self.config.d_head)\n # (bs, n_head, n_q_seq, e_embd)\n output = self.linear(context)\n output = self.dropout(output)\n # (bs, n_q_seq, d_hidn), (bs, n_head, n_q_seq, n_k_seq)\n return output, attn_prob\n\n\n\"\"\" feed forward \"\"\"\nclass PoswiseFeedForwardNet(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.conv1 = nn.Conv1d(in_channels=self.config.d_hidn, out_channels=self.config.d_ff, kernel_size=1)\n self.conv2 = nn.Conv1d(in_channels=self.config.d_ff, out_channels=self.config.d_hidn, kernel_size=1)\n self.active = F.gelu\n self.dropout = nn.Dropout(config.dropout)\n\n def forward(self, inputs):\n # (bs, d_ff, n_seq)\n output = self.active(self.conv1(inputs.transpose(1, 2)))\n # (bs, n_seq, d_hidn)\n output = self.conv2(output).transpose(1, 2)\n output = self.dropout(output)\n # (bs, n_seq, d_hidn)\n return output",
"_____no_output_____"
],
[
"\"\"\" encoder layer \"\"\"\nclass EncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.self_attn = MultiHeadAttention(self.config)\n self.layer_norm1 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)\n self.pos_ffn = PoswiseFeedForwardNet(self.config)\n self.layer_norm2 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)\n \n def forward(self, inputs, attn_mask):\n # (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)\n att_outputs, attn_prob = self.self_attn(inputs, inputs, inputs, attn_mask)\n att_outputs = self.layer_norm1(inputs + att_outputs)\n # (bs, n_enc_seq, d_hidn)\n ffn_outputs = self.pos_ffn(att_outputs)\n ffn_outputs = self.layer_norm2(ffn_outputs + att_outputs)\n # (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)\n return ffn_outputs, attn_prob\n\n\n\"\"\" encoder \"\"\"\nclass Encoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.layers = nn.ModuleList([EncoderLayer(self.config) for _ in range(self.config.n_layer)])\n \n def forward(self, enc_embd, enc_self_mask):\n # (bs, n_enc_seq, d_hidn)\n enc_outputs = enc_embd\n\n attn_probs = []\n for layer in self.layers:\n # (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)\n enc_outputs, attn_prob = layer(enc_outputs, enc_self_mask)\n attn_probs.append(attn_prob)\n # (bs, n_enc_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]\n return enc_outputs, attn_probs\n\n\n\"\"\" decoder layer \"\"\"\nclass DecoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.self_attn = MultiHeadAttention(self.config)\n self.layer_norm1 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)\n self.dec_enc_attn = MultiHeadAttention(self.config)\n self.layer_norm2 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)\n self.pos_ffn = PoswiseFeedForwardNet(self.config)\n self.layer_norm3 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)\n \n def forward(self, dec_inputs, enc_outputs, self_mask, ende_mask):\n # (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_dec_seq)\n self_att_outputs, self_attn_prob = self.self_attn(dec_inputs, dec_inputs, dec_inputs, self_mask, bidirectional=False)\n self_att_outputs = self.layer_norm1(dec_inputs + self_att_outputs)\n # (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_enc_seq)\n dec_enc_att_outputs, dec_enc_attn_prob = self.dec_enc_attn(self_att_outputs, enc_outputs, enc_outputs, ende_mask)\n dec_enc_att_outputs = self.layer_norm2(self_att_outputs + dec_enc_att_outputs)\n # (bs, n_dec_seq, d_hidn)\n ffn_outputs = self.pos_ffn(dec_enc_att_outputs)\n ffn_outputs = self.layer_norm3(dec_enc_att_outputs + ffn_outputs)\n # (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_dec_seq), (bs, n_head, n_dec_seq, n_enc_seq)\n return ffn_outputs, self_attn_prob, dec_enc_attn_prob\n\n\n\"\"\" decoder \"\"\"\nclass Decoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.layers = nn.ModuleList([DecoderLayer(self.config) for _ in range(self.config.n_layer)])\n \n def forward(self, dec_embd, enc_outputs, self_mask, ende_mask):\n # (bs, n_dec_seq, d_hidn)\n dec_outputs = dec_embd\n\n self_attn_probs, dec_enc_attn_probs = [], []\n for layer in self.layers:\n # (bs, n_dec_seq, d_hidn), (bs, n_dec_seq, n_dec_seq), (bs, n_dec_seq, n_enc_seq)\n dec_outputs, self_attn_prob, dec_enc_attn_prob = layer(dec_outputs, 
enc_outputs, self_mask, ende_mask)\n self_attn_probs.append(self_attn_prob)\n dec_enc_attn_probs.append(dec_enc_attn_prob)\n # (bs, n_dec_seq, d_hidn), [(bs, n_dec_seq, n_dec_seq)], [(bs, n_dec_seq, n_enc_seq)]S\n return dec_outputs, self_attn_probs, dec_enc_attn_probs\n\n\n\"\"\" t5 \"\"\"\nclass T5(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.embedding = nn.Embedding(self.config.n_vocab, self.config.d_hidn)\n self.encoder = Encoder(self.config)\n self.decoder = Decoder(self.config)\n\n self.projection_lm = nn.Linear(self.config.d_hidn, self.config.n_vocab, bias=False)\n self.projection_lm.weight = self.embedding.weight\n \n def forward(self, enc_inputs, dec_inputs):\n enc_embd = self.embedding(enc_inputs)\n dec_embd = self.embedding(dec_inputs)\n\n enc_self_mask = get_attn_pad_mask(enc_inputs, enc_inputs, self.config.i_pad)\n dec_self_mask = self.get_attn_dec_mask(dec_inputs)\n dec_ende_mask = get_attn_pad_mask(dec_inputs, enc_inputs, self.config.i_pad)\n\n # (bs, n_enc_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]\n enc_outputs, enc_self_attn_probs = self.encoder(enc_embd, enc_self_mask)\n # (bs, n_dec_seq, d_hidn), [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]\n dec_outputs, dec_self_attn_probs, dec_enc_attn_probs = self.decoder(dec_embd, enc_outputs, dec_self_mask, dec_ende_mask)\n # (bs, n_dec_seq, n_vocab)\n dec_outputs = self.projection_lm(dec_outputs)\n # (bs, n_dec_seq, n_vocab), [(bs, n_head, n_enc_seq, n_enc_seq)], [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]\n return dec_outputs, enc_self_attn_probs, dec_self_attn_probs, dec_enc_attn_probs\n \n def get_attn_dec_mask(self, dec_inputs):\n # (bs, n_dec_seq, n_dec_seq)\n dec_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs, self.config.i_pad)\n # (bs, n_dec_seq, n_dec_seq)\n dec_ahead_mask = get_attn_decoder_mask(dec_inputs)\n # (bs, n_dec_seq, n_dec_seq)\n dec_self_mask = torch.gt((dec_pad_mask + dec_ahead_mask), 0)\n # (bs, n_dec_seq, n_dec_seq)\n return dec_self_mask\n\n def save(self, epoch, loss, path):\n torch.save({\n \"epoch\": epoch,\n \"loss\": loss,\n \"state_dict\": self.state_dict()\n }, path)\n \n def load(self, path):\n save = torch.load(path)\n self.load_state_dict(save[\"state_dict\"])\n return save[\"epoch\"], save[\"loss\"]",
"_____no_output_____"
]
],
[
[
"#### 7. Naver 영화 분류 모델",
"_____no_output_____"
]
],
[
[
"\"\"\" naver movie classfication \"\"\"\nclass MovieClassification(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.t5 = T5(self.config)\n \n def forward(self, enc_inputs, dec_inputs):\n # (bs, n_dec_seq, n_vocab), [(bs, n_head, n_enc_seq, n_enc_seq)], [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]\n logits, enc_self_attn_probs, dec_self_attn_probs, dec_enc_attn_probs = self.t5(enc_inputs, dec_inputs)\n return logits, enc_self_attn_probs, dec_self_attn_probs, dec_enc_attn_probs",
"_____no_output_____"
]
],
[
[
"#### 8. 네이버 영화 분류 데이터\n\nT5를 위해 vocab을 새로 만들어서 학습 데이터도 새로 만들 었습니다.\n\n",
"_____no_output_____"
]
],
[
[
"\"\"\" train data 준비 \"\"\"\ndef prepare_train(vocab, infile, outfile):\n df = pd.read_csv(infile, sep=\"\\t\", engine=\"python\")\n with open(outfile, \"w\") as f:\n for index, row in df.iterrows():\n document = row[\"document\"]\n if type(document) != str:\n continue\n instance = { \"id\": row[\"id\"], \"doc\": vocab.encode_as_pieces(document), \"label\": row[\"label\"] }\n f.write(json.dumps(instance, ensure_ascii=False))\n f.write(\"\\n\")",
"_____no_output_____"
],
[
"prepare_train(vocab, f\"{data_dir}/ratings_train.txt\", f\"{data_dir}/ratings_train_t5.json\")\nprepare_train(vocab, f\"{data_dir}/ratings_test.txt\", f\"{data_dir}/ratings_test_t5.json\")",
"_____no_output_____"
],
[
"\"\"\" 정답 text \"\"\"\nlable_map = {0: \"부\", 1: \"정\"}\n\n\"\"\" 영화 분류 데이터셋 \"\"\"\nclass MovieDataSet(torch.utils.data.Dataset):\n def __init__(self, vocab, infile, is_valid=False):\n self.vocab = vocab\n self.labels = []\n self.enc_inputs = []\n self.dec_inputs = []\n\n line_cnt = 0\n with open(infile, \"r\") as f:\n for line in f:\n line_cnt += 1\n\n with open(infile, \"r\") as f:\n for i, line in enumerate(tqdm(f, total=line_cnt, desc=\"Loading Dataset\", unit=\" lines\")):\n data = json.loads(line)\n\n enc_input = vocab.encode_as_ids(\"감정분류:\") + [vocab.piece_to_id(p) for p in data[\"doc\"]]\n if is_valid:\n label = vocab.encode_as_ids(lable_map[data[\"label\"]])\n dec_input = [vocab.piece_to_id(\"[BOS]\")]\n else:\n label = vocab.encode_as_ids(lable_map[data[\"label\"]]) + [vocab.piece_to_id(\"[EOS]\")]\n dec_input = [vocab.piece_to_id(\"[BOS]\")] + vocab.encode_as_ids(lable_map[data[\"label\"]])\n\n self.labels.append(label)\n self.enc_inputs.append(enc_input)\n self.dec_inputs.append(dec_input)\n \n def __len__(self):\n assert len(self.labels) == len(self.enc_inputs)\n assert len(self.labels) == len(self.dec_inputs)\n return len(self.labels)\n \n def __getitem__(self, item):\n return (torch.tensor(self.labels[item]),\n torch.tensor(self.enc_inputs[item]),\n torch.tensor(self.dec_inputs[item]))",
"_____no_output_____"
],
[
"\"\"\" movie data collate_fn \"\"\"\ndef movie_collate_fn(inputs):\n labels, enc_inputs, dec_inputs = list(zip(*inputs))\n\n enc_inputs = torch.nn.utils.rnn.pad_sequence(enc_inputs, batch_first=True, padding_value=0)\n dec_inputs = torch.nn.utils.rnn.pad_sequence(dec_inputs, batch_first=True, padding_value=0)\n\n batch = [\n torch.stack(labels, dim=0),\n enc_inputs,\n dec_inputs,\n ]\n return batch",
"_____no_output_____"
],
[
"\"\"\" 데이터 로더 \"\"\"\nbatch_size = 128\ntrain_dataset = MovieDataSet(vocab, f\"{data_dir}/ratings_train_t5.json\")\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=movie_collate_fn)\ntest_dataset = MovieDataSet(vocab, f\"{data_dir}/ratings_test_t5.json\", is_valid=True)\ntest_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=movie_collate_fn)",
"Loading Dataset: 100%|██████████| 149995/149995 [00:06<00:00, 21670.37 lines/s]\nLoading Dataset: 100%|██████████| 49997/49997 [00:01<00:00, 25291.42 lines/s]\n"
]
],
[
[
"#### 9. 네이버 영화 분류 데이터 학습",
"_____no_output_____"
]
],
[
[
"\"\"\" 모델 epoch 평가 \"\"\"\ndef eval_epoch(config, model, data_loader):\n matchs = []\n model.eval()\n\n n_word_total = 0\n n_correct_total = 0\n with tqdm(total=len(data_loader), desc=f\"Valid\") as pbar:\n for i, value in enumerate(data_loader):\n labels, enc_inputs, dec_inputs = map(lambda v: v.to(config.device), value)\n\n outputs = model(enc_inputs, dec_inputs)\n logits = outputs[0]\n _, indices = logits.max(2)\n\n match = torch.eq(indices, labels).detach()\n matchs.extend(match.cpu())\n accuracy = np.sum(matchs) / len(matchs) if 0 < len(matchs) else 0\n\n pbar.update(1)\n pbar.set_postfix_str(f\"Acc: {accuracy:.3f}\")\n return np.sum(matchs) / len(matchs) if 0 < len(matchs) else 0",
"_____no_output_____"
],
[
"\"\"\" 모델 epoch 학습 \"\"\"\ndef train_epoch(config, epoch, model, criterion, optimizer, train_loader):\n losses = []\n model.train()\n\n with tqdm(total=len(train_loader), desc=f\"Train({epoch})\") as pbar:\n for i, value in enumerate(train_loader):\n labels, enc_inputs, dec_inputs = map(lambda v: v.to(config.device), value)\n\n optimizer.zero_grad()\n outputs = model(enc_inputs, dec_inputs)\n logits = outputs[0]\n\n loss = criterion(logits.view(-1, logits.size(2)), labels.view(-1))\n\n loss_val = loss.item()\n losses.append(loss_val)\n\n loss.backward()\n optimizer.step()\n\n pbar.update(1)\n pbar.set_postfix_str(f\"Loss: {loss_val:.3f} ({np.mean(losses):.3f})\")\n return np.mean(losses)",
"_____no_output_____"
],
[
"config.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(config)\n\nlearning_rate = 5e-5\nn_epoch = 5",
"{'n_vocab': 8033, 'n_seq': 256, 'n_layer': 6, 'd_hidn': 256, 'i_pad': 0, 'd_ff': 1024, 'n_head': 4, 'd_head': 64, 'dropout': 0.1, 'layer_norm_epsilon': 1e-12, 'device': device(type='cuda')}\n"
],
[
"def train(model):\n model.to(config.device)\n\n criterion_cls = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n best_epoch, best_loss, best_score = 0, 0, 0\n losses, scores = [], []\n for epoch in range(n_epoch):\n loss = train_epoch(config, epoch, model, criterion_cls, optimizer, train_loader)\n score = eval_epoch(config, model, test_loader)\n\n losses.append(loss)\n scores.append(score)\n\n if best_score < score:\n best_epoch, best_loss, best_score = epoch, loss, score\n print(f\">>>> epoch={best_epoch}, loss={best_loss:.5f}, socre={best_score:.5f}\")\n return losses, scores",
"_____no_output_____"
]
],
[
[
"###### Pretrain 없이 학습",
"_____no_output_____"
]
],
[
[
"model = MovieClassification(config)\n\nlosses_00, scores_00 = train(model)",
"Train(0): 100%|██████████| 1172/1172 [05:07<00:00, 3.72it/s, Loss: 0.244 (0.827)]\nValid: 100%|██████████| 391/391 [01:00<00:00, 4.63it/s, Acc: 0.771]\nTrain(1): 100%|██████████| 1172/1172 [05:06<00:00, 3.89it/s, Loss: 0.232 (0.253)]\nValid: 100%|██████████| 391/391 [01:02<00:00, 4.59it/s, Acc: 0.794]\nTrain(2): 100%|██████████| 1172/1172 [05:10<00:00, 4.05it/s, Loss: 0.235 (0.229)]\nValid: 100%|██████████| 391/391 [01:02<00:00, 4.76it/s, Acc: 0.812]\nTrain(3): 100%|██████████| 1172/1172 [05:10<00:00, 3.88it/s, Loss: 0.193 (0.214)]\nValid: 100%|██████████| 391/391 [01:02<00:00, 4.77it/s, Acc: 0.816]\nTrain(4): 100%|██████████| 1172/1172 [05:09<00:00, 3.75it/s, Loss: 0.192 (0.201)]\nValid: 100%|██████████| 391/391 [01:01<00:00, 4.84it/s, Acc: 0.814]\n"
]
],
[
[
"###### Pretrain을 한 후 학습",
"_____no_output_____"
]
],
[
[
"model = MovieClassification(config)\n\nsave_pretrain = f\"{data_dir}/save_t5_pretrain.pth\"\nmodel.t5.load(save_pretrain)\n\nlosses_20, scores_20 = train(model)",
"Train(0): 100%|██████████| 1172/1172 [05:11<00:00, 3.69it/s, Loss: 0.227 (0.288)]\nValid: 100%|██████████| 391/391 [01:01<00:00, 4.86it/s, Acc: 0.769]\nTrain(1): 100%|██████████| 1172/1172 [05:07<00:00, 3.93it/s, Loss: 0.261 (0.220)]\nValid: 100%|██████████| 391/391 [01:00<00:00, 4.68it/s, Acc: 0.813]\nTrain(2): 100%|██████████| 1172/1172 [05:06<00:00, 3.94it/s, Loss: 0.173 (0.204)]\nValid: 100%|██████████| 391/391 [01:00<00:00, 4.67it/s, Acc: 0.822]\nTrain(3): 100%|██████████| 1172/1172 [05:09<00:00, 3.78it/s, Loss: 0.205 (0.191)]\nValid: 100%|██████████| 391/391 [01:03<00:00, 4.86it/s, Acc: 0.828]\nTrain(4): 100%|██████████| 1172/1172 [05:07<00:00, 3.98it/s, Loss: 0.138 (0.181)]\nValid: 100%|██████████| 391/391 [01:00<00:00, 4.56it/s, Acc: 0.827]\n"
]
],
[
[
"#### 10. Result",
"_____no_output_____"
]
],
[
[
"# table\ndata = {\n \"loss_00\": losses_00,\n \"socre_00\": scores_00,\n \"loss_20\": losses_20,\n \"socre_20\": scores_20,\n}\ndf = pd.DataFrame(data)\ndisplay(df)\n\n# graph\nplt.figure(figsize=[12, 4])\nplt.plot(scores_00, label=\"score_00\")\nplt.plot(scores_20, label=\"score_20\")\nplt.legend()\nplt.xlabel('Epoch')\nplt.ylabel('Value')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7445cde7398be6ee8cb31d97a861c56011004e2 | 195,648 | ipynb | Jupyter Notebook | 07_Python_Finance.ipynb | devscie/PythonFinance | 3185092a5da511533d074a905c1421046a09e7b7 | [
"MIT"
] | null | null | null | 07_Python_Finance.ipynb | devscie/PythonFinance | 3185092a5da511533d074a905c1421046a09e7b7 | [
"MIT"
] | null | null | null | 07_Python_Finance.ipynb | devscie/PythonFinance | 3185092a5da511533d074a905c1421046a09e7b7 | [
"MIT"
] | null | null | null | 180.65374 | 53,578 | 0.875578 | [
[
[
"# 07 - Python Finance\n\n**Capitulo 07**: Como calcular essa probabilidade usando Python.",
"_____no_output_____"
],
[
"Considerar que os retornos seguem uma distribuição de probabilidade normal induz a erros grosseiros.\n\nUtilizando distribuições de caudas gordas podemos ter uma aproximação melhor do mundo real.\n\n**Qual a probabilidade do índice bovespa cair mais de 12% ?** Ref.: 09/03/2020",
"_____no_output_____"
],
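[
"*Added illustrative sketch (not part of the original notebook):* the cell below compares the probability of a fall of more than 12% under a normal distribution and under a fat-tailed Student's t distribution. The daily mean, volatility and degrees of freedom used here are assumptions chosen only for illustration, not estimates from the data.",
"_____no_output_____"
],
[
"# Illustrative sketch only -- mu, sigma and df_t below are assumed values, not estimated from the data\nfrom scipy.stats import norm, t\n\nmu, sigma, df_t = 0.0, 0.02, 3\np_normal = norm.cdf(-0.12, loc=mu, scale=sigma)\np_fat_tail = t.cdf(-0.12, df_t, loc=mu, scale=sigma)\nprint('P(fall > 12%) under a normal distribution: {:.10f}'.format(p_normal))\nprint('P(fall > 12%) under a Student t distribution: {:.10f}'.format(p_fat_tail))",
"_____no_output_____"
],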
[
"## Configurações Iniciais\n\n## 1. Importando bibliotecas\n\n1.1 Instalando o YFinance\n",
"_____no_output_____"
]
],
[
[
"# Configurando dados historicos do Yahoo Finance\n!pip install yfinance --upgrade --no-cache-dir",
"_____no_output_____"
]
],
[
[
"1.2 Importando o YFinance e sobrescrevendo os métodos do pandas_datareader",
"_____no_output_____"
]
],
[
[
"import yfinance as yf\n#yf.pdr_override()",
"_____no_output_____"
]
],
[
[
"1.3 Importando as Bibliotecas",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (16,8)\nmatplotlib.rcParams.update({'font.size': 22})\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"# biblioteca estatística\nfrom scipy.stats import norm, t",
"_____no_output_____"
]
],
[
[
"## 2. Análise Estatística do Índice Bovespa",
"_____no_output_____"
]
],
[
[
"# baixando as cotações\nibov = yf.download(\"^BVSP\")[[\"Adj Close\"]]",
"\r[*********************100%***********************] 1 of 1 completed\n"
]
],
[
[
"Exibindo dados",
"_____no_output_____"
]
],
[
[
"ibov",
"_____no_output_____"
],
[
"# criando coluna com retorno percentual para cada dia\nibov['retorno'] = ibov['Adj Close'].pct_change()\nibov.dropna(inplace=True)",
"_____no_output_____"
]
],
[
[
"Exibindo dados",
"_____no_output_____"
]
],
[
[
"# variação diaria do índice\nibov",
"_____no_output_____"
]
],
[
[
"Calculando Média do retorno e Desvio Padrão",
"_____no_output_____"
]
],
[
[
"# calcular a média do retorno\nmedia_ibov = ibov['retorno'].mean()\nprint('Retorno médio = {:.2f}%'.format(media_ibov*100))",
"Retorno médio = 0.15%\n"
],
[
"# calcular o desvio padrão\ndesvio_padrao_ibov = ibov['retorno'].std()\nprint('Desvio padrão = {:.2f}%'.format(desvio_padrao_ibov*100))",
"Desvio padrão = 2.26%\n"
]
],
[
[
"Exibindo os dados que corresponde a pergunta do estudo",
"_____no_output_____"
]
],
[
[
"# buscar os dias que o índice ibovespa teve retorno abaixo 12%\nibov[ibov[\"retorno\"] < -0.12]",
"_____no_output_____"
]
],
[
[
"## 3. Análise\n\n**Qual a probabilidade do ibov cair mais que 12% considerando que os retornos seguem uma distribuição normal?**",
"_____no_output_____"
]
],
[
[
"probabilidade_teorica = norm.cdf(-0.12, loc=media_ibov, scale=desvio_padrao_ibov)\nprint('{:.8f}%'.format(probabilidade_teorica*100))",
"0.00000371%\n"
],
[
"frequencia_teorica = 1 / probabilidade_teorica\nprint('Uma vez a cada {} dias'.format(int(round(frequencia_teorica, 5))))\nprint('Ou uma vez a cada {} anos'.format(int(round(frequencia_teorica/252, 5))))",
"Uma vez a cada 26946255 dias\nOu uma vez a cada 106929 anos\n"
],
[
"#ibov[ibov[\"retorno\"] > 0.05].size / ibov.size * 100",
"_____no_output_____"
],
[
"ibov['retorno'].plot(title=\"Retorno Diário do Índice Bovespa\");",
"_____no_output_____"
]
],
[
[
"Comparando o gráfico para visualizar se segue uma normal téorica, utilizando os mesmos parametros (padrão de média e desvio padrão) definidos anterior.",
"_____no_output_____"
]
],
[
[
"ibov['retorno_teorico'] = norm.rvs(size=ibov['retorno'].size, loc=media_ibov, scale=desvio_padrao_ibov)",
"_____no_output_____"
],
[
"ax = ibov['retorno_teorico'].plot(title=\"Retorno Normal Simulado\");\nax.set_ylim(-0.2, 0.4)",
"_____no_output_____"
]
],
[
[
"Distribuição normal os retornos é bem mais comportada, os retornos são centrados na média.",
"_____no_output_____"
]
],
[
[
"sns.distplot(ibov['retorno'], bins=100, kde=False);",
"_____no_output_____"
]
],
[
[
"Histograma da distribuição dos retornos",
"_____no_output_____"
]
],
[
[
"sns.distplot(ibov['retorno'], bins=100, kde=False, fit=norm);",
"_____no_output_____"
]
],
[
[
"Os dados tem um pico elevado, dados centralizados em torno da média. Os dados intermediarios (rombos) taxa de ocorrrência menor, nas caldas tem maior ocorrência.",
"_____no_output_____"
]
],
[
[
"sns.distplot(ibov['retorno'], bins=100, kde=False, fit=t);",
"_____no_output_____"
]
],
[
[
"Encontrar paramentros que coincidem com a amostra.\n",
"_____no_output_____"
]
],
[
[
"# obter paramentros que foram utilizados para fazer o ajustar, fit da curva\n(graus_de_liberdade, media_t, desvio_padrao_t) = t.fit(ibov['retorno'])\nprint('Distribuição T-Student\\nGraus de liberdade={:.2f} \\nMédia={:.4f} \\nDesvio padrão={:.5f}'.format(graus_de_liberdade, media_t, desvio_padrao_t))",
"Distribuição T-Student\nGraus de liberdade=3.28 \nMédia=0.0012 \nDesvio padrão=0.01444\n"
],
[
"# considerando a distribuição de calda gorda\nprobabilidade_teorica_t = t.cdf(-0.12, graus_de_liberdade, loc=media_t, scale=desvio_padrao_t)\nprint('{:.8f}%'.format(probabilidade_teorica_t*100))",
"0.12571533%\n"
],
[
"frequencia_teorica_t = 1 / probabilidade_teorica_t\nprint('Para uma distribuição T-Student: \\nUma vez a cada {} dias'.format(int(round(frequencia_teorica_t, 5))))\nprint('Ou uma vez a cada {} anos'.format(int(round(frequencia_teorica_t/252, 5))))",
"Para uma distribuição T-Student: \nUma vez a cada 795 dias\nOu uma vez a cada 3 anos\n"
]
],
[
[
"Comparação distribuição calda gorda e distribuição normal",
"_____no_output_____"
]
],
[
[
"frequencia_teorica = 1 / probabilidade_teorica\nprint('Para uma distribuição Normal: \\nUma vez a cada {} dias'.format(int(round(frequencia_teorica, 5))))\nprint('Ou uma vez a cada {} anos'.format(int(round(frequencia_teorica/252, 5))))",
"Para uma distribuição Normal: \nUma vez a cada 26946255 dias\nOu uma vez a cada 106929 anos\n"
],
[
"frequencia_observada = ibov['retorno'].size / ibov[ibov[\"retorno\"] < -0.12].shape[0] \nprint('Na vida real aconteceu: \\nUma vez a cada {} dias'.format(int(round(frequencia_observada, 5))))",
"Na vida real aconteceu: \nUma vez a cada 1380 dias\n"
]
],
[
[
"## 4. Observações",
"_____no_output_____"
],
[
"Distribuição T Stuent ---> Over Fit\n\nA gente não consegue calcular baixas probabilidades.\n\nAs baixas probabilidades são muito sujeitas a erros (erros do modelo e erros do paramentro).",
"_____no_output_____"
],
[
"Segundo Nassim Taleb, não recomenda, não orienta a calcular baixas probabilidades. O mais importante do que calcular probalilidade do evento, é saber se expor aos eventos de baixa probabilidade que causam grandes impactos.\n\n**A lógica do Cisne Negro: O impacto do altamente improvável (Taleb, Nassim Nicholas)**",
"_____no_output_____"
],
[
"Apesar de varios modelos utilizarem a distribuição normal, ela é uma simplificação que gera muitos erros, principalmente quando lidamos com eventos de baixa probabilidade, eventos na calda da distribuição.\n\nA gente consegue trabalhar de forma mais aproximada, com menos erros utilizando a distribuição de calda gorda.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7447af67857590fd5f10e48f30ab02b455654c2 | 408,532 | ipynb | Jupyter Notebook | Script-035-Bar-chart-SUBPLOTS-Phil.ipynb | paulinelemenkova/Python-script-035-Bar-Chart | 492d45276df7b4d8853682a933be5632770c5c3c | [
"MIT"
] | null | null | null | Script-035-Bar-chart-SUBPLOTS-Phil.ipynb | paulinelemenkova/Python-script-035-Bar-Chart | 492d45276df7b4d8853682a933be5632770c5c3c | [
"MIT"
] | null | null | null | Script-035-Bar-chart-SUBPLOTS-Phil.ipynb | paulinelemenkova/Python-script-035-Bar-Chart | 492d45276df7b4d8853682a933be5632770c5c3c | [
"MIT"
] | null | null | null | 3,142.553846 | 404,432 | 0.9611 | [
[
[
"#!/usr/bin/env python\n# coding: utf-8\nimport os\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport matplotlib.artist as martist\nfrom matplotlib.offsetbox import AnchoredText\n\nsns.set_style('whitegrid')\n\nos.chdir('/Users/pauline/Documents/Python')\ndf = pd.read_csv(\"Tab-GeomorphPhil.csv\")\nsunda = df.plate_sunda\nphil = df.plate_phil\nsa = df.tan_angle\nsth = df.sedim_thick\n\nfig = plt.figure(figsize=(10.0, 8.0), dpi=300)\nfig.suptitle('Bar charts for the bathymetry of the Philippine Trench',\n fontsize=10, fontweight='bold', x=0.5, y=0.95)\n\n# define annotations\ndef add_at(ax, t, loc=2):\n fp = dict(size=11)\n _at = AnchoredText(t, loc=loc, prop=fp)\n ax.add_artist(_at)\n return _at\n\n# Generate a loop for 25 profiles\nnames = range(1, 26)\n\n# subplot 1\nax = fig.add_subplot(221)\nsunda.plot(kind ='bar', color=sns.color_palette('rainbow'))\nplt.title('Sample points across Sunda Plate')\nplt.xlabel('Cross-section profiles', fontsize=10, fontfamily='sans-serif')\nplt.xticks(np.arange(25), (names), rotation=45, fontsize=8)\nplt.ylabel('Number of observation ponts', fontsize=10, fontfamily='sans-serif')\nadd_at(ax, \"A\")\n\n# subplot 2\nax = fig.add_subplot(222)\nphil.plot(kind ='bar', color=sns.color_palette('plasma'))\nplt.title('Sample points across Philippine Plate')\nplt.xlabel('Cross-section profiles', fontsize=10, fontfamily='sans-serif')\nplt.xticks(np.arange(25), (names), rotation=45, fontsize=8)\nplt.ylabel('Number of observation ponts', fontsize=10, fontfamily='sans-serif')\nadd_at(ax, \"B\")\n\n\n# subplot 3\nax = fig.add_subplot(223)\nsa.plot(kind ='bar', color=sns.color_palette('gist_heat'))\nplt.title('Slope steepness (tangent angles °)')\nplt.xlabel('Cross-section profiles', fontsize=10, fontfamily='sans-serif')\nplt.xticks(np.arange(25), (names), rotation=45, fontsize=8)\nplt.ylabel('Slope angles, tangent °', fontsize=10, fontfamily='sans-serif')\nadd_at(ax, \"C\")\n\n# subplot 4\nax = fig.add_subplot(224)\nsth.plot(kind ='bar', color=sns.color_palette('cool'))\nplt.title('Sediment thickness')\nplt.xlabel('Cross-section profiles', fontsize=10, fontfamily='sans-serif')\nplt.xticks(np.arange(25), (names), rotation=45, fontsize=8)\nplt.ylabel('Sediment thickness, m', fontsize=10, fontfamily='sans-serif')\nadd_at(ax, \"D\", loc=1)\n\n# visualizing and saving\nplt.tight_layout()\nplt.subplots_adjust(top=0.90, bottom=0.08,\n left=0.10, right=0.95,\n hspace=0.25, wspace=0.35\n )\nplt.savefig('plot_BarCharP.png', dpi=300)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e7448b10df555e0ccc04a6f60c6739e9cb2288b1 | 25,624 | ipynb | Jupyter Notebook | weight-initialization/weight_initialization_exercise.ipynb | hfurkanvural/udacity-deep-learning-v2-pytorch | 9c1bfad7feab07d76f4730baf204e393b8a34491 | [
"MIT"
] | null | null | null | weight-initialization/weight_initialization_exercise.ipynb | hfurkanvural/udacity-deep-learning-v2-pytorch | 9c1bfad7feab07d76f4730baf204e393b8a34491 | [
"MIT"
] | 1 | 2020-04-01T21:53:01.000Z | 2020-04-01T21:53:01.000Z | weight-initialization/weight_initialization_exercise.ipynb | hfurkanvural/udacity-deep-learning-v2-pytorch | 9c1bfad7feab07d76f4730baf204e393b8a34491 | [
"MIT"
] | null | null | null | 39.300613 | 635 | 0.60498 | [
[
[
"# Weight Initialization\nIn this lesson, you'll learn how to find good initial weights for a neural network. Weight initialization happens once, when a model is created and before it trains. Having good initial weights can place the neural network close to the optimal solution. This allows the neural network to come to the best solution quicker. \n\n<img src=\"notebook_ims/neuron_weights.png\" width=40%/>\n\n\n## Initial Weights and Observing Training Loss\n\nTo see how different weights perform, we'll test on the same dataset and neural network. That way, we know that any changes in model behavior are due to the weights and not any changing data or model structure. \n> We'll instantiate at least two of the same models, with _different_ initial weights and see how the training loss decreases over time, such as in the example below. \n\n<img src=\"notebook_ims/loss_comparison_ex.png\" width=60%/>\n\nSometimes the differences in training loss, over time, will be large and other times, certain weights offer only small improvements.\n\n### Dataset and Model\n\nWe'll train an MLP to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist) to demonstrate the effect of different initial weights. As a reminder, the FashionMNIST dataset contains images of clothing types; `classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']`. The images are normalized so that their pixel values are in a range [0.0 - 1.0). Run the cell below to download and load the dataset.\n\n---\n#### EXERCISE\n\n[Link to normalized distribution, exercise code](#normalex)\n\n---",
"_____no_output_____"
],
[
"### Import Libraries and Load [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)",
"_____no_output_____"
]
],
[
[
"import torch\nimport numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 100\n# percentage of training set to use as validation\nvalid_size = 0.2\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = datasets.FashionMNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.FashionMNIST(root='data', train=False,\n download=True, transform=transform)\n\n# obtain training indices that will be used for validation\nnum_train = len(train_data)\nindices = list(range(num_train))\nnp.random.shuffle(indices)\nsplit = int(np.floor(valid_size * num_train))\ntrain_idx, valid_idx = indices[split:], indices[:split]\n\n# define samplers for obtaining training and validation batches\ntrain_sampler = SubsetRandomSampler(train_idx)\nvalid_sampler = SubsetRandomSampler(valid_idx)\n\n# prepare data loaders (combine dataset and sampler)\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n sampler=train_sampler, num_workers=num_workers)\nvalid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, \n sampler=valid_sampler, num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)\n\n# specify the image classes\nclasses = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', \n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']",
"_____no_output_____"
]
],
[
[
"### Visualize Some Training Data",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(classes[labels[idx]])",
"_____no_output_____"
]
],
[
[
"## Define the Model Architecture\n\nWe've defined the MLP that we'll use for classifying the dataset.\n\n### Neural Network\n<img style=\"float: left\" src=\"notebook_ims/neural_net.png\" width=50%/>\n\n\n* A 3 layer MLP with hidden dimensions of 256 and 128. \n\n* This MLP accepts a flattened image (784-value long vector) as input and produces 10 class scores as output.\n---\nWe'll test the effect of different initial weights on this 3 layer neural network with ReLU activations and an Adam optimizer. \n\nThe lessons you learn apply to other neural networks, including different activations and optimizers.",
"_____no_output_____"
],
[
"---\n## Initialize Weights\nLet's start looking at some initial weights.\n### All Zeros or Ones\nIf you follow the principle of [Occam's razor](https://en.wikipedia.org/wiki/Occam's_razor), you might think setting all the weights to 0 or 1 would be the best solution. This is not the case.\n\nWith every weight the same, all the neurons at each layer are producing the same output. This makes it hard to decide which weights to adjust.\n\nLet's compare the loss with all ones and all zero weights by defining two models with those constant weights.\n\nBelow, we are using PyTorch's [nn.init](https://pytorch.org/docs/stable/nn.html#torch-nn-init) to initialize each Linear layer with a constant weight. The init library provides a number of weight initialization functions that give you the ability to initialize the weights of each layer according to layer type.\n\nIn the case below, we look at every layer/module in our model. If it is a Linear layer (as all three layers are for this MLP), then we initialize those layer weights to be a `constant_weight` with bias=0 using the following code:\n>```\nif isinstance(m, nn.Linear):\n nn.init.constant_(m.weight, constant_weight)\n nn.init.constant_(m.bias, 0)\n```\n\nThe `constant_weight` is a value that you can pass in when you instantiate the model.",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\n# define the NN architecture\nclass Net(nn.Module):\n def __init__(self, hidden_1=256, hidden_2=128, constant_weight=None):\n super(Net, self).__init__()\n # linear layer (784 -> hidden_1)\n self.fc1 = nn.Linear(28 * 28, hidden_1)\n # linear layer (hidden_1 -> hidden_2)\n self.fc2 = nn.Linear(hidden_1, hidden_2)\n # linear layer (hidden_2 -> 10)\n self.fc3 = nn.Linear(hidden_2, 10)\n # dropout layer (p=0.2)\n self.dropout = nn.Dropout(0.2)\n \n # initialize the weights to a specified, constant value\n if(constant_weight is not None):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.constant_(m.weight, constant_weight)\n nn.init.constant_(m.bias, 0)\n \n \n def forward(self, x):\n # flatten image input\n x = x.view(-1, 28 * 28)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n # add dropout layer\n x = self.dropout(x)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc2(x))\n # add dropout layer\n x = self.dropout(x)\n # add output layer\n x = self.fc3(x)\n return x\n",
"_____no_output_____"
]
],
[
[
"### Compare Model Behavior\n\nBelow, we are using `helpers.compare_init_weights` to compare the training and validation loss for the two models we defined above, `model_0` and `model_1`. This function takes in a list of models (each with different initial weights), the name of the plot to produce, and the training and validation dataset loaders. For each given model, it will plot the training loss for the first 100 batches and print out the validation accuracy after 2 training epochs. *Note: if you've used a small batch_size, you may want to increase the number of epochs here to better compare how models behave after seeing a few hundred images.* \n\nWe plot the loss over the first 100 batches to better judge which model weights performed better at the start of training. **I recommend that you take a look at the code in `helpers.py` to look at the details behind how the models are trained, validated, and compared.**\n\nRun the cell below to see the difference between weights of all zeros against all ones.",
"_____no_output_____"
]
],
[
[
"# initialize two NN's with 0 and 1 constant weights\nmodel_0 = Net(constant_weight=0)\nmodel_1 = Net(constant_weight=1)",
"_____no_output_____"
],
[
"import helpers\n\n# put them in list form to compare\nmodel_list = [(model_0, 'All Zeros'),\n (model_1, 'All Ones')]\n\n\n# plot the loss over the first 100 batches\nhelpers.compare_init_weights(model_list, \n 'All Zeros vs All Ones', \n train_loader,\n valid_loader)",
"_____no_output_____"
]
],
[
[
"As you can see the accuracy is close to guessing for both zeros and ones, around 10%.\n\nThe neural network is having a hard time determining which weights need to be changed, since the neurons have the same output for each layer. To avoid neurons with the same output, let's use unique weights. We can also randomly select these weights to avoid being stuck in a local minimum for each run.\n\nA good solution for getting these random weights is to sample from a uniform distribution.",
"_____no_output_____"
],
[
"### Uniform Distribution\nA [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution) has the equal probability of picking any number from a set of numbers. We'll be picking from a continuous distribution, so the chance of picking the same number is low. We'll use NumPy's `np.random.uniform` function to pick random numbers from a uniform distribution.\n\n>#### [`np.random_uniform(low=0.0, high=1.0, size=None)`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html)\n>Outputs random values from a uniform distribution.\n\n>The generated values follow a uniform distribution in the range [low, high). The lower bound minval is included in the range, while the upper bound maxval is excluded.\n\n>- **low:** The lower bound on the range of random values to generate. Defaults to 0.\n- **high:** The upper bound on the range of random values to generate. Defaults to 1.\n- **size:** An int or tuple of ints that specify the shape of the output array.\n\nWe can visualize the uniform distribution by using a histogram. Let's map the values from `np.random_uniform(-3, 3, [1000])` to a histogram using the `helper.hist_dist` function. This will be `1000` random float values from `-3` to `3`, excluding the value `3`.",
"_____no_output_____"
]
],
[
[
"helpers.hist_dist('Random Uniform (low=-3, high=3)', np.random.uniform(-3, 3, [1000]))",
"_____no_output_____"
]
],
[
[
"The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2.\n\nNow that you understand the uniform function, let's use PyTorch's `nn.init` to apply it to a model's initial weights.\n\n### Uniform Initialization, Baseline\n\n\nLet's see how well the neural network trains using a uniform weight initialization, where `low=0.0` and `high=1.0`. Below, I'll show you another way (besides in the Net class code) to initialize the weights of a network. To define weights outside of the model definition, you can:\n>1. Define a function that assigns weights by the type of network layer, *then* \n2. Apply those weights to an initialized model using `model.apply(fn)`, which applies a function to each model layer.\n\nThis time, we'll use `weight.data.uniform_` to initialize the weights of our model, directly.",
"_____no_output_____"
]
],
[
[
"# takes in a module and applies the specified weight initialization\ndef weights_init_uniform(m):\n classname = m.__class__.__name__\n # for every Linear layer in a model..\n if classname.find('Linear') != -1:\n # apply a uniform distribution to the weights and a bias=0\n m.weight.data.uniform_(0.0, 1.0)\n m.bias.data.fill_(0)",
"_____no_output_____"
],
[
"# create a new model with these weights\nmodel_uniform = Net()\nmodel_uniform.apply(weights_init_uniform)",
"_____no_output_____"
],
[
"# evaluate behavior \nhelpers.compare_init_weights([(model_uniform, 'Uniform Weights')], \n 'Uniform Baseline', \n train_loader,\n valid_loader)",
"_____no_output_____"
]
],
[
[
"---\nThe loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction!\n\n## General rule for setting weights\nThe general rule for setting the weights in a neural network is to set them to be close to zero without being too small. \n>Good practice is to start your weights in the range of $[-y, y]$ where $y=1/\\sqrt{n}$ \n($n$ is the number of inputs to a given neuron).\n\nLet's see if this holds true; let's create a baseline to compare with and center our uniform range over zero by shifting it over by 0.5. This will give us the range [-0.5, 0.5).",
"_____no_output_____"
]
],
[
[
"# takes in a module and applies the specified weight initialization\ndef weights_init_uniform_center(m):\n classname = m.__class__.__name__\n # for every Linear layer in a model..\n if classname.find('Linear') != -1:\n # apply a centered, uniform distribution to the weights\n m.weight.data.uniform_(-0.5, 0.5)\n m.bias.data.fill_(0)\n\n# create a new model with these weights\nmodel_centered = Net()\nmodel_centered.apply(weights_init_uniform_center)",
"_____no_output_____"
]
],
[
[
"Then let's create a distribution and model that uses the **general rule** for weight initialization; using the range $[-y, y]$, where $y=1/\\sqrt{n}$ .\n\nAnd finally, we'll compare the two models.",
"_____no_output_____"
]
],
[
[
"# takes in a module and applies the specified weight initialization\ndef weights_init_uniform_rule(m):\n classname = m.__class__.__name__\n # for every Linear layer in a model..\n if classname.find('Linear') != -1:\n # get the number of the inputs\n n = m.in_features\n y = 1.0/np.sqrt(n)\n m.weight.data.uniform_(-y, y)\n m.bias.data.fill_(0)\n\n# create a new model with these weights\nmodel_rule = Net()\nmodel_rule.apply(weights_init_uniform_rule)",
"_____no_output_____"
],
[
"# compare these two models\nmodel_list = [(model_centered, 'Centered Weights [-0.5, 0.5)'), \n (model_rule, 'General Rule [-y, y)')]\n\n# evaluate behavior \nhelpers.compare_init_weights(model_list, \n '[-0.5, 0.5) vs [-y, y)', \n train_loader,\n valid_loader)",
"_____no_output_____"
]
],
[
[
"This behavior is really promising! Not only is the loss decreasing, but it seems to do so very quickly for our uniform weights that follow the general rule; after only two epochs we get a fairly high validation accuracy and this should give you some intuition for why starting out with the right initial weights can really help your training process!\n\n---\n\nSince the uniform distribution has the same chance to pick *any value* in a range, what if we used a distribution that had a higher chance of picking numbers closer to 0? Let's look at the normal distribution.\n\n### Normal Distribution\nUnlike the uniform distribution, the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) has a higher likelihood of picking number close to it's mean. To visualize it, let's plot values from NumPy's `np.random.normal` function to a histogram.\n\n>[np.random.normal(loc=0.0, scale=1.0, size=None)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.normal.html)\n\n>Outputs random values from a normal distribution.\n\n>- **loc:** The mean of the normal distribution.\n- **scale:** The standard deviation of the normal distribution.\n- **shape:** The shape of the output array.",
"_____no_output_____"
]
],
[
[
"helpers.hist_dist('Random Normal (mean=0.0, stddev=1.0)', np.random.normal(size=[1000]))",
"_____no_output_____"
]
],
[
[
"Let's compare the normal distribution against the previous, rule-based, uniform distribution.\n\n<a id='normalex'></a>\n#### TODO: Define a weight initialization function that gets weights from a normal distribution \n> The normal distribution should have a mean of 0 and a standard deviation of $y=1/\\sqrt{n}$",
"_____no_output_____"
]
],
[
[
"## complete this function\ndef weights_init_normal(m):\n '''Takes in a module and initializes all linear layers with weight\n values taken from a normal distribution.'''\n \n classname = m.__class__.__name__\n # for every Linear layer in a model\n # m.weight.data shoud be taken from a normal distribution\n # m.bias.data should be 0\n \n if classname.find('Linear') != -1:\n # get the number of the inputs\n n = m.in_features\n y = 1.0/np.sqrt(n)\n m.weight.data.normal_(0, y)\n m.bias.data.fill_(0)",
"_____no_output_____"
],
[
"## -- no need to change code below this line -- ##\n\n# create a new model with the rule-based, uniform weights\nmodel_uniform_rule = Net()\nmodel_uniform_rule.apply(weights_init_uniform_rule)\n\n# create a new model with the rule-based, NORMAL weights\nmodel_normal_rule = Net()\nmodel_normal_rule.apply(weights_init_normal)",
"_____no_output_____"
],
[
"# compare the two models\nmodel_list = [(model_uniform_rule, 'Uniform Rule [-y, y)'), \n (model_normal_rule, 'Normal Distribution')]\n\n# evaluate behavior \nhelpers.compare_init_weights(model_list, \n 'Uniform vs Normal', \n train_loader,\n valid_loader)",
"_____no_output_____"
]
],
[
[
"The normal distribution gives us pretty similar behavior compared to the uniform distribution, in this case. This is likely because our network is so small; a larger neural network will pick more weight values from each of these distributions, magnifying the effect of both initialization styles. In general, a normal distribution will result in better performance for a model.\n",
"_____no_output_____"
],
[
"---\n\n### Automatic Initialization\n\nLet's quickly take a look at what happens *without any explicit weight initialization*.",
"_____no_output_____"
]
],
[
[
"## Instantiate a model with _no_ explicit weight initialization \nmodel_no_initialization = Net()",
"_____no_output_____"
],
[
"## evaluate the behavior using helpers.compare_init_weights\nmodel_list = [(model_no_initialization, 'No Weights')]\n\nhelpers.compare_init_weights(model_list, \n 'No Weight Initialization', \n train_loader,\n valid_loader)",
"_____no_output_____"
]
],
[
[
"As you complete this exercise, keep in mind these questions:\n* What initializaion strategy has the lowest training loss after two epochs? What about highest validation accuracy?\n* After testing all these initial weight options, which would you decide to use in a final classification model?",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7448c87afa107c41105467e43646da140117abc | 44,018 | ipynb | Jupyter Notebook | matrix_one/day3.ipynb | zukowski2012/dw-matrix | 9dcc4e9c6faa104ec6a277ad5c7b483603170246 | [
"MIT"
] | null | null | null | matrix_one/day3.ipynb | zukowski2012/dw-matrix | 9dcc4e9c6faa104ec6a277ad5c7b483603170246 | [
"MIT"
] | null | null | null | matrix_one/day3.ipynb | zukowski2012/dw-matrix | 9dcc4e9c6faa104ec6a277ad5c7b483603170246 | [
"MIT"
] | null | null | null | 44,018 | 44,018 | 0.734677 | [
[
[
"# !pip install datadotworld\n# !pip install datadotworld[pandas]",
"_____no_output_____"
],
[
"# !dw configure",
"_____no_output_____"
],
[
"from google.colab import drive\nimport pandas as pd\nimport numpy as np\n\nimport datadotworld as dw",
"_____no_output_____"
],
[
"# drive.mount(\"/content/drive\")",
"_____no_output_____"
],
[
"cd \"drive/My Drive/Colab Notebooks/dw-matrix\"",
"/content/drive/My Drive/Colab Notebooks/dw-matrix\n"
],
[
"!mkdir data",
"_____no_output_____"
],
[
"!echo 'data' > .gitignore",
"_____no_output_____"
],
[
"!git add .gitignore",
"_____no_output_____"
],
[
"data = dw.load_dataset('datafiniti/mens-shoe-prices')",
"_____no_output_____"
],
[
"df = data.dataframes['7004_1']\ndf.shape",
"/usr/local/lib/python3.6/dist-packages/datadotworld/models/dataset.py:209: UserWarning: Unable to set data frame dtypes automatically using 7004_1 schema. Data types may need to be adjusted manually. Error: Integer column has NA values in column 10\n 'Error: {}'.format(resource_name, e))\n/usr/local/lib/python3.6/dist-packages/datadotworld/util.py:121: DtypeWarning: Columns (39,45) have mixed types. Specify dtype option on import or set low_memory=False.\n return self._loader_func()\n"
],
[
"df.sample(5)",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df.prices_currency.unique()",
"_____no_output_____"
],
[
"df.prices_currency.value_counts(normalize=True)",
"_____no_output_____"
],
[
"df_usd = df[ df.prices_currency == 'USD' ].copy()\ndf_usd.shape",
"_____no_output_____"
],
[
"df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(np.float)\ndf_usd['prices_amountmin'].hist()",
"_____no_output_____"
],
[
"filter_max = np.percentile( df_usd['prices_amountmin'], 99 )\nfilter_max",
"_____no_output_____"
],
[
"df_usd_filter = df_usd[ df_usd['prices_amountmin'] < filter_max ]",
"_____no_output_____"
],
[
"df_usd_filter.prices_amountmin.hist(bins=100)",
"_____no_output_____"
],
[
"df.to_csv('data/shoes_prices.csv', index=False)",
"_____no_output_____"
],
[
"!git add matrix_one/day3.ipynb",
"_____no_output_____"
],
[
"!git commit -m \"Read Men's Shoe Prices dataset from data.world without API\"",
"\n*** Please tell me who you are.\n\nRun\n\n git config --global user.email \"[email protected]\"\n git config --global user.name \"Your Name\"\n\nto set your account's default identity.\nOmit --global to set the identity only in this repository.\n\nfatal: unable to auto-detect email address (got 'root@ed2aa052712c.(none)')\n"
],
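[
"# Note: the commit above failed because git has no identity configured in this environment.\n# The e-mail and name below are the placeholders suggested by the error message -- replace them with your own.\n!git config --global user.email \"[email protected]\"\n!git config --global user.name \"Your Name\"",
"_____no_output_____"
],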
[
"",
"_____no_output_____"
],
[
"!git push -u origin master",
"fatal: could not read Password for 'https://[email protected]': No such device or address\n"
],
[
"cd \"drive/My Drive/Colab Notebooks/dw-matrix\"",
"[Errno 2] No such file or directory: 'drive/My Drive/Colab Notebooks/dw-matrix'\n/root\n"
],
[
"!pwd",
"/content/drive/My Drive/Colab Notebooks/dw-matrix\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74490c9b96f6a2363ffda419ed15e1f141c7704 | 68,316 | ipynb | Jupyter Notebook | GFNORTEO.MX.ipynb | ramirezdiana/Analisis-de-riesgo-2019 | 0b95be5d342abed68670117430871f58b1a864ad | [
"MIT"
] | null | null | null | GFNORTEO.MX.ipynb | ramirezdiana/Analisis-de-riesgo-2019 | 0b95be5d342abed68670117430871f58b1a864ad | [
"MIT"
] | null | null | null | GFNORTEO.MX.ipynb | ramirezdiana/Analisis-de-riesgo-2019 | 0b95be5d342abed68670117430871f58b1a864ad | [
"MIT"
] | null | null | null | 191.361345 | 38,320 | 0.892968 | [
[
[
"# Importar paquetes\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport pandas as pd\npd.core.common.is_list_like = pd.api.types.is_list_like\nimport pandas_datareader.data as web\nfrom scipy import stats",
"_____no_output_____"
],
[
"# Función para descargar precios de cierre ajustados de varios activos a la vez:\ndef get_closes(tickers, start_date=None, end_date=None, freq=None):\n # Fecha inicio por defecto (start_date='2010-01-01') y fecha fin por defecto (end_date=today)\n # Frecuencia de muestreo por defecto (freq='d')\n # Importamos paquetes necesarios\n import pandas as pd\n pd.core.common.is_list_like = pd.api.types.is_list_like\n import pandas_datareader.data as web \n # Creamos DataFrame vacío de precios, con el índice de las fechas\n closes = pd.DataFrame(columns = tickers, index=web.YahooDailyReader(symbols=tickers[0], start=start_date, end=end_date, interval=freq).read().index)\n # Agregamos cada uno de los precios con YahooDailyReader\n for ticker in tickers:\n df = web.YahooDailyReader(symbols=ticker, start=start_date, end=end_date, interval=freq).read()\n closes[ticker]=df['Adj Close']\n closes.index_name = 'Date'\n closes = closes.sort_index()\n return closes",
"_____no_output_____"
],
[
"ticker=['GFNORTEO.MX']\nstart,end='2016-01-01','2019-02-23'\ncloses=get_closes(ticker,start,end,freq='d')",
"_____no_output_____"
],
[
"closes.plot(figsize=(8,6))",
"_____no_output_____"
],
[
"ret=np.log(closes/closes.shift()).dropna()\n#prueba normalidad\nstats.jarque_bera(ret)",
"_____no_output_____"
],
[
"plt.title('GFNORTEO.MX')\nplt.hist(ret['GFNORTEO.MX'], bins = 60, alpha=1, edgecolor = 'black', linewidth=1)\nplt.grid(True)\nplt.show()",
"_____no_output_____"
],
[
"a2016=ret.iloc[0:251,]\na2017=ret.iloc[251:502,]\na2018=ret.iloc[502:753,]",
"_____no_output_____"
]
],
[
[
"### Cambios en los precios\n\n\n<div class=text-justify> El aumento en el recio parece constante hasta finales del 2016 o inicios 2017, donde se ve una caida de 105 a 85 aproximadamente, se manntiene unos meses en el rango de 85 a 95, hasta que nicia un crecimeinto en vertical, se mantiene con un precio constante de aproximadamente 102, para después iniciar otra pendiente casi vertical. A finales del 2017 inicia a caer el precio hasta enero del 2018, cuando vuelve a subir. En julio 2018 inicia una alza en el precio sobrepasando los 130, para llegar a una baja muy marcada hasta 80 en picada. Inicia a recuperarse hasta la fecha. </div>",
"_____no_output_____"
]
],
[
[
"ret_sum=pd.DataFrame(index=['Rend diario','Rend anual','Vol diaria','Vol anual'],columns=['2016','2017','2018','Todo'])\nlist=[a2016,a2017,a2018,ret]\nfor x in range (0,4):\n ret_sum.loc['Rend diario'][ret_sum.columns[x]]=list[x]['GFNORTEO.MX'].mean()\n ret_sum.loc['Rend anual'][ret_sum.columns[x]]=list[x]['GFNORTEO.MX'].mean()*252\n ret_sum.loc['Vol diaria'][ret_sum.columns[x]]=list[x]['GFNORTEO.MX'].std()\n ret_sum.loc['Vol anual'][ret_sum.columns[x]]=list[x]['GFNORTEO.MX'].std()*np.sqrt(252)\nret_sum",
"_____no_output_____"
]
],
[
[
"### Discusión datos tabla \n\n\n<div class=text-justify> Se puede observar que el mayor rendimiento anual se da en el 2016, el peor en el 2018, y como se ve en la gráfica de los precios, hay una caida casi al final del 2018, por lo que tiene sentido que su rendimiento diario y anual sea negativo. La volatilidad anual se mantiene entre .24 y .34, siendo ésta ultima mayor por la misma caida en el precio. </div>",
"_____no_output_____"
]
],
[
[
"ret_sum=pd.DataFrame(index=['Mean','Volatility'],columns=ticker)\nret_sum.loc['Mean']=a2018.mean()\nret_sum.loc['Volatility']=a2018.std()",
"_____no_output_____"
],
[
"n=1000\nfor x in range (0,3):\n mu= ret_sum['GFNORTEO.MX']['Mean']\n sigma= ret_sum['GFNORTEO.MX']['Volatility']\n s0=107.01\n listaepsilon = [np.random.randn() for _ in range(n)]\n epsilons = np.asarray(listaepsilon)\n epsilon = pd.DataFrame(columns = ['Epsilon', 'st1','st10','st30','st252'])\n epsilon['Epsilon']= epsilons\n epsilon['st1']= s0*np.exp((mu*.5*sigma**2)*1+sigma*np.sqrt(1)*epsilons)\n epsilon['st10']= s0*np.exp((mu*.5*sigma**2)*10+sigma*np.sqrt(10)*epsilons)\n epsilon['st30']= s0*np.exp((mu*.5*sigma**2)*30+sigma*np.sqrt(30)*epsilons)\n epsilon['st252']= s0*np.exp((mu*.5*sigma**2)*252+sigma*np.sqrt(252)*epsilons)\n st=pd.DataFrame(index=['Mean','liminf','limsup'],columns=epsilon.columns)\n st.loc['Mean']=epsilon.mean()\n st.loc['liminf']=epsilon.min()\n st.loc['limsup']=epsilon.max()\n st=st.drop('Epsilon',1)\n print(st)\n n=n*10",
" st1 st10 st30 st252\nMean 106.993 107.123 107.518 112.653\nliminf 100.711 88.3282 76.754 40.8441\nlimsup 113.925 130.442 150.789 289.134\n st1 st10 st30 st252\nMean 106.999 107.156 107.598 113.224\nliminf 98.0485 81.1539 66.2795 26.696\nlimsup 115.462 136.09 162.276 357.693\n st1 st10 st30 st252\nMean 107.026 107.243 107.75 113.72\nliminf 97.8359 80.5988 65.4961 25.7917\nlimsup 118.408 147.375 186.285 533.565\n"
]
],
[
[
"",
"_____no_output_____"
],
[
"Este fue el resultado el dia 23 de febrero",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e74494ee7acf68de32bb63ef8f050fc56f84c774 | 12,456 | ipynb | Jupyter Notebook | Notebooks/Example-004-Python-Lists.ipynb | Sean-hsj/Elements-of-Software-Design | 1aa4bf9053e37b8cc9d2d7fea4dec1a09f488ce3 | [
"MIT"
] | 3 | 2021-12-08T00:55:56.000Z | 2022-03-01T20:14:56.000Z | Notebooks/Example-004-Python-Lists.ipynb | Sean-hsj/Elements-of-Software-Design | 1aa4bf9053e37b8cc9d2d7fea4dec1a09f488ce3 | [
"MIT"
] | null | null | null | Notebooks/Example-004-Python-Lists.ipynb | Sean-hsj/Elements-of-Software-Design | 1aa4bf9053e37b8cc9d2d7fea4dec1a09f488ce3 | [
"MIT"
] | 19 | 2021-08-31T20:41:59.000Z | 2022-03-31T22:46:22.000Z | 21.328767 | 86 | 0.456647 | [
[
[
"# Lists in Python\n\n<p>\nIn most languages a collection of homogeneous (all of the same type)\nentities is called an array. The size of the array is fixed at the\ntime of creation, however, the contents of the array can be changed\nduring the course of the execution of the program. Higher dimensional\narrays are also possible, where each element of an array is an array.\n</p>\n\n<p>\nThe analogue of an array in Python is a <i>list</i>. Even though a \nlist defines a collection of things it has different properties from\nan array. A list could be a collection of heterogeneous (different\ntypes) items. The size of a list is dynamic. It is not specified at\nthe time of creation and can grow or shrink as needed. A list could\nhave duplicate items. The order of the items in a list is important\nand not their uniqueness. Python also provides built-in functions to \nmanipulate a list and its contents. A higher dimensional list has \nelements that are themselves lists. Given the flexibility and the \nassociated functions, a Python list is a more powerful data structure \nthan an array.\n</p>\n\n<h3> List Creation </h3>\n<p>\nThere are several ways in which to create a list. You can enumerate\nall the elements of a list or create an empty list and then append\nor insert items into the list. When you append an item to a list, that\nitem is added to the end of the list. To insert an item into a list\nyou must specify its position and then all the elements to the right\nor below it are shifted to make space for it.",
"_____no_output_____"
]
],
[
[
"# Enumerate the items\na = [1, 2, 3]\na\n",
"_____no_output_____"
],
[
"# Create an empty list and append or insert\na = []\nprint(a)\n\na.append(1) # a = [1]\nprint(a)\n\na.append(2) # a = [1, 2]\nprint(a)\n\na.insert(1, 3) # a = [1, 3, 2]\nprint(a)",
"[]\n[1]\n[1, 2]\n[1, 3, 2]\n"
],
[
"# Create a two dimensional list\nb = [ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]\n\nb",
"_____no_output_____"
]
],
[
[
"Note that the positions of items in a list start at an index value of 0.\nYou can also create a list by concatenating two or more lists together.\nYou can initialize a list with a predefined value.\n",
"_____no_output_____"
]
],
[
[
"a = [1, 2]\nb = [4, 5]\nc = a + b # c = [1, 2, 4, 5]\n\nprint(c)\n\nd = [0] * 5 # d = [0, 0, 0, 0, 0]\nprint(d)",
"[1, 2, 4, 5]\n[0, 0, 0, 0, 0]\n"
]
],
[
[
"## Basic List Manipulations\nTo obtain the length of a list you can use the <i>len()</i> function.\n",
"_____no_output_____"
]
],
[
[
"a = [1, 2, 3]\nlength = len (a) # length = 3\nlength",
"_____no_output_____"
]
],
[
[
"#### Indexing\nThe items in a list are indexed starting at 0 and ending at index\n<i>length - 1</i>. You can also use negative indices to access elements\nin a list. For example a[-1] returns the last item on the list and\na[-length] returns the first. Unlike a string, a list is mutable, i.e.\nits contents can be changed like so:\n",
"_____no_output_____"
]
],
[
[
"a = [1, 2, 3]\n\na[1] = 4 # a = [1, 4, 3]\na",
"_____no_output_____"
]
],
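[
[
"Below is a short added example (for illustration only) of the negative indexing described above.",
"_____no_output_____"
]
],
[
[
"# Added example: negative indices count from the end of the list\na = [1, 4, 3]\nlength = len (a)\nprint (a[-1])       # 3, the last item\nprint (a[-length])  # 1, the first item",
"_____no_output_____"
]
],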
[
[
"To access or change an element in a 2-dimensional list specify the row\nfirst and then the column.",
"_____no_output_____"
]
],
[
[
"b = [ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]\nprint(b)\n\nd = b[1][2] # d = 6\n\nprint(d)\n\nb[2][1] = b[1][2]*2\n\nprint(b)\n",
"[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n6\n[[1, 2, 3], [4, 5, 6], [7, 12, 9]]\n"
]
],
[
[
"Note that the positions of items in a list start at an index value of 0.\nYou can also create a list by concatenating two or more lists together.\nYou can initialize a list with a predefined value.",
"_____no_output_____"
]
],
[
[
"a = [1, 2]\nb = [4, 5]\nc = a + b # c = [1, 2, 4, 5]\n\nprint(c)\n\nd = [0] * 5 # d = [0, 0, 0, 0, 0]\n\nd",
"[1, 2, 4, 5]\n"
]
],
[
[
"#### List Traversal\n\n<p>\nOne of the most important operations that you can do with a list is to\ntraverse it, i.e. visit each and every element in the list in order.\nThere are several ways in which to do so:\n<pre>\n \n</pre>\n</p>\n",
"_____no_output_____"
]
],
[
[
"a = [9, 2, 6, 4, 7]\n\nprint(a)\n\nfor item in a:\n print (item, end = \" \") # 9 2 6 4 7\n\n\n# Doubles each item in the list\nlength = len (a)\n\nfor i in range (length):\n a[i] = a[i] * 2 ",
"[9, 2, 6, 4, 7]\n9 2 6 4 7 "
]
],
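[
[
"Another way to traverse a list (an added example) is with the built-in enumerate() function, which gives you the index and the item together.",
"_____no_output_____"
]
],
[
[
"# Added example: traverse a list with enumerate() to get the index and the item together\na = [9, 2, 6, 4, 7]\nfor i, item in enumerate (a):\n    print (i, item)",
"_____no_output_____"
]
],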
[
[
"\n<a href = \"https://docs.python.org/3/tutorial/datastructures.html\">\nOther List Functions </a> \n\n\n<table border = \"1\" width = \"75%\">\n<tr>\n<th> Function </th><th> Meaning </th>\n</tr>\n<tr>\n<td> list.sort() </td>\n<td> Sorts a list in ascending order </td>\n</tr>\n<tr>\n<td> list.reverse() </td>\n<td> Reverses the elements in a list </td>\n</tr>\n<tr>\n<td> <i>value</i> in list </td>\n<td> Returns True if the <i>value</i> is in the list and False otherwise</td>\n</tr>\n<tr> \n<td> list.index(x) </td>\n<td> Returns the index of the first occurence of x. Use with the above\nfunction to check if <i>x</i> is in the list before determining its position.\n</td>\n</tr>\n<tr>\n<td> list.count(x) </td>\n<td> Returns the number of occurences of x in the list </td>\n</tr>\n<tr>\n<td> list.remove(x) </td>\n<td> Deletes the first occurence of x in list </td>\n</tr>\n<tr>\n<td> list.pop(i) </td>\n<td> Deletes the ith element in the list and returns its value </td>\n</tr>\n</table>",
"_____no_output_____"
]
],
[
[
"a = [9, 2, 6, 4, 7]\na.sort()\na",
"_____no_output_____"
],
[
"a = [9, 2, 6, 4, 7]\na.reverse()\na",
"_____no_output_____"
],
[
"for value in [9, 2, 6, 4, 7]:\n print(value)",
"9\n2\n6\n4\n7\n"
],
[
"#index\na = [9, 2, 6, 4, 7]\n\na.index(6)",
"_____no_output_____"
],
[
"# count()\na.count(6)\n\n",
"_____no_output_____"
],
[
"# remove\na = [9, 2, 6, 4, 7]\na.remove(2)\na",
"_____no_output_____"
],
[
"# pop\na = [9, 2, 6, 4, 7]\n\nb = a.pop(2)\nprint(b)\na",
"6\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e744963552ab47595f7c1fad7884a31353ac2fae | 1,876 | ipynb | Jupyter Notebook | _pages/Language/Python/src/matplotlib.ipynb | shpimit/shpimit.github.io | 83d1f920f75c2871f8e62f045db29a9b2d93f87b | [
"MIT"
] | 1 | 2018-05-13T12:57:32.000Z | 2018-05-13T12:57:32.000Z | _pages/Language/Python/src/matplotlib.ipynb | shpimit/shpimit.github.io | 83d1f920f75c2871f8e62f045db29a9b2d93f87b | [
"MIT"
] | null | null | null | _pages/Language/Python/src/matplotlib.ipynb | shpimit/shpimit.github.io | 83d1f920f75c2871f8e62f045db29a9b2d93f87b | [
"MIT"
] | null | null | null | 23.45 | 81 | 0.534648 | [
[
[
"# [matplotlib](https://matplotlib.org/gallery/index.html)",
"_____no_output_____"
]
],
[
[
"# font_manager 글꼴 관리자\n# rc 함수 : 전역적으로 사용하고 싶을때\n# 예을 들어서, Figure의 크기를(10, 10)으로 전역 설정하고 싶다면 다음과 같이 코딩한다.\n# plt.rc('figure',figsize=(10,10))\n\nfrom matplotlib import font_manager, rc\nimport matplotlib.pyplot as plt\n\n# 더 많은 설정과 옵션의 종류는 matplotlib/mpl-data폴더의 matplotlibrc 파일에 저장되어 있다.\n# 예시) 파이썬 설치 경로\\Lib\\site-packages\\matplotlib\\mpl-data\\matplotlibrc \nfont_location = 'c:/Windows/Fonts/malgun.ttf' # \\는 \\\\를 사용한다\nfont_name = font_manager.FontProperties(fname=font_location).get_name()\nplt.rc('font',family=font_name)\n\nxdata = [1, 2, 3, 4]\nydata = [5, 6, 7, 8]\n\nplt.plot(xdata, ydata) # 백그라운에 그린다.\nplt.xlabel('x축 한글 표시')\nplt.ylabel('y축 한글 표시')\nplt.title('matplotlib 활용')\nplt.show() # 그린것을 보여준다..",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e744aadc02fe8c50016c4144b30a4aa68237443e | 18,060 | ipynb | Jupyter Notebook | 8-US-airports-case-study-student.ipynb | OSSSP/Network-Analysis-Made-Simple | e87362bbceb6fe04d922155cf6f354f79aca19b5 | [
"MIT"
] | 1 | 2020-05-27T12:52:23.000Z | 2020-05-27T12:52:23.000Z | 8-US-airports-case-study-student.ipynb | OSSSP/Network-Analysis-Made-Simple | e87362bbceb6fe04d922155cf6f354f79aca19b5 | [
"MIT"
] | null | null | null | 8-US-airports-case-study-student.ipynb | OSSSP/Network-Analysis-Made-Simple | e87362bbceb6fe04d922155cf6f354f79aca19b5 | [
"MIT"
] | null | null | null | 22.351485 | 223 | 0.541307 | [
[
[
"## Exploratory analysis of the US Airport Dataset\n\nThis dataset contains data for 25 years[1995-2015] of flights between various US airports and metadata about these routes. Taken from Bureau of Transportation Statistics, United States Department of Transportation.\n\nLet's see what can we make out of this!",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport networkx as nx\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')\n\npass_air_data = pd.read_csv('datasets/passengers.csv')",
"_____no_output_____"
]
],
[
[
"In the `pass_air_data` dataframe we have the information of number of people that fly every year on a particular route.",
"_____no_output_____"
]
],
[
[
"pass_air_data.head()",
"_____no_output_____"
],
[
"# Create a MultiDiGraph from this dataset\n\npassenger_graph = nx.from_pandas_edgelist(pass_air_data, source='ORIGIN', target='DEST', edge_attr=['YEAR', 'PASSENGERS', 'UNIQUE_CARRIER_NAME'], create_using=nx.MultiDiGraph())",
"_____no_output_____"
]
],
[
[
"### Cleveland to Chicago, how many people fly this route?",
"_____no_output_____"
]
],
[
[
"passenger_graph['CLE']['ORD']",
"_____no_output_____"
],
[
"temp = [(i['YEAR'], i['PASSENGERS'])for i in dict(passenger_graph['CLE']['ORD']).values()]\nx, y = zip(*temp)\nplt.plot(x, y)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Exercise\n\nFind the busiest route in 1990 and in 2015 according to number of passengers, and plot the time series of number of passengers on these routes.\n\nYou can use the DataFrame instead of working with the network. It will be faster ;)\n[5 mins]",
"_____no_output_____"
],
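[
"# One possible approach to the exercise above (a sketch, not an official solution):\n# for each year, keep only that year's rows and take the row with the largest passenger count.\nfor yr in [1990, 2015]:\n    year_df = pass_air_data[pass_air_data['YEAR'] == yr]\n    if len(year_df) > 0:\n        busiest = year_df.loc[year_df['PASSENGERS'].idxmax()]\n        print(yr, busiest['ORIGIN'], '->', busiest['DEST'], busiest['PASSENGERS'])\n# Plotting the time series for such a route then follows the same pattern as the CLE-ORD plot above.",
"_____no_output_____"
],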
[
"So let's have a look at the important nodes in this network, i.e. important airports in this network. We'll use pagerank, betweenness centrality and degree centrality.",
"_____no_output_____"
]
],
[
[
"nx.pagerank(passenger_graph)",
"_____no_output_____"
],
[
"def year_network(G, year):\n temp_g = nx.DiGraph()\n for i in G.edges(data=True):\n if i[2]['YEAR'] == year:\n temp_g.add_edge(i[0], i[1], weight=i[2]['PASSENGERS'])\n return temp_g",
"_____no_output_____"
],
[
"pass_2015 = year_network(passenger_graph, 2015)",
"_____no_output_____"
],
[
"len(pass_2015)",
"_____no_output_____"
],
[
"len(pass_2015.edges())",
"_____no_output_____"
],
[
"# Load in the GPS coordinates of all the airports\nlat_long = pd.read_csv('datasets/GlobalAirportDatabase.txt', delimiter=':', header=None)",
"_____no_output_____"
],
[
"lat_long[lat_long[1].isin(list(pass_2015.nodes()))]",
"_____no_output_____"
],
[
"pos_dict = {}\nfor airport in lat_long[lat_long[1].isin(list(pass_2015.nodes()))].iterrows():\n pos_dict[airport[1][1]] = (airport[1][15], airport[1][14]) ",
"_____no_output_____"
],
[
"pos_dict",
"_____no_output_____"
]
],
[
[
"## Exercise\n\nUsing the position dictionary `pos_dict` create a plot of the airports, only the nodes not the edges.\n\n- As we don't have coordinates for all the airports we have to create a subgraph first.\n- Use `nx.subgraph(Graph, iterable of nodes)` to create the subgraph\n- Use `nx.draw_networkx_nodes(G, pos)` to map the nodes. \n\nor \n\n- Just use a scatter plot :)",
"_____no_output_____"
],
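[
"# A sketch for the exercise above, using pos_dict and pass_2015 from the previous cells:\n# keep only the airports we have coordinates for, then draw just the nodes (no edges).\nsub = nx.subgraph(pass_2015, pos_dict.keys())\nplt.figure(figsize=(12, 6))\nnx.draw_networkx_nodes(sub, pos_dict, node_size=5)\nplt.show()",
"_____no_output_____"
],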
[
"### What about degree distribution of this network?",
"_____no_output_____"
]
],
[
[
"plt.hist(list(nx.degree_centrality(pass_2015).values()))\nplt.show()",
"_____no_output_____"
]
],
[
[
"Let's plot a log log plot to get a better overview of this.",
"_____no_output_____"
]
],
[
[
"d = {}\nfor i, j in dict(nx.degree(pass_2015)).items():\n if j in d:\n d[j] += 1\n else:\n d[j] = 1\nx = np.log2(list((d.keys())))\ny = np.log2(list(d.values()))\nplt.scatter(x, y, alpha=0.4)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Directed Graphs\n\n",
"_____no_output_____"
]
],
[
[
"G = nx.DiGraph()\n\nG.add_edge(1, 2, weight=1)\n\n# print(G.edges())\n# G[1][2]\n# G[2][1]\n# G.is_directed()\n# type(G)",
"_____no_output_____"
],
[
"G.add_edges_from([(1, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2)])\nnx.draw_circular(G, with_labels=True)",
"_____no_output_____"
],
[
"G.in_degree()",
"_____no_output_____"
],
[
"nx.pagerank(G)",
"_____no_output_____"
],
[
"G.add_edge(5, 6)\nnx.draw_circular(G, with_labels=True)",
"_____no_output_____"
],
[
"nx.pagerank(G)",
"_____no_output_____"
],
[
"G.add_edge(2, 8)\nnx.draw_circular(G, with_labels=True)",
"_____no_output_____"
],
[
"nx.pagerank(G)",
"_____no_output_____"
]
],
[
[
"### Moving back to Airports",
"_____no_output_____"
]
],
[
[
"sorted(nx.pagerank(pass_2015, weight=None).items(), key=lambda x:x[1], reverse=True)[:10]",
"_____no_output_____"
],
[
"sorted(nx.betweenness_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
],
[
"sorted(nx.degree_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
]
],
[
[
"'ANC' is the airport code of Anchorage airport, a place in Alaska, and according to pagerank and betweenness centrality it is the most important airport in this network Isn't that weird? Thoughts?\n\nrelated blog post: https://toreopsahl.com/2011/08/12/why-anchorage-is-not-that-important-binary-ties-and-sample-selection/\n\nLet's look at weighted version, i.e taking into account the number of people flying to these places.",
"_____no_output_____"
]
],
[
[
"sorted(nx.betweenness_centrality(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
],
[
"sorted(nx.pagerank(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
]
],
[
[
"## How reachable is this network?\n\nWe calculate the average shortest path length of this network, it gives us an idea about the number of jumps we need to make around the network to go from one airport to any other airport in this network.",
"_____no_output_____"
]
],
[
[
"nx.average_shortest_path_length(pass_2015)",
"_____no_output_____"
]
],
[
[
"Wait, What??? This network is not connected. That seems like a really stupid thing to do.",
"_____no_output_____"
]
],
[
[
"list(nx.weakly_connected_components(pass_2015))",
"_____no_output_____"
]
],
[
[
"### SPB, SSB, AIK anyone?",
"_____no_output_____"
]
],
[
[
"pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['ORIGIN'] == 'AIK')]",
"_____no_output_____"
],
[
"pass_2015.remove_nodes_from(['SPB', 'SSB', 'AIK'])",
"_____no_output_____"
],
[
"nx.is_weakly_connected(pass_2015)",
"_____no_output_____"
],
[
"nx.is_strongly_connected(pass_2015)",
"_____no_output_____"
]
],
[
[
"### Strongly vs weakly connected graphs.",
"_____no_output_____"
]
],
[
[
"G = nx.DiGraph()\nG.add_edge(1, 2)\nG.add_edge(2, 3)\nG.add_edge(3, 1)\nnx.draw(G)",
"_____no_output_____"
],
[
"G.add_edge(3, 4)\nnx.draw(G)",
"_____no_output_____"
],
[
"nx.is_strongly_connected(G)",
"_____no_output_____"
],
[
"list(nx.strongly_connected_components(pass_2015))",
"_____no_output_____"
],
[
"pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['DEST'] == 'TSP')]",
"_____no_output_____"
],
[
"pass_2015_strong = pass_2015.subgraph(\n max(nx.strongly_connected_components(pass_2015), key=len))",
"_____no_output_____"
],
[
"len(pass_2015_strong)",
"_____no_output_____"
],
[
"nx.average_shortest_path_length(pass_2015_strong)",
"_____no_output_____"
]
],
[
[
"#### Exercise! (Actually this is a game :D)\n\nHow can we decrease the avg shortest path length of this network?\n\nThink of an effective way to add new edges to decrease the avg shortest path length.\nLet's see if we can come up with a nice way to do this, and the one who gets the highest decrease wins!!!\n\nThe rules are simple:\n- You can't add more than 2% of the current edges( ~500 edges)\n\n[10 mins]",
"_____no_output_____"
]
],
[
[
"# unfreeze the graph\npass_2015_strong = nx.DiGraph(pass_2015_strong)\n",
"_____no_output_____"
]
],
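[
[
"# One heuristic for the game above (a sketch, not necessarily the winning strategy):\n# connect the current highest-degree hub to the nodes that are hardest to reach from it,\n# staying inside the 500-edge budget (250 target nodes x 2 directions).\nhub = max(dict(pass_2015_strong.degree()).items(), key=lambda x: x[1])[0]\nlengths = nx.single_source_shortest_path_length(pass_2015_strong, hub)\nfar_nodes = sorted(lengths, key=lengths.get, reverse=True)[:250]\nfor node in far_nodes:\n    pass_2015_strong.add_edge(hub, node)\n    pass_2015_strong.add_edge(node, hub)\nprint(nx.average_shortest_path_length(pass_2015_strong))",
"_____no_output_____"
]
],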
[
[
"### What about airlines? Can we find airline specific reachability?",
"_____no_output_____"
]
],
[
[
"passenger_graph['CLE']['SFO'][25]",
"_____no_output_____"
],
[
"def str_to_list(a):\n return a[1:-1].split(', ')",
"_____no_output_____"
],
[
"for i in str_to_list(passenger_graph['JFK']['SFO'][25]['UNIQUE_CARRIER_NAME']):\n print(i)",
"_____no_output_____"
],
[
"%%time\nfor origin, dest in passenger_graph.edges():\n for key in passenger_graph[origin][dest]:\n passenger_graph[origin][dest][key]['airlines'] = str_to_list(passenger_graph[origin][dest][key]['UNIQUE_CARRIER_NAME'])",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nPlay around with United Airlines network.\n\n- Extract a network for United Airlines flights from the metagraph `passenger_graph` for the year 2015\n- Make sure it's a weighted network, where weight is the number of passengers.\n- Find the number of airports and connections in this network\n- Find the most important airport, according to PageRank and degree centrality.",
"_____no_output_____"
]
],
[
[
"united_network = nx._________\nfor _______, _______ in passenger_graph.edges():\n if 25 in passenger_graph[______][_______]: # 25 key is for the year 2015\n if \"'United Air Lines Inc.'\" in ____________________:\n united_network.add_edge(_____, ______, weight= __________)",
"_____no_output_____"
],
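[
"# One possible completion of the template above (kept separate so the exercise cell stays as-is):\n# build a weighted DiGraph of the 2015 routes that list United Air Lines Inc. as a carrier.\nunited_network = nx.DiGraph()\nfor origin, dest in passenger_graph.edges():\n    if 25 in passenger_graph[origin][dest]: # 25 key is for the year 2015\n        if \"'United Air Lines Inc.'\" in passenger_graph[origin][dest][25]['airlines']:\n            united_network.add_edge(origin, dest, weight=passenger_graph[origin][dest][25]['PASSENGERS'])",
"_____no_output_____"
],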
[
"# number of nodes\n",
"_____no_output_____"
],
[
"# number of edges",
"_____no_output_____"
],
[
"# top 10 according to pagerank",
"_____no_output_____"
],
[
"# top 10 according to degree centrality",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nWe are in Cleveland so what should we do?\n\nObviously we will make a time series of number of passengers flying out of Cleveland with United Airlines over the years.\n\nThere are 2 ways of doing it.\n- Create a new multidigraph specifically for this exercise.\n\nOR\n\n- exploit the `pass_air_data` dataframe.",
"_____no_output_____"
]
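,
[
"# A DataFrame-based sketch for the exercise above (rough: it sums all passengers on routes that\n# list United Air Lines Inc. among their carriers, using the columns seen in pass_air_data).\ncle_united = pass_air_data[(pass_air_data['ORIGIN'] == 'CLE') &\n                           (pass_air_data['UNIQUE_CARRIER_NAME'].str.contains('United Air Lines Inc.'))]\nby_year = cle_united.groupby('YEAR')['PASSENGERS'].sum()\nplt.plot(by_year.index, by_year.values)\nplt.show()",
"_____no_output_____"
]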
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e744b262158ab3a30e3b40ca12e73f513362eec9 | 66,563 | ipynb | Jupyter Notebook | Implementation/Jupyter_Notebooks/Compas_cGAN.ipynb | bendiste/Algorithmic-Fairness | 7e3b54e38eddb7572777be6f9772e3b2a8e398ec | [
"MIT"
] | null | null | null | Implementation/Jupyter_Notebooks/Compas_cGAN.ipynb | bendiste/Algorithmic-Fairness | 7e3b54e38eddb7572777be6f9772e3b2a8e398ec | [
"MIT"
] | null | null | null | Implementation/Jupyter_Notebooks/Compas_cGAN.ipynb | bendiste/Algorithmic-Fairness | 7e3b54e38eddb7572777be6f9772e3b2a8e398ec | [
"MIT"
] | null | null | null | 36.734547 | 98 | 0.29877 | [
[
[
"# cGAN Generate Synthetic Data for Compas Dataset",
"_____no_output_____"
],
[
"CTGAN model is based on the GAN-based Deep Learning data synthesizer ",
"_____no_output_____"
]
],
[
[
"from implementation_functions import *\n\nimport pandas as pd\nimport numpy as np\n\nfrom prince import FAMD #Factor analysis of mixed data\nfrom aif360.metrics import BinaryLabelDatasetMetric\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nimport matplotlib.pyplot as plt\nimport skfuzzy as fuzz",
"_____no_output_____"
],
[
"data_name = \"compas\"\ndataset_orig, privileged_groups, unprivileged_groups = aif_data(data_name, False)",
"WARNING:root:Missing Data: 5 rows removed from CompasDataset.\n"
],
[
"#assign the sensitive attr and decision labels\nsens_attr = ['race', 'sex']\ndecision_label = 'two_year_recid'\nfav_l = 1\nunfav_l = 0",
"_____no_output_____"
],
[
"orig_df, num_list, cat_list = preprocess(dataset_orig, sens_attr, decision_label)",
"_____no_output_____"
],
[
"#switch the dataset labels other way around for easier interpretation\norig_df['transf_labels'] = np.where(orig_df['two_year_recid']== 0, 1, 0)\ndecision_label = 'transf_labels'\norig_df = orig_df.drop('two_year_recid', axis=1)\n\norig_df, num_list, cat_list = preprocess(orig_df, sens_attr, decision_label)\norig_df['sub_labels'].value_counts()",
"_____no_output_____"
],
[
"import time\nstart_time = time.time()\nprint(\"--- %s seconds ---\" % (time.time() - start_time))",
"--- 9.1552734375e-05 seconds ---\n"
],
[
"# Train-test split WITH stratification\nX = orig_df.loc[:, orig_df.columns != decision_label]\ny = orig_df.loc[:, orig_df.columns == decision_label].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, \n shuffle=True,\n stratify=X['sub_labels'])",
"_____no_output_____"
],
[
"keep_sub_l = X_train['sub_labels']",
"_____no_output_____"
],
[
"X_train_new = X_train.drop(['age', 'sex', 'sub_labels'], axis=1)",
"_____no_output_____"
],
[
"X_train_new['sub_labels'] = keep_sub_l",
"_____no_output_____"
],
[
"X_train_new['class_labels'] = y_train",
"_____no_output_____"
],
[
"X_train_new",
"_____no_output_____"
]
],
[
[
"# Here we start the GAN work",
"_____no_output_____"
]
],
[
[
"from sdv.tabular import CTGAN\nmodel = CTGAN()",
"_____no_output_____"
]
],
[
[
"from sdv.tabular import CTGAN\nmodel = CTGAN()\nstart_time = time.time()\nmodel.fit(X_train_new)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))",
"_____no_output_____"
],
[
"model.save('my_fariness_Compas_V3.pkl')",
"_____no_output_____"
]
],
[
[
"loaded = CTGAN.load('my_fariness_Compas_V3.pkl')",
"_____no_output_____"
],
[
"# print(X_train.loc[:,\"sub_labels\"])\navailable_rows = {}\nfor row_count in range(8):\n available_rows[row_count] = X_train[\"sub_labels\"].value_counts()[row_count]\n \ntarget_rows = max(available_rows.values())\nmax_label = max(available_rows, key=available_rows.get)\nprint(target_rows)\nprint(max_label)\n# print(X_train[\"sub_labels\"].value_counts())\n",
"1021\n0\n"
],
[
"main_df = pd.DataFrame()",
"_____no_output_____"
],
[
"for key, value in available_rows.items():\n if int(key) != int(max_label):\n conditions = {\n \"sub_labels\" : int(key),\n }\n needed_rows = target_rows - value\n main_df = pd.concat([main_df, loaded.sample(needed_rows, conditions=conditions)])\n \nprint(len(main_df.index))",
"4477\n"
]
],
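[
[
"# A quick sanity check (added sketch): compare the synthetic rows produced by CTGAN against the\n# real training rows; broadly similar summary statistics suggest the marginals were captured.\nprint(main_df.describe())\nprint(X_train_new.describe())",
"_____no_output_____"
]
],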
[
[
"# Extreme Gradient Boosting Classifier",
"_____no_output_____"
]
],
[
[
"# Type the desired classifier to train the classification models with model obj\nxgb= GradientBoostingClassifier()\nbaseline_stats, cm, ratio_table, preds = baseline_metrics(xgb, X_train, X_test, \n y_train, y_test, sens_attr, \n fav_l, unfav_l)",
"_____no_output_____"
],
[
"test_sublabels = X_test['sub_labels']\nX_test_n = X_test.drop(['race', 'sex','sub_labels'], axis=1)\nnum_list, cat_list = type_lists(X_test_n)",
"_____no_output_____"
],
[
"final_df = pd.concat([main_df, X_train_new])",
"_____no_output_____"
],
[
"final_df ",
"_____no_output_____"
],
[
"final_df['sub_labels'].value_counts()",
"_____no_output_____"
],
[
"print(ratio_table)",
" Base Ratio Positive Ratio Negative Ratio\nIndex \n{'race': 0, 'sex': 0} 0.497472 0.204804 0.292668\n{'race': 1, 'sex': 0} 0.307206 0.213021 0.094185\n{'race': 0, 'sex': 1} 0.104298 0.067636 0.036662\n{'race': 1, 'sex': 1} 0.091024 0.069532 0.021492\n"
],
[
"# Predicting the test sets using the extreme gradient boosting \nX_test_pred_xgb = predict_whole_set(xgb, final_df, X_test_n)",
"_____no_output_____"
],
[
"metrics_table1, cm1, ratio_t1 = metrics_calculate(X_test, X_test_pred_xgb, y_test,\n sens_attr, fav_l, unfav_l)\n",
"_____no_output_____"
],
[
"#outputs from strategy 1\nprint(metrics_table1)\nprint(\"Confusion Matrix:\", cm1)\nprint(ratio_t1)",
" AEO Difference \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] -0.119925 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] -0.034355 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.007313 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] -0.154280 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] -0.027042 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] -0.146967 \n\n Disparate Impact Ratio \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.824351 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.972639 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.993566 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.801796 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.966382 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.796638 \n\n Dem Parity Difference \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] -0.153241 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] -0.024542 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] -0.005808 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] -0.177783 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] -0.030350 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] -0.183591 \n\n Predictive Parity Difference \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] -0.117374 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.002295 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] -0.035967 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] -0.115080 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] -0.033672 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] -0.151046 \n\n Consistency Accuracy \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.919975 0.634008 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.919975 0.634008 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.919975 0.634008 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.919975 0.634008 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.919975 0.634008 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.919975 0.634008 \n\n Balanced accuracy F1-Score \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.616868 0.724941 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.616868 0.724941 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.616868 0.724941 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.616868 0.724941 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.616868 0.724941 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.616868 0.724941 \n\n Precision (PPV) Recall (TPR) \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.601735 0.911589 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.601735 0.911589 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.601735 0.911589 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.601735 0.911589 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.601735 0.911589 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.601735 0.911589 \n\n Specificity (TNR) \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.322148 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.322148 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.322148 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.322148 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.322148 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.322148 \nConfusion Matrix: {'TP': 763.0, 'FP': 505.0, 'TN': 240.0, 'FN': 74.0}\n Base Ratio Positive Ratio Negative Ratio\nIndex \n{'race': 0, 'sex': 0} 0.497472 0.357775 0.139697\n{'race': 1, 'sex': 0} 0.307206 0.268015 0.039191\n{'race': 0, 'sex': 1} 0.104298 0.093552 0.010746\n{'race': 1, 'sex': 1} 0.091024 0.082174 0.008850\n"
]
],
[
[
"# Random Forest Classifer",
"_____no_output_____"
]
],
[
[
"# Type the desired classifier to train the classification models with model obj\nRF= GradientBoostingClassifier()\nbaseline_stats, cm, ratio_table, preds = baseline_metrics(xgb, X_train, X_test, \n y_train, y_test, sens_attr, \n fav_l, unfav_l)",
"_____no_output_____"
],
[
"print(ratio_table)",
" Base Ratio Positive Ratio Negative Ratio\nIndex \n{'race': 0, 'sex': 0} 0.497472 0.205436 0.292035\n{'race': 1, 'sex': 0} 0.307206 0.211125 0.096081\n{'race': 0, 'sex': 1} 0.104298 0.067636 0.036662\n{'race': 1, 'sex': 1} 0.091024 0.069532 0.021492\n"
],
[
"# Predicting the test sets using the extreme gradient boosting \nX_test_pred_RF = predict_whole_set(RF, final_df, X_test_n)",
"_____no_output_____"
],
[
"metrics_table1, cm1, ratio_t1 = metrics_calculate(X_test, X_test_pred_RF, y_test,\n sens_attr, fav_l, unfav_l)",
"_____no_output_____"
],
[
"#outputs from strategy 1\nprint(metrics_table1)\nprint(\"Confusion Matrix:\", cm1)\nprint(ratio_t1)",
" AEO Difference \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] -0.119925 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] -0.034355 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.007313 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] -0.154280 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] -0.027042 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] -0.146967 \n\n Disparate Impact Ratio \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.824351 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.972639 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.993566 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.801796 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.966382 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.796638 \n\n Dem Parity Difference \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] -0.153241 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] -0.024542 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] -0.005808 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] -0.177783 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] -0.030350 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] -0.183591 \n\n Predictive Parity Difference \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] -0.117374 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.002295 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] -0.035967 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] -0.115080 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] -0.033672 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] -0.151046 \n\n Consistency Accuracy \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.919975 0.634008 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.919975 0.634008 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.919975 0.634008 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.919975 0.634008 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.919975 0.634008 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.919975 0.634008 \n\n Balanced accuracy F1-Score \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.616868 0.724941 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.616868 0.724941 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.616868 0.724941 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.616868 0.724941 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.616868 0.724941 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.616868 0.724941 \n\n Precision (PPV) Recall (TPR) \\\n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.601735 0.911589 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.601735 0.911589 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.601735 0.911589 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.601735 0.911589 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.601735 0.911589 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.601735 0.911589 \n\n Specificity (TNR) \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 0}] 0.322148 \n[{'race': 1, 'sex': 0}][{'race': 0, 'sex': 1}] 0.322148 \n[{'race': 0, 'sex': 1}][{'race': 1, 'sex': 1}] 0.322148 \n[{'race': 0, 'sex': 0}][{'race': 0, 'sex': 1}] 0.322148 \n[{'race': 1, 'sex': 0}][{'race': 1, 'sex': 1}] 0.322148 \n[{'race': 0, 'sex': 0}][{'race': 1, 'sex': 1}] 0.322148 \nConfusion Matrix: {'TP': 763.0, 'FP': 505.0, 'TN': 240.0, 'FN': 74.0}\n Base Ratio Positive Ratio Negative Ratio\nIndex \n{'race': 0, 'sex': 0} 0.497472 0.357775 0.139697\n{'race': 1, 'sex': 0} 0.307206 0.268015 0.039191\n{'race': 0, 'sex': 1} 0.104298 0.093552 0.010746\n{'race': 1, 'sex': 1} 0.091024 0.082174 0.008850\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"raw",
"raw"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e744bdf30bf0c08dedefdebdd42b991cd00cd2f0 | 32,174 | ipynb | Jupyter Notebook | notebooks/2c_visualizacion_datos.ipynb | kevinalexandr19/manual-python-geologia | 00c3b1731140bf2fa3bc6ce181d8fd4956325ffb | [
"MIT"
] | 33 | 2021-11-19T05:37:45.000Z | 2022-03-30T02:52:37.000Z | notebooks/2c_visualizacion_datos.ipynb | kevinalexandr19/manual-python-geologia | 00c3b1731140bf2fa3bc6ce181d8fd4956325ffb | [
"MIT"
] | null | null | null | notebooks/2c_visualizacion_datos.ipynb | kevinalexandr19/manual-python-geologia | 00c3b1731140bf2fa3bc6ce181d8fd4956325ffb | [
"MIT"
] | 7 | 2022-02-08T01:55:26.000Z | 2022-03-11T16:43:51.000Z | 32.897751 | 261 | 0.579785 | [
[
[
"# **Visualización de Datos en Python**\n***\n\n### **Editado por: Kevin Alexander Gómez**\n#### Contacto: [email protected] | [Linkedin](https://www.linkedin.com/in/kevin-alexander-g%C3%B3mez-2b0263111/) | [Github](https://github.com/kevinalexandr19)\n***",
"_____no_output_____"
],
[
"### **Descripción**\n\nUsando este manual, desarrollarás código en Python orientado a la visualización de datos.\n\nEste Notebook es parte del proyecto [**Python para Geólogos**](https://github.com/kevinalexandr19/manual-python-geologia), y ha sido creado con la finalidad de facilitar el aprendizaje en Python para estudiantes y profesionales en el campo de la Geología.",
"_____no_output_____"
],
[
"## **Índice**\n***\n\n1. [Ventajas de la visualización de datos en Python](#parte1)\n2. [Seaborn](#parte2)\n3. [Pyrolite](#parte3)\n4. [Mplstereonet](#parte4)\n\n***",
"_____no_output_____"
],
[
"<a id=\"parte1\"></a>",
"_____no_output_____"
],
[
"## **1. Ventajas de la visualización de datos en Python**\n***\nLa **visualización de datos** consiste en intentar entender los datos a través de un contexto visual de tal manera que podamos detectar patrones, tendencias y correlaciones.\\\nPuedes revisar diferentes estilos de visualización en la página de [DataVizProject](https://datavizproject.com/).\n\nLas principales ventajas de realizar visualizaciones dentro de Python son:\n- Acceso a múltiples <span style=\"color:lightgreen\">librerías</span> (e.g. `Matplotlib`, `Seaborn`, `Mplstereonet`, etc.) con diferentes funcionalidades y aplicaciones en diferentes disciplinas.\n- <span style=\"color:lightgreen\">Escalabilidad</span> y <span style=\"color:lightgreen\">automatización</span> con el potencial de generar decenas de gráficos usando solamente unas pocas líneas de código.\n- Amplia gama de figuras y estilos de visualización con un alto nivel de <span style=\"color:lightgreen\">personalización</span>, lo que permite la creación de nuevos tipos de figuras.\n\n\n",
"_____no_output_____"
],
[
"<a id=\"parte2\"></a>",
"_____no_output_____"
],
[
"## **2. Seaborn**\n***\n**Esta librería posee una interface de alto nivel para la creación de figuras atractivas. Usa menos líneas de código comparado con Matplotlib.**\n\nEn los siguientes ejemplos, usaremos información geoquímica de **peridotitas** y **granodioritas** para crear diferentes tipos de gráficos.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(context=\"notebook\", style=\"ticks\")",
"_____no_output_____"
]
],
[
[
"La información se encuentra en un archivo CSV llamado `rocas.csv`.\\\nEsta información ha sido procesada previamente y proviene de una base de datos geoquímica de uso público llamada [GEOROC](http://georoc.mpch-mainz.gwdg.de/georoc/Start.asp).\\\nAbriremos estos archivos a través de la librería `Pandas` y usaremos la función `read_csv`.\n> Si al ejecutar `read_csv` ocurren problemas para leer el archivo, puedes probar usando `encoding=\"ISO-8859-1\"` (por defecto se usa `encoding=\"utf-8\"`).",
"_____no_output_____"
]
],
[
[
"rocas = pd.read_csv(\"files/rocas.csv\", encoding=\"ISO-8859-1\")",
"_____no_output_____"
],
[
"rocas",
"_____no_output_____"
]
],
[
[
"Revisaremos la información general del cuadro usando el método `info`:",
"_____no_output_____"
]
],
[
[
"rocas.info()",
"_____no_output_____"
]
],
[
[
"<br>",
"_____no_output_____"
],
[
"En resumen, el cuadro contiene una columna llamada `Nombre` que representa la clasificación petrográfica y está representada por valores de tipo `string` (señalado como `object`).\\\nLas columnas: `SiO2`, `Al2O3`, `FeOT`, `CaO`, `MgO`, `Na2O`, `K2O`, `MnO` y `TiO`, representan concentraciones geoquímicas (en wt%) y están representadas por valores numéricos de tipo `float`.\\\nY por último, el cuadro contiene 4566 muestras, y no presenta valores vacíos o nulos.\n\nA continuación, usaremos esta información para generar algunos gráficos.",
"_____no_output_____"
],
[
"### **2.1. Visualizando la distribución de datos con `boxplot` y `violinplot`**\n\nEmpezaremos separando el cuadro en dos y usaremos los nombres `prd` y `grn` para referenciar a las muestras de peridotita y granodiorita respectivamente.\\\nCrearemos una copia de cada cuadro usando el método `copy`:",
"_____no_output_____"
]
],
[
[
"prd = rocas[rocas[\"Nombre\"] == \"Peridotita\"].copy()\ngrn = rocas[rocas[\"Nombre\"] == \"Granodiorita\"].copy()",
"_____no_output_____"
]
],
[
[
"Para observar la distribución de los datos geoquímicos en las muestras, usaremos dos tipos de figuras:\n- `boxplot`: muestra la distribución cuantitativa de los datos y sus cuartiles, también estableces un máximo y mínimo en base al rango intercuartílico.\\\n Los puntos que se alejan del rango se consideran *outliers*.\n\n<img src=\"resources/boxplot.png\" alt=\"Las 4 fases en el análisis de datos\" width=\"700\"/>\n\n- `violinplot`: cumple las mismas funciones del `boxplot` pero además muestra una distribución de densidad de los datos.\n\n<img src=\"resources/box_violinplot.png\" alt=\"Las 4 fases en el análisis de datos\" width=\"500\"/>\n\nPrimero, crearemos un boxplot para las muestras de peridotita:",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 2, figsize=(18, 10))\n\nsns.boxplot(ax=axs[0], data=prd[[\"SiO2\", \"Al2O3\", \"FeOT\", \"CaO\", \"MgO\"]], orient=\"h\", flierprops={\"marker\":\"o\", \"markersize\": 4})\naxs[0].grid()\naxs[0].set_xlabel(\"%\", fontsize=18)\n\nsns.boxplot(ax=axs[1], data=prd[[\"Na2O\", \"K2O\", \"MnO\", \"TiO\"]], orient=\"h\", flierprops={\"marker\":\"o\", \"markersize\": 4})\naxs[1].grid()\naxs[1].set_xlabel(\"%\", fontsize=18)\n\nfig.suptitle(\"Boxplot para las muestras de peridotita\", y=0.92, fontsize=25)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Los gráficos en `boxplot` nos ayudan a visualizar mejor la distribución de los datos, pero podemos mejorarlo usando `violinplot`:\n",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 2, figsize=(18, 10))\n\nsns.violinplot(ax=axs[0], data=prd[[\"SiO2\", \"Al2O3\", \"FeOT\", \"CaO\", \"MgO\"]], orient=\"h\")\naxs[0].grid()\naxs[0].set_xlabel(\"%\", fontsize=18)\n\nsns.violinplot(ax=axs[1], data=prd[[\"Na2O\", \"K2O\", \"MnO\", \"TiO\"]], orient=\"h\")\naxs[1].grid()\naxs[1].set_xlabel(\"%\", fontsize=18)\n\nfig.suptitle(\"Violinplot para las muestras de peridotita\", y=0.92, fontsize=25)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### **2.2. Visualizando la matriz de correlación con `heatmap`**\n\n",
"_____no_output_____"
],
[
"Ahora, crearemos una matriz de correlación para las muestras de peridotita usando el método `corr`:",
"_____no_output_____"
]
],
[
[
"prd.corr()",
"_____no_output_____"
]
],
[
[
"Esta matriz nos muestra la correlación de Pearson por cada par de columnas en el cuadro.\\\nUsaremos esta matriz para crear una visualización agradable de las diferentes correlaciones en el cuadro.",
"_____no_output_____"
]
],
[
[
"# Matriz de correlación\ncorr = prd.corr()\n\n# Generamos una matriz triangular\nmask = np.triu(np.ones_like(corr, dtype=bool))\n\n# Creamos la figura\nfig, ax = plt.subplots(figsize=(10, 8))\n\n# Creamos un mapa de colores divergentes\ncmap = sns.diverging_palette(230, 20, as_cmap=True)\n\n# Creamos un mapa de calor usando la matriz triangular y el mapa de colores\nsns.heatmap(corr, mask=mask, cmap=cmap,\n vmin=-1, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .9, \"label\": \"Correlación de Pearson\"}, annot=True)\n\n# Remueve los ticks\nax.tick_params(left=False, bottom=False)\n\n# Título\nax.set_title(\"Matriz de correlación (Peridotita)\", fontsize=18, x=0.55)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Vamos a filtrar aquellas correlaciones mayores a 0.7 y menores a -0.7:",
"_____no_output_____"
]
],
[
[
"# Filtrando aquellos pares con una correlación alta\ncorr = corr.where((corr > 0.7) | (corr < -0.7), 0)\n\n# Matriz triangular\nmask = np.triu(np.ones_like(corr, dtype=bool))\n\n# Figura\nfig, ax = plt.subplots(figsize=(10, 8))\ncmap = sns.diverging_palette(230, 20, as_cmap=True)\nsns.heatmap(corr, mask=mask, cmap=cmap,\n vmin=-1, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .9, \"label\": \"Correlación de Pearson\"}, annot=True)\nax.tick_params(left=False, bottom=False)\nax.set_title(\"Matriz de correlación (Peridotita)\", fontsize=18, x=0.55)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Ahora, crearemos diagramas de dispersión para visualizar estos 3 pares.",
"_____no_output_____"
],
[
"### **2.2. Diagrama de dispersión con `scatterplot`**\nColocaremos estos pares en una lista de tuplas llamada `pares`:",
"_____no_output_____"
]
],
[
[
"pares = [(\"CaO\", \"Al2O3\"), (\"MgO\", \"Al2O3\"), (\"MgO\", \"CaO\")]",
"_____no_output_____"
]
],
[
[
"Y lo usaremos dentro de la función `scatterplot` para crear una figura con 3 diagramas de dispersión:",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 3, figsize=(16, 6))\n\nfor par, ax in zip(pares, axs):\n sns.scatterplot(ax=ax, data=prd, x=par[0], y=par[1], edgecolor=\"black\", marker=\"o\", s=12)\n ax.grid()\n \nfig.suptitle(\"Diagramas de dispersión para pares de elementos con alta correlación (Peridotita)\", fontsize=20)\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"Por último, agregaremos los valores de estos pares con las muestras de granodiorita.",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 3, figsize=(16, 6))\n\nfor par, ax in zip(pares, axs):\n sns.scatterplot(ax=ax, data=rocas, x=par[0], y=par[1], marker=\"o\", hue=\"Nombre\", s=12, edgecolor=\"black\", palette=[\"green\", \"red\"], legend=False)\n ax.grid()\n\nfig.suptitle(\"Diagramas de dispersión para pares de elementos con alta correlación\", fontsize=20) \n\n# Leyenda personalizada\nplt.scatter([], [], color=\"g\", marker=\"o\", edgecolor=\"black\", label=\"Peridotita\") \nplt.scatter([], [], color=\"r\", marker=\"o\", edgecolor=\"black\", label=\"Granodiorita\")\nplt.legend(title=\"Tipo de roca\", frameon=True, markerscale=1.5)\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"### **2.3. Histograma y Distribuciones de probabilidad con `histplot` y `kdeplot`**\n\nPodemos observar la distribución univariable de datos geoquímicos usando un **histograma** o una **distribución de probabilidad**.",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 2, figsize=(15, 5))\n\nsns.histplot(ax=axs[0], data=rocas, x=\"CaO\", hue=\"Nombre\", bins=20, alpha=0.6, edgecolor=\"black\", linewidth=.5, palette=[\"green\", \"red\"])\naxs[0].set_title(\"Histograma\", fontsize=20)\n\nsns.kdeplot(ax=axs[1], data=rocas, x=\"CaO\", hue=\"Nombre\", fill=True, cut=0, palette=[\"green\", \"red\"])\naxs[1].set_title(\"Distribución de probabilidad\", fontsize=20)\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"También es posible observar la distribución bivariable",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 2, figsize=(15, 5))\n\nsns.histplot(ax=axs[0], data=rocas, x=\"SiO2\", y=\"FeOT\", hue=\"Nombre\", alpha=0.8, palette=[\"green\", \"red\"])\naxs[0].set_title(\"Histograma\", fontsize=20)\naxs[0].grid()\n\nsns.kdeplot(ax=axs[1], data=rocas, x=\"SiO2\", y=\"FeOT\", hue=\"Nombre\", fill=True, cut=0, palette=[\"green\", \"red\"])\naxs[1].set_title(\"Distribución de probabilidad\", fontsize=20)\naxs[1].grid()\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"<a id=\"parte3\"></a>",
"_____no_output_____"
],
[
"## **3. Pyrolite**\n***\n**Pyrolite es una librería que te permite crear diagramas ternarios a partir de información geoquímica.**\n\nPodemos verificar que tenemos `pyrolite` instalado usando el siguiente comando:",
"_____no_output_____"
]
],
[
[
"!pip show pyrolite",
"_____no_output_____"
]
],
[
[
"Ahora, importaremos la función `pyroplot` del módulo `plot`:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(context=\"notebook\", style=\"ticks\")\nfrom pyrolite.plot import pyroplot",
"_____no_output_____"
],
[
"rocas = pd.read_csv(\"files/rocas.csv\")\nprd = rocas[rocas[\"Nombre\"] == \"Peridotita\"].copy()\ngrn = rocas[rocas[\"Nombre\"] == \"Granodiorita\"].copy()",
"_____no_output_____"
]
],
[
[
"Y crearemos un diagrama ternario, para esto tenemos que usar el método `pyroplot` en el cuadro que contenga la información geoquímica:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(6, 6))\n\nax1 = prd[[\"SiO2\", \"Al2O3\", \"FeOT\"]].pyroplot.scatter(ax=ax, c=\"green\", s=5, marker=\"o\")\n\nax1.grid(axis=\"r\", linestyle=\"--\", linewidth=1)\n\nplt.suptitle(\"Diagrama ternario $SiO_{2} - Al_{2}O_{3} - FeOT$\", fontsize=18)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Podemos establecer límites en el diagrama ternario usando el método `set_ternary_lim`.\\\nAdemás, podemos cambiar la etiqueta de cada esquina usando `set_tlabel`, `set_llabel` y `set_rlabel`:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(6, 6))\n\nax1 = prd[[\"SiO2\", \"Al2O3\", \"FeOT\"]].pyroplot.scatter(ax=ax, c=\"green\", s=5, marker=\"o\")\n\nax1.set_ternary_lim(tmin=0.5, tmax=1.0,\n lmin=0.0, lmax=0.5, \n rmin=0.0, rmax=0.5)\n\nax1.set_tlabel(\"$SiO_{2}$\")\nax1.set_llabel(\"$Al_{2}O_{3}$\")\nax1.set_rlabel(\"$FeOT$\")\n\nax1.grid()\n\nplt.suptitle(\"Diagrama ternario $SiO_{2} - Al_{2}O_{3} - FeOT$\", fontsize=18, y=1.01)\nplt.show()",
"_____no_output_____"
]
],
[
[
"También podemos graficar distribuciones de probabilidad usando el método `density`:",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(1, 2, figsize=(15, 6))\n\nprd[[\"Na2O\", \"CaO\", \"K2O\"]].pyroplot.density(ax=axs[0])\n\nprd[[\"Na2O\", \"CaO\", \"K2O\"]].pyroplot.density(ax=axs[1], contours=[0.95, 0.66, 0.33], linewidths=[1, 2, 3], linestyles=[\"-.\", \"--\", \"-\"], colors=[\"purple\", \"green\", \"blue\"])\n\nplt.suptitle(\"Diagrama ternario $Na_{2}O - Ca_{2}O - K_{2}O$\", fontsize=20)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"Ahora, crearemos una figura que muestre la relación `SiO2 - Al2O3 - CaO` para las muestras de peridotita y granodiorita:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(8, 8))\n\nax1 = prd[[\"SiO2\", \"Al2O3\", \"CaO\"]].pyroplot.scatter(c=\"g\", s=5, marker=\"o\", ax=ax, alpha=0.7, label=\"Peridotita\")\nprd[[\"SiO2\", \"Al2O3\", \"CaO\"]].pyroplot.density(ax=ax, contours=[0.95, 0.66, 0.33], colors=[\"blue\"]*3, alpha=0.6)\n\ngrn[[\"SiO2\", \"Al2O3\", \"CaO\"]].pyroplot.scatter(c=\"r\", s=5, marker=\"o\", ax=ax, alpha=0.7, label=\"Granodiorita\")\ngrn[[\"SiO2\", \"Al2O3\", \"CaO\"]].pyroplot.density(ax=ax, contours=[0.95, 0.66, 0.33], colors=[\"purple\"]*3, alpha=0.6)\n\nplt.suptitle(\"$SiO_{2} - Al_{2}O_{3} - CaO$\", fontsize=20)\nplt.legend(prop={'size': 12}, markerscale=4, frameon=True, loc=0)\nplt.grid(linewidth=.5)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"Por último, crearemos otra figura que muestre la relación `SiO2 - Al2O3 - (FeOT + MgO)` para las muestras de peridotita y granodiorita.\\\nPara esto, crearemos una columna llamada `FeOT + MgO` en ambos cuadros:",
"_____no_output_____"
]
],
[
[
"prd[\"FeOT + MgO\"] = prd[\"FeOT\"] + prd[\"MgO\"]\ngrn[\"FeOT + MgO\"] = grn[\"FeOT\"] + grn[\"MgO\"]",
"_____no_output_____"
]
],
[
[
"Ahora, podemos usar esta nueva columna en la figura:",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(8, 8))\n\nax1 = prd[[\"SiO2\", \"Al2O3\", \"FeOT + MgO\"]].pyroplot.scatter(c=\"g\", s=5, marker=\"o\", ax=ax, alpha=0.6, label=\"Peridotita\")\nprd[[\"SiO2\", \"Al2O3\", \"FeOT + MgO\"]].pyroplot.density(ax=ax, contours=[0.95, 0.66, 0.33], colors=[\"blue\"]*3, alpha=0.6)\n\ngrn[[\"SiO2\", \"Al2O3\", \"FeOT + MgO\"]].pyroplot.scatter(c=\"r\", s=5, marker=\"o\", ax=ax, alpha=0.6, label=\"Granodiorita\")\ngrn[[\"SiO2\", \"Al2O3\", \"FeOT + MgO\"]].pyroplot.density(ax=ax, contours=[0.95, 0.66, 0.33], colors=[\"purple\"]*3, alpha=0.6)\n\nax1.set_ternary_lim(0.3, 1.0, 0.0, 0.7, 0.0, 0.7)\n\nplt.suptitle(\"$SiO_{2} - Al_{2}O_{3} - (FeOT + MgO)$\", fontsize=20)\nplt.legend(prop={'size': 12}, markerscale=4, frameon=True, loc=0)\nplt.grid(linewidth=.5)\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"<a id=\"parte4\"></a>",
"_____no_output_____"
],
[
"## **4. Mplstereonet**\n***\n**Esta librería permite crear figuras estereográficas equiangulares (red de Wulff) y equiareales (red de Schmidtt).**\n\nEmpezaremos revisando si `mplstereonet` se encuentra instalado:",
"_____no_output_____"
]
],
[
[
"!pip show mplstereonet",
"_____no_output_____"
]
],
[
[
"Ahora, importaremos `mplstereonet` y cargaremos el archivo `data_estructural.csv`:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mplstereonet",
"_____no_output_____"
],
[
"datos = pd.read_csv(\"files/data_estructural.csv\")\ndatos.head()",
"_____no_output_____"
]
],
[
[
"### **4.1. Diagrama de círculos máximos o Diagrama Beta**\nEste diagrama es utilizado para la representación de elementos planos.\\\nEn la siguiente figura, usaremos la función `plane` para representar el plano. Esta función debe tener una dirección o rumbo (`strike`) y un buzamiento (`dip`).\\\nTambién es posible agregar el cabeceo de una línea o (también llamado `rake`) a partir de una dirección, buzamiento y ángulo de cabeceo (`rake_angle`).\n\nAsignaremos las columnas de dirección y buzamiento a dos variables llamadas `strike` y `dip`:",
"_____no_output_____"
]
],
[
[
"strike = datos.direccion\ndip = datos.buzamiento\nrake = datos.cabeceo",
"_____no_output_____"
]
],
[
[
"Para crear la figura estereográfica usaremos el método `add_subplot` y la opción `projection=\"stereonet\"`.\n> Nota: usaremos `constrained_layout=True` para mantener las etiquetas de los ángulos en posición correcta.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(5, 5), constrained_layout=True)\nax = fig.add_subplot(111, projection=\"equal_angle_stereonet\")\n\nax.plane(strike, dip, c=\"black\", linewidth=0.5)\nax.grid()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### **4.2. Diagrama de polos o Diagrama Pi**\nUsado cuando las medidas a representar en el diagrama son muy numerosas.\\\nEn la siguiente figura, usaremos la función `pole` para representar el polo. Esta función debe tener una dirección (`strike`) y buzamiento (`dip`).",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(5, 5), constrained_layout=True)\nax = fig.add_subplot(111, projection=\"equal_angle_stereonet\")\n\nax.pole(strike, dip, c=\"red\", markersize=5)\nax.grid()\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### **4.3. Diagrama de densidad de polos**\n\nUsando la red de Schmidt (equiareal), podemos hacer un recuento directo de los polos y calcular su valor estadístico por unidad de superficie, determinando las direcciones y buzamiento predominantes.",
"_____no_output_____"
]
],
[
[
"fig = plt.figure(figsize=(5, 5), constrained_layout=True)\nax = fig.add_subplot(111, projection=\"equal_area_stereonet\")\n\ncax = ax.density_contourf(strike, dip, measurement=\"poles\", cmap=\"gist_earth\", sigma=1.5)\nax.density_contour(strike, dip, measurement=\"poles\", colors=\"black\", sigma=1.5)\n \nax.pole(strike, dip, c=\"red\", ms=5)\nax.grid(linewidth=0.5)\n# fig.colorbar(cax, orientation=\"horizontal\")\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### **4.4. Stereonet interactiva**\n\nUsando una herramienta de visualización interactiva, crearemos una red estereográfica en donde podemos alterar los valores de rumbo, buzamiento y cabeceo de un plano.",
"_____no_output_____"
]
],
[
[
"import ipywidgets as widgets",
"_____no_output_____"
],
[
"def stereonet(rotation, strike, dip, rake):\n fig = plt.figure(figsize=(6, 6), constrained_layout=True)\n \n ax = fig.add_subplot(111, projection=\"equal_angle_stereonet\", rotation=rotation)\n \n ax.plane(strike, dip, color=\"green\", linewidth=2)\n ax.pole(strike, dip, color=\"red\", ms=10)\n ax.rake(strike, dip, rake, color=\"blue\", ms=10)\n\n ax.grid()\n \n plt.show()\n \nwidgets.interact(stereonet,\n rotation=widgets.IntSlider(min=0, max=360, step=5, value=0, description=\"Rotación\"),\n strike=widgets.IntSlider(min=0, max=360, step=5, value=90, description=\"Rumbo\"),\n dip=widgets.IntSlider(min=0, max=90, step=1, value=45, description=\"Buzamiento\"),\n rake=widgets.IntSlider(min=-90, max=90, step=1, value=45, description=\"Cabeceo\"));",
"_____no_output_____"
]
],
[
[
"***",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e744d4715855f0c7490688c215d05c17ba5764bc | 2,047 | ipynb | Jupyter Notebook | play/from_Vlad/list_comprehension_example.ipynb | davestanley/animated-succotash | 174f08063c222ead153bf9db67c75e2843301912 | [
"BSD-3-Clause"
] | null | null | null | play/from_Vlad/list_comprehension_example.ipynb | davestanley/animated-succotash | 174f08063c222ead153bf9db67c75e2843301912 | [
"BSD-3-Clause"
] | null | null | null | play/from_Vlad/list_comprehension_example.ipynb | davestanley/animated-succotash | 174f08063c222ead153bf9db67c75e2843301912 | [
"BSD-3-Clause"
] | null | null | null | 19.682692 | 117 | 0.460186 | [
[
[
"from string import ascii_letters",
"_____no_output_____"
],
[
"li = []\nfor i in range(4):\n for j in range(10, 13):\n li.append((i, j))",
"_____no_output_____"
],
[
"out = ', '.join((str(pair) for pair in li))",
"_____no_output_____"
],
[
"out",
"_____no_output_____"
],
[
"as_list_comprehension = [str((i, j)) for i in range(4) for j in range(10, 13)]\n', '.join(as_list_comprehension)",
"_____no_output_____"
]
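,
[
"# Quick check (added example): the comprehension builds the same joined string as the nested loops above.\nprint(out == ', '.join(as_list_comprehension))",
"_____no_output_____"
]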
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e744dde0c56701e0925ac7f0507528d9fb27dab9 | 71,237 | ipynb | Jupyter Notebook | RoadTrafficInjury/RoadTrafficInjury.ipynb | leizhenyu-lzy/BigHomework | 964fb75ff2d02080a383674b4ded82c06e2b8d7f | [
"MIT"
] | null | null | null | RoadTrafficInjury/RoadTrafficInjury.ipynb | leizhenyu-lzy/BigHomework | 964fb75ff2d02080a383674b4ded82c06e2b8d7f | [
"MIT"
] | null | null | null | RoadTrafficInjury/RoadTrafficInjury.ipynb | leizhenyu-lzy/BigHomework | 964fb75ff2d02080a383674b4ded82c06e2b8d7f | [
"MIT"
] | null | null | null | 73.364573 | 16,126 | 0.80847 | [
[
[
"# 道路交通事故受伤预测",
"_____no_output_____"
],
[
"## 导入必要的包",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\n# from IPython.display import display, display_html , HTML\n\nimport matplotlib.pyplot as plt\n# import seaborn as sns\n# from sklearn.decomposition import PCA\n\n# from sklearn.model_selection import train_test_split\n\n# from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report, roc_curve\n# from sklearn.model_selection import learning_curve, cross_val_score, GridSearchCV\n# from sklearn.model_selection import train_test_split\n\n# from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n# from sklearn.svm import SVC\nfrom sklearn.pipeline import make_pipeline\n# from sklearn.neighbors import KNeighborsClassifier\n# from sklearn.linear_model import LogisticRegression\n# from sklearn.preprocessing import RobustScaler,StandardScaler,MinMaxScaler\n\nfrom sklearn.impute import SimpleImputer\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# 为了在Jupyter Notebook中显示图片的必须配置项\n%matplotlib inline\n# 用兼容字体解决中文显示为方格的问题\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['font.serif'] = ['SimHei']\nplt.rcParams['font.family'] = 'sans-serif'\n# 解决保存图像是负号'-'显示为方块的问题\nplt.rcParams['axes.unicode_minus'] = False",
"_____no_output_____"
]
],
[
[
"## 常量&全局变量定义",
"_____no_output_____"
]
],
[
[
"# 没有用的特征名列表,方便eval和test时丢弃\nunusedFeatureList = []\n# 各个特征最可能的值,用于填补空缺。 key为特征名,value为特征的最可能值\nfeatureMostfreqValueDict = {}",
"_____no_output_____"
]
],
[
[
"## 函数定义",
"_____no_output_____"
]
],
[
[
"def countDFNull(aimDF):\n nullAmount = aimDF.isnull().sum().sum()\n print(\"Null数量 : \", nullAmount)\n return nullAmount",
"_____no_output_____"
]
],
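[
[
"# Added usage sketch: countDFNull simply reports the total number of missing values in a DataFrame.\n# Demonstrated on a tiny throwaway frame (not part of the original pipeline).\ndemo = pd.DataFrame({'a': [1, np.nan], 'b': [np.nan, 2]})\ncountDFNull(demo)",
"_____no_output_____"
]
],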
[
[
"## 读取数据集",
"_____no_output_____"
]
],
[
[
"print(\"读取trainDataset\")\ntrainDatasetDF = pd.read_csv('dataset/train.csv', header=0, index_col=None)\ntrainDatasetDF.Name = 'train dataset'\n# print(trainDatasetDF.head(2))\n\nprint(\"读取evalDataset\")\nevalDatasetDF = pd.read_csv('dataset/val.csv', header=0, index_col=0)\nevalDatasetDF.Name = 'eval dataset'\n# print(evalDatasetDF.head(2))\n\nprint(\"读取testDataset\")\ntestDatasetDF = pd.read_csv('dataset/test.csv', header=0, index_col=0)\ntestDatasetDF.Name = 'test dataset'\n# print(testDatasetDF.head(2))\n\n\ndatasetDFList = [trainDatasetDF,evalDatasetDF,testDatasetDF]\n\nfor df in datasetDFList:\n print(\"DF Name : \", df.Name, \" DF Shape : \", df.shape)",
"读取trainDataset\n读取evalDataset\n读取testDataset\nDF Name : train dataset DF Shape : (79786, 54)\nDF Name : eval dataset DF Shape : (2836, 54)\nDF Name : test dataset DF Shape : (2836, 53)\n"
]
],
[
[
"## 数据清洗",
"_____no_output_____"
]
],
[
[
"topBadFeatureNumbers = 20 # 展示缺失样本最多的特征的数量,全部展示太长了\n\nbadFeatureMaxMissingSample = 500 # 若某个特征缺失的样本数量超过该值,认定为坏特征\nbadSampleMaxMissingFeature = 1 # 若某个样本缺失的特征超过该值,认定为坏样本",
"_____no_output_____"
]
],
[
[
"### 找出sample较少的feature",
"_____no_output_____"
]
],
[
[
"# 对所有特征(每一列)进行null值统计\ntrainFeatureNullSeries = trainDatasetDF.isnull().sum().sort_values(ascending=False) # 降序排列\nprint(\"type : \", type(trainFeatureNullSeries))\n# averageTrainFeatureNull = trainFeatureNullSeries.sum()/len(trainFeatureNullSeries)\n# print(\"averageTrainFeatureNull : \", averageTrainFeatureNull)\ntrainFeatureNullDict = trainFeatureNullSeries.to_dict()\nbadTrainFeatureDict = {key:trainFeatureNullDict[key] for key in trainFeatureNullDict if trainFeatureNullSeries[key] > badFeatureMaxMissingSample}\nprint(\"缺失%d个以上样本的特征数量 : \"%badFeatureMaxMissingSample, len(badTrainFeatureDict))\nprint(badTrainFeatureDict)",
"type : <class 'pandas.core.series.Series'>\n缺失500个以上样本的特征数量 : 13\n{'lartpc': 79508, 'larrout': 79277, 'occutc': 78639, 'v2': 73891, 'v1': 14997, 'pr1': 10096, 'pr': 9672, 'circ': 4255, 'voie': 3712, 'vma': 1287, 'vosp': 1034, 'nbv': 1019, 'adr': 808}\n"
],
[
"# 可视化展示特征,缺失样本的数量\nplt.figure(figsize=(15,3))\nplt.grid()\nplt.bar(x=trainFeatureNullSeries.index[0:topBadFeatureNumbers], height=trainFeatureNullSeries.values[0:topBadFeatureNumbers])\nplt.xlabel(\"特征名称\")\nplt.ylabel(\"特征缺失样本数量\")\nplt.title(\"缺失样本数量最多的Top%d特征 柱状图\"%topBadFeatureNumbers)",
"_____no_output_____"
],
[
"# 删除选出的bad feature整列\nunusedFeatureList.extend(badTrainFeatureDict.keys())\nprint('unusedFeatureList : ', unusedFeatureList)\n\ntrainDatasetDF.drop(columns=badTrainFeatureDict.keys(), inplace=True)\nprint(trainDatasetDF.shape)",
"unusedFeatureList : ['lartpc', 'larrout', 'occutc', 'v2', 'v1', 'pr1', 'pr', 'circ', 'voie', 'vma', 'vosp', 'nbv', 'adr']\n(79786, 41)\n"
],
[
"# 统计删除后的特征缺失情况\ntrainFeatureNullSeries = trainDatasetDF.isnull().sum().sort_values(ascending=False) # 降序排列\nprint(\"type : \", type(trainFeatureNullSeries))\nprint(trainFeatureNullSeries.head(15).to_dict())\n\n# 可视化展示特征,缺失样本的数量\nplt.figure(figsize=(15,3))\nplt.grid()\nplt.bar(x=trainFeatureNullSeries.index[0:10], height=trainFeatureNullSeries.values[0:10])\nplt.xlabel(\"特征名称\")\nplt.ylabel(\"特征缺失样本数量\")\nplt.title(\"删除坏特征后的特征缺失情况 柱状图\")",
"type : <class 'pandas.core.series.Series'>\n{'situ': 302, 'infra': 226, 'surf': 190, 'prof': 185, 'plan': 176, 'atm': 163, 'jour': 163, 'catr': 163, 'long': 163, 'col': 163, 'lat': 163, 'int': 163, 'agg': 163, 'com': 163, 'dep': 163}\n"
]
],
[
[
"### 找出feature较少的sample",
"_____no_output_____"
]
],
[
[
"# 对所有样本(每一行)进行null值统计\ntrainSampleNullSeries = trainDatasetDF.T.isnull().sum().sort_values(ascending=False) # 倒序排列\ntrainSampleNullDict = trainSampleNullSeries.to_dict()\nprint(\"type : \", type(trainSampleNullSeries))\n\nbadTrainSampleDict = {key:trainSampleNullDict[key] for key in trainSampleNullDict if trainSampleNullDict[key] > badSampleMaxMissingFeature}\nprint(\"缺失%d个以上特征的样本数量 : \"%badSampleMaxMissingFeature, len(badTrainSampleDict))\nprint(badTrainSampleDict)\n# averageTrainSampleNull = trainSampleNullSeries.sum()/len(trainSampleNullSeries)\n# print(\"averageTrainSampleNull : \", averageTrainSampleNull)",
"type : <class 'pandas.core.series.Series'>\n缺失1个以上特征的样本数量 : 181\n{14951: 19, 56986: 19, 41678: 19, 56992: 19, 56991: 19, 15726: 19, 41677: 19, 41597: 19, 39022: 19, 40418: 19, 40417: 19, 40215: 19, 48658: 19, 52159: 19, 52160: 19, 74093: 19, 78644: 19, 74094: 19, 5045: 19, 10723: 19, 41784: 19, 14926: 19, 70809: 19, 24282: 19, 59932: 19, 59931: 19, 69083: 19, 55953: 19, 10904: 19, 10903: 19, 74746: 19, 74747: 19, 14953: 19, 14952: 19, 54032: 19, 32845: 19, 37693: 19, 37692: 19, 45758: 19, 45757: 19, 37691: 19, 37690: 19, 78660: 19, 55954: 19, 31389: 19, 14925: 19, 71574: 19, 45741: 19, 58847: 19, 61454: 19, 11887: 19, 67785: 19, 67784: 19, 67783: 19, 30127: 19, 66939: 19, 61328: 19, 61005: 19, 16441: 19, 61004: 19, 61003: 19, 35970: 19, 62160: 19, 62163: 19, 48839: 19, 35969: 19, 48838: 19, 48837: 19, 58848: 19, 47717: 19, 71771: 19, 45193: 19, 67664: 19, 65857: 19, 65858: 19, 45740: 19, 45739: 19, 45738: 19, 71772: 19, 52181: 19, 52182: 19, 6642: 19, 47726: 19, 21581: 19, 21582: 19, 47725: 19, 18945: 19, 21584: 19, 6643: 19, 10839: 19, 45192: 19, 58465: 19, 54031: 19, 50074: 19, 58445: 19, 6444: 19, 63186: 19, 25316: 19, 2256: 19, 72690: 19, 36350: 19, 32107: 19, 54849: 19, 36497: 19, 37765: 19, 56113: 19, 78319: 19, 1109: 19, 77593: 19, 63185: 19, 54848: 19, 30440: 19, 22725: 19, 2215: 19, 42836: 19, 26195: 19, 79095: 19, 68678: 19, 68677: 19, 27871: 19, 67640: 19, 6443: 19, 78366: 19, 63184: 19, 70778: 19, 14069: 19, 50073: 19, 78318: 19, 56112: 19, 37766: 19, 12268: 19, 23232: 19, 67738: 19, 67739: 19, 48912: 19, 48911: 19, 51844: 19, 59365: 19, 56111: 19, 18345: 19, 59366: 19, 59367: 19, 72689: 19, 50071: 19, 50072: 19, 23782: 19, 79536: 19, 79535: 19, 23231: 19, 49982: 19, 45471: 19, 45472: 19, 63187: 19, 49981: 19, 49980: 19, 591: 19, 56110: 19, 590: 19, 42403: 19, 42404: 19, 589: 19, 69080: 19, 22726: 19, 7593: 5, 78814: 5, 58291: 5, 78813: 5, 23792: 5, 29178: 5, 7620: 5, 23793: 5, 7619: 5, 70608: 5, 3284: 4, 3285: 4, 36351: 3, 10766: 2, 1195: 2, 78646: 2, 78647: 2, 10767: 2}\n"
],
[
"# 删除选出的bad sample整行\ntrainDatasetDF.drop(index=badTrainSampleDict.keys(), inplace=True)\nprint(trainDatasetDF.shape)",
"(79623, 41)\n"
]
],
[
[
"### 找出值不具有参考性的特征",
"_____no_output_____"
]
],
[
[
"# 特征的值的可能太多和太少都不具有参考性\ntooMuchValueFeatureThreshold = 300 # 如果特征的可能的指多于该数,认定为没有参考性\ntooLessValueFeatureThreshold = 2 # 如果特征的可能的指少于该数,认定为没有参考性",
"_____no_output_____"
],
[
"featureValueCountDict = {}\n# 输出各个特征值对应的特征数量\nfor loopIdx, colName in enumerate(trainDatasetDF):\n tempSeries = trainDatasetDF[colName]\n tempSeriesValueCountDict = tempSeries.value_counts().to_dict()\n # print(\"特征\", colName, \"共有%d个可能值\"%len(tempSeriesValueCountDict))\n featureValueCountDict[colName] = len(tempSeriesValueCountDict)\n\nprint(featureValueCountDict)\n\ntooMuchValueFeatureList = [feature for feature in featureValueCountDict if featureValueCountDict[feature] > tooMuchValueFeatureThreshold]\ntooLessValueFeatureList = [feature for feature in featureValueCountDict if featureValueCountDict[feature] < tooLessValueFeatureThreshold]\n\nprint(\"tooMuchValueFeatureList : \", tooMuchValueFeatureList)\nprint(\"tooLessValueFeatureList : \", tooLessValueFeatureList)",
"{'Num_Acc': 48304, 'jour': 31, 'mois': 12, 'an': 1, 'hrmn': 1347, 'lum': 5, 'dep': 107, 'com': 10018, 'agg': 2, 'int': 9, 'atm': 10, 'col': 8, 'lat': 46055, 'long': 46428, 'catr': 8, 'prof': 4, 'plan': 4, 'surf': 9, 'infra': 10, 'situ': 7, 'id_vehicule': 65846, 'num_veh': 27, 'place': 10, 'catu': 3, 'grav': 4, 'sexe': 2, 'an_nais': 103, 'trajet': 8, 'secu1': 11, 'secu2': 11, 'secu3': 10, 'locp': 11, 'actp': 13, 'etatp': 4, 'senc': 5, 'catv': 31, 'obs': 19, 'obsm': 8, 'choc': 11, 'manv': 28, 'motor': 8}\ntooMuchValueFeatureList : ['Num_Acc', 'hrmn', 'com', 'lat', 'long', 'id_vehicule']\ntooLessValueFeatureList : ['an']\n"
],
[
"unusedFeatureList.extend(tooMuchValueFeatureList)\nunusedFeatureList.extend(tooLessValueFeatureList)\n\nprint(\"unusedFeatureList : \", unusedFeatureList)",
"unusedFeatureList : ['Num_Acc', 'hrmn', 'com', 'lat', 'long', 'id_vehicule', 'an']\n"
],
[
"trainDatasetDF.drop(columns=unusedFeatureList, inplace=True)\nprint(trainDatasetDF.shape)",
"(79623, 34)\n"
],
[
"for featureName in trainDatasetDF:\n featureType = trainDatasetDF[featureName].dtype\n print(featureName, featureType, featureValueCountDict[featureName])\n\n# 查看类型为object的feature",
"jour float64 31\nmois float64 12\nlum float64 5\ndep object 107\nagg float64 2\nint float64 9\natm float64 10\ncol float64 8\ncatr float64 8\nprof float64 4\nplan float64 4\nsurf float64 9\ninfra float64 10\nsitu float64 7\nnum_veh object 27\nplace int64 10\ncatu int64 3\ngrav int64 4\nsexe int64 2\nan_nais int64 103\ntrajet int64 8\nsecu1 int64 11\nsecu2 int64 11\nsecu3 int64 10\nlocp int64 11\nactp object 13\netatp int64 4\nsenc int64 5\ncatv int64 31\nobs int64 19\nobsm int64 8\nchoc int64 11\nmanv int64 28\nmotor int64 8\n"
]
],
[
[
"### 统计清洗后空值数量",
"_____no_output_____"
]
],
[
[
"nullAfterClean = trainDatasetDF.isnull().sum().sum()\nprint(nullAfterClean)",
"264\n"
]
],
[
[
"## 缺失值填充 ",
"_____no_output_____"
]
],
[
[
"trainImputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')",
"_____no_output_____"
],
[
"trainDatasetDF = trainImputer.fit_transform(trainDatasetDF)",
"_____no_output_____"
],
[
"nullAfterClean = countDFNull(trainDatasetDF)\nprint(nullAfterClean)",
"Null数量 : 264\n264\n"
]
],
[
[
"## 数据重采样 ",
"_____no_output_____"
],
[
"### 查看训练集标签分布",
"_____no_output_____"
]
],
[
[
"countTrainDatasetLabel = trainDatasetDF['grav'].value_counts()\nprint(countTrainDatasetLabel)\nmaxTrainLabelAmount = countTrainDatasetLabel.max()\nprint(maxTrainLabelAmount)",
"1 33205\n4 31839\n3 12505\n2 2074\nName: grav, dtype: int64\n33205\n"
],
[
"plt.figure(figsize=(10,5))\nplt.title(\"训练集样本标签分布\")\nplt.xlabel('标签名称')\nplt.ylabel('标签数量')\nplt.grid()\nplt.xticks(labels=['Unharmed','Killed','Hospitalized','Slightly'], ticks=[1,2,3,4])\nplt.bar(x=countTrainDatasetLabel.index, height=countTrainDatasetLabel.values)",
"_____no_output_____"
],
[
"# 1. Unharmed # 2. Killed # 3. Injured hospitalized # 4. Slightly injured \ntrainUnharmedDF = trainDatasetDF.groupby('grav').get_group(1)\ntrainKilledDF = trainDatasetDF.groupby('grav').get_group(2)\ntrainHospitalizedDF = trainDatasetDF.groupby('grav').get_group(3)\ntrainSlightlyDF = trainDatasetDF.groupby('grav').get_group(4)\n\nprint(trainUnharmedDF.shape)\nprint(trainKilledDF.shape)\nprint(trainHospitalizedDF.shape)\nprint(trainSlightlyDF.shape)",
"(33205, 41)\n(2074, 41)\n(12505, 41)\n(31839, 41)\n"
],
[
"overSampleTrainUnharmedDF = trainUnharmedDF\noverSampleTrainKilledDF = trainKilledDF.sample(n=maxTrainLabelAmount, replace=True, random_state=42)\noverSampleTrainHospitalizedDF = trainHospitalizedDF.sample(n=maxTrainLabelAmount, replace=True, random_state=42)\noverSampleTrainSlightlyDF = trainSlightlyDF.sample(n=maxTrainLabelAmount, replace=True, random_state=42)\n\nprint(len(overSampleTrainUnharmedDF))\nprint(len(overSampleTrainKilledDF))\nprint(len(overSampleTrainHospitalizedDF))\nprint(len(overSampleTrainSlightlyDF))\n\noverSampleTrainDatasetDF = pd.concat([overSampleTrainUnharmedDF, overSampleTrainKilledDF, overSampleTrainHospitalizedDF, overSampleTrainSlightlyDF])\n\nprint(overSampleTrainDatasetDF.info())",
"33205\n33205\n33205\n33205\n<class 'pandas.core.frame.DataFrame'>\nInt64Index: 132820 entries, 1 to 48653\nData columns (total 41 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Num_Acc 132820 non-null int64 \n 1 jour 132820 non-null float64\n 2 mois 132820 non-null float64\n 3 an 132820 non-null float64\n 4 hrmn 132820 non-null float64\n 5 lum 132820 non-null float64\n 6 dep 132820 non-null object \n 7 com 132820 non-null object \n 8 agg 132820 non-null float64\n 9 int 132820 non-null float64\n 10 atm 132820 non-null float64\n 11 col 132820 non-null float64\n 12 lat 132820 non-null float64\n 13 long 132820 non-null float64\n 14 catr 132820 non-null float64\n 15 prof 132734 non-null float64\n 16 plan 132747 non-null float64\n 17 surf 132720 non-null float64\n 18 infra 132687 non-null float64\n 19 situ 132574 non-null float64\n 20 id_vehicule 132820 non-null object \n 21 num_veh 132820 non-null object \n 22 place 132820 non-null int64 \n 23 catu 132820 non-null int64 \n 24 grav 132820 non-null int64 \n 25 sexe 132820 non-null int64 \n 26 an_nais 132820 non-null int64 \n 27 trajet 132820 non-null int64 \n 28 secu1 132820 non-null int64 \n 29 secu2 132820 non-null int64 \n 30 secu3 132820 non-null int64 \n 31 locp 132820 non-null int64 \n 32 actp 132820 non-null object \n 33 etatp 132820 non-null int64 \n 34 senc 132820 non-null int64 \n 35 catv 132820 non-null int64 \n 36 obs 132820 non-null int64 \n 37 obsm 132820 non-null int64 \n 38 choc 132820 non-null int64 \n 39 manv 132820 non-null int64 \n 40 motor 132820 non-null int64 \ndtypes: float64(17), int64(19), object(5)\nmemory usage: 42.6+ MB\nNone\n"
]
],
[
[
"## 模型训练",
"_____no_output_____"
]
],
[
[
"trainXDF = trainDatasetDF.drop(columns=['grav'])\ntrainYDF = trainDatasetDF.loc[:, 'grav']\nevalXDF = evalDatasetDF.drop(columns=['grav'])\nevalYDF = evalDatasetDF.loc[:, 'grav']",
"0 4\n1 1\n2 1\n3 4\n4 4\n ..\n79781 1\n79782 4\n79783 1\n79784 4\n79785 4\nName: grav, Length: 79623, dtype: int64\n"
],
[
"preprocessorPipeline = make_pipeline()\n\nrandomForestPipeline = make_pipeline(preprocessorPipeline, )\nadaBoostPipeline = make_pipeline(preprocessorPipeline, )\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e744eecfdcf21dfafed8febe8501519de7d56a70 | 139,111 | ipynb | Jupyter Notebook | notebooks/orbit_calculations.ipynb | fraserw/kbmod | 65d69746d1dd8de867f8da147d73c09439d28b41 | [
"BSD-2-Clause"
] | 16 | 2018-07-23T11:39:05.000Z | 2022-01-27T17:15:42.000Z | notebooks/orbit_calculations.ipynb | flashasdbaksdgi/kbmod | 65d69746d1dd8de867f8da147d73c09439d28b41 | [
"BSD-2-Clause"
] | 42 | 2017-06-19T22:55:41.000Z | 2018-03-15T02:49:39.000Z | notebooks/orbit_calculations.ipynb | DiracInstitute/kbmod | 65d69746d1dd8de867f8da147d73c09439d28b41 | [
"BSD-2-Clause"
] | 7 | 2018-07-23T11:39:04.000Z | 2022-01-27T18:43:02.000Z | 420.274924 | 67,756 | 0.929517 | [
[
[
"import numpy as np\nimport trajectory_utils as tu\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"radius = 30 #in AU\ninclination = 15. #in degrees\nmax_time = 1. #in years\ntime_step = .001 #in years\ntheta_0 = 0. #Object's progress along orbit in degrees. \n #Note: Would be more useful to have angle from opposition probably. Will update in future.\nfig = plt.figure(figsize=(16,8))\nfig.add_subplot(1,2,1)\ntu.plot_ang_vel(radius, inclination, max_time, time_step, theta_0)\nfig.add_subplot(1,2,2)\ntu.plot_trajectory(radius, inclination, max_time, time_step, theta_0)",
"_____no_output_____"
],
[
"radius = 50 #in AU\ninclination = 10 #in degrees\nmax_time = 1. #in years\ntime_step = .001 #in years\ntheta_0 = 15. #Object's progress along orbit in degrees. \n #Note: Would be more useful to have angle from opposition probably. Will update in future.\nfig = plt.figure(figsize=(16,8))\nfig.add_subplot(1,2,1)\ntu.plot_ang_vel(radius, inclination, max_time, time_step, theta_0)\nfig.add_subplot(1,2,2)\ntu.plot_trajectory(radius, inclination, max_time, time_step, theta_0)",
"_____no_output_____"
],
[
"radius = 30\nincl = 15.\ntime_step = 0.001",
"_____no_output_____"
],
[
"lon, lat = tu.get_trajectory(radius, incl, time_step)",
"_____no_output_____"
],
[
"# Sample NEO field\nfield_central_lat = 0.\nin_field = np.where((lat < (field_central_lat + 1.)) & (lat > (field_central_lat-1.) ))[0]",
"_____no_output_____"
],
[
"percent_in_field = float(len(in_field))/len(lat)*100.\nprint(str(\"Object at radius %i AU and %.1f degree inclination will be in ecliptic latitudes \" + \n \"between +/- 1 deg of %.2f degrees %.2f percent of the time.\") % (radius, incl, \n field_central_lat, \n percent_in_field))",
"Object at radius 30 AU and 15.0 degree inclination will be in ecliptic latitudes between +/- 1 deg of 0.00 degrees 4.33 percent of the time.\n"
],
[
"ang_vel, ang_travelled = tu.get_ang_vel(radius, incl, time_step)",
"_____no_output_____"
],
[
"print(str(\"Absolute maximum velocity of object across sky: %.4f arcsec/hr.\" % np.max(ang_vel)))",
"Absolute maximum velocity of object across sky: 5.6497 arcsec/hr.\n"
],
[
"print(str(\"Absolute minimum velocity of object across sky: %.4f arcsec/hr.\" % np.min(ang_vel)))",
"Absolute minimum velocity of object across sky: 0.0060 arcsec/hr.\n"
],
[
"# Search only looks at trajectories within 12 degrees of parallel to a search angle\n# eclip_offset degrees from parallel to the ecliptic\nsearch_width = 12.\neclip_offset = 0.\nin_search = np.where((np.abs(ang_travelled[in_field[:-1]] - eclip_offset) < search_width))[0]",
"_____no_output_____"
],
[
"percent_in_search = float(len(in_search))/(len(in_field)-1) * 100.",
"_____no_output_____"
],
[
"print(str(\"Object trajectory will fall in search angular parameters %.2f percent of the time\" + \n \" that it is in these latitudes.\") % (percent_in_search))",
"Object trajectory will fall in search angular parameters 49.49 percent of the time that it is in these latitudes.\n"
],
[
"# Search sets minimum velocity at 1 arcsec/hour\nmin_vel = 1.\nmax_vel = 5.7\npercent_in_search_vel = (len(np.where((ang_vel[in_field[:-1]][in_search] > min_vel) &\n (ang_vel[in_field[:-1]][in_search] < max_vel))[0])/\n float(len(ang_vel[in_field[:-1]][in_search])))*100.",
"_____no_output_____"
],
[
"print(str(\"Object velocity parameters cover %.2f percent of possible objects \" +\n \"in search angular parameters.\") % (percent_in_search_vel))",
"Object velocity parameters cover 99.21 percent of possible objects in search angular parameters.\n"
],
[
"min_obj_vel = np.min(ang_vel[in_field[:-1]][in_search])\nmax_obj_vel = np.max(ang_vel[in_field[:-1]][in_search])\nprint(str(\"Minimum/Maximum object velocity with radius %i AU and %.1f degree inclination \" +\n \"in search field is %.3f/%.3f arcseconds per hour.\") % (radius, incl,\n min_obj_vel,\n max_obj_vel))",
"Minimum/Maximum object velocity with radius 30 AU and 15.0 degree inclination in search field is 0.704/5.619 arcseconds per hour.\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e744f76a43c685d0639f3b791757a74dacac9f3f | 86,993 | ipynb | Jupyter Notebook | notebooks/13_Recurrent_Neural_Networks.ipynb | udapy/MadeWithML | 4cb9208cda8806b2d191bfe3efe4633112cb6e53 | [
"MIT"
] | 1 | 2021-09-21T22:45:18.000Z | 2021-09-21T22:45:18.000Z | notebooks/13_Recurrent_Neural_Networks.ipynb | udapy/MadeWithML | 4cb9208cda8806b2d191bfe3efe4633112cb6e53 | [
"MIT"
] | null | null | null | notebooks/13_Recurrent_Neural_Networks.ipynb | udapy/MadeWithML | 4cb9208cda8806b2d191bfe3efe4633112cb6e53 | [
"MIT"
] | 1 | 2021-09-16T00:59:42.000Z | 2021-09-16T00:59:42.000Z | 33.692099 | 833 | 0.471072 | [
[
[
"<div align=\"center\">\n<h1><img width=\"30\" src=\"https://madewithml.com/static/images/rounded_logo.png\"> <a href=\"https://madewithml.com/\">Made With ML</a></h1>\nApplied ML · MLOps · Production\n<br>\nJoin 30K+ developers in learning how to responsibly <a href=\"https://madewithml.com/about/\">deliver value</a> with ML.\n <br>\n</div>\n\n<br>\n\n<div align=\"center\">\n <a target=\"_blank\" href=\"https://newsletter.madewithml.com\"><img src=\"https://img.shields.io/badge/Subscribe-30K-brightgreen\"></a> \n <a target=\"_blank\" href=\"https://github.com/GokuMohandas/MadeWithML\"><img src=\"https://img.shields.io/github/stars/GokuMohandas/MadeWithML.svg?style=social&label=Star\"></a> \n <a target=\"_blank\" href=\"https://www.linkedin.com/in/goku\"><img src=\"https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social\"></a> \n <a target=\"_blank\" href=\"https://twitter.com/GokuMohandas\"><img src=\"https://img.shields.io/twitter/follow/GokuMohandas.svg?label=Follow&style=social\"></a>\n <br>\n 🔥 Among the <a href=\"https://github.com/topics/deep-learning\" target=\"_blank\">top ML</a> repositories on GitHub\n</div>\n\n<br>\n<hr>",
"_____no_output_____"
],
[
"# Recurrent Neural Networks (RNN)\n\nIn this lesson we will learn how to process sequential data (sentences, time-series, etc.) with recurrent neural networks (RNNs).",
"_____no_output_____"
],
[
"<div align=\"left\">\n<a target=\"_blank\" href=\"https://madewithml.com/courses/foundations/recurrent-neural-networks/\"><img src=\"https://img.shields.io/badge/📖 Read-blog post-9cf\"></a> \n<a href=\"https://github.com/GokuMohandas/MadeWithML/blob/main/notebooks/13_Recurrent_Neural_Networks.ipynb\" role=\"button\"><img src=\"https://img.shields.io/static/v1?label=&message=View%20On%20GitHub&color=586069&logo=github&labelColor=2f363d\"></a> \n<a href=\"https://colab.research.google.com/github/GokuMohandas/MadeWithML/blob/main/notebooks/13_Recurrent_Neural_Networks.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a>\n</div>",
"_____no_output_____"
],
[
"So far we've processed inputs as whole (ex. applying filters across the entire input to extract features) but we can also process our inputs sequentially. For example we can think of each token in our text as an event in time (timestep). We can process each timestep, one at a time, and predict the class after the last timestep (token) has been processed. This is very powerful because the model now has a meaningful way to account for the sequential order of tokens in our sequence and predict accordingly. ",
"_____no_output_____"
],
[
"# Overview",
"_____no_output_____"
],
[
"<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/vanilla.png\" width=\"500\">\n</div>\n\nRNN forward pass for a single time step $X_t$:\n\n$h_t = tanh(W_{hh}h_{t-1} + W_{xh}X_t+b_h)$\n\n*where*:\n* $W_{hh}$ = hidden units weights| $\\in \\mathbb{R}^{HXH}$ ($H$ is the hidden dim)\n* $h_{t-1}$ = previous timestep's hidden state $\\in \\mathbb{R}^{NXH}$\n* $W_{xh}$ = input weights| $\\in \\mathbb{R}^{EXH}$\n* $X_t$ = input at time step t | $\\in \\mathbb{R}^{NXE}$ ($N$ is the batch size, $E$ is the embedding dim)\n* $b_h$ = hidden units bias $\\in \\mathbb{R}^{HX1}$\n* $h_t$ = output from RNN for timestep $t$",
"_____no_output_____"
],
[
"* **Objective:** Process sequential data by accounting for the currend input and also what has been learned from previous inputs.\n* **Advantages:** \n * Account for order and previous inputs in a meaningful way.\n * Conditioned generation for generating sequences.\n* **Disadvantages:** \n * Each time step's prediction depends on the previous prediction so it's difficult to parallelize RNN operations. \n * Processing long sequences can yield memory and computation issues.\n * Interpretability is difficult but there are few [techniques](https://arxiv.org/abs/1506.02078) that use the activations from RNNs to see what parts of the inputs are processed. \n* **Miscellaneous:** \n * Architectural tweaks to make RNNs faster and interpretable is an ongoing area of research.",
"_____no_output_____"
],
[
"# Set up",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport random\nimport torch\nimport torch.nn as nn",
"_____no_output_____"
],
[
"SEED = 1234",
"_____no_output_____"
],
[
"def set_seeds(seed=1234):\n \"\"\"Set seeds for reproducibility.\"\"\"\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # multi-GPU# Set seeds for reproducibility\nset_seeds(seed=SEED)",
"_____no_output_____"
],
[
"# Set seeds for reproducibility\nset_seeds(seed=SEED)",
"_____no_output_____"
],
[
"# Set device\ncuda = True\ndevice = torch.device('cuda' if (\n torch.cuda.is_available() and cuda) else 'cpu')\ntorch.set_default_tensor_type('torch.FloatTensor')\nif device.type == 'cuda':\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nprint (device)",
"cuda\n"
]
],
[
[
"## Load data",
"_____no_output_____"
],
[
"We will download the [AG News dataset](http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html), which consists of 120K text samples from 4 unique classes (`Business`, `Sci/Tech`, `Sports`, `World`)",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport re\nimport urllib",
"_____no_output_____"
],
[
"# Load data\nurl = \"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/datasets/news.csv\"\ndf = pd.read_csv(url, header=0) # load\ndf = df.sample(frac=1).reset_index(drop=True) # shuffle\ndf.head()",
"_____no_output_____"
]
],
[
[
"## Preprocessing",
"_____no_output_____"
],
[
"We're going to clean up our input data first by doing operations such as lower text, removing stop (filler) words, filters using regular expressions, etc.",
"_____no_output_____"
]
],
[
[
"import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport re",
"_____no_output_____"
],
[
"nltk.download('stopwords')\nSTOPWORDS = stopwords.words('english')\nprint (STOPWORDS[:5])\nporter = PorterStemmer()",
"[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n['i', 'me', 'my', 'myself', 'we']\n"
],
[
"def preprocess(text, stopwords=STOPWORDS):\n \"\"\"Conditional preprocessing on our text unique to our task.\"\"\"\n # Lower\n text = text.lower()\n\n # Remove stopwords\n pattern = re.compile(r'\\b(' + r'|'.join(stopwords) + r')\\b\\s*')\n text = pattern.sub('', text)\n\n # Remove words in paranthesis\n text = re.sub(r'\\([^)]*\\)', '', text)\n\n # Spacing and filters\n text = re.sub(r\"([-;;.,!?<=>])\", r\" \\1 \", text)\n text = re.sub('[^A-Za-z0-9]+', ' ', text) # remove non alphanumeric chars\n text = re.sub(' +', ' ', text) # remove multiple spaces\n text = text.strip()\n\n return text",
"_____no_output_____"
],
[
"# Sample\ntext = \"Great week for the NYSE!\"\npreprocess(text=text)",
"_____no_output_____"
],
[
"# Apply to dataframe\npreprocessed_df = df.copy()\npreprocessed_df.title = preprocessed_df.title.apply(preprocess)\nprint (f\"{df.title.values[0]}\\n\\n{preprocessed_df.title.values[0]}\")",
"Sharon Accepts Plan to Reduce Gaza Army Operation, Haaretz Says\n\nsharon accepts plan reduce gaza army operation haaretz says\n"
]
],
[
[
"## Split data",
"_____no_output_____"
]
],
[
[
"import collections\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"TRAIN_SIZE = 0.7\nVAL_SIZE = 0.15\nTEST_SIZE = 0.15",
"_____no_output_____"
],
[
"def train_val_test_split(X, y, train_size):\n \"\"\"Split dataset into data splits.\"\"\"\n X_train, X_, y_train, y_ = train_test_split(X, y, train_size=TRAIN_SIZE, stratify=y)\n X_val, X_test, y_val, y_test = train_test_split(X_, y_, train_size=0.5, stratify=y_)\n return X_train, X_val, X_test, y_train, y_val, y_test",
"_____no_output_____"
],
[
"# Data\nX = preprocessed_df[\"title\"].values\ny = preprocessed_df[\"category\"].values",
"_____no_output_____"
],
[
"# Create data splits\nX_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(\n X=X, y=y, train_size=TRAIN_SIZE)\nprint (f\"X_train: {X_train.shape}, y_train: {y_train.shape}\")\nprint (f\"X_val: {X_val.shape}, y_val: {y_val.shape}\")\nprint (f\"X_test: {X_test.shape}, y_test: {y_test.shape}\")\nprint (f\"Sample point: {X_train[0]} → {y_train[0]}\")",
"X_train: (84000,), y_train: (84000,)\nX_val: (18000,), y_val: (18000,)\nX_test: (18000,), y_test: (18000,)\nSample point: china battles north korea nuclear talks → World\n"
]
],
[
[
"## LabelEncoder",
"_____no_output_____"
],
[
"Next we'll define a `LabelEncoder` to encode our text labels into unique indices",
"_____no_output_____"
]
],
[
[
"import itertools",
"_____no_output_____"
],
[
"class LabelEncoder(object):\n \"\"\"Label encoder for tag labels.\"\"\"\n def __init__(self, class_to_index={}):\n self.class_to_index = class_to_index\n self.index_to_class = {v: k for k, v in self.class_to_index.items()}\n self.classes = list(self.class_to_index.keys())\n\n def __len__(self):\n return len(self.class_to_index)\n\n def __str__(self):\n return f\"<LabelEncoder(num_classes={len(self)})>\"\n\n def fit(self, y):\n classes = np.unique(y)\n for i, class_ in enumerate(classes):\n self.class_to_index[class_] = i\n self.index_to_class = {v: k for k, v in self.class_to_index.items()}\n self.classes = list(self.class_to_index.keys())\n return self\n\n def encode(self, y):\n encoded = np.zeros((len(y)), dtype=int)\n for i, item in enumerate(y):\n encoded[i] = self.class_to_index[item]\n return encoded\n\n def decode(self, y):\n classes = []\n for i, item in enumerate(y):\n classes.append(self.index_to_class[item])\n return classes\n\n def save(self, fp):\n with open(fp, 'w') as fp:\n contents = {'class_to_index': self.class_to_index}\n json.dump(contents, fp, indent=4, sort_keys=False)\n\n @classmethod\n def load(cls, fp):\n with open(fp, 'r') as fp:\n kwargs = json.load(fp=fp)\n return cls(**kwargs)",
"_____no_output_____"
],
[
"# Encode\nlabel_encoder = LabelEncoder()\nlabel_encoder.fit(y_train)\nNUM_CLASSES = len(label_encoder)\nlabel_encoder.class_to_index",
"_____no_output_____"
],
[
"# Convert labels to tokens\nprint (f\"y_train[0]: {y_train[0]}\")\ny_train = label_encoder.encode(y_train)\ny_val = label_encoder.encode(y_val)\ny_test = label_encoder.encode(y_test)\nprint (f\"y_train[0]: {y_train[0]}\")",
"y_train[0]: World\ny_train[0]: 3\n"
],
[
"# Class weights\ncounts = np.bincount(y_train)\nclass_weights = {i: 1.0/count for i, count in enumerate(counts)}\nprint (f\"counts: {counts}\\nweights: {class_weights}\")",
"counts: [21000 21000 21000 21000]\nweights: {0: 4.761904761904762e-05, 1: 4.761904761904762e-05, 2: 4.761904761904762e-05, 3: 4.761904761904762e-05}\n"
]
],
[
[
"## Tokenizer",
"_____no_output_____"
],
[
"We'll define a `Tokenizer` to convert our text input data into token indices.",
"_____no_output_____"
]
],
[
[
"import json\nfrom collections import Counter\nfrom more_itertools import take",
"_____no_output_____"
],
[
"class Tokenizer(object):\n def __init__(self, char_level, num_tokens=None, \n pad_token='<PAD>', oov_token='<UNK>',\n token_to_index=None):\n self.char_level = char_level\n self.separator = '' if self.char_level else ' '\n if num_tokens: num_tokens -= 2 # pad + unk tokens\n self.num_tokens = num_tokens\n self.pad_token = pad_token\n self.oov_token = oov_token\n if not token_to_index:\n token_to_index = {pad_token: 0, oov_token: 1}\n self.token_to_index = token_to_index\n self.index_to_token = {v: k for k, v in self.token_to_index.items()}\n\n def __len__(self):\n return len(self.token_to_index)\n\n def __str__(self):\n return f\"<Tokenizer(num_tokens={len(self)})>\"\n\n def fit_on_texts(self, texts):\n if not self.char_level:\n texts = [text.split(\" \") for text in texts]\n all_tokens = [token for text in texts for token in text]\n counts = Counter(all_tokens).most_common(self.num_tokens)\n self.min_token_freq = counts[-1][1]\n for token, count in counts:\n index = len(self)\n self.token_to_index[token] = index\n self.index_to_token[index] = token\n return self\n\n def texts_to_sequences(self, texts):\n sequences = []\n for text in texts:\n if not self.char_level:\n text = text.split(' ')\n sequence = []\n for token in text:\n sequence.append(self.token_to_index.get(\n token, self.token_to_index[self.oov_token]))\n sequences.append(np.asarray(sequence))\n return sequences\n\n def sequences_to_texts(self, sequences):\n texts = []\n for sequence in sequences:\n text = []\n for index in sequence:\n text.append(self.index_to_token.get(index, self.oov_token))\n texts.append(self.separator.join([token for token in text]))\n return texts\n\n def save(self, fp):\n with open(fp, 'w') as fp:\n contents = {\n 'char_level': self.char_level,\n 'oov_token': self.oov_token,\n 'token_to_index': self.token_to_index\n }\n json.dump(contents, fp, indent=4, sort_keys=False)\n\n @classmethod\n def load(cls, fp):\n with open(fp, 'r') as fp:\n kwargs = json.load(fp=fp)\n return cls(**kwargs)",
"_____no_output_____"
],
[
"# Tokenize\ntokenizer = Tokenizer(char_level=False, num_tokens=5000)\ntokenizer.fit_on_texts(texts=X_train)\nVOCAB_SIZE = len(tokenizer)\nprint (tokenizer)",
"<Tokenizer(num_tokens=5000)>\n"
],
[
"# Sample of tokens\nprint (take(5, tokenizer.token_to_index.items()))\nprint (f\"least freq token's freq: {tokenizer.min_token_freq}\") # use this to adjust num_tokens",
"[('<PAD>', 0), ('<UNK>', 1), ('39', 2), ('b', 3), ('gt', 4)]\nleast freq token's freq: 14\n"
],
[
"# Convert texts to sequences of indices\nX_train = tokenizer.texts_to_sequences(X_train)\nX_val = tokenizer.texts_to_sequences(X_val)\nX_test = tokenizer.texts_to_sequences(X_test)\npreprocessed_text = tokenizer.sequences_to_texts([X_train[0]])[0]\nprint (\"Text to indices:\\n\"\n f\" (preprocessed) → {preprocessed_text}\\n\"\n f\" (tokenized) → {X_train[0]}\")",
"Text to indices:\n (preprocessed) → china battles north korea nuclear talks\n (tokenized) → [ 16 1491 285 142 114 24]\n"
]
],
[
[
"## Padding",
"_____no_output_____"
],
[
"We'll need to do 2D padding to our tokenized text.",
"_____no_output_____"
]
],
[
[
"def pad_sequences(sequences, max_seq_len=0):\n \"\"\"Pad sequences to max length in sequence.\"\"\"\n max_seq_len = max(max_seq_len, max(len(sequence) for sequence in sequences))\n padded_sequences = np.zeros((len(sequences), max_seq_len))\n for i, sequence in enumerate(sequences):\n padded_sequences[i][:len(sequence)] = sequence\n return padded_sequences",
"_____no_output_____"
],
[
"# 2D sequences\npadded = pad_sequences(X_train[0:3])\nprint (padded.shape)\nprint (padded)",
"(3, 6)\n[[1.600e+01 1.491e+03 2.850e+02 1.420e+02 1.140e+02 2.400e+01]\n [1.445e+03 2.300e+01 6.560e+02 2.197e+03 1.000e+00 0.000e+00]\n [1.200e+02 1.400e+01 1.955e+03 1.005e+03 1.529e+03 4.014e+03]]\n"
]
],
[
[
"## Datasets",
"_____no_output_____"
],
[
"We're going to create Datasets and DataLoaders to be able to efficiently create batches with our data splits.",
"_____no_output_____"
]
],
[
[
"class Dataset(torch.utils.data.Dataset):\n def __init__(self, X, y,):\n self.X = X\n self.y = y\n\n def __len__(self):\n return len(self.y)\n\n def __str__(self):\n return f\"<Dataset(N={len(self)})>\"\n\n def __getitem__(self, index):\n X = self.X[index]\n y = self.y[index]\n return [X, len(X), y]\n\n def collate_fn(self, batch):\n \"\"\"Processing on a batch.\"\"\"\n # Get inputs\n batch = np.array(batch, dtype=object)\n X = batch[:, 0]\n seq_lens = batch[:, 1]\n y = np.stack(batch[:, 2], axis=0)\n\n # Pad inputs\n X = pad_sequences(sequences=X)\n\n # Cast\n X = torch.LongTensor(X.astype(np.int32))\n seq_lens = torch.LongTensor(seq_lens.astype(np.int32))\n y = torch.LongTensor(y.astype(np.int32))\n\n return X, seq_lens, y\n\n def create_dataloader(self, batch_size, shuffle=False, drop_last=False):\n return torch.utils.data.DataLoader(\n dataset=self, batch_size=batch_size, collate_fn=self.collate_fn,\n shuffle=shuffle, drop_last=drop_last, pin_memory=True)",
"_____no_output_____"
],
[
"# Create datasets\ntrain_dataset = Dataset(X=X_train, y=y_train)\nval_dataset = Dataset(X=X_val, y=y_val)\ntest_dataset = Dataset(X=X_test, y=y_test)\nprint (\"Datasets:\\n\"\n f\" Train dataset:{train_dataset.__str__()}\\n\"\n f\" Val dataset: {val_dataset.__str__()}\\n\"\n f\" Test dataset: {test_dataset.__str__()}\\n\"\n \"Sample point:\\n\"\n f\" X: {train_dataset[0][0]}\\n\"\n f\" seq_len: {train_dataset[0][1]}\\n\"\n f\" y: {train_dataset[0][2]}\")",
"Datasets:\n Train dataset:<Dataset(N=84000)>\n Val dataset: <Dataset(N=18000)>\n Test dataset: <Dataset(N=18000)>\nSample point:\n X: [ 16 1491 285 142 114 24]\n seq_len: 6\n y: 3\n"
],
[
"# Create dataloaders\nbatch_size = 64\ntrain_dataloader = train_dataset.create_dataloader(\n batch_size=batch_size)\nval_dataloader = val_dataset.create_dataloader(\n batch_size=batch_size)\ntest_dataloader = test_dataset.create_dataloader(\n batch_size=batch_size)\nbatch_X, batch_seq_lens, batch_y = next(iter(train_dataloader))\nprint (\"Sample batch:\\n\"\n f\" X: {list(batch_X.size())}\\n\"\n f\" seq_lens: {list(batch_seq_lens.size())}\\n\"\n f\" y: {list(batch_y.size())}\\n\"\n \"Sample point:\\n\"\n f\" X: {batch_X[0]}\\n\"\n f\" seq_len: {batch_seq_lens[0]}\\n\"\n f\" y: {batch_y[0]}\")",
"Sample batch:\n X: [64, 14]\n seq_lens: [64]\n y: [64]\nSample point:\n X: tensor([ 16, 1491, 285, 142, 114, 24, 0, 0, 0, 0, 0, 0,\n 0, 0], device='cpu')\n seq_len: 6\n y: 3\n"
]
],
[
[
"## Trainer",
"_____no_output_____"
],
[
"Let's create the `Trainer` class that we'll use to facilitate training for our experiments.",
"_____no_output_____"
]
],
[
[
"class Trainer(object):\n def __init__(self, model, device, loss_fn=None, optimizer=None, scheduler=None):\n\n # Set params\n self.model = model\n self.device = device\n self.loss_fn = loss_fn\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n def train_step(self, dataloader):\n \"\"\"Train step.\"\"\"\n # Set model to train mode\n self.model.train()\n loss = 0.0\n\n # Iterate over train batches\n for i, batch in enumerate(dataloader):\n\n # Step\n batch = [item.to(self.device) for item in batch] # Set device\n inputs, targets = batch[:-1], batch[-1]\n self.optimizer.zero_grad() # Reset gradients\n z = self.model(inputs) # Forward pass\n J = self.loss_fn(z, targets) # Define loss\n J.backward() # Backward pass\n self.optimizer.step() # Update weights\n\n # Cumulative Metrics\n loss += (J.detach().item() - loss) / (i + 1)\n\n return loss\n\n def eval_step(self, dataloader):\n \"\"\"Validation or test step.\"\"\"\n # Set model to eval mode\n self.model.eval()\n loss = 0.0\n y_trues, y_probs = [], []\n\n # Iterate over val batches\n with torch.inference_mode():\n for i, batch in enumerate(dataloader):\n\n # Step\n batch = [item.to(self.device) for item in batch] # Set device\n inputs, y_true = batch[:-1], batch[-1]\n z = self.model(inputs) # Forward pass\n J = self.loss_fn(z, y_true).item()\n\n # Cumulative Metrics\n loss += (J - loss) / (i + 1)\n\n # Store outputs\n y_prob = F.softmax(z).cpu().numpy()\n y_probs.extend(y_prob)\n y_trues.extend(y_true.cpu().numpy())\n\n return loss, np.vstack(y_trues), np.vstack(y_probs)\n\n def predict_step(self, dataloader):\n \"\"\"Prediction step.\"\"\"\n # Set model to eval mode\n self.model.eval()\n y_probs = []\n\n # Iterate over val batches\n with torch.inference_mode():\n for i, batch in enumerate(dataloader):\n\n # Forward pass w/ inputs\n inputs, targets = batch[:-1], batch[-1]\n z = self.model(inputs)\n\n # Store outputs\n y_prob = F.softmax(z).cpu().numpy()\n y_probs.extend(y_prob)\n\n return np.vstack(y_probs)\n \n def train(self, num_epochs, patience, train_dataloader, val_dataloader):\n best_val_loss = np.inf\n for epoch in range(num_epochs):\n # Steps\n train_loss = self.train_step(dataloader=train_dataloader)\n val_loss, _, _ = self.eval_step(dataloader=val_dataloader)\n self.scheduler.step(val_loss)\n\n # Early stopping\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_model = self.model\n _patience = patience # reset _patience\n else:\n _patience -= 1\n if not _patience: # 0\n print(\"Stopping early!\")\n break\n\n # Logging\n print(\n f\"Epoch: {epoch+1} | \"\n f\"train_loss: {train_loss:.5f}, \"\n f\"val_loss: {val_loss:.5f}, \"\n f\"lr: {self.optimizer.param_groups[0]['lr']:.2E}, \"\n f\"_patience: {_patience}\"\n )\n return best_model",
"_____no_output_____"
]
],
[
[
"# Vanilla RNN",
"_____no_output_____"
],
[
"Inputs to RNNs are sequential like text or time-series.",
"_____no_output_____"
]
],
[
[
"BATCH_SIZE = 64\nEMBEDDING_DIM = 100",
"_____no_output_____"
],
[
"# Input\nsequence_size = 8 # words per input\nx = torch.rand((BATCH_SIZE, sequence_size, EMBEDDING_DIM))\nseq_lens = torch.randint(high=sequence_size, size=(1, BATCH_SIZE))\nprint (x.shape)\nprint (seq_lens.shape)",
"torch.Size([64, 8, 100])\ntorch.Size([1, 64])\n"
]
],
[
[
"<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/vanilla.png\" width=\"500\">\n</div>\n\nRNN forward pass for a single time step $X_t$:\n\n$h_t = tanh(W_{hh}h_{t-1} + W_{xh}X_t+b_h)$\n\n*where*:\n* $W_{hh}$ = hidden units weights| $\\in \\mathbb{R}^{HXH}$ ($H$ is the hidden dim)\n* $h_{t-1}$ = previous timestep's hidden state $\\in \\mathbb{R}^{NXH}$\n* $W_{xh}$ = input weights| $\\in \\mathbb{R}^{EXH}$\n* $X_t$ = input at time step t | $\\in \\mathbb{R}^{NXE}$ ($N$ is the batch size, $E$ is the embedding dim)\n* $b_h$ = hidden units bias $\\in \\mathbb{R}^{HX1}$\n* $h_t$ = output from RNN for timestep $t$\n\n> At the first time step, the previous hidden state $h_{t-1}$ can either be a zero vector (unconditioned) or initialized (conditioned). If we are conditioning the RNN, the first hidden state $h_0$ can belong to a specific condition or we can concat the specific condition to the randomly initialized hidden vectors at each time step. More on this in the subsequent notebooks on RNNs.",
"_____no_output_____"
]
],
[
[
"RNN_HIDDEN_DIM = 128\nDROPOUT_P = 0.1\nRNN_DROPOUT_P = 0.1",
"_____no_output_____"
],
[
"# Initialize hidden state\nhidden_t = torch.zeros((BATCH_SIZE, RNN_HIDDEN_DIM))\nprint (hidden_t.size())",
"torch.Size([64, 128])\n"
]
],
[
[
"We'll show how to create an RNN cell using PyTorch's [`RNNCell`](https://pytorch.org/docs/stable/generated/torch.nn.RNNCell.html#torch.nn.RNNCell) and the more abstracted [`RNN`](https://pytorch.org/docs/stable/generated/torch.nn.RNN.html#torch.nn.RNN).",
"_____no_output_____"
]
],
[
[
"# Initialize RNN cell\nrnn_cell = nn.RNNCell(EMBEDDING_DIM, RNN_HIDDEN_DIM)\nprint (rnn_cell)",
"RNNCell(100, 128)\n"
],
[
"# Forward pass through RNN\nx = x.permute(1, 0, 2) # RNN needs batch_size to be at dim 1\n\n# Loop through the inputs time steps\nhiddens = []\nfor t in range(sequence_size):\n hidden_t = rnn_cell(x[t], hidden_t)\n hiddens.append(hidden_t)\nhiddens = torch.stack(hiddens)\nhiddens = hiddens.permute(1, 0, 2) # bring batch_size back to dim 0\nprint (hiddens.size())",
"torch.Size([64, 8, 128])\n"
],
[
"# We also could've used a more abstracted layer\nx = torch.rand((BATCH_SIZE, sequence_size, EMBEDDING_DIM))\nrnn = nn.RNN(EMBEDDING_DIM, RNN_HIDDEN_DIM, batch_first=True)\nout, h_n = rnn(x) # h_n is the last hidden state\nprint (\"out: \", out.shape)\nprint (\"h_n: \", h_n.shape)",
"out: torch.Size([64, 8, 128])\nh_n: torch.Size([1, 64, 128])\n"
],
[
"# The same tensors\nprint (out[:,-1,:])\nprint (h_n.squeeze(0))",
"tensor([[-0.5056, 0.6157, 0.4275, ..., 0.1804, -0.1480, -0.4822],\n [-0.1490, 0.6549, 0.3184, ..., 0.2831, -0.3557, -0.5438],\n [-0.5290, 0.4321, 0.0885, ..., 0.4848, -0.2672, -0.2660],\n ...,\n [-0.3273, 0.6155, -0.2170, ..., 0.1718, -0.1623, -0.3876],\n [-0.3860, 0.3749, 0.0142, ..., 0.6179, -0.3790, -0.2459],\n [-0.0464, 0.4893, -0.2189, ..., 0.3532, -0.3793, -0.5216]],\n grad_fn=<SliceBackward>)\ntensor([[-0.5056, 0.6157, 0.4275, ..., 0.1804, -0.1480, -0.4822],\n [-0.1490, 0.6549, 0.3184, ..., 0.2831, -0.3557, -0.5438],\n [-0.5290, 0.4321, 0.0885, ..., 0.4848, -0.2672, -0.2660],\n ...,\n [-0.3273, 0.6155, -0.2170, ..., 0.1718, -0.1623, -0.3876],\n [-0.3860, 0.3749, 0.0142, ..., 0.6179, -0.3790, -0.2459],\n [-0.0464, 0.4893, -0.2189, ..., 0.3532, -0.3793, -0.5216]],\n grad_fn=<SqueezeBackward1>)\n"
]
],
[
[
"In our model, we want to use the RNN's output after the last relevant token in the sentence is processed. The last relevant token doesn't refer the `<PAD>` tokens but to the last actual word in the sentence and its index is different for each input in the batch. This is why we included a `seq_lens` tensor in our batches.",
"_____no_output_____"
]
],
[
[
"def gather_last_relevant_hidden(hiddens, seq_lens):\n \"\"\"Extract and collect the last relevant \n hidden state based on the sequence length.\"\"\"\n seq_lens = seq_lens.long().detach().cpu().numpy() - 1\n out = []\n for batch_index, column_index in enumerate(seq_lens):\n out.append(hiddens[batch_index, column_index])\n return torch.stack(out)",
"_____no_output_____"
],
[
"# Get the last relevant hidden state\ngather_last_relevant_hidden(hiddens=out, seq_lens=seq_lens).squeeze(0).shape",
"_____no_output_____"
]
],
[
[
"There are many different ways to use RNNs. So far we've processed our inputs one timestep at a time and we could either use the RNN's output at each time step or just use the final input timestep's RNN output. Let's look at a few other possibilities.\n\n<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/architectures.png\" width=\"1000\">\n</div>",
"_____no_output_____"
],
[
"## Model",
"_____no_output_____"
]
],
[
[
"import torch.nn.functional as F",
"_____no_output_____"
],
[
"HIDDEN_DIM = 100",
"_____no_output_____"
],
[
"class RNN(nn.Module):\n def __init__(self, embedding_dim, vocab_size, rnn_hidden_dim,\n hidden_dim, dropout_p, num_classes, padding_idx=0):\n super(RNN, self).__init__()\n \n # Initialize embeddings\n self.embeddings = nn.Embedding(\n embedding_dim=embedding_dim, num_embeddings=vocab_size,\n padding_idx=padding_idx)\n \n # RNN\n self.rnn = nn.RNN(embedding_dim, rnn_hidden_dim, batch_first=True)\n \n # FC weights\n self.dropout = nn.Dropout(dropout_p)\n self.fc1 = nn.Linear(rnn_hidden_dim, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, num_classes)\n\n def forward(self, inputs):\n # Embed\n x_in, seq_lens = inputs\n x_in = self.embeddings(x_in)\n \n # Rnn outputs\n out, h_n = self.rnn(x_in)\n z = gather_last_relevant_hidden(hiddens=out, seq_lens=seq_lens)\n\n # FC layers\n z = self.fc1(z)\n z = self.dropout(z)\n z = self.fc2(z)\n return z",
"_____no_output_____"
],
[
"# Simple RNN cell\nmodel = RNN(\n embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE, \n rnn_hidden_dim=RNN_HIDDEN_DIM, hidden_dim=HIDDEN_DIM, \n dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)\nmodel = model.to(device) # set device\nprint (model.named_parameters)",
"<bound method Module.named_parameters of RNN(\n (embeddings): Embedding(5000, 100, padding_idx=0)\n (rnn): RNN(100, 128, batch_first=True)\n (dropout): Dropout(p=0.1, inplace=False)\n (fc1): Linear(in_features=128, out_features=100, bias=True)\n (fc2): Linear(in_features=100, out_features=4, bias=True)\n)>\n"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"from torch.optim import Adam",
"_____no_output_____"
],
[
"NUM_LAYERS = 1\nLEARNING_RATE = 1e-4\nPATIENCE = 10\nNUM_EPOCHS = 50",
"_____no_output_____"
],
[
"# Define Loss\nclass_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)\nloss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)",
"_____no_output_____"
],
[
"# Define optimizer & scheduler\noptimizer = Adam(model.parameters(), lr=LEARNING_RATE) \nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode='min', factor=0.1, patience=3)",
"_____no_output_____"
],
[
"# Trainer module\ntrainer = Trainer(\n model=model, device=device, loss_fn=loss_fn, \n optimizer=optimizer, scheduler=scheduler)",
"_____no_output_____"
],
[
"# Train\nbest_model = trainer.train(\n NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)",
"Epoch: 1 | train_loss: 1.25500, val_loss: 1.12003, lr: 1.00E-04, _patience: 10\nEpoch: 2 | train_loss: 1.03130, val_loss: 0.97659, lr: 1.00E-04, _patience: 10\nEpoch: 3 | train_loss: 0.89955, val_loss: 0.87245, lr: 1.00E-04, _patience: 10\nEpoch: 4 | train_loss: 0.79928, val_loss: 0.79484, lr: 1.00E-04, _patience: 10\nEpoch: 5 | train_loss: 0.72322, val_loss: 0.73841, lr: 1.00E-04, _patience: 10\nEpoch: 6 | train_loss: 0.66636, val_loss: 0.69743, lr: 1.00E-04, _patience: 10\nEpoch: 7 | train_loss: 0.62149, val_loss: 0.66580, lr: 1.00E-04, _patience: 10\nEpoch: 8 | train_loss: 0.58615, val_loss: 0.64251, lr: 1.00E-04, _patience: 10\nEpoch: 9 | train_loss: 0.55711, val_loss: 0.62356, lr: 1.00E-04, _patience: 10\nEpoch: 10 | train_loss: 0.53208, val_loss: 0.60801, lr: 1.00E-04, _patience: 10\nEpoch: 11 | train_loss: 0.51060, val_loss: 0.59539, lr: 1.00E-04, _patience: 10\nEpoch: 12 | train_loss: 0.49146, val_loss: 0.58582, lr: 1.00E-04, _patience: 10\nEpoch: 13 | train_loss: 0.47394, val_loss: 0.57700, lr: 1.00E-04, _patience: 10\nEpoch: 14 | train_loss: 0.45890, val_loss: 0.57153, lr: 1.00E-04, _patience: 10\nEpoch: 15 | train_loss: 0.44470, val_loss: 0.56508, lr: 1.00E-04, _patience: 10\nEpoch: 16 | train_loss: 0.43173, val_loss: 0.56183, lr: 1.00E-04, _patience: 10\nEpoch: 17 | train_loss: 0.41952, val_loss: 0.55895, lr: 1.00E-04, _patience: 10\nEpoch: 18 | train_loss: 0.40817, val_loss: 0.55745, lr: 1.00E-04, _patience: 10\nEpoch: 19 | train_loss: 0.39774, val_loss: 0.55570, lr: 1.00E-04, _patience: 10\nEpoch: 20 | train_loss: 0.38761, val_loss: 0.55598, lr: 1.00E-04, _patience: 9\nEpoch: 21 | train_loss: 0.37759, val_loss: 0.55710, lr: 1.00E-04, _patience: 8\nEpoch: 22 | train_loss: 0.36872, val_loss: 0.55861, lr: 1.00E-04, _patience: 7\nEpoch: 23 | train_loss: 0.35990, val_loss: 0.55922, lr: 1.00E-05, _patience: 6\nEpoch: 24 | train_loss: 0.33898, val_loss: 0.53767, lr: 1.00E-05, _patience: 10\nEpoch: 25 | train_loss: 0.33417, val_loss: 0.53845, lr: 1.00E-05, _patience: 9\nEpoch: 26 | train_loss: 0.33196, val_loss: 0.53936, lr: 1.00E-05, _patience: 8\nEpoch: 27 | train_loss: 0.33048, val_loss: 0.54046, lr: 1.00E-05, _patience: 7\nEpoch: 28 | train_loss: 0.32941, val_loss: 0.54174, lr: 1.00E-06, _patience: 6\nEpoch: 29 | train_loss: 0.32617, val_loss: 0.53706, lr: 1.00E-06, _patience: 10\nEpoch: 30 | train_loss: 0.32539, val_loss: 0.53703, lr: 1.00E-06, _patience: 10\nEpoch: 31 | train_loss: 0.32537, val_loss: 0.53706, lr: 1.00E-06, _patience: 9\nEpoch: 32 | train_loss: 0.32483, val_loss: 0.53709, lr: 1.00E-06, _patience: 8\nEpoch: 33 | train_loss: 0.32509, val_loss: 0.53717, lr: 1.00E-07, _patience: 7\nEpoch: 34 | train_loss: 0.32449, val_loss: 0.53715, lr: 1.00E-07, _patience: 6\nEpoch: 35 | train_loss: 0.32475, val_loss: 0.53718, lr: 1.00E-07, _patience: 5\nEpoch: 36 | train_loss: 0.32441, val_loss: 0.53720, lr: 1.00E-07, _patience: 4\nEpoch: 37 | train_loss: 0.32452, val_loss: 0.53722, lr: 1.00E-08, _patience: 3\nEpoch: 38 | train_loss: 0.32467, val_loss: 0.53722, lr: 1.00E-08, _patience: 2\nEpoch: 39 | train_loss: 0.32426, val_loss: 0.53723, lr: 1.00E-08, _patience: 1\nStopping early!\n"
]
],
[
[
"## Evaluation",
"_____no_output_____"
]
],
[
[
"import json\nfrom sklearn.metrics import precision_recall_fscore_support",
"_____no_output_____"
],
[
"def get_performance(y_true, y_pred, classes):\n \"\"\"Per-class performance metrics.\"\"\"\n # Performance\n performance = {\"overall\": {}, \"class\": {}}\n\n # Overall performance\n metrics = precision_recall_fscore_support(y_true, y_pred, average=\"weighted\")\n performance[\"overall\"][\"precision\"] = metrics[0]\n performance[\"overall\"][\"recall\"] = metrics[1]\n performance[\"overall\"][\"f1\"] = metrics[2]\n performance[\"overall\"][\"num_samples\"] = np.float64(len(y_true))\n\n # Per-class performance\n metrics = precision_recall_fscore_support(y_true, y_pred, average=None)\n for i in range(len(classes)):\n performance[\"class\"][classes[i]] = {\n \"precision\": metrics[0][i],\n \"recall\": metrics[1][i],\n \"f1\": metrics[2][i],\n \"num_samples\": np.float64(metrics[3][i]),\n }\n\n return performance",
"_____no_output_____"
],
[
"# Get predictions\ntest_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)\ny_pred = np.argmax(y_prob, axis=1)",
"_____no_output_____"
],
[
"# Determine performance\nperformance = get_performance(\n y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)\nprint (json.dumps(performance['overall'], indent=2))",
"{\n \"precision\": 0.8211429212272771,\n \"recall\": 0.8212777777777778,\n \"f1\": 0.8208202838924475,\n \"num_samples\": 18000.0\n}\n"
]
],
[
[
"# Gated RNNs: LSTMs & GRUs",
"_____no_output_____"
],
[
"While our simple RNNs so far are great for sequentially processing our inputs, they have quite a few disadvantages. They commonly suffer from exploding or vanishing gradients as a result using the same set of weights ($W_{xh}$ and $W_{hh}$) with each timestep's input. During backpropagation, this can cause gradients to explode (>1) or vanish (<1). If you multiply any number greater than 1 with itself over and over, it moves towards infinity (exploding gradients) and similarly, If you multiply any number less than 1 with itself over and over, it moves towards zero (vanishing gradients). To mitigate this issue, gated RNNs were devised to selectively retrain information. If you're interested in learning more of the specifics, this [post](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) is a must-read.\n\nThere are two popular types of gated RNNs: Long Short-term Memory (LSTMs) units and Gated Recurrent Units (GRUs).\n\n<div align=\"left\">\n<img src=\"https://raw.githubusercontent.com/GokuMohandas/MadeWithML/main/images/foundations/rnn/gated.png\" width=\"600\">\n</div>\n<a href=\"http://colah.github.io/posts/2015-08-Understanding-LSTMs/\">Understanding LSTM Networks</a> - Chris Olah\n\n\n> When deciding between LSTMs and GRUs, empirical performance is the best factor but in genreal GRUs offer similar performance with less complexity (less weights). \n\n",
"_____no_output_____"
]
],
[
[
"# Input\nsequence_size = 8 # words per input\nx = torch.rand((BATCH_SIZE, sequence_size, EMBEDDING_DIM))\nprint (x.shape)",
"torch.Size([64, 8, 100])\n"
],
[
"# GRU \ngru = nn.GRU(input_size=EMBEDDING_DIM, hidden_size=RNN_HIDDEN_DIM, batch_first=True)",
"_____no_output_____"
],
[
"# Forward pass\nout, h_n = gru(x)\nprint (f\"out: {out.shape}\")\nprint (f\"h_n: {h_n.shape}\")",
"out: torch.Size([64, 8, 128])\nh_n: torch.Size([1, 64, 128])\n"
]
],
[
[
"## Bidirectional RNN",
"_____no_output_____"
],
[
"We can also have RNNs that process inputs from both directions (first token to last token and vice versa) and combine their outputs. This architecture is known as a bidirectional RNN. ",
"_____no_output_____"
]
],
[
[
"# GRU \ngru = nn.GRU(input_size=EMBEDDING_DIM, hidden_size=RNN_HIDDEN_DIM, \n batch_first=True, bidirectional=True)",
"_____no_output_____"
],
[
"# Forward pass\nout, h_n = gru(x)\nprint (f\"out: {out.shape}\")\nprint (f\"h_n: {h_n.shape}\")",
"out: torch.Size([64, 8, 256])\nh_n: torch.Size([2, 64, 128])\n"
]
],
[
[
"Notice that the output for each sample at each timestamp has size 256 (double the `RNN_HIDDEN_DIM`). This is because this includes both the forward and backward directions from the BiRNN. ",
"_____no_output_____"
],
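[
"# Editor's sketch (not in the original lesson): the bidirectional GRU concatenates the forward\n# and backward hidden states along the last dimension, so we can split them back apart.\ndirections = out.view(BATCH_SIZE, sequence_size, 2, RNN_HIDDEN_DIM)\nforward_out, backward_out = directions[:, :, 0, :], directions[:, :, 1, :]\nprint(forward_out.shape, backward_out.shape) # torch.Size([64, 8, 128]) each",
"_____no_output_____"
],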
[
"## Model",
"_____no_output_____"
]
],
[
[
"class GRU(nn.Module):\n def __init__(self, embedding_dim, vocab_size, rnn_hidden_dim,\n hidden_dim, dropout_p, num_classes, padding_idx=0):\n super(GRU, self).__init__()\n \n # Initialize embeddings\n self.embeddings = nn.Embedding(embedding_dim=embedding_dim,\n num_embeddings=vocab_size,\n padding_idx=padding_idx)\n \n # RNN\n self.rnn = nn.GRU(embedding_dim, rnn_hidden_dim, \n batch_first=True, bidirectional=True)\n \n # FC weights\n self.dropout = nn.Dropout(dropout_p)\n self.fc1 = nn.Linear(rnn_hidden_dim*2, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, num_classes)\n\n def forward(self, inputs):\n # Embed\n x_in, seq_lens = inputs\n x_in = self.embeddings(x_in)\n \n # Rnn outputs\n out, h_n = self.rnn(x_in)\n z = gather_last_relevant_hidden(hiddens=out, seq_lens=seq_lens)\n\n # FC layers\n z = self.fc1(z)\n z = self.dropout(z)\n z = self.fc2(z)\n return z",
"_____no_output_____"
],
[
"# Simple RNN cell\nmodel = GRU(\n embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE, \n rnn_hidden_dim=RNN_HIDDEN_DIM, hidden_dim=HIDDEN_DIM, \n dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)\nmodel = model.to(device) # set device\nprint (model.named_parameters)",
"<bound method Module.named_parameters of GRU(\n (embeddings): Embedding(5000, 100, padding_idx=0)\n (rnn): GRU(100, 128, batch_first=True, bidirectional=True)\n (dropout): Dropout(p=0.1, inplace=False)\n (fc1): Linear(in_features=256, out_features=100, bias=True)\n (fc2): Linear(in_features=100, out_features=4, bias=True)\n)>\n"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"# Define Loss\nclass_weights_tensor = torch.Tensor(list(class_weights.values())).to(device)\nloss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor)",
"_____no_output_____"
],
[
"# Define optimizer & scheduler\noptimizer = Adam(model.parameters(), lr=LEARNING_RATE) \nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode='min', factor=0.1, patience=3)",
"_____no_output_____"
],
[
"# Trainer module\ntrainer = Trainer(\n model=model, device=device, loss_fn=loss_fn, \n optimizer=optimizer, scheduler=scheduler)",
"_____no_output_____"
],
[
"# Train\nbest_model = trainer.train(\n NUM_EPOCHS, PATIENCE, train_dataloader, val_dataloader)",
"Epoch: 1 | train_loss: 1.16930, val_loss: 0.94210, lr: 1.00E-04, _patience: 10\nEpoch: 2 | train_loss: 0.82127, val_loss: 0.72819, lr: 1.00E-04, _patience: 10\nEpoch: 3 | train_loss: 0.65862, val_loss: 0.64288, lr: 1.00E-04, _patience: 10\nEpoch: 4 | train_loss: 0.58088, val_loss: 0.60078, lr: 1.00E-04, _patience: 10\nEpoch: 5 | train_loss: 0.53120, val_loss: 0.57543, lr: 1.00E-04, _patience: 10\nEpoch: 6 | train_loss: 0.49535, val_loss: 0.55925, lr: 1.00E-04, _patience: 10\nEpoch: 7 | train_loss: 0.46603, val_loss: 0.54962, lr: 1.00E-04, _patience: 10\nEpoch: 8 | train_loss: 0.44215, val_loss: 0.54387, lr: 1.00E-04, _patience: 10\nEpoch: 9 | train_loss: 0.42093, val_loss: 0.53825, lr: 1.00E-04, _patience: 10\nEpoch: 10 | train_loss: 0.40259, val_loss: 0.53758, lr: 1.00E-04, _patience: 10\nEpoch: 11 | train_loss: 0.38635, val_loss: 0.53765, lr: 1.00E-04, _patience: 9\nEpoch: 12 | train_loss: 0.37150, val_loss: 0.53935, lr: 1.00E-04, _patience: 8\nEpoch: 13 | train_loss: 0.35735, val_loss: 0.54147, lr: 1.00E-04, _patience: 7\nEpoch: 14 | train_loss: 0.34519, val_loss: 0.54368, lr: 1.00E-05, _patience: 6\nEpoch: 15 | train_loss: 0.31900, val_loss: 0.53039, lr: 1.00E-05, _patience: 10\nEpoch: 16 | train_loss: 0.31445, val_loss: 0.53071, lr: 1.00E-05, _patience: 9\nEpoch: 17 | train_loss: 0.31230, val_loss: 0.53166, lr: 1.00E-05, _patience: 8\nEpoch: 18 | train_loss: 0.31042, val_loss: 0.53303, lr: 1.00E-05, _patience: 7\nEpoch: 19 | train_loss: 0.30883, val_loss: 0.53416, lr: 1.00E-06, _patience: 6\nEpoch: 20 | train_loss: 0.30481, val_loss: 0.53245, lr: 1.00E-06, _patience: 5\nEpoch: 21 | train_loss: 0.30435, val_loss: 0.53248, lr: 1.00E-06, _patience: 4\nEpoch: 22 | train_loss: 0.30458, val_loss: 0.53250, lr: 1.00E-06, _patience: 3\nEpoch: 23 | train_loss: 0.30449, val_loss: 0.53258, lr: 1.00E-07, _patience: 2\nEpoch: 24 | train_loss: 0.30425, val_loss: 0.53252, lr: 1.00E-07, _patience: 1\nStopping early!\n"
]
],
[
[
"## Evaluation",
"_____no_output_____"
]
],
[
[
"from pathlib import Path",
"_____no_output_____"
],
[
"# Get predictions\ntest_loss, y_true, y_prob = trainer.eval_step(dataloader=test_dataloader)\ny_pred = np.argmax(y_prob, axis=1)",
"_____no_output_____"
],
[
"# Determine performance\nperformance = get_performance(\n y_true=y_test, y_pred=y_pred, classes=label_encoder.classes)\nprint (json.dumps(performance['overall'], indent=2))",
"{\n \"precision\": 0.8206922733621068,\n \"recall\": 0.8209444444444445,\n \"f1\": 0.8205583307860935,\n \"num_samples\": 18000.0\n}\n"
],
[
"# Save artifacts\ndir = Path(\"gru\")\ndir.mkdir(parents=True, exist_ok=True)\nlabel_encoder.save(fp=Path(dir, 'label_encoder.json'))\ntokenizer.save(fp=Path(dir, 'tokenizer.json'))\ntorch.save(best_model.state_dict(), Path(dir, 'model.pt'))\nwith open(Path(dir, 'performance.json'), \"w\") as fp:\n json.dump(performance, indent=2, sort_keys=False, fp=fp)",
"_____no_output_____"
]
],
[
[
"## Inference",
"_____no_output_____"
]
],
[
[
"def get_probability_distribution(y_prob, classes):\n \"\"\"Create a dict of class probabilities from an array.\"\"\"\n results = {}\n for i, class_ in enumerate(classes):\n results[class_] = np.float64(y_prob[i])\n sorted_results = {k: v for k, v in sorted(\n results.items(), key=lambda item: item[1], reverse=True)}\n return sorted_results",
"_____no_output_____"
],
[
"# Load artifacts\ndevice = torch.device(\"cpu\")\nlabel_encoder = LabelEncoder.load(fp=Path(dir, 'label_encoder.json'))\ntokenizer = Tokenizer.load(fp=Path(dir, 'tokenizer.json'))\nmodel = GRU(\n embedding_dim=EMBEDDING_DIM, vocab_size=VOCAB_SIZE, \n rnn_hidden_dim=RNN_HIDDEN_DIM, hidden_dim=HIDDEN_DIM, \n dropout_p=DROPOUT_P, num_classes=NUM_CLASSES)\nmodel.load_state_dict(torch.load(Path(dir, 'model.pt'), map_location=device))\nmodel.to(device)",
"_____no_output_____"
],
[
"# Initialize trainer\ntrainer = Trainer(model=model, device=device)",
"_____no_output_____"
],
[
"# Dataloader\ntext = \"The final tennis tournament starts next week.\"\nX = tokenizer.texts_to_sequences([preprocess(text)])\nprint (tokenizer.sequences_to_texts(X))\ny_filler = label_encoder.encode([label_encoder.classes[0]]*len(X))\ndataset = Dataset(X=X, y=y_filler)\ndataloader = dataset.create_dataloader(batch_size=batch_size)",
"['final tennis tournament starts next week']\n"
],
[
"# Inference\ny_prob = trainer.predict_step(dataloader)\ny_pred = np.argmax(y_prob, axis=1)\nlabel_encoder.decode(y_pred)",
"_____no_output_____"
],
[
"# Class distributions\nprob_dist = get_probability_distribution(y_prob=y_prob[0], classes=label_encoder.classes)\nprint (json.dumps(prob_dist, indent=2))",
"{\n \"Sports\": 0.9944021105766296,\n \"World\": 0.004813326057046652,\n \"Sci/Tech\": 0.0007053817971609533,\n \"Business\": 7.924934470793232e-05\n}\n"
]
],
[
[
"> We will learn how to create more context-aware representations and a little bit of interpretability with RNNs in the next lesson on <a target=\"_blank\" href=\"https://madewithml.com/courses/foundations/attention/\">attention</a>.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e74516389ea5c3607af5b9d0ffc52e6aed1115ba | 21,341 | ipynb | Jupyter Notebook | courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/1_core_tensorflow.ipynb | anoopdobhal/training-data-analyst | 48994b56fcecd5129e1f1b7a266d26ac0bfcd824 | [
"Apache-2.0"
] | null | null | null | courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/1_core_tensorflow.ipynb | anoopdobhal/training-data-analyst | 48994b56fcecd5129e1f1b7a266d26ac0bfcd824 | [
"Apache-2.0"
] | null | null | null | courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/1_core_tensorflow.ipynb | anoopdobhal/training-data-analyst | 48994b56fcecd5129e1f1b7a266d26ac0bfcd824 | [
"Apache-2.0"
] | null | null | null | 24.005624 | 553 | 0.524437 | [
[
[
"# Getting started with TensorFlow\n\n**Learning Objectives**\n 1. Practice defining and performing basic operations on constant Tensors\n 1. Use Tensorflow's automatic differentiation capability\n 1. Learn how to train a linear regression from scratch with TensorFLow\n",
"_____no_output_____"
],
[
"In this notebook, we will start by reviewing the main operations on Tensors in TensorFlow and understand how to manipulate TensorFlow Variables. We explain how these are compatible with python built-in list and numpy arrays. \n\nThen we will jump to the problem of training a linear regression from scratch with gradient descent. The first order of business will be to understand how to compute the gradients of a function (the loss here) with respect to some of its arguments (the model weights here). The TensorFlow construct allowing us to do that is `tf.GradientTape`, which we will describe. \n\nAt last we will create a simple training loop to learn the weights of a 1-dim linear regression using synthetic data generated from a linear model. \n\nAs a bonus exercise, we will do the same for data generated from a non linear model, forcing us to manual engineer non-linear features to improve our linear model performance.",
"_____no_output_____"
]
],
[
[
"!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst",
"_____no_output_____"
],
[
"# Ensure the right version of Tensorflow is installed.\n!pip freeze | grep tensorflow==2.1",
"_____no_output_____"
],
[
"import numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf",
"_____no_output_____"
],
[
"print(tf.__version__)",
"2.1.3\n"
]
],
[
[
"## Operations on Tensors",
"_____no_output_____"
],
[
"### Variables and Constants",
"_____no_output_____"
],
[
"Tensors in TensorFlow are either contant (`tf.constant`) or variables (`tf.Variable`).\nConstant values can not be changed, while variables values can be.\n\nThe main difference is that instances of `tf.Variable` have methods allowing us to change \ntheir values while tensors constructed with `tf.constant` don't have these methods, and\ntherefore their values can not be changed. When you want to change the value of a `tf.Variable`\n`x` use one of the following method: \n\n* `x.assign(new_value)`\n* `x.assign_add(value_to_be_added)`\n* `x.assign_sub(value_to_be_subtracted`\n\n",
"_____no_output_____"
]
],
[
[
"x = tf.constant([2, 3, 4])\nx",
"_____no_output_____"
],
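        [
            "# A quick check of the difference described above (illustration only): a constant\n# tensor has no assign() method, so it cannot be modified in place, while a Variable can.\nprint(hasattr(x, \"assign\"))                       # False: x is a tf.constant here\nprint(hasattr(tf.Variable([2, 3, 4]), \"assign\"))  # True: Variables can be updated",
            "_____no_output_____"
        ],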
[
"x = tf.Variable(2.0, dtype=tf.float32, name='my_variable')",
"_____no_output_____"
],
[
"x.assign(45.8) # TODO 1\nx",
"_____no_output_____"
],
[
"x.assign_add(4) # TODO 2\nx",
"_____no_output_____"
],
[
"x.assign_sub(3) # TODO 3\nx",
"_____no_output_____"
]
],
[
[
"### Point-wise operations",
"_____no_output_____"
],
[
"Tensorflow offers similar point-wise tensor operations as numpy does:\n \n* `tf.add` allows to add the components of a tensor \n* `tf.multiply` allows us to multiply the components of a tensor\n* `tf.subtract` allow us to substract the components of a tensor\n* `tf.math.*` contains the usual math operations to be applied on the components of a tensor\n* and many more...\n\nMost of the standard aritmetic operations (`tf.add`, `tf.substrac`, etc.) are overloaded by the usual corresponding arithmetic symbols (`+`, `-`, etc.)",
"_____no_output_____"
]
],
[
[
"a = tf.constant([5, 3, 8]) # TODO 1\nb = tf.constant([3, -1, 2])\nc = tf.add(a, b)\nd = a + b\n\nprint(\"c:\", c)\nprint(\"d:\", d)",
"c: tf.Tensor([ 8 2 10], shape=(3,), dtype=int32)\nd: tf.Tensor([ 8 2 10], shape=(3,), dtype=int32)\n"
],
[
"a = tf.constant([5, 3, 8]) # TODO 2\nb = tf.constant([3, -1, 2])\nc = tf.multiply(a, b)\nd = a * b\n\nprint(\"c:\", c)\nprint(\"d:\", d)",
"c: tf.Tensor([15 -3 16], shape=(3,), dtype=int32)\nd: tf.Tensor([15 -3 16], shape=(3,), dtype=int32)\n"
],
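        [
            "# For completeness, the subtraction case mentioned in the list above: tf.subtract\n# and the overloaded `-` operator should give the same result.\na = tf.constant([5, 3, 8])\nb = tf.constant([3, -1, 2])\nc = tf.subtract(a, b)\nd = a - b\n\nprint(\"c:\", c)\nprint(\"d:\", d)",
            "_____no_output_____"
        ],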
[
"# tf.math.exp expects floats so we need to explicitly give the type\na = tf.constant([5, 3, 8], dtype=tf.float32)\nb = tf.math.exp(a)\n\nprint(\"b:\", b)",
"b: tf.Tensor([ 148.41316 20.085537 2980.958 ], shape=(3,), dtype=float32)\n"
]
],
[
[
"### NumPy Interoperability\n\nIn addition to native TF tensors, tensorflow operations can take native python types and NumPy arrays as operands. ",
"_____no_output_____"
]
],
[
[
"# native python list\na_py = [1, 2] \nb_py = [3, 4] ",
"_____no_output_____"
],
[
"tf.add(a_py, b_py) # TODO 1",
"_____no_output_____"
],
[
"# numpy arrays\na_np = np.array([1, 2])\nb_np = np.array([3, 4])",
"_____no_output_____"
],
[
"tf.add(a_np, b_np) # TODO 2",
"_____no_output_____"
],
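        [
            "# The conversion above happens implicitly; tf.convert_to_tensor performs the same\n# NumPy-to-tensor conversion explicitly if a tensor object is needed directly.\ntf.convert_to_tensor(a_np)",
            "_____no_output_____"
        ],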
[
"# native TF tensor\na_tf = tf.constant([1, 2])\nb_tf = tf.constant([3, 4])",
"_____no_output_____"
],
[
"tf.add(a_tf, b_tf) # TODO 3",
"_____no_output_____"
]
],
[
[
"You can convert a native TF tensor to a NumPy array using .numpy()",
"_____no_output_____"
]
],
[
[
"a_tf.numpy()",
"_____no_output_____"
]
],
[
[
"## Linear Regression\n\nNow let's use low level tensorflow operations to implement linear regression.\n\nLater in the course you'll see abstracted ways to do this using high level TensorFlow.",
"_____no_output_____"
],
[
"### Toy Dataset\n\nWe'll model the following function:\n\n\\begin{equation}\ny= 2x + 10\n\\end{equation}",
"_____no_output_____"
]
],
[
[
"X = tf.constant(range(10), dtype=tf.float32)\nY = 2 * X + 10\n\nprint(\"X:{}\".format(X))\nprint(\"Y:{}\".format(Y))",
"X:[0. 1. 2. 3. 4. 5. 6. 7. 8. 9.]\nY:[10. 12. 14. 16. 18. 20. 22. 24. 26. 28.]\n"
]
],
[
[
"Let's also create a test dataset to evaluate our models:",
"_____no_output_____"
]
],
[
[
"X_test = tf.constant(range(10, 20), dtype=tf.float32)\nY_test = 2 * X_test + 10\n\nprint(\"X_test:{}\".format(X_test))\nprint(\"Y_test:{}\".format(Y_test))",
"X_test:[10. 11. 12. 13. 14. 15. 16. 17. 18. 19.]\nY_test:[30. 32. 34. 36. 38. 40. 42. 44. 46. 48.]\n"
]
],
[
[
"#### Loss Function",
"_____no_output_____"
],
[
"The simplest model we can build is a model that for each value of x returns the sample mean of the training set:",
"_____no_output_____"
]
],
[
[
"y_mean = Y.numpy().mean()\n\n\ndef predict_mean(X):\n y_hat = [y_mean] * len(X)\n return y_hat\n\nY_hat = predict_mean(X_test)",
"_____no_output_____"
]
],
[
[
"Using mean squared error, our loss is:\n\\begin{equation}\nMSE = \\frac{1}{m}\\sum_{i=1}^{m}(\\hat{Y}_i-Y_i)^2\n\\end{equation}",
"_____no_output_____"
],
[
"For this simple model the loss is then:",
"_____no_output_____"
]
],
[
[
"errors = (Y_hat - Y)**2\nloss = tf.reduce_mean(errors)\nloss.numpy()",
"_____no_output_____"
]
],
[
[
"This values for the MSE loss above will give us a baseline to compare how a more complex model is doing.",
"_____no_output_____"
],
[
"Now, if $\\hat{Y}$ represents the vector containing our model's predictions when we use a linear regression model\n\\begin{equation}\n\\hat{Y} = w_0X + w_1\n\\end{equation}\n\nwe can write a loss function taking as arguments the coefficients of the model:",
"_____no_output_____"
]
],
[
[
"def loss_mse(X, Y, w0, w1):\n Y_hat = w0 * X + w1\n errors = (Y_hat - Y)**2\n return tf.reduce_mean(errors)",
"_____no_output_____"
]
],
[
[
"### Gradient Function\n\nTo use gradient descent we need to take the partial derivatives of the loss function with respect to each of the weights. We could manually compute the derivatives, but with Tensorflow's automatic differentiation capabilities we don't have to!\n\nDuring gradient descent we think of the loss as a function of the parameters $w_0$ and $w_1$. Thus, we want to compute the partial derivative with respect to these variables. \n\nFor that we need to wrap our loss computation within the context of `tf.GradientTape` instance which will reccord gradient information:\n\n```python\nwith tf.GradientTape() as tape:\n loss = # computation \n```\n\nThis will allow us to later compute the gradients of any tensor computed within the `tf.GradientTape` context with respect to instances of `tf.Variable`:\n\n```python\ngradients = tape.gradient(loss, [w0, w1])\n```",
"_____no_output_____"
],
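        [
            "For reference, the partial derivatives that the tape will compute for us here can also be written by hand as\n\\begin{equation}\n\\frac{\\partial MSE}{\\partial w_0} = \\frac{2}{m}\\sum_{i=1}^{m}(w_0X_i + w_1 - Y_i)X_i \\qquad\n\\frac{\\partial MSE}{\\partial w_1} = \\frac{2}{m}\\sum_{i=1}^{m}(w_0X_i + w_1 - Y_i)\n\\end{equation}\nso the values returned by `tape.gradient` can be sanity-checked against these expressions.",
            "_____no_output_____"
        ],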
[
"We illustrate this procedure with by computing the loss gradients with respect to the model weights:",
"_____no_output_____"
]
],
[
[
"# TODO 1\ndef compute_gradients(X, Y, w0, w1):\n with tf.GradientTape() as tape:\n loss = loss_mse(X, Y, w0, w1)\n return tape.gradient(loss, [w0, w1])",
"_____no_output_____"
],
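        [
            "# A minimal sketch of the same mechanism on a scalar function (illustration only):\n# for f(u) = u^2 the derivative is 2u, so at u = 3 the tape should return 6.\nu = tf.Variable(3.0)\nwith tf.GradientTape() as tape:\n    f = u ** 2\nprint(tape.gradient(f, u).numpy())",
            "_____no_output_____"
        ],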
[
"w0 = tf.Variable(0.0)\nw1 = tf.Variable(0.0)\n\ndw0, dw1 = compute_gradients(X, Y, w0, w1)",
"_____no_output_____"
],
[
"print(\"dw0:\", dw0.numpy())",
"_____no_output_____"
],
[
"print(\"dw1\", dw1.numpy())",
"_____no_output_____"
]
],
[
[
"### Training Loop\n\nHere we have a very simple training loop that converges. Note we are ignoring best practices like batching, creating a separate test set, and random weight initialization for the sake of simplicity.",
"_____no_output_____"
]
],
[
[
"STEPS = 1000\nLEARNING_RATE = .02\nMSG = \"STEP {step} - loss: {loss}, w0: {w0}, w1: {w1}\\n\"\n\n\nw0 = tf.Variable(0.0)\nw1 = tf.Variable(0.0)\n\n\nfor step in range(0, STEPS + 1):\n\n dw0, dw1 = compute_gradients(X, Y, w0, w1)\n w0.assign_sub(dw0 * LEARNING_RATE)\n w1.assign_sub(dw1 * LEARNING_RATE)\n\n if step % 100 == 0:\n loss = loss_mse(X, Y, w0, w1)\n print(MSG.format(step=step, loss=loss, w0=w0.numpy(), w1=w1.numpy()))\n",
"_____no_output_____"
]
],
[
[
"Now let's compare the test loss for this linear regression to the test loss from the baseline model that outputs always the mean of the training set:",
"_____no_output_____"
]
],
[
[
"loss = loss_mse(X_test, Y_test, w0, w1)\nloss.numpy()",
"_____no_output_____"
]
],
[
[
"This is indeed much better!",
"_____no_output_____"
],
[
"## Bonus",
"_____no_output_____"
],
[
"Try modelling a non-linear function such as: $y=xe^{-x^2}$",
"_____no_output_____"
]
],
[
[
"X = tf.constant(np.linspace(0, 2, 1000), dtype=tf.float32)\nY = X * tf.exp(-X**2)",
"_____no_output_____"
],
[
"%matplotlib inline\n\nplt.plot(X, Y)",
"_____no_output_____"
],
[
"def make_features(X):\n f1 = tf.ones_like(X) # Bias.\n f2 = X\n f3 = tf.square(X)\n f4 = tf.sqrt(X)\n f5 = tf.exp(X)\n return tf.stack([f1, f2, f3, f4, f5], axis=1)",
"_____no_output_____"
],
[
"def predict(X, W):\n return tf.squeeze(X @ W, -1)",
"_____no_output_____"
],
[
"def loss_mse(X, Y, W):\n Y_hat = predict(X, W)\n errors = (Y_hat - Y)**2\n return tf.reduce_mean(errors)",
"_____no_output_____"
],
[
"def compute_gradients(X, Y, W):\n with tf.GradientTape() as tape:\n loss = loss_mse(Xf, Y, W)\n return tape.gradient(loss, W)",
"_____no_output_____"
],
[
"# TODO 2\nSTEPS = 2000\nLEARNING_RATE = .02\n\n\nXf = make_features(X)\nn_weights = Xf.shape[1]\n\nW = tf.Variable(np.zeros((n_weights, 1)), dtype=tf.float32)\n\n# For plotting\nsteps, losses = [], []\nplt.figure()\n\n\nfor step in range(1, STEPS + 1):\n\n dW = compute_gradients(X, Y, W)\n W.assign_sub(dW * LEARNING_RATE)\n\n if step % 100 == 0:\n loss = loss_mse(Xf, Y, W)\n steps.append(step)\n losses.append(loss)\n plt.clf()\n plt.plot(steps, losses)\n\n\nprint(\"STEP: {} MSE: {}\".format(STEPS, loss_mse(Xf, Y, W)))\n\nplt.figure()\nplt.plot(X, Y, label='actual')\nplt.plot(X, predict(Xf, W), label='predicted')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e745333a6525a251bbeba921e6a2a13ff636b757 | 101,000 | ipynb | Jupyter Notebook | Project/SageMaker Project.ipynb | radleap/sagemaker-deployment | 27f31b333381aa73403bf567bf9bf0c5041c1282 | [
"MIT"
] | null | null | null | Project/SageMaker Project.ipynb | radleap/sagemaker-deployment | 27f31b333381aa73403bf567bf9bf0c5041c1282 | [
"MIT"
] | null | null | null | Project/SageMaker Project.ipynb | radleap/sagemaker-deployment | 27f31b333381aa73403bf567bf9bf0c5041c1282 | [
"MIT"
] | null | null | null | 37.870266 | 4,963 | 0.531653 | [
[
[
"# Creating a Sentiment Analysis Web App\n## Using PyTorch and SageMaker\n\n_Deep Learning Nanodegree Program | Deployment_\n\n---\n\nNow that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.\n\n## Instructions\n\nSome template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!\n\nIn addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.\n\n> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.\n\n## General Outline\n\nRecall the general outline for SageMaker projects using a notebook instance.\n\n1. Download or otherwise retrieve the data.\n2. Process / Prepare the data.\n3. Upload the processed data to S3.\n4. Train a chosen model.\n5. Test the trained model (typically using a batch transform job).\n6. Deploy the trained model.\n7. Use the deployed model.\n\nFor this project, you will be following the steps in the general outline with some modifications. \n\nFirst, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.\n\nIn addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.",
"_____no_output_____"
],
[
"## Step 1: Downloading the data\n\nAs in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)\n\n> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.",
"_____no_output_____"
]
],
[
[
"%mkdir ../data\n!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\n!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data",
"--2019-10-01 04:56:57-- http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\nResolving ai.stanford.edu (ai.stanford.edu)... 171.64.68.10\nConnecting to ai.stanford.edu (ai.stanford.edu)|171.64.68.10|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 84125825 (80M) [application/x-gzip]\nSaving to: ‘../data/aclImdb_v1.tar.gz’\n\n../data/aclImdb_v1. 100%[===================>] 80.23M 22.7MB/s in 4.2s \n\n2019-10-01 04:57:01 (19.1 MB/s) - ‘../data/aclImdb_v1.tar.gz’ saved [84125825/84125825]\n\n"
]
],
[
[
"## Step 2: Preparing and Processing the data\n\nAlso, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.",
"_____no_output_____"
]
],
[
[
"import os\nimport glob\n\ndef read_imdb_data(data_dir='../data/aclImdb'):\n data = {}\n labels = {}\n \n for data_type in ['train', 'test']:\n data[data_type] = {}\n labels[data_type] = {}\n \n for sentiment in ['pos', 'neg']:\n data[data_type][sentiment] = []\n labels[data_type][sentiment] = []\n \n path = os.path.join(data_dir, data_type, sentiment, '*.txt')\n files = glob.glob(path)\n \n for f in files:\n with open(f) as review:\n data[data_type][sentiment].append(review.read())\n # Here we represent a positive review by '1' and a negative review by '0'\n labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)\n \n assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \\\n \"{}/{} data size does not match labels size\".format(data_type, sentiment)\n \n return data, labels",
"_____no_output_____"
],
[
"data, labels = read_imdb_data()\nprint(\"IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg\".format(\n len(data['train']['pos']), len(data['train']['neg']),\n len(data['test']['pos']), len(data['test']['neg'])))",
"IMDB reviews: train = 12500 pos / 12500 neg, test = 12500 pos / 12500 neg\n"
]
],
[
[
"Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.",
"_____no_output_____"
]
],
[
[
"from sklearn.utils import shuffle\n\ndef prepare_imdb_data(data, labels):\n \"\"\"Prepare training and test sets from IMDb movie reviews.\"\"\"\n \n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n \n #Shuffle reviews and corresponding labels within training and test sets\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n \n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test",
"_____no_output_____"
],
[
"train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)\nprint(\"IMDb reviews (combined): train = {}, test = {}\".format(len(train_X), len(test_X)))",
"IMDb reviews (combined): train = 25000, test = 25000\n"
]
],
[
[
"Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.",
"_____no_output_____"
]
],
[
[
"print(train_X[100])\nprint(train_y[100])",
"FORBIDDEN PLANET is one of the best examples of Hollywood SF films. Its influence was felt for more than a decade. However, certain elements relating to how this wide-screen entertainment was aimed at a mid-fifties audience that is now gone have dated it quite a bit, and the film's sometimes sluggish pacing doesn't help. But, the story's compelling central idea involving the ancient,extinct Krell civilization and \"monsters from the Id\" hasn't lost its appeal and continue to make this film a relevant \"must see\" movie. What I'm mostly interested in saying here is that the current DVD for this movie is terrible. The movie has never really looked that good on home video and it's elements are in dire need of restoration. I hope that will happen soon and we get a special edition of this SF classic.\n1\n"
]
],
[
[
"The first step in processing the reviews is to make sure that any html tags that appear should be removed. In addition we wish to tokenize our input, that way words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.",
"_____no_output_____"
]
],
[
[
"import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import *\n\nimport re\nfrom bs4 import BeautifulSoup\n\ndef review_to_words(review):\n nltk.download(\"stopwords\", quiet=True)\n stemmer = PorterStemmer()\n \n text = BeautifulSoup(review, \"html.parser\").get_text() # Remove HTML tags\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower()) # Convert to lower case\n words = text.split() # Split string into words\n words = [w for w in words if w not in stopwords.words(\"english\")] # Remove stopwords\n words = [PorterStemmer().stem(w) for w in words] # stem\n \n return words",
"_____no_output_____"
]
],
[
[
"The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.",
"_____no_output_____"
]
],
[
[
"# TODO: Apply review_to_words to a review (train_X[100] or any other review)\nreview_to_words(train_X[100])",
"_____no_output_____"
]
],
[
[
"**Question:** Above we mentioned that `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?",
"_____no_output_____"
],
[
"**Answer:** The function review_to_words also removes common English stopwords, puts all to lower case, removes punctuation, and splits/tokenizes the words.",
"_____no_output_____"
],
[
"The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.",
"_____no_output_____"
]
],
[
[
"import pickle\n\ncache_dir = os.path.join(\"../cache\", \"sentiment_analysis\") # where to store cache files\nos.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists\n\ndef preprocess_data(data_train, data_test, labels_train, labels_test,\n cache_dir=cache_dir, cache_file=\"preprocessed_data.pkl\"):\n \"\"\"Convert each review to words; read from cache if available.\"\"\"\n\n # If cache_file is not None, try to read from it first\n cache_data = None\n if cache_file is not None:\n try:\n with open(os.path.join(cache_dir, cache_file), \"rb\") as f:\n cache_data = pickle.load(f)\n print(\"Read preprocessed data from cache file:\", cache_file)\n except:\n pass # unable to read from cache, but that's okay\n \n # If cache is missing, then do the heavy lifting\n if cache_data is None:\n # Preprocess training and test data to obtain words for each review\n #words_train = list(map(review_to_words, data_train))\n #words_test = list(map(review_to_words, data_test))\n words_train = [review_to_words(review) for review in data_train]\n words_test = [review_to_words(review) for review in data_test]\n \n # Write to cache file for future runs\n if cache_file is not None:\n cache_data = dict(words_train=words_train, words_test=words_test,\n labels_train=labels_train, labels_test=labels_test)\n with open(os.path.join(cache_dir, cache_file), \"wb\") as f:\n pickle.dump(cache_data, f)\n print(\"Wrote preprocessed data to cache file:\", cache_file)\n else:\n # Unpack data loaded from cache file\n words_train, words_test, labels_train, labels_test = (cache_data['words_train'],\n cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])\n \n return words_train, words_test, labels_train, labels_test",
"_____no_output_____"
],
[
"# Preprocess data\ntrain_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y) # reads it first if pickled",
"Read preprocessed data from cache file: preprocessed_data.pkl\n"
],
[
"len(train_X)",
"_____no_output_____"
],
[
"print(train_X[1])",
"['perspect', 'good', 'thing', 'sinc', 'releas', 'star', 'war', 'episod', 'phantom', 'menac', 'claim', 'counter', 'claim', 'episod', 'ii', 'iii', 'eventu', 'taken', 'spotlight', 'origin', 'star', 'war', 'film', 'make', 'part', 'cohes', 'whole', 'rather', 'segreg', 'older', 'new', 'film', 'separ', 'trilog', 'new', 'film', 'done', 'allow', 'fresh', 'perspect', 'place', 'older', 'film', 'new', 'outlook', 'allow', 'us', 'greater', 'appreci', 'often', 'view', 'weakest', 'origin', 'trilog', 'return', 'jedi', 'often', 'derid', 'overli', 'cute', 'factor', 'rotj', 'sens', 'strong', 'origin', 'slightli', 'less', 'impress', 'nearli', 'perfect', 'empir', 'strike', 'back', 'inde', 'cute', 'element', 'rotj', 'name', 'ewok', 'remain', 'weak', 'link', 'entir', 'seri', 'georg', 'luca', 'place', 'furri', 'midget', 'film', 'pure', 'merchandis', 'possibl', 'answer', 'question', 'cute', 'factor', 'asid', 'film', 'brilliant', 'full', 'circl', 'evolut', 'saga', 'follow', 'conclus', 'empir', 'strike', 'back', 'luke', 'skywalk', 'mark', 'hamil', 'follow', 'rebel', 'allianc', 'friend', 'tatooin', 'home', 'planet', 'rescu', 'han', 'solo', 'harrison', 'ford', 'space', 'pirat', 'turn', 'rebel', 'hero', 'captur', 'jabba', 'hutt', 'overdu', 'debt', 'skywalk', 'chang', 'man', 'sinc', 'leav', 'tatooin', 'ben', 'obi', 'wan', 'kenobi', 'alec', 'guiness', 'fight', 'evil', 'empir', 'swath', 'black', 'luke', 'discoveri', 'origin', 'left', 'confus', 'torn', 'psycholog', 'make', 'strong', 'outward', 'appear', 'would', 'suggest', 'might', 'aim', 'alway', 'assist', 'rebel', 'friend', 'yearn', 'anoth', 'chanc', 'confront', 'evil', 'darth', 'vader', 'despit', 'unassured', 'whether', 'destroy', 'eventu', 'turn', 'dark', 'side', 'join', 'vader', 'emperor', 'side', 'earli', 'scene', 'tatooin', 'impress', 'jabba', 'lair', 'float', 'palac', 'almighti', 'sarlac', 'intenstin', 'live', 'sand', 'luca', 'cgi', 'enhanc', 'film', '1997', 'actual', 'worsen', 'overal', 'effect', 'sarlac', 'make', 'look', 'fake', 'overdon', 'battl', 'scene', 'tatooin', 'outstand', 'one', 'memor', 'saga', 'luke', 'almost', 'singlehandedli', 'anihili', 'jabba', 'croni', 'prove', 'prowess', 'jedi', 'almost', 'complet', 'luke', 'return', 'degobah', 'system', 'visit', 'ail', 'yoda', 'one', 'time', 'viewer', 'let', 'yoda', 'distinct', 'lack', 'screentim', 'undoubt', 'star', 'empir', 'strike', 'back', 'yoda', 'eras', 'stori', 'progress', 'luke', 'destini', 'play', 'screen', 'rotj', 'realli', 'luke', 'film', 'perhap', 'even', 'origin', 'journey', 'carri', 'movi', 'move', 'closer', 'confront', 'darth', 'vader', 'fate', 'rebel', 'charact', 'certainli', 'work', 'shadow', 'romanc', 'leia', 'carri', 'fisher', 'solo', 'non', 'exist', 'unlik', 'empir', 'fact', 'leia', 'charact', 'develop', 'rotj', 'solo', 'charact', 'seem', 'fade', 'facet', 'person', 'becom', 'familiar', 'first', 'two', 'film', 'role', 'consign', 'work', 'alongsid', 'rebel', 'destroy', 'new', 'death', 'star', 'near', 'complet', 'time', 'emperor', 'overse', 'final', 'stage', 'construct', 'empir', 'intend', 'crush', 'rebellion', 'emperor', 'scheme', 'bring', 'power', 'skywalk', 'side', 'work', 'alongsid', 'replac', 'darth', 'vader', 'emperor', 'differ', 'kind', 'evil', 'film', 'less', 'cun', 'governor', 'tarkin', 'peter', 'cush', 'star', 'war', 'deepli', 'psycholog', 'dark', 'anyth', 'els', 'play', 'brilliantli', 'ian', 'mcdiarmid', 'emperor', 'one', 'charact', 'love', 'hate', 'actor', 'well', 'entrench', 'role', 'hamil', 'surpris', 'wisen', 'luke', 'make', 'charact', 'progress', 'whini', 'teenag', 'impati', 
'student', 'enlighten', 'warrior', 'one', 'real', 'charact', 'develop', 'seri', 'ford', 'role', 'ware', 'thin', 'charm', 'charisma', 'spent', 'first', 'two', 'film', 'real', 'star', 'first', 'film', 'fisher', 'leia', 'prop', 'least', 'unti', 'end', 'film', 'learn', 'thing', 'never', 'sure', 'add', 'favourit', 'like', 'alec', 'guiness', 'kenobi', 'yoda', 'loveabl', 'chewbacca', 'c', '3po', 'r2d2', 'seri', 'resembl', 'famili', 'cast', 'despit', 'film', 'corni', 'forest', 'battl', 'involv', 'ewok', 'empir', 'end', 'well', 'includ', 'three', 'way', 'battl', 'sequenc', 'endor', 'space', 'death', 'star', 'impress', 'special', 'effect', 'music', 'alway', 'brilliant', 'captur', 'mood', 'perfectli', 'everi', 'instanc', 'blue', 'danub', 'work', 'perfectli', '2001', 'space', 'odyssey', 'john', 'william', 'score', 'much', 'part', 'star', 'war', 'folklor', 'light', 'saber', 'forc', 'luca', 'left', 'end', 'open', 'interpret', 'mean', 'could', 'episod', 'made', 'inde', 'sci', 'fi', 'fan', 'creat', 'version', 'episod', 'vii', 'viii', 'ix', 'head', 'rotj', 'work', 'given', 'chanc', 'furri', 'cute', 'anim', 'asid', 'good', 'finish', 'seri', 'six', 'episod', 'get', 'view', 'togeth', 'saga', 'could', 'well', 'best', 'ever', 'made', 'alreadi', 'addit', 'episod', 'chang', 'landscap', 'seri', 'return', 'jedi', 'view', 'differ', 'light', 'given', 'whole', 'new', 'appreci', 'nearli', '20', 'year', 'releas']\n"
]
],
[
[
"## Transform the data\n\nIn the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.\n\nSince we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.",
"_____no_output_____"
],
[
"### (TODO) Create a word dictionary\n\nTo begin with, we need to construct a way to map words that appear in the reviews to integers. Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model.\n\n> **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.",
"_____no_output_____"
]
],
[
[
"from collections import Counter\n\nbase_dict = {}\n\ndef tokenize(corpus):\n for review in corpus:\n for word in review:\n if word in base_dict:\n base_dict[word] += 1\n else:\n base_dict[word] = 1\n return(base_dict)",
"_____no_output_____"
],
[
"tokenize(train_X)",
"_____no_output_____"
],
[
"from sklearn.feature_extraction.text import CountVectorizer\n#nltk.download('punkt')\nfrom nltk import word_tokenize\n\ndef build_dict(data, vocab_size = 5000):\n \"\"\"Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.\"\"\"\n \n # TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a\n # sentence is a list of words.\n #vectorizer = CountVectorizer(max_features=vocab_size)\n #bag_of_words = vectorizer.fit_transform(data).toarray()\n #vocabulary = vectorizer.vocabulary_\n \n tokenizer_review = word_tokenize(data)\n \n return tokenizer_review\n\n#word_dict = build_dict(train_X[0])\n#word_dict",
"_____no_output_____"
],
[
"import numpy as np\n\ndef build_dict(data, vocab_size = 5000):\n \"\"\"Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer.\"\"\"\n \n # TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a\n # sentence is a list of words.\n \n word_count = {} # A dict storing the words that appear in the reviews along with how often they occur\n \n # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and\n # sorted_words[-1] is the least frequently appearing word.\n \n sorted_words = None\n \n word_dict = {} # This is what we are building, a dictionary that translates words into integers\n for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word'\n word_dict[word] = idx + 2 # 'infrequent' labels\n \n return word_dict",
"_____no_output_____"
],
[
"type(train_X)",
"_____no_output_____"
],
[
"word_dict = build_dict(train_X)",
"_____no_output_____"
]
],
[
[
"**Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it makes sense that these words appear frequently in the training set?",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
]
],
[
[
"# TODO: Use this space to determine the five most frequently appearing words in the training set.",
"_____no_output_____"
]
],
[
[
"### Save `word_dict`\n\nLater on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. As such, we will save it to a file now for future use.",
"_____no_output_____"
]
],
[
[
"data_dir = '../data/pytorch' # The folder we will use for storing data\nif not os.path.exists(data_dir): # Make sure that the folder exists\n os.makedirs(data_dir)",
"_____no_output_____"
],
[
"with open(os.path.join(data_dir, 'word_dict.pkl'), \"wb\") as f:\n pickle.dump(word_dict, f)",
"_____no_output_____"
]
],
[
[
"### Transform the reviews\n\nNow that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.",
"_____no_output_____"
]
],
[
[
"def convert_and_pad(word_dict, sentence, pad=500):\n NOWORD = 0 # We will use 0 to represent the 'no word' category\n INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict\n \n working_sentence = [NOWORD] * pad\n \n for word_index, word in enumerate(sentence[:pad]):\n if word in word_dict:\n working_sentence[word_index] = word_dict[word]\n else:\n working_sentence[word_index] = INFREQ\n \n return working_sentence, min(len(sentence), pad)\n\ndef convert_and_pad_data(word_dict, data, pad=500):\n result = []\n lengths = []\n \n for sentence in data:\n converted, leng = convert_and_pad(word_dict, sentence, pad)\n result.append(converted)\n lengths.append(leng)\n \n return np.array(result), np.array(lengths)",
"_____no_output_____"
],
[
"train_X, train_X_len = convert_and_pad_data(word_dict, train_X)\ntest_X, test_X_len = convert_and_pad_data(word_dict, test_X)",
"_____no_output_____"
]
],
[
[
"As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processeed. Does this look reasonable? What is the length of a review in the training set?",
"_____no_output_____"
]
],
[
[
"# Use this cell to examine one of the processed reviews to make sure everything is working as intended.",
"_____no_output_____"
]
],
[
[
"**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Why or why not might this be a problem?",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
],
[
"## Step 3: Upload the data to S3\n\nAs in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.\n\n### Save the processed training dataset locally\n\nIt is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n \npd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \\\n .to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)",
"_____no_output_____"
]
],
[
[
"### Uploading the training data\n\n\nNext, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.",
"_____no_output_____"
]
],
[
[
"import sagemaker\n\nsagemaker_session = sagemaker.Session()\n\nbucket = sagemaker_session.default_bucket()\nprefix = 'sagemaker/sentiment_rnn'\n\nrole = sagemaker.get_execution_role()",
"_____no_output_____"
],
[
"input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)",
"_____no_output_____"
]
],
[
[
"**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.",
"_____no_output_____"
],
[
"## Step 4: Build and Train the PyTorch Model\n\nIn the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects\n\n - Model Artifacts,\n - Training Code, and\n - Inference Code,\n \neach of which interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code.\n\nWe will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. You can see the provided implementation by running the cell below.",
"_____no_output_____"
]
],
[
[
"!pygmentize train/model.py",
"_____no_output_____"
]
],
[
[
"The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.\n\nFirst we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. However, we can work on a small bit of the data to get a feel for how our training script is behaving.",
"_____no_output_____"
]
],
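    [
        [
            "# Illustrative sketch only -- the provided train/model.py (shown above) is the source\n# of truth. This shows roughly where the three tweakable parameters enter a model of this\n# kind, matching how LSTMClassifier(embedding_dim, hidden_dim, vocab_size) is used below:\n# vocab_size and embedding_dim size the embedding table, and hidden_dim sizes the LSTM\n# state that feeds the final sentiment score.\nimport torch.nn as nn\n\nclass SketchLSTMClassifier(nn.Module):\n    def __init__(self, embedding_dim, hidden_dim, vocab_size):\n        super().__init__()\n        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n        self.lstm = nn.LSTM(embedding_dim, hidden_dim)\n        self.dense = nn.Linear(hidden_dim, 1)\n        self.sig = nn.Sigmoid()\n\n    def forward(self, x):\n        # x arrives as (batch, 1 + 500): the first column is the review length and the\n        # remaining columns are the word integers (see the data format defined earlier).\n        x = x.t()\n        lengths = x[0, :]\n        reviews = x[1:, :]\n        embeds = self.embedding(reviews)\n        lstm_out, _ = self.lstm(embeds)\n        out = self.dense(lstm_out)\n        out = out[lengths - 1, range(len(lengths))]\n        return self.sig(out.squeeze())",
            "_____no_output_____"
        ]
    ],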
[
[
"import torch\nimport torch.utils.data\n\n# Read in only the first 250 rows\ntrain_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)\n\n# Turn the input pandas dataframe into tensors\ntrain_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()\ntrain_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()\n\n# Build the dataset\ntrain_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)\n# Build the dataloader\ntrain_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)",
"_____no_output_____"
]
],
[
[
"### (TODO) Writing the training method\n\nNext we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.",
"_____no_output_____"
]
],
[
[
"def train(model, train_loader, epochs, optimizer, loss_fn, device):\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))",
"_____no_output_____"
]
],
[
[
"Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.",
"_____no_output_____"
]
],
[
[
"import torch.optim as optim\nfrom train.model import LSTMClassifier\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = LSTMClassifier(32, 100, 5000).to(device)\noptimizer = optim.Adam(model.parameters())\nloss_fn = torch.nn.BCELoss()\n\ntrain(model, train_sample_dl, 5, optimizer, loss_fn, device)",
"_____no_output_____"
]
],
[
[
"In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.",
"_____no_output_____"
],
[
"### (TODO) Training the model\n\nWhen a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.\n\n**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.\n\nThe way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.",
"_____no_output_____"
]
],
[
[
"from sagemaker.pytorch import PyTorch\n\nestimator = PyTorch(entry_point=\"train.py\",\n source_dir=\"train\",\n role=role,\n framework_version='0.4.0',\n train_instance_count=1,\n train_instance_type='ml.p2.xlarge',\n hyperparameters={\n 'epochs': 10,\n 'hidden_dim': 200,\n })",
"_____no_output_____"
],
[
"estimator.fit({'training': input_data})",
"_____no_output_____"
]
],
[
[
"## Step 5: Testing the model\n\nAs mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.\n\n## Step 6: Deploy the model for testing\n\nNow that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.\n\nThere is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.\n\n**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )\n\nSince we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.\n\n**NOTE:** When deploying a model you are asking SageMaker to launch an compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.\n\nIn other words **If you are no longer using a deployed endpoint, shut it down!**\n\n**TODO:** Deploy the trained model.",
"_____no_output_____"
]
],
[
[
"# TODO: Deploy the trained model",
"_____no_output_____"
]
],
[
[
"## Step 7 - Use the model for testing\n\nOnce deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.",
"_____no_output_____"
]
],
[
[
"test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)",
"_____no_output_____"
],
[
"# We split the data into chunks and send each chunk seperately, accumulating the results.\n\ndef predict(data, rows=512):\n split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))\n predictions = np.array([])\n for array in split_array:\n predictions = np.append(predictions, predictor.predict(array))\n \n return predictions",
"_____no_output_____"
],
[
"predictions = predict(test_X.values)\npredictions = [round(num) for num in predictions]",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(test_y, predictions)",
"_____no_output_____"
]
],
[
[
"**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
],
[
"### (TODO) More testing\n\nWe now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.",
"_____no_output_____"
]
],
[
[
"test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'",
"_____no_output_____"
]
],
[
[
"The question we now need to answer is, how do we send this review to our model?\n\nRecall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.\n - Removed any html tags and stemmed the input\n - Encoded the review as a sequence of integers using `word_dict`\n \nIn order process the review we will need to repeat these two steps.\n\n**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. Remember that our model expects input of the form `review_length, review[500]`.",
"_____no_output_____"
]
],
[
[
"# TODO: Convert test_review into a form usable by the model and save the results in test_data\ntest_data = None",
"_____no_output_____"
]
],
[
[
"Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.",
"_____no_output_____"
]
],
[
[
"predictor.predict(test_data)",
"_____no_output_____"
]
],
[
[
"Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.",
"_____no_output_____"
],
[
"### Delete the endpoint\n\nOf course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.",
"_____no_output_____"
]
],
[
[
"estimator.delete_endpoint()",
"_____no_output_____"
]
],
[
[
"## Step 6 (again) - Deploy the model for the web app\n\nNow that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.\n\nAs we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.\n\nWe will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.\n\nWhen deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.\n - `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.\n - `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.\n - `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.\n - `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.\n\nFor the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.\n\n### (TODO) Writing inference code\n\nBefore writing our custom inference code, we will begin by taking a look at the code which has been provided.",
"_____no_output_____"
]
],
[
[
"!pygmentize serve/predict.py",
"_____no_output_____"
]
],
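    [
        [
            "# A sketch of one way the predict_fn() in serve/predict.py could be completed\n# (illustration only -- adapt it to the actual file). It assumes that review_to_words and\n# convert_and_pad are available there (via serve/utils.py) and that the provided model_fn\n# attaches the saved word dictionary to the loaded model as model.word_dict.\ndef sketch_predict_fn(input_data, model):\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    data_words = review_to_words(input_data)                        # raw review string -> word list\n    data_X, data_len = convert_and_pad(model.word_dict, data_words) # word list -> 500 integers + length\n    data_pack = np.hstack((data_len, data_X)).reshape(1, -1)        # (1, 501): length followed by the words\n    data = torch.from_numpy(data_pack).long().to(device)\n    model.eval()\n    with torch.no_grad():\n        result = model(data).cpu().numpy()\n    return result",
            "_____no_output_____"
        ]
    ],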
[
[
"As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.\n\n**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.",
"_____no_output_____"
],
[
"### Deploying the model\n\nNow that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.\n\n**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accomodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to sent image data.",
"_____no_output_____"
]
],
[
[
"from sagemaker.predictor import RealTimePredictor\nfrom sagemaker.pytorch import PyTorchModel\n\nclass StringPredictor(RealTimePredictor):\n def __init__(self, endpoint_name, sagemaker_session):\n super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')\n\nmodel = PyTorchModel(model_data=estimator.model_data,\n role = role,\n framework_version='0.4.0',\n entry_point='predict.py',\n source_dir='serve',\n predictor_cls=StringPredictor)\npredictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')",
"_____no_output_____"
]
],
[
[
"### Testing the model\n\nNow that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.",
"_____no_output_____"
]
],
[
[
"import glob\n\ndef test_reviews(data_dir='../data/aclImdb', stop=250):\n \n results = []\n ground = []\n \n # We make sure to test both positive and negative reviews \n for sentiment in ['pos', 'neg']:\n \n path = os.path.join(data_dir, 'test', sentiment, '*.txt')\n files = glob.glob(path)\n \n files_read = 0\n \n print('Starting ', sentiment, ' files')\n \n # Iterate through the files and send them to the predictor\n for f in files:\n with open(f) as review:\n # First, we store the ground truth (was the review positive or negative)\n if sentiment == 'pos':\n ground.append(1)\n else:\n ground.append(0)\n # Read in the review and convert to 'utf-8' for transmission via HTTP\n review_input = review.read().encode('utf-8')\n # Send the review to the predictor and store the results\n results.append(int(predictor.predict(review_input)))\n \n # Sending reviews to our endpoint one at a time takes a while so we\n # only send a small number of reviews\n files_read += 1\n if files_read == stop:\n break\n \n return ground, results",
"_____no_output_____"
],
[
"ground, results = test_reviews()",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score\naccuracy_score(ground, results)",
"_____no_output_____"
]
],
[
[
"As an additional test, we can try sending the `test_review` that we looked at earlier.",
"_____no_output_____"
]
],
[
[
"predictor.predict(test_review)",
"_____no_output_____"
]
],
[
[
"Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.",
"_____no_output_____"
],
[
"## Step 7 (again): Use the model for the web app\n\n> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.\n\nSo far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.\n\n<img src=\"Web App Diagram.svg\">\n\nThe diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.\n\nIn the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and recieve data from a SageMaker endpoint.\n\nLastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.\n\n### Setting up a Lambda function\n\nThe first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.\n\n#### Part A: Create an IAM Role for the Lambda function\n\nSince we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.\n\nUsing the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.\n\nIn the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.\n\nLastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.\n\n#### Part B: Create a Lambda function\n\nNow it is time to actually create the Lambda function.\n\nUsing the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. 
Then, click on **Create Function**.\n\nOn the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below. \n\n```python\n# We need to use the low-level library to interact with SageMaker since the SageMaker API\n# is not available natively through Lambda.\nimport boto3\n\ndef lambda_handler(event, context):\n\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given\n response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created\n ContentType = 'text/plain', # The data format that is expected\n Body = event['body']) # The actual review\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n return {\n 'statusCode' : 200,\n 'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },\n 'body' : result\n }\n```\n\nOnce you have copy and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. You can determine the name of the endpoint using the code cell below.",
"_____no_output_____"
]
],
[
[
"predictor.endpoint",
"_____no_output_____"
]
],
[
[
"Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.\n\n### Setting up API Gateway\n\nNow that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.\n\nUsing AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.\n\nOn the next page, make sure that **New API** is selected and give the new api a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.\n\nNow we have created an API, however it doesn't currently do anything. What we want it to do is to trigger the Lambda function that we created earlier.\n\nSelect the **Actions** dropdown menu and click **Create Method**. A new blank method will be created, select its dropdown menu and select **POST**, then click on the check mark beside it.\n\nFor the integration point, make sure that **Lambda Function** is selected and click on the **Use Lambda Proxy integration**. This option makes sure that the data that is sent to the API is then sent directly to the Lambda function with no processing. It also means that the return value must be a proper response object as it will also not be processed by API Gateway.\n\nType the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.\n\nThe last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.\n\nYou have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.",
"_____no_output_____"
],
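[
"As an optional sanity check (not part of the original project), you could exercise the new public API from any Python environment using the third-party `requests` library before wiring up the web page. The URL below is only a placeholder for the Invoke URL you just wrote down (including the deployment stage).\n\n```python\nimport requests  # third-party HTTP client, assumed to be installed locally\n\n# Placeholder: replace with your own API Gateway Invoke URL, including the stage (e.g. .../prod)\napi_url = 'https://REPLACE-WITH-YOUR-INVOKE-URL/prod'\n\nreview = 'This movie was an absolute delight from start to finish!'\nresponse = requests.post(api_url,\n                         data=review.encode('utf-8'),\n                         headers={'Content-Type': 'text/plain'})\n\n# The body returned by the Lambda function should be the 0/1 sentiment prediction\nprint(response.text)\n```",
"_____no_output_____"
],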
[
"## Step 4: Deploying our web app\n\nNow that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public api you created earlier.\n\nIn the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\\*\\*REPLACE WITH PUBLIC API URL\\*\\***. Replace this string with the url that you wrote down in the last step and then save the file.\n\nNow, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.\n\nIf you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!\n\n> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.\n\n**TODO:** Make sure that you include the edited `index.html` file in your project submission.",
"_____no_output_____"
],
[
"Now that your web app is working, trying playing around with it and see how well it works.\n\n**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?",
"_____no_output_____"
],
[
"**Answer:**",
"_____no_output_____"
],
[
"### Delete the endpoint\n\nRemember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running so if you forget and leave it on you could end up with an unexpectedly large bill.",
"_____no_output_____"
]
],
[
[
"predictor.delete_endpoint()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
]
] |
e7454131096fcb8f8f6c562f27baa00fc1df46f1 | 752 | ipynb | Jupyter Notebook | scientific_computing_UW-AMATH301/HW_Python/HW_Template.ipynb | jot33/learn_data_science | 8324672acff2523bc7a98eacaec96dd97335af09 | [
"MIT"
] | null | null | null | scientific_computing_UW-AMATH301/HW_Python/HW_Template.ipynb | jot33/learn_data_science | 8324672acff2523bc7a98eacaec96dd97335af09 | [
"MIT"
] | null | null | null | scientific_computing_UW-AMATH301/HW_Python/HW_Template.ipynb | jot33/learn_data_science | 8324672acff2523bc7a98eacaec96dd97335af09 | [
"MIT"
] | null | null | null | 17.904762 | 49 | 0.533245 | [
[
[
"# AMATH 301 - HW0\nUniversity of Washington\\\nDue 10/6/2021",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e7454aa28ccacc7b65012f7ab9f2d2cbe1894ff6 | 115,265 | ipynb | Jupyter Notebook | course2/session5/kadenze_mir_c2_s5_5_precion_recall_fmeasure.ipynb | Achilleasein/mir_program_kadenze | adc204f82dff565fe615e20681b84c94c2cff10d | [
"CC0-1.0"
] | 19 | 2021-03-16T00:00:29.000Z | 2022-02-01T05:03:45.000Z | course2/session5/kadenze_mir_c2_s5_5_precion_recall_fmeasure.ipynb | Achilleasein/mir_program_kadenze | adc204f82dff565fe615e20681b84c94c2cff10d | [
"CC0-1.0"
] | null | null | null | course2/session5/kadenze_mir_c2_s5_5_precion_recall_fmeasure.ipynb | Achilleasein/mir_program_kadenze | adc204f82dff565fe615e20681b84c94c2cff10d | [
"CC0-1.0"
] | 9 | 2021-03-16T03:07:45.000Z | 2022-02-12T04:29:03.000Z | 68.650983 | 560 | 0.717278 | [
[
[
"# Precision, recall and f-measure \n### George Tzanetakis, University of Victoria \n\nIn this notebook we go over the terminology of retrieval metrics: precision, recall and f-measure and how they are used in a variety of Music Information Retrieval (MIR) tasks. We first examine their classic original use as measures for the effectiveness of text information retrieval systems. Then we explore how they are used for evaluating classification systems, auto-tagging systems, as well as systems that estimate time boundaries (such as beat tracking and structure segmentation). \n",
"_____no_output_____"
],
[
"## Text Retrieval \n\nThe original scenario for which retrieval metrics were proposed is topic-based text retrieval. Today we are all familiar with this concept from search engines. The idea is that the user submits a query and the text retrieval system returns a set of what the system thinks are relevant documents. To evaluate the system an expert rates each returned document as relevant or not. \n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np \n\n# suppose that are query is beatles and we are interested in retrieving documents \n# about the corresponding insect species \nquery = 'beatles'\n\n# Let's say a set of 10 documents is returned - their exact representation is not important \n# and could be a bag-of-words representation, text, a list of keywords etc \n\n# an expert/user goes over these automatically retrieved documents and marks \n# them as relevant or not relevant. \n# The documents that are about the insects are marked with a 1 and the \n# ones that are not relevant (perhaps about the music group) are marked\n# with a zero. \nretrieved = np.array([1,1,0,1,1,1,1,0,1,0])\n",
"_____no_output_____"
]
],
[
[
"Precision is defined as the number of relevant retrieved documents divded by the number of returned documents. So for our particular example the precision is 0.70 or 70%. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndef precision(retrieved):\n retrieved_relevant = np.count_nonzero(retrieved == 1)\n return (retrieved_relevant / len(retrieved))\n\n\nprint(\"Precision = %.2f\\n\" % (precision(retrieved)))",
"Precision = 0.70\n\n"
]
],
[
[
"Notice that we can improve the precision in this case by returning less items. For example the precision when returning the first two items is 1 or 100%. ",
"_____no_output_____"
]
],
[
[
"less_retrieved = np.array([1,1])\nprint(\"Precision = %.2f\\n\" % (precision(retrieved)))",
"Precision = 0.70\n\n"
]
],
[
[
"Now suppose that in the set of documents we are considering there are 15 documents about beatles (the insect species). Our set of retrieved results only contains 7 of them and therefore the recall in this case is 7/15=0.47",
"_____no_output_____"
]
],
[
[
"def recall(retrieved, num_relevant):\n retrieved_relevant = np.count_nonzero(retrieved == 1)\n return (retrieved_relevant / num_relevant)",
"_____no_output_____"
],
[
"retrieved = np.array([1,1,0,1,1,1,1,0,1,0])\nprint(\"Recall = %.2f\\n\" % (recall(retrieved,15)))",
"Recall = 0.47\n\n"
]
],
[
[
"We can trivially increase recall by returning more items with the extreme cases of returning all documents as relevant at the expense of diminishing precision. Alternatively we can only return less and less document increasing precision at the expense of recall. An effective retrieval system should achieve both high precision and recall. The most common measure that balances these two metrics is the f1-score or f-measure defined as the \nharmonic mean of precision and recall. \n\n\\begin{equation} \nF_{1} = 2 * \\frac{precision * recall}{precision + recall}\n\\end{equation} ",
"_____no_output_____"
]
],
[
[
"def f1_score(precision, recall): \n return 2 * ((precision * recall) / (precision+recall))\n\nprecision = 0.7 \nrecall = 0.47\nf1 = f1_score(precision,recall)\nprint(\"F1 = %.2f\\n\" % f1)",
"F1 = 0.56\n\n"
]
],
[
[
"## Binary Classification \n\nFor a binary classification problem we can consider the set of predictions as the retrieved documents and the ground truth as the annotations of relevance. For example suppose that we have a music/speech classification system that predicts 100 instances and lets say that 50 of them are labeled as music and 50 of them are labeled as speech. A system that would predict everything as music would have high recall (i.e all music instances according to the ground truth would be predicted correctly) but 0.5 precision as half of the predicted instances \n(the ones labeled speech according to the ground truth) would not be correctly predicted. A system that predicted a single instance of music correctly as music and all the other instances as speech would have a precision of 1 but really bad recall 1/50. Notice that in any binary classification system that outputs a class probability we can trade precision and recall by adjusting the threhold for classification. F1-score is defined similarly as with text retrieval as the harmonic mean of precision and recall. \n\nAnother way to view precision and recall is through the terminology used for binary classification problems in medical tests which is in terms of true positives (TP), true negatives (TN), false positives (FP) ad false negatives (FN). It is easy to see that using this terminology precision can be defined as: \n\n\\begin{equation}\nPrecision = \\frac{TP}{TP+FP} \n\\end{equation} \n\n\\begin{equation} \nRecall = \\frac{TP}{TP+FN} \n\\end{equation} ",
"_____no_output_____"
],
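[
"As a small illustrative sketch (with made-up labels, not data from this notebook), the definitions above can be computed directly from a vector of ground truth labels and a vector of predictions:\n\n```python\nimport numpy as np\n\n# toy ground truth and predictions for a music(1)/speech(0) classifier\ny_true = np.array([1, 1, 1, 0, 0, 0, 1, 0, 1, 0])\ny_pred = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 1])\n\ntp = np.sum((y_pred == 1) & (y_true == 1))  # true positives\nfp = np.sum((y_pred == 1) & (y_true == 0))  # false positives\nfn = np.sum((y_pred == 0) & (y_true == 1))  # false negatives\n\nprecision = tp / float(tp + fp)\nrecall = tp / float(tp + fn)\nf1 = 2 * precision * recall / (precision + recall)\nprint(precision, recall, f1)\n```",
"_____no_output_____"
],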
[
"## Multi-class and multi-label classification\n\nFor a multi-class classification problem with K classes we can not directly calculate retrieval metrics as described. There are three common approaches that are used. In macro-averaging the metrics are computed \nindependetly for each class and then the average is taken (this means that each class is treated equally even \nthough some classes might have more instances/support than others). A micro-average aggregates the contributions of each class and computes the metric overall which means that classes that are over-represented will effect the metrics more. Finally weighted average returns support-weighted metrics. \n\nIn multi-label classification (auto-tagging) each instances can be assigned more than one label. The approach to calculating retrieval metrics is similar with macro-averaging meaning that retrieval metrics are calculated separately for each tag column and then averagedm and micro-averaging meaning that retrieval metrics are calculating over the entiure matrix of predictions. \n\nLet's look at a particular example from musical genre classification. First audio features are extracted for the GTZAN dataset. \n\n\n",
"_____no_output_____"
]
],
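[
[
"Before the GTZAN example, here is a toy sketch (hypothetical labels, not data from this notebook) of the different averaging schemes using scikit-learn's `f1_score`. Note how the macro and micro averages diverge when one class has more support than the others.\n\n```python\nfrom sklearn.metrics import f1_score\n\n# hypothetical multi-class labels: class 0 has more support than classes 1 and 2\ny_true = [0, 0, 0, 0, 1, 1, 2, 2]\ny_pred = [0, 0, 0, 1, 1, 2, 2, 2]\n\nprint('macro F1:', f1_score(y_true, y_pred, average='macro'))\nprint('micro F1:', f1_score(y_true, y_pred, average='micro'))\nprint('weighted F1:', f1_score(y_true, y_pred, average='weighted'))\n```",
"_____no_output_____"
]
],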
[
[
"\n\nimport glob\nimport librosa\nimport numpy as np\n\nfnames = glob.glob(\"/Users/georgetzanetakis/data/sound/genres/*/*.wav\")\n\ngenres = ['classical', 'country', 'disco', 'hiphop', 'jazz', 'rock', 'blues', 'reggae', 'pop', 'metal']\n\n# allocate matrix for audio features and target \naudio_features = np.zeros((len(fnames), 40))\ntarget = np.zeros(len(fnames))\n\n# compute the features \nfor (i,fname) in enumerate(fnames): \n #print(\"Processing %d %s\" % (i, fname))\n for (label,genre) in enumerate(genres): \n if genre in fname: \n audio, srate = librosa.load(fname)\n mfcc_matrix = librosa.feature.mfcc(y=audio, sr=srate)\n mean_mfcc = np.mean(mfcc_matrix,axis=1)\n std_mfcc = np.std(mfcc_matrix, axis=1)\n audio_fvec = np.hstack([mean_mfcc, std_mfcc])\n audio_features[i] = audio_fvec\n target[i] = label\n\nprint(audio_features.shape)\n",
"Processing 0 /Users/georgetzanetakis/data/sound/genres/pop/pop.00027.wav\nProcessing 1 /Users/georgetzanetakis/data/sound/genres/pop/pop.00033.wav\nProcessing 2 /Users/georgetzanetakis/data/sound/genres/pop/pop.00032.wav\nProcessing 3 /Users/georgetzanetakis/data/sound/genres/pop/pop.00026.wav\nProcessing 4 /Users/georgetzanetakis/data/sound/genres/pop/pop.00030.wav\nProcessing 5 /Users/georgetzanetakis/data/sound/genres/pop/pop.00024.wav\nProcessing 6 /Users/georgetzanetakis/data/sound/genres/pop/pop.00018.wav\nProcessing 7 /Users/georgetzanetakis/data/sound/genres/pop/pop.00019.wav\nProcessing 8 /Users/georgetzanetakis/data/sound/genres/pop/pop.00025.wav\nProcessing 9 /Users/georgetzanetakis/data/sound/genres/pop/pop.00031.wav\nProcessing 10 /Users/georgetzanetakis/data/sound/genres/pop/pop.00009.wav\nProcessing 11 /Users/georgetzanetakis/data/sound/genres/pop/pop.00035.wav\nProcessing 12 /Users/georgetzanetakis/data/sound/genres/pop/pop.00021.wav\nProcessing 13 /Users/georgetzanetakis/data/sound/genres/pop/pop.00020.wav\nProcessing 14 /Users/georgetzanetakis/data/sound/genres/pop/pop.00034.wav\nProcessing 15 /Users/georgetzanetakis/data/sound/genres/pop/pop.00008.wav\nProcessing 16 /Users/georgetzanetakis/data/sound/genres/pop/pop.00022.wav\nProcessing 17 /Users/georgetzanetakis/data/sound/genres/pop/pop.00036.wav\nProcessing 18 /Users/georgetzanetakis/data/sound/genres/pop/pop.00037.wav\nProcessing 19 /Users/georgetzanetakis/data/sound/genres/pop/pop.00023.wav\nProcessing 20 /Users/georgetzanetakis/data/sound/genres/pop/pop.00044.wav\nProcessing 21 /Users/georgetzanetakis/data/sound/genres/pop/pop.00050.wav\nProcessing 22 /Users/georgetzanetakis/data/sound/genres/pop/pop.00078.wav\nProcessing 23 /Users/georgetzanetakis/data/sound/genres/pop/pop.00087.wav\nProcessing 24 /Users/georgetzanetakis/data/sound/genres/pop/pop.00093.wav\nProcessing 25 /Users/georgetzanetakis/data/sound/genres/pop/pop.00092.wav\nProcessing 26 /Users/georgetzanetakis/data/sound/genres/pop/pop.00086.wav\nProcessing 27 /Users/georgetzanetakis/data/sound/genres/pop/pop.00079.wav\nProcessing 28 /Users/georgetzanetakis/data/sound/genres/pop/pop.00051.wav\nProcessing 29 /Users/georgetzanetakis/data/sound/genres/pop/pop.00045.wav\nProcessing 30 /Users/georgetzanetakis/data/sound/genres/pop/pop.00053.wav\nProcessing 31 /Users/georgetzanetakis/data/sound/genres/pop/pop.00047.wav\nProcessing 32 /Users/georgetzanetakis/data/sound/genres/pop/pop.00090.wav\nProcessing 33 /Users/georgetzanetakis/data/sound/genres/pop/pop.00084.wav\nProcessing 34 /Users/georgetzanetakis/data/sound/genres/pop/pop.00085.wav\nProcessing 35 /Users/georgetzanetakis/data/sound/genres/pop/pop.00091.wav\nProcessing 36 /Users/georgetzanetakis/data/sound/genres/pop/pop.00046.wav\nProcessing 37 /Users/georgetzanetakis/data/sound/genres/pop/pop.00052.wav\nProcessing 38 /Users/georgetzanetakis/data/sound/genres/pop/pop.00056.wav\nProcessing 39 /Users/georgetzanetakis/data/sound/genres/pop/pop.00042.wav\nProcessing 40 /Users/georgetzanetakis/data/sound/genres/pop/pop.00095.wav\nProcessing 41 /Users/georgetzanetakis/data/sound/genres/pop/pop.00081.wav\nProcessing 42 /Users/georgetzanetakis/data/sound/genres/pop/pop.00080.wav\nProcessing 43 /Users/georgetzanetakis/data/sound/genres/pop/pop.00094.wav\nProcessing 44 /Users/georgetzanetakis/data/sound/genres/pop/pop.00043.wav\nProcessing 45 /Users/georgetzanetakis/data/sound/genres/pop/pop.00057.wav\nProcessing 46 /Users/georgetzanetakis/data/sound/genres/pop/pop.00069.wav\nProcessing 47 
/Users/georgetzanetakis/data/sound/genres/pop/pop.00041.wav\nProcessing 48 /Users/georgetzanetakis/data/sound/genres/pop/pop.00055.wav\nProcessing 49 /Users/georgetzanetakis/data/sound/genres/pop/pop.00082.wav\nProcessing 50 /Users/georgetzanetakis/data/sound/genres/pop/pop.00096.wav\nProcessing 51 /Users/georgetzanetakis/data/sound/genres/pop/pop.00097.wav\nProcessing 52 /Users/georgetzanetakis/data/sound/genres/pop/pop.00083.wav\nProcessing 53 /Users/georgetzanetakis/data/sound/genres/pop/pop.00054.wav\nProcessing 54 /Users/georgetzanetakis/data/sound/genres/pop/pop.00040.wav\nProcessing 55 /Users/georgetzanetakis/data/sound/genres/pop/pop.00068.wav\nProcessing 56 /Users/georgetzanetakis/data/sound/genres/pop/pop.00065.wav\nProcessing 57 /Users/georgetzanetakis/data/sound/genres/pop/pop.00071.wav\nProcessing 58 /Users/georgetzanetakis/data/sound/genres/pop/pop.00059.wav\nProcessing 59 /Users/georgetzanetakis/data/sound/genres/pop/pop.00058.wav\nProcessing 60 /Users/georgetzanetakis/data/sound/genres/pop/pop.00070.wav\nProcessing 61 /Users/georgetzanetakis/data/sound/genres/pop/pop.00064.wav\nProcessing 62 /Users/georgetzanetakis/data/sound/genres/pop/pop.00072.wav\nProcessing 63 /Users/georgetzanetakis/data/sound/genres/pop/pop.00066.wav\nProcessing 64 /Users/georgetzanetakis/data/sound/genres/pop/pop.00099.wav\nProcessing 65 /Users/georgetzanetakis/data/sound/genres/pop/pop.00098.wav\nProcessing 66 /Users/georgetzanetakis/data/sound/genres/pop/pop.00067.wav\nProcessing 67 /Users/georgetzanetakis/data/sound/genres/pop/pop.00073.wav\nProcessing 68 /Users/georgetzanetakis/data/sound/genres/pop/pop.00077.wav\nProcessing 69 /Users/georgetzanetakis/data/sound/genres/pop/pop.00063.wav\nProcessing 70 /Users/georgetzanetakis/data/sound/genres/pop/pop.00088.wav\nProcessing 71 /Users/georgetzanetakis/data/sound/genres/pop/pop.00089.wav\nProcessing 72 /Users/georgetzanetakis/data/sound/genres/pop/pop.00062.wav\nProcessing 73 /Users/georgetzanetakis/data/sound/genres/pop/pop.00076.wav\nProcessing 74 /Users/georgetzanetakis/data/sound/genres/pop/pop.00048.wav\nProcessing 75 /Users/georgetzanetakis/data/sound/genres/pop/pop.00060.wav\nProcessing 76 /Users/georgetzanetakis/data/sound/genres/pop/pop.00074.wav\nProcessing 77 /Users/georgetzanetakis/data/sound/genres/pop/pop.00075.wav\nProcessing 78 /Users/georgetzanetakis/data/sound/genres/pop/pop.00061.wav\nProcessing 79 /Users/georgetzanetakis/data/sound/genres/pop/pop.00049.wav\nProcessing 80 /Users/georgetzanetakis/data/sound/genres/pop/pop.00006.wav\nProcessing 81 /Users/georgetzanetakis/data/sound/genres/pop/pop.00012.wav\nProcessing 82 /Users/georgetzanetakis/data/sound/genres/pop/pop.00013.wav\nProcessing 83 /Users/georgetzanetakis/data/sound/genres/pop/pop.00007.wav\nProcessing 84 /Users/georgetzanetakis/data/sound/genres/pop/pop.00011.wav\nProcessing 85 /Users/georgetzanetakis/data/sound/genres/pop/pop.00005.wav\nProcessing 86 /Users/georgetzanetakis/data/sound/genres/pop/pop.00039.wav\nProcessing 87 /Users/georgetzanetakis/data/sound/genres/pop/pop.00038.wav\nProcessing 88 /Users/georgetzanetakis/data/sound/genres/pop/pop.00004.wav\nProcessing 89 /Users/georgetzanetakis/data/sound/genres/pop/pop.00010.wav\nProcessing 90 /Users/georgetzanetakis/data/sound/genres/pop/pop.00028.wav\nProcessing 91 /Users/georgetzanetakis/data/sound/genres/pop/pop.00014.wav\nProcessing 92 /Users/georgetzanetakis/data/sound/genres/pop/pop.00000.wav\nProcessing 93 /Users/georgetzanetakis/data/sound/genres/pop/pop.00001.wav\nProcessing 94 
/Users/georgetzanetakis/data/sound/genres/pop/pop.00015.wav\nProcessing 95 /Users/georgetzanetakis/data/sound/genres/pop/pop.00029.wav\nProcessing 96 /Users/georgetzanetakis/data/sound/genres/pop/pop.00003.wav\nProcessing 97 /Users/georgetzanetakis/data/sound/genres/pop/pop.00017.wav\nProcessing 98 /Users/georgetzanetakis/data/sound/genres/pop/pop.00016.wav\nProcessing 99 /Users/georgetzanetakis/data/sound/genres/pop/pop.00002.wav\nProcessing 100 /Users/georgetzanetakis/data/sound/genres/metal/metal.00022.wav\nProcessing 101 /Users/georgetzanetakis/data/sound/genres/metal/metal.00036.wav\nProcessing 102 /Users/georgetzanetakis/data/sound/genres/metal/metal.00037.wav\nProcessing 103 /Users/georgetzanetakis/data/sound/genres/metal/metal.00023.wav\nProcessing 104 /Users/georgetzanetakis/data/sound/genres/metal/metal.00009.wav\nProcessing 105 /Users/georgetzanetakis/data/sound/genres/metal/metal.00035.wav\nProcessing 106 /Users/georgetzanetakis/data/sound/genres/metal/metal.00021.wav\nProcessing 107 /Users/georgetzanetakis/data/sound/genres/metal/metal.00020.wav\nProcessing 108 /Users/georgetzanetakis/data/sound/genres/metal/metal.00034.wav\nProcessing 109 /Users/georgetzanetakis/data/sound/genres/metal/metal.00008.wav\nProcessing 110 /Users/georgetzanetakis/data/sound/genres/metal/metal.00030.wav\nProcessing 111 /Users/georgetzanetakis/data/sound/genres/metal/metal.00024.wav\n"
]
],
[
[
"We can view the confusion matrix and classificaton report with micro and macro average retrieval metrics. You can observe from the confusion matrix that classical is the easiest genre to classify with 89/100 instances classified correctly. This is reflected on the corresponding f1-score of 0.89. Because the classes are balanced there is no difference between the macro and micro averaging. ",
"_____no_output_____"
]
],
[
[
"\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn import svm, metrics\nclf = svm.SVC(gamma='scale', kernel='linear')\n\n# perform 10-fold cross-validation to calculate accuracy and confusion matrix \npredicted = cross_val_predict(clf, audio_features, target, cv=10)\n\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(target, predicted))\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (clf, metrics.classification_report(target, predicted, target_names=genres)))",
"Confusion matrix:\n[[89 1 1 0 8 0 0 1 0 0]\n [ 2 61 4 0 4 8 11 5 4 1]\n [ 1 9 52 9 0 12 3 7 6 1]\n [ 0 2 9 62 0 3 3 16 1 4]\n [ 7 6 2 0 76 6 2 1 0 0]\n [ 0 7 17 4 3 41 17 4 1 6]\n [ 0 18 8 1 9 8 46 2 0 8]\n [ 1 8 9 22 1 5 7 40 6 1]\n [ 0 4 10 5 2 2 0 4 73 0]\n [ 0 2 4 7 0 9 1 1 0 76]]\nClassification report for classifier SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='scale', kernel='linear',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False):\n precision recall f1-score support\n\n classical 0.89 0.89 0.89 100\n country 0.52 0.61 0.56 100\n disco 0.45 0.52 0.48 100\n hiphop 0.56 0.62 0.59 100\n jazz 0.74 0.76 0.75 100\n rock 0.44 0.41 0.42 100\n blues 0.51 0.46 0.48 100\n reggae 0.49 0.40 0.44 100\n pop 0.80 0.73 0.76 100\n metal 0.78 0.76 0.77 100\n\n micro avg 0.62 0.62 0.62 1000\n macro avg 0.62 0.62 0.62 1000\nweighted avg 0.62 0.62 0.62 1000\n\n\n"
],
[
"# merge rock and pop to a single genre with more support (300 instances) than the other genres (100 instanes)\nnew_genres = ['classsical_jazz', 'country', 'disco', 'hiphop','rock','pop', 'blues','reggage', 'metal']\nnew_target = [0 if k==4 else k for k in target]\nprint(new_genres)",
"['classsical_jazz', 'country', 'disco', 'hiphop', 'rock', 'pop', 'blues', 'reggage', 'metal']\n"
],
[
"# perform 10-fold cross-validation to calculate accuracy and confusion matrix \npredicted = cross_val_predict(clf, audio_features, new_target, cv=10)\n\nprint(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(target, predicted))\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (clf, metrics.classification_report(new_target, predicted, target_names=new_genres)))",
"Confusion matrix:\n[[95 1 1 0 0 1 0 2 0 0]\n [ 7 59 3 0 0 9 11 6 4 1]\n [ 2 8 52 9 0 12 3 7 6 1]\n [ 0 2 9 62 0 3 3 16 1 4]\n [83 5 2 0 0 5 4 1 0 0]\n [ 7 6 17 4 0 36 17 4 3 6]\n [ 6 15 10 1 0 8 48 5 0 7]\n [ 2 7 10 23 0 4 7 40 6 1]\n [ 2 4 10 5 0 2 0 4 73 0]\n [ 0 2 4 7 0 9 1 1 0 76]]\nClassification report for classifier SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape='ovr', degree=3, gamma='scale', kernel='linear',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False):\n precision recall f1-score support\n\nclasssical_jazz 0.87 0.89 0.88 200\n country 0.54 0.59 0.56 100\n disco 0.44 0.52 0.48 100\n hiphop 0.56 0.62 0.59 100\n rock 0.40 0.36 0.38 100\n pop 0.51 0.48 0.49 100\n blues 0.47 0.40 0.43 100\n reggage 0.78 0.73 0.76 100\n metal 0.79 0.76 0.78 100\n\n micro avg 0.62 0.62 0.62 1000\n macro avg 0.60 0.59 0.59 1000\n weighted avg 0.62 0.62 0.62 1000\n\n\n"
]
],
[
[
"Notice that because of the unbalanced support of the different classes after merging classical and jazz the values of the micro-average f1-score and macro-average f1-score are different. ",
"_____no_output_____"
],
[
"## MIR tasks with time markers \n\nFinally let's look at a last usage of retrieval metrics (precision, recall and f-measure for tasks where the predicted output and the ground truth is a set of time markers corresponding to things like structure boundaries, \nbeat locations, downbeats etc. \n\nIn this case typically a time tolerance window is defined so that if a time marker is predicted near (within that window) a ground truth marker it is consider correct or relevant. \n\nOne can easily see how over-segmemntation will affect precision. For example consider a predicted beat track output that is twice as fast as the ground truth. All the ground truth markers will be matched (recall will be 1) \nbut only half of them will be correct (precision 0.5). Alternatively if the predicted beat track output is every two ground truth beats then the precision will be 1 (all the predicted beats are correct) but the recall will be 0.5 (half of the ground truth beat markers are predicted). \n\nAgain the f1-score provides a balance between precision and recall. As an example if the marker detection depends on some parameters, we can perform a grid search over their values and select the one that provides the best f1-measure. \n\nDepending on the application domain we might be more interested in precision or recall. For example in a automatic video generation system for music that takes into account segmentation boundaries precision might be more important as we want to make sure that if a boundary is detected it is a true boundary with high probability. At the samne time we might not care as much about recall as if a boundary is missed occassionally the video will still look ok. \n",
"_____no_output_____"
]
],
[
[
"# Let's consider a simple toy example for calculating the retrieval metrics based on a time markers \n# To make things simple the tolerance is +/-1 i.e an estimated time marker is consider correct if it is \n# within +/-1 of the reference ground truth. \n\nimport numpy as np\n\nref_times = np.array([0, 4, 8, 12, 16])\nest_times = np.array([0, 9, 11, 18])\n\n\n# precision is the number of \"correct\" estimated times divided by the number of estimates \nprecision = 0 \nfor e in est_times: \n diff = np.abs(e - ref_times)\n for d in diff: \n if (d <= 1): \n precision += 1\nprecision /= len(est_times)\nprint(precision)\n\n# Note that the precision is 0.75 because the 4th estimated time marker - 18 is not part of the ground truth \n\n\n# recall is the percentage of the reference time markers that are returned as estimates \nrecall = 0 \nfor r in ref_times:\n diff = np.abs(r - est_times)\n for d in diff: \n if (d <=1): \n recall += 1 \nrecall /= len(ref_times)\nprint(recall)\n\n# Not that the recall is 0.6 because out of the 5 reference time markers only 3 are estimated correctly \n# (within +/-1 of a reference marker) |\n\n\ndef f1_score(precision, recall): \n return 2 * ((precision * recall) / (precision+recall))\n\nf1 = f1_score(precision,recall)\nprint(\"F1 = %.2f\\n\" % f1)",
"0.75\n0.6\nF1 = 0.67\n\n"
],
[
"def segment_metrics(ref_times, est_times): \n precision = 0 \n for e in est_times: \n diff = np.abs(e - ref_times)\n for d in diff: \n if (d <= 1): \n precision += 1\n precision /= len(est_times)\n \n recall = 0 \n for r in ref_times:\n diff = np.abs(r - est_times)\n for d in diff: \n if (d <=1): \n recall += 1 \n recall /= len(ref_times)\n f1_score = 2 * ((precision * recall) / (precision+recall))\n return (precision, recall, f1_score)\n\nsegment_metrics(ref_times, est_times)",
"_____no_output_____"
],
[
"# high precision by undersegmentation \nref_times = np.array([0, 4, 8, 12, 16])\nest_times = np.array([9, 11])\nsegment_metrics(ref_times, est_times)",
"_____no_output_____"
],
[
"# high recall by oversegmentation \nref_times = np.array([0, 4, 8, 12, 16])\nest_times = np.array([0,2,4,6,8,10,12,14,16])\nsegment_metrics(ref_times, est_times)",
"_____no_output_____"
]
],
[
[
"## MIR tasks with pair-wise labels\n\nAnother use of retrieval metrics is when there are cluster/segment labels per frame as in chord detection or structure segmentation. This evaluation combines information from the precision of the boundaries as well as ensuring that corrresponding segments are marked by the same label. \n\nThe input consists of two sequences of labels (one corresponding to the reference ground truth and one \ncorresponding to the estimated labels) using the same time granularity - typically frames or beats. The vocabulary of the labels can be different between the two segmentations. The retrieval metrics are computed over all possible pairs with a pair considered valid/correct/relevant if both items share a label. \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"_____no_output_____"
],
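[
"A toy sketch of this pairwise computation is given below. The helper name `pairwise_frame_metrics` is made up for illustration and is not a standard library function.\n\n```python\nfrom itertools import combinations\n\ndef pairwise_frame_metrics(ref_labels, est_labels):\n    # A pair of frames 'agrees' in a labeling if both frames carry the same label\n    ref_pairs, est_pairs, both = 0, 0, 0\n    for i, j in combinations(range(len(ref_labels)), 2):\n        same_ref = ref_labels[i] == ref_labels[j]\n        same_est = est_labels[i] == est_labels[j]\n        ref_pairs += same_ref\n        est_pairs += same_est\n        both += (same_ref and same_est)\n    precision = both / float(est_pairs)  # estimated same-label pairs that also agree in the reference\n    recall = both / float(ref_pairs)     # reference same-label pairs recovered by the estimate\n    f1 = 2 * precision * recall / (precision + recall)\n    return precision, recall, f1\n\n# toy frame-level labelings (the two label vocabularies can differ)\nref = ['A', 'A', 'A', 'B', 'B', 'A', 'A']\nest = ['x', 'x', 'y', 'y', 'y', 'x', 'x']\nprint(pairwise_frame_metrics(ref, est))\n```",
"_____no_output_____"
]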
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e74554a4835d068f9384f2b505c26e15fa4ee89c | 901,143 | ipynb | Jupyter Notebook | time_series_course/Time_Series_Modeling_Ch1_Part_1.ipynb | looyclark/blog_material | 7410858d3bbb4a1e9287f53bf109531f1f0aa951 | [
"MIT"
] | 1 | 2021-06-20T23:04:52.000Z | 2021-06-20T23:04:52.000Z | time_series_course/Time_Series_Modeling_Ch1_Part_1.ipynb | james-montgomery-blog/blog_material | 7410858d3bbb4a1e9287f53bf109531f1f0aa951 | [
"MIT"
] | null | null | null | time_series_course/Time_Series_Modeling_Ch1_Part_1.ipynb | james-montgomery-blog/blog_material | 7410858d3bbb4a1e9287f53bf109531f1f0aa951 | [
"MIT"
] | null | null | null | 687.370709 | 73,638 | 0.936448 | [
[
[
"### Who is this course for?\n\nWe have designed this course for a technical audience with basic coding and statistical skill sets who are not already familiar with time series analysis. Readers who struggle to code in python are advised to take an introductory python coding course before going on. Readers without some background in statistics/machine learning may find the later sections of this course more challenging, so we advise that you be at least somewhat familiar with linear regression and neural network architectures before attempting the **later sections** of this course. However, you by no means need a Masters or Ph.D. to tackle this course!\n\nIf you like this tutorial, feel free to check out my course/blog posts at [jamesmontgomery.us](http://jamesmontgomery.us).",
"_____no_output_____"
],
[
"### What is a \"time series\"?\n\nA traditional machine learning dataset is a collection of observations. While these observations may be collected over time, these samples are equally weighted when training your model. Their order and relationship in time are largely ignored. To incorporate time, you may only train your model on a specific window of time, such as only the last year, to account for \"concept drift\" or \"general trends\" in the data. But this accounts for only minor temporal dynamics in your set. **Time series** datasets add an *explicit order* of observations over time. \n\nAs an example of each kind of dataset, let us compare two hypothetical modeling problems.\n",
"_____no_output_____"
],
[
"\n###### Identity Fraud \nSome fraudsters may try to apply for a credit card using a fake identity. We can use the information from credit card applications to identify which applicants might be fraudsters. Each observation can be treated as independant from each other (this is not quite true as multiple fake identities could be generated by a single fraudster and so follow some pattern of generation). Based on the data below, it's obvious that \"Frank\" is the fraudster (look at his social security number!). **This is obviously not a time series problem.**",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\ndf_normal = pd.DataFrame.from_dict({'Name':['Frank','Herbert','Lucy'],\n 'Social Security Number':[123456789,132985867,918276037]})\nprint df_normal",
" Name Social Security Number\n0 Frank 123456789\n1 Herbert 132985867\n2 Lucy 918276037\n"
]
],
[
[
"###### Website Traffic\n\nAll websites need to be hosted on physical servers somewhere. It costs money to rent these servers, and the more servers, the more money! If we can forecast when website traffic will be high, requiring more servers, and low, requiring fewer servers, then we could only rent the number of servers we need at the moment saving costs. [Note: Ignore AWS, we want to manage our own servers!] We might try predicting website traffic volumes based on some time dependency. In the data below we can clearly see that web traffic cycles in 6 hour segments (with peaks at hours three and nine). This might let us forcast our server usage for the next few hours based on traffic in the last few hours! In this case our forecast is dependant on the order of the last few observations. ",
"_____no_output_____"
]
],
[
[
"df_timeseries = pd.DataFrame.from_dict({'Hour':[1,2,3,4,5,6,7,8,9,10,11,12],\n 'Web Visitors':[100,150,300,200,100,50,120,180,250,200,130,75]})\nprint df_timeseries",
" Hour Web Visitors\n0 1 100\n1 2 150\n2 3 300\n3 4 200\n4 5 100\n5 6 50\n6 7 120\n7 8 180\n8 9 250\n9 10 200\n10 11 130\n11 12 75\n"
]
],
[
[
"#### Author's Note\n\nI'll somtimes include author's notes in my tutorials. These are **optional** sections that are interesting, but are either more advanced that some readers might care to dig into or are of an opinionated anture and so not canon. \n\nData with an inherent order of observations present a number of unique problems. We will discuss how to get around many of these issues throughout the course. Two great examples of problems inherent to series data are **future bleed** and **k-fold validation**. \n\n*Future bleed* is the situation where you are trying to predict or forecast an event in the future and accidently include data from that future in your training data. This is cheating since you would never have data from the future when you actually go to use your model. It is important that you properly clean your data so that you are not using data from future time steps to make predictions. \n\n*[K-fold validation](http://statweb.stanford.edu/~tibs/sta306bfiles/cvwrong.pdf)* is a technique often used to find optimal parameter values for a model, such as subset size. I will assume that the audience is already familiar with k-fold valdiation at a high level (if not, definitely read the link). Time-series (or other intrinsically ordered data) can be problematic for cross-validation because the strict ordering might break when the data is divided or \"folded\".\n\nYou need to break up data whose information is held within the order of the data itself. \n\nAn approach that is sometimes more principled for time series is **forward chaining**, where your procedure would be something like this:\n\n* fold 1 : training [1], test [2]\n* fold 2 : training [1 2], test [3]\n* fold 3 : training [1 2 3], test [4]\n* fold 4 : training [1 2 3 4], test [5]\n* fold 5 : training [1 2 3 4 5], test [6]\n\nThis more accurately models the situation you will see at prediction time: modeling on past data and predicting on forward-looking data. Do dig in more on this methodology, click [HERE](https://robjhyndman.com/hyndsight/tscv/).\n\nYou should keep these kinds of considerations in mind as we move through the course.",
"_____no_output_____"
],
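[
"A minimal sketch of this forward chaining scheme, assuming scikit-learn is available (its `TimeSeriesSplit` implements exactly this kind of expanding-window split; the indices below are 0-based):\n\n```python\nimport numpy as np\nfrom sklearn.model_selection import TimeSeriesSplit\n\n# six ordered observations\nX = np.arange(6).reshape(-1, 1)\ny = np.arange(6)\n\n# five expanding-window folds: train on everything seen so far, test on the next point\ntscv = TimeSeriesSplit(n_splits=5)\nfor train_idx, test_idx in tscv.split(X):\n    print('training: {} test: {}'.format(train_idx, test_idx))\n```",
"_____no_output_____"
],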
[
"###### Types of time series problems\n\nTime series modeling can be broken down into a large number of different niche areas. In fact, not all of these areas strictly involve time at all. I have broken time series analysis down into what I believe are the overarching themes of the field:\n\n* **Time series forecasting**: Models used for prediction \n * Example: \"What will the price of an airplane ticket be in May of next year?\"\n \n \n* **Time series analytics**: Models used for induction/extracting insights \n * Example: \"What seasonal trends affect the price of an airplane ticket?\"\n \n \n* **Anomaly detection**: Models used to identify non-normal behavior\n * Example: \"Based on the last few credit card transactions Frank made, is it suspicious that he just made a large purchase at a Casino?\"\n \n \n* **Time sensitivity**: Models that adapt over time but do not model time\n * Example: \"Knowing that fraudsters adapt their behavior to defeat our models, can we build a model that automatically retrains itself as time goes on?\"\n \n \n* **Time as a feature**: Models that include time as a feature\n * Example: \"Can we use the average time a web visitor took to fill out a field on our website to predict if they are human or a bot?\"\n \n \n* **Sequence techniques**: Miscelaneous problems touching on fields like natural language processing and speech analytics\n * Example: \"Can we predict what word a texter will type next based on the words they ahve already typed?\"",
"_____no_output_____"
],
[
"We will not be covering all of these subject areas. Sequence techniques are covered to death in courses on natural language processing and there is no need to cover them again here. I (James) may come out with a seperate course in the future on this topic specifically. We will also skip time as a feature as it is very different from other time series problems (some might argue that it is not a series problem at all!). We will not explicitly cover time series analytics, but many of the same principles for time series forecasting still apply. In fact, most intro to time series analytics courses are almost indistinguishable from introductory time series forecasting courses. \n\nWhile we don't spend time covering the differences here, it is worth noting that there are important differences between building a predictive vs inductive model (don't remember the difference between prediction and induction? [I've got you covered!](https://historiesofecology.blogspot.com/2012/12/induction-deduction-and-prediction.html)). Inductive models are often over simplified for interpretability. More experienced machine learning engineers might also note that you might also choose to remove [suppressor variables](http://documentation.statsoft.com/STATISTICAHelp.aspx?path=glossary/GlossaryTwo/S/SuppressorVariable) from an inductive model but not from a predictive model. There are whole courses built around causal modeling that explain the reasoning around including or removing supressor variables from inductive models.\n\nWe will only cover **time series forecasting, basic anaomaly detection, and some time sensitivity methodologies!** Believe me, those topics alone will be quite enough to get started on your time series journey!\n\nFor a more comprehensive overview/table of centents, see the introductory section of this course. ",
"_____no_output_____"
],
[
"As a quick reminder, we assume that those taking this course have basic familiarity with data science as a discipline. We will not cover the basic steps of the typical models build (often called the data science control cycle):\n\n* Define the problem statment\n* Explore the data\n* Feature engineering\n* Choose and fit a model\n* Evaluate and use the model\n\nWe are here to learn about time series, and we will stick to that topic alone.",
"_____no_output_____"
],
[
"### Deconstructing a time series\n\n###### The four basic components\n\nWe can abstract any series into systematic and non-systematic features. **Systematic features** have consistency or recurrence and can be described and modelled. **Non-systematic features** can not be modelled directly. For instance, a one-time, black-swan event such as an unexpected technical failure might impact the time series we are trying to model, but since this is an out-of-the-blue, unexpected, one-time event, we can not hope to add it directly into our model. \n\nAll systematic features of a time series can be broken down into four basic components:\n\n* **Level:** The average value of the series\n* **Trend:** The general increasing or decreasing behavior of the series\n* **Seasonality:** Repeating patterns or cycles in the series\n* **Noise:** Unexplained variability\n\nThese four components are often called by other names. Some researchers will combine level and trend into one feature. Some researchers call seasonality by the name cycle. It is all just semanitics. If you grasp the core concepts these changes in jargon should not trip you up.",
"_____no_output_____"
],
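[
"As a small synthetic illustration (made-up numbers, not real data), the four components can be combined into a toy series. Here they are combined additively, an assumption we will revisit shortly.\n\n```python\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.arange(200)\nlevel = 10.0                                      # average value of the series\ntrend = 0.05 * t                                  # general upward drift\nseasonality = 2.0 * np.sin(2 * np.pi * t / 25.0)  # repeating 25-step cycle\nnoise = np.random.normal(0, 0.5, size=t.shape)    # unexplained variability\n\ntoy_series = level + trend + seasonality + noise\n\nplt.figure(figsize=(10, 4))\nplt.plot(t, toy_series)\nplt.title('Synthetic series: level + trend + seasonality + noise')\nplt.show()\n```",
"_____no_output_____"
],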
[
"It is worth noting that *noise* can come from different sources. Noise could come from some naturally stochastic process contributing to the generation of the data we collect, it could come from measurement errors, or it could even come from exogenous variables.\n\nWe will refer to variables \"outside\" the model as exogenous variables. For example, rainfall is exogenous to the causal system constituting the process of farming and crop output. There are causal factors that determine the level of rainfall. So rainfall is endogenous to a weather model—but these factors are not themselves part of the causal model we use to explain the level of crop output. Said another way, there are no variables in our model that explain rainfall, but rainfall could be used to explain other variables in the system. (Example from Encyclopedia of Social Science Research Methods)\n\nAcording to Daniel Little, University of Michigan-Dearborn, \"a variable $x_j$ is said to be endogenous within the causal model $M$ if its value is determined or influenced by one or more of the independent variables $X$ (excluding itself).\"\n\nThis will become important further into chapter one of the course.",
"_____no_output_____"
],
[
"## Try a Time Series\n\nNow let us look at some basic time series and identify the level, trend, seasonality, and noise. Look at this first series (I know, it's super simple, bear with me) what is the level? trend? seasonality? noise?",
"_____no_output_____"
]
],
[
[
"# Setting up program with necessary tools and visuals\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style(\"white\")",
"_____no_output_____"
],
[
"numdays = 30\ndate_list = [datetime.datetime.today() - datetime.timedelta(days=x) for x in range(0, numdays)]\nticket_prices_list = numdays*[40.95]\n\nplt.figure(figsize=(10,4))\nplt.plot(date_list, ticket_prices_list)\nplt.title('Price of Carnival Tickets By Day')\nplt.ylabel('Carnival Ticket Price')\nplt.xlabel('Date (Day)')\nplt.ylim(0,60)\nplt.show()",
"_____no_output_____"
]
],
[
[
"It should be obvious that the level is $40.95. This is easy to identify as there is no trend. The series does not generally increase or decrease. We also see no patterns or cycles. This means that there is no seasonality. Finally, we see no noise (unexplained variability) in the data. In fact, there is no variability at all!\n\nOk, this is completely unreasonable! What carnival sets ticket prices like this? In reality, we might see the tickets increase in price. Word might get out about how great the carnival is and demand for tickets will increase over time. Let's try describing that series. ",
"_____no_output_____"
]
],
[
[
"numdays = 30\ndate_list = [datetime.datetime.today() - datetime.timedelta(days=x) for x in range(0, numdays)]\nticket_prices_list = np.linspace(40.95,47.82,numdays)[::-1]\n\nplt.figure(figsize=(10,4))\nplt.plot(date_list, ticket_prices_list)\nplt.title('Price of Carnival Tickets By Day')\nplt.ylabel('Carnival Ticket Price')\nplt.xlabel('Date (Day)')\nplt.ylim(0,60)\nplt.show()",
"_____no_output_____"
],
[
"print \"level = mean = {}\".format( np.mean(ticket_prices_list) )\nprint \"trend = slope = {}\".format( (ticket_prices_list[0] - ticket_prices_list[1]) / 1. )",
"level = mean = 44.385\ntrend = slope = 0.236896551724\n"
]
],
[
[
"We first start with the level of the series which is the series average: level = $44.39. We can then describe the general increase in the series over time as the slope of our line: trend = .24. We see no repeating patterns nor unexplained variability, so there is no seasonality or noise.",
"_____no_output_____"
],
[
"## Round Two, FIGHT\n\nOk, let's put on our big kid shorts and look at some real data. We will start with the [Airline Passengers](https://datamarket.com/data/set/22u3/international-airline-passengers-monthly-totals-in-thousands-jan-49-dec-60#!ds=22u3&display=line) dataset which describes the total number of airline passengers over a period of time. The units are a count of the number of airline passengers in thousands. There are 144 monthly observations from 1949 to 1960.\n\nLet's take a look at our data.",
"_____no_output_____"
]
],
[
[
"series = pd.Series.from_csv('./AirPassengers.csv', header=0)\n\nplt.figure(figsize=(10,4))\nseries.plot()\nplt.ylabel('# Passengers')\nplt.xlabel('Date (Month)')\nplt.title('Volume of US Airline Passengers by Month')\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can definitely see that there is a trend, seasonality, and some noise in this data based on the similarly shaped, repeated portions of the line! How can we get a clearer picture of each core component?\n\nFirst, we need to make an assumption about how these four components combine to create the series. We typically make one of two basic assumptions here.\n\n1) We could assume that the series is an additive combination of the four components:\n\n$y(t)$ = Level + Trend + Seasonality + Noise\n\nAn additive model is linear where changes over time are consistently made by the same amount (think linear derivative). A linear trend is a straight line. A linear seasonality has the same frequency (width of cycles) and amplitude (height of cycles).\n\n2) We could assume that the series is a multiplicative combination of the four components:\n\n$y(t)$ = Level $*$ Trend $*$ Seasonality $*$ Noise\n\nA multiplicative model is nonlinear, such as quadratic or exponential. Changes increase or decrease over time. A nonlinear trend is a curved line. A non-linear seasonality has an increasing or decreasing frequency and/or amplitude over time.\n\n*So which assumption should we make for the airline data?* Well, it looks to me as if the amplitude (height) of the seasonal peaks in the data increase as time goes on. That is a good indicator of a multiplicative relationship. It is worth noting that the relationship between the components might be some mix of additive and multiplicative, but for now we will stick to the two simple assumptions above.\n\nWe will use the package `statsmodels` and function [`seasonal_decompose`](http://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.seasonal_decompose.html) to do the actual heavy lifting for our decomposition. ",
"_____no_output_____"
]
],
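[
[
"To make the additive vs. multiplicative distinction concrete, here is a tiny synthetic sketch (made-up components, not the airline data) that builds one series of each kind from the same trend and seasonal pieces. Notice how the multiplicative version's seasonal swings grow along with the level:\n\n```python\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.arange(144) # twelve 'years' of monthly steps\nlevel = 100.0\ntrend = 0.5 * t # a slowly rising trend\nseasonal = np.sin(2 * np.pi * t / 12) # a 12-step cycle\n\nadditive = level + trend + 10 * seasonal # constant seasonal amplitude\nmultiplicative = (level + trend) * (1 + 0.1 * seasonal) # swings grow with the level\n\nplt.figure(figsize=(10, 4))\nplt.plot(additive, label='additive')\nplt.plot(multiplicative, label='multiplicative')\nplt.legend()\nplt.show()\n```",
"_____no_output_____"
]
],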
[
[
"from statsmodels.tsa.seasonal import seasonal_decompose",
"/Users/hpf505/anaconda/lib/python2.7/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n from pandas.core import datetools\n"
],
[
"result = seasonal_decompose(series, model='multiplicative')\nresult.plot()\nplt.show()",
"_____no_output_____"
]
],
[
[
"COOL! Now we have a rough estiamte of the trend, seasonality, and noise in our time series. What now? Typically we use this kind of demcomposition for **time series analysis** (induction). \n\nFor example, we could take a close look at the seasonality to try to identify some interesting insights.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,4))\nplt.plot(result.seasonal[:24])\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Observations and Theories**\n\nFrom this plot, it looks like there is a very clear seasonal trend in passenger volume throughout the year. Peak travel time is July/August (vacaton season). It looks like passenger volume dips around the holidays (perhaps people want to be with their families rather than travel?). ",
"_____no_output_____"
],
[
"###### Assumptions about the four components\n\nWhen we get to time series forecasting, our models will often make assumptions about the characteristics of these four components. We will almost always to answer the questions:\n\n* Is the trend stationary?\n* Is the noise homoscedastic?\n* Are there discontinuities? \n* Is the seasonality only local?\n\nLet us try to understand these questions and the assumptions they support. The first two questions have to do with *Stationarity*. A **stationary time series** is one whose statistical properties such as mean, variance, autocorrelation (we will address correlation in a bit), etc. are all constant over time. Most statistical forecasting methods are based on the assumption that the time series can be rendered approximately stationary (i.e., \"stationarized\") through the use of mathematical transformations. A stationarized series is relatively easy to predict: you simply predict that its statistical properties will be the same in the future as they have been in the past! \n\nA stationary trend is where the trend (a function of time alone) is flat. There is no general decrease or increase in the series. The series might have seasonality and noise but no trend. An simple example is shown below.\n\n**Author's Note**\n\nStrictly speaking, a perfect sine wave is NOT stationary. It only makes sense to apply the term stationary to a series produced by a process with at least one random variable. Sine functions are deterministic and not stochastic. However, we are machine learning engineers and not statisticians, so I'll strectch the technical defninition of a stationary series a tiny bit for the sake of education/simplicity. If this really bugs you, feel free to add a random noise term or random phase to the sine function to make it 'properly' stationary. ",
"_____no_output_____"
]
],
[
[
"trend = np.sin( np.linspace(-20*np.pi, 20*np.pi, 1000) ) * 20 + 100\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Stationary Trend')\nplt.show()",
"_____no_output_____"
]
],
[
[
"A non-stationary trend is anything else! See the example below.",
"_____no_output_____"
]
],
[
[
"trend = trend + np.linspace(0, 30, 1000)\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Non-Stationary Trend')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Non-stationary trends do not need to be linear. I've included a non-linear trend example to demonstrate this.",
"_____no_output_____"
]
],
[
[
"trend = trend + np.exp( np.linspace(0, 5, 1000) )\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Non-Stationary Trend')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Many models assume that a series is stationary. [KK FIXME: Why?] We can apply transformations and filters on the series to make the series conditionally stationary. The most popular method for doing this is called *differencing*. You will often see series described as 'Difference Stationary' (stationary only after differencing).\n\n**Differencing** is basically removing a series' dependance on time (aka we de-trend the series). It can also be used to remove seasonality. \n\nDifferencing is a very simple operation, we basically create a new series based on the differences between observations in the original series:\n\n$difference(t)$ = $observation(t)$ - $observation(t-1)$\n\nTaking the difference between consecutive observations is called a lag-1 difference. For time series with a seasonal component, the lag may be expected to be the period (width) of the seasonality.\n\nFirst Order Differencing:\n\n$y'_t=y_t-y_{t-1}$\n\nSecond Order Differencing:\n\n$y''_t=y'_t-y'_{t-1}=(y_t-y_{t-1})-(y_{t-1}-y_{t-2})=y_t-2y_{t-1}+y_{t-2}$\n\nSecond order differencing is the change in the changes in the original dataset (think of the relationship between distance, velocity, and acceleration). We rarely need to do second order differencing.\n\nTemporal structure may still exist after performing a differencing operation, such as in the case of a nonlinear trend. As such, the process of differencing can be repeated more than once until all temporal dependence has been removed. The number of times that differencing is performed is called the **difference order**.",
"_____no_output_____"
],
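[
"As a quick illustration of the formulas above (a sketch using a tiny made-up series, not our real data), first- and second-order differences can be taken in pandas by applying `diff()` once or twice:\n\n```python\nimport pandas as pd\n\ns = pd.Series([3, 5, 9, 15, 23]) # made-up series\nfirst_order = s.diff() # y'_t = y_t - y_{t-1}\nsecond_order = s.diff().diff() # y''_t = y_t - 2*y_{t-1} + y_{t-2}\nprint(first_order.tolist()) # [nan, 2.0, 4.0, 6.0, 8.0]\nprint(second_order.tolist()) # [nan, nan, 2.0, 2.0, 2.0]\n```",
"_____no_output_____"
],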
[
"## Ready for some practice?\n\nTo practice differencing, we're going to use a dataset on shampoo sales. The original data is from Makridakis, Wheelwright, and Hyndman (1998) and can be found [here](https://datamarket.com/data/set/22r0/sales-of-shampoo-over-a-three-year-period#!ds=22r0&display=line). ",
"_____no_output_____"
]
],
[
[
"series = pd.read_csv('Shampoo.csv', header=0, index_col=0, squeeze=True)\n\nplt.figure(figsize=(10,4))\nplt.ylabel('Shampoo Sales')\nplt.xlabel('Date (Month)')\nplt.title('Shampoo Sales Over Time')\nseries.plot()\nplt.show()",
"_____no_output_____"
]
],
[
[
"We can rather simply write our own first order differencing function. Let's try that!",
"_____no_output_____"
]
],
[
[
"# create a differenced series\ndef difference(dataset, interval=1):\n diff = list()\n for i in range(interval, len(dataset)):\n value = dataset[i] - dataset[i - interval]\n diff.append(value)\n return pd.Series(diff)\n\nX = series.values\ndiff = difference(X)\n\nplt.figure(figsize=(10,4))\nplt.ylabel('Shampoo Sales')\nplt.xlabel('Date (Month)')\nplt.title('Shampoo Sales Over Time')\nplt.plot(diff)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Cool, this no looks just like any old stationary trend! It is worth noting that `pandas` comes with a function to implicitly do differencing. I show an example of this below.",
"_____no_output_____"
]
],
[
[
"diff = series.diff()\n\nplt.figure(figsize=(10,4))\nplt.ylabel('Shampoo Sales')\nplt.xlabel('Date (Month)')\nplt.title('Shampoo Sales Over Time')\ndiff.plot()\nplt.show()",
"_____no_output_____"
]
],
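[
[
"If you want something more formal than eyeballing the differenced plot, one common check (a sketch, assuming the shampoo `series` loaded above) is the Augmented Dickey-Fuller test from `statsmodels`; a small p-value is evidence that the differenced series is stationary. Note that `diff()` leaves a leading NaN that must be dropped first:\n\n```python\nfrom statsmodels.tsa.stattools import adfuller\n\nresult = adfuller(series.diff().dropna())\nprint('ADF statistic: {}'.format(result[0]))\nprint('p-value: {}'.format(result[1]))\n```",
"_____no_output_____"
]
],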
[
[
"Now remember, for a series to be stationary, all of its attributes must be stationary, not just the trend. A good example of this are heteroscedastic series. These are series where the amount of noise is not constant over time, A homoscedastic series has equal noise over time (aka the noise is stationary). An example of a heteroscedastic and homoscedastic series are shown below.",
"_____no_output_____"
]
],
[
[
"trend = 20.42*np.ones(3000) + np.random.normal(1,2,3000)\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Homoscedastic Series')\nplt.show()\n\ntrend = 20.42*np.ones(3000) + np.linspace(1,10,3000)*np.random.normal(1,2,3000)\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Heteroscedastic Series')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Changes in variance can be hard to deal with and often throw a wrench into our models. We see in many types of customer data, for instance, that variance is proportional to signal volume. This means that the variance in our signal will increase as the amplitude of the signal increases.\n\nWe can see an example of this below where noise is proportional to signal amplitude.",
"_____no_output_____"
]
],
[
[
"trend = np.sin( np.linspace(-20*np.pi, 20*np.pi, 1000) ) * 20 + 100\ntrend = trend + trend*np.random.normal(.5,3,1000)\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Heteroscedastic Series')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Another nasty feature we might run across are discontinuities. How we handle a discontinuity depends entirely on what caused the discontinuity. Was there a fundamental change in the series? Is the discontinuity caused by missing data? Do we expect more discontinuities in the future? An example of a discontinuity is shown below. ",
"_____no_output_____"
]
],
[
[
"trend = np.sin( np.linspace(-20*np.pi, 20*np.pi, 1000) ) * 20 + 100\ntrend = trend + np.concatenate([np.zeros(500),20*np.ones(500)])\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of a Discontinuity')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Finally, we want to know if the seasonality is 'local'? Basicaly, is there only one seasonal trend? Perhaps the series has a yearly and monthly period! An example of a series with multiple cycles/seasons is shown below. ",
"_____no_output_____"
]
],
[
[
"trend = np.sin( np.linspace(-20*np.pi, 20*np.pi, 1000) ) * 20 + 100\ntrend = trend + np.sin( .1*np.linspace(-20*np.pi, 20*np.pi, 1000) ) * 20 + 100\nplt.figure(figsize=(10, 4))\nplt.plot(trend)\nplt.title('Example of Multiple Cycles')\nplt.show()",
"_____no_output_____"
]
],
[
[
"How can we try to understand the cyclical patterns in our data? Well, there are a couple different ways to do this... ",
"_____no_output_____"
],
[
"## Intro to Signal Processing\n\nWe will start with the **Fourier transform**. Fourier transforms are amazing (as are their more general form, the Laplacian transform). When I did physics research in a past life, Fourier transforms were by best friend. These days I use them for a myriad of signal processing tasks such as building voice recognition models and speech to text models. So what is this magic transform?\n\nI am not going to dive into the math behind FFT, but I encourage you to do so. Here is a good introductory [resource](https://betterexplained.com/articles/an-interactive-guide-to-the-fourier-transform/). FFT is useful for so many signal processing tasks that it is almost a must know. It is also worth checking out [power spectral densities](https://www.youtube.com/watch?v=-Nt0FaofWL4) (PSDs). PSDs can be a [more powerful](https://dsp.stackexchange.com/questions/24780/power-spectral-density-vs-fft-bin-magnitude?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa) way of analysing signals in the frequency domain, all puns intended.\n\n*High level explanation of Fourier Transforms*: they take signals in the time domain and convert them to the frequency domain. \n\nBelow is a great example of this. I have convolved two signals. Each is a sine function with a different period. When we convert to the frequency domain this becomes blaringly obvious! Not only can we see two distinct periods, but we also see their values.\n\nWe are going to use the [`scipy`](https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.fft.html) implementation of the Fast Fourier Transform (FFT). I will say that I usually use [`librosa`](https://github.com/librosa/librosa) for audio projects. ",
"_____no_output_____"
]
],
[
[
"from scipy.fftpack import fft",
"_____no_output_____"
],
[
"# Number of sample points\nN = 600\n# sample spacing\nT = 1.0 / 800.0\nx = np.linspace(0.0, N*T, N)\ny = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)\nyf = fft(y)\nxf = np.linspace(0.0, 1.0/(2.0*T), N//2)\n\nplt.figure(figsize=(10, 4))\nplt.title('Original Signal in Time Domain')\nplt.ylabel('values')\nplt.xlabel('time')\nplt.plot(y)\nplt.show()\n\nplt.figure(figsize=(10, 4))\nplt.plot(xf, 2.0/N * np.abs(yf[0:N//2]))\nplt.title(' Signal in Frequency Domain')\nplt.xlabel('frequency (periods)')\nplt.show()",
"_____no_output_____"
]
],
[
[
"The periodogram presents the two sine functions clearly. This is the [FFT of the output of the autocorrelation function](https://www.mathworks.com/matlabcentral/answers/56001-difference-b-w-periodogram-and-square-of-ft-of-signal-method-to-calculate-the-psd). We will address autocorrelation in a bit. For now, just think of the periodogram as calculating the significance of different frequencies in time-series data to identify any intrinsic periodic signals. A periodogram is similar to the Fourier Transform, but is optimized for unevenly time-sampled data, and for different shapes in periodic signals. To understand more about FFT, Periodograms, and PSD, try reading through this [article](https://lectures.quantecon.org/py/estspec.html).\n\nFor now, FFT is good enough for us. ",
"_____no_output_____"
],
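[
"For completeness, here is a minimal sketch (reusing the `y` and `T` defined in the FFT cell above) of how `scipy.signal.periodogram` gives a comparable frequency-domain view, this time as a power spectral density:\n\n```python\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\nf, Pxx = signal.periodogram(y, fs=1.0/T) # fs = 800 samples per unit time, as above\nplt.figure(figsize=(10, 4))\nplt.semilogy(f, Pxx)\nplt.xlabel('frequency')\nplt.ylabel('power spectral density')\nplt.show()\n```",
"_____no_output_____"
],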
[
"## Autocorrelation\n\nOur last stop deconstructing time series will be a bit of information theory. Hang in there, as soon as we are done here we can get to the modeling!\n\nNow presumably the data points in the series you are interested in relate to each other with respect to time. More often than not the neighboring points in a time series are more similar to each other than points in the distant past (although that is not always the case). \n\nWe can think about high [correlation](https://en.wikipedia.org/wiki/Correlation_and_dependence) between points like this: If two points are highly correlated, then information about one point provides some information about the other point. The higher the correlation, the more information. Remember, *high correlation does not mean causation*. If you see correlation between points try to understand what real-world relationship might produce such correlation. Try to consider any confounding variables, randomness in the set, and/or [suppressor variables](http://documentation.statsoft.com/STATISTICAHelp.aspx?path=glossary/GlossaryTwo/S/SuppressorVariable) at play.\n\nThe measure of how points correlate to each other within a series is called *autocorrelation*. **Autocorrelation** is correlation measured between the time series and itself lagged by an amount of time. It is a special case of cross correlation where you are comparing a series to itself rather than comparing two independant series.\n\n$$\nAutoCorr = \\frac{E[(X_{t} - \\mu_{t})(X_{t-n} - \\mu_{t-n})]}{\\sigma_{t}\\sigma_{t-n}}\n$$ \n\n**where:** <br/>\n$\\mu$ = mean <br/>\n$\\sigma = $ variance <br/>\n$E(X)$ = expected value of X <br/>\n$X_{t}$ = value at time step t <br/>\n$X_{t-n}$ = value at time step (t - n) where n is the \"lag\" \n\nNOTE: Autocorrelation is a special case of cross correlation and so suffers (as should be apparent from the above equation) from the [assumption that there is a linear relationship](https://stats.stackexchange.com/questions/23308/cross-correlation-vs-mutual-information?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa) between the series being considered. If we feel that this is a [poor assumption](https://stats.stackexchange.com/questions/81659/mutual-information-versus-correlation?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa), we can look at the [mutual information](https://www.pks.mpg.de/~tisean/Tisean_3.0.1/docs/chaospaper/node8.html) within the series\n\nLet's compute autocorrelation within the airline data using `statsmodels`. Compare a lag of 0 (comparing an observation with itself) with a lag of 1 (comparing adjacent observations). What do you see?",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.stattools import acf\n\nseries = pd.Series.from_csv('./AirPassengers.csv', header=0)\n\n# plot\nplt.figure(figsize=(20,5))\nseries.plot()\nplt.ylabel(\"Passenger Volums\")\nplt.xlabel(\"Time (Months)\")\nplt.title(\"Airline Passenger Volumes\", fontsize=20)\nplt.show()\n\n# ACF with lags 1,2,3,4\nacf_7 = acf(series, nlags=7)\n\nfor i, val in enumerate(acf_7):\n print('Autocorrelation at lag {}: {}'.format(i, val)) ",
"_____no_output_____"
]
],
[
[
"Let's try calculating the autocorrelation within the airline dataset over a wide range of lags. We can plot autocorrelation by lag to make the information more easily digestible. We call this an autocorrelation plot. This plot gives us information about the relationship between observations in the data set and observations n steps in the past (n representing the lag). Can you tease out a seasonal trend in our call center data?",
"_____no_output_____"
]
],
[
[
"from statsmodels.graphics.tsaplots import plot_acf\n\n# ACF plot\nfig, ax = plt.subplots(figsize=(20,5))\nplt.xlabel('lag')\nplt.ylabel('correlation coefficient')\n_ = plot_acf(series, lags=100, ax=ax)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Ok, so how do we read this? Each bar represents the magnitude of the correlation of points seperated by the lag on the x axis. We notice that points at zero lag are exactly correlated...because we are comparing a point to itself! We then notice that the auto-correlation of points at different lags takes on a cyclical pattern. It looks like points at lag 12 are very correlated, this is a good indicator of a seasonal pattern in our data with a period of 12. What's with the greyish-blueish filled in area? That represents the 95% confidence bounds of the ACF function. Some individuals choose to only consider points outside of this area as 'significant'. I choose to consider the entire plot and just layer on my common sense and modeling intuition. ",
"_____no_output_____"
],
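[
"As a rough rule of thumb (a back-of-the-envelope sketch, not exactly what `plot_acf` draws, since its band is lag-dependent): under a white-noise assumption the approximate 95% bounds are about plus or minus 1.96 divided by the square root of the number of observations, which for our 144 monthly points is roughly 0.16:\n\n```python\nimport numpy as np\n\nn_obs = len(series) # 144 monthly observations\nbound = 1.96 / np.sqrt(n_obs) # approximate 95% band under white noise\nprint('approximate 95% bound: +/- {:.3f}'.format(bound)) # ~0.163\n```",
"_____no_output_____"
],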
[
"###### Partial Autocorrelation\n\nThe partial autocorrelation function can be another powerful tool in exploratory analysis of time series. To underststand partial autocorrelation we must first understand partial correlation. \n\nConsider an example where you are creating a simple linear regression model. You are predicting $y$ with three independent variables $x_{1}$, $x_{2}$, and $x_{3}$. The partial correlation between $y$ and $x_{3}$ is the amount of correlation between $y$ and $x_{3}$ that is not explained by their common correlations with $x_{1}$ and $x_{2}$. Said another way, partial correlation between two variables is the amount of correlation between them which is not explained by their mutual correlations with a specified set of other variables. \n\nPartial **autocorrelation** is the amount of correlation between a variable and a lag of itself that is not explained by correlations at all lower-order-lags. The autocorrelation of a time series $y$ at lag 1 is the correlation between $y_{t}$ and $y_{t-1}$. Now consider if we were finding the autocorrelation between $y_{t}$ and $y_{t-2}$. In the same way $y_{t}$ is correlated with $y_{t-1}$, then $y_{t-1}$ is also correlated with $y_{t-2}$. \n\nTherefore, when we are measuring lag 2 autocorrelation between $y_{t}$ and $y_{t-2}$ we are also including the effects of lag 1 autocorrelation betweeb $y_{t}$ and $y_{t-1}$. In short, autocorrelation propogates. The partial autocorrelation function can isolate the autocorrelation between $y_{t}$ and $y_{t-2}$, or any other lag order (i.e. $y_{t}$ and $y_{t-n}$).\n\nLet's calculate the autocorrelation plot of the airline dataset. How does it differ from the autocorrelation plot? How does this affect your initial intuition about seasonality in call center call volumes?",
"_____no_output_____"
]
],
[
[
"from statsmodels.graphics.tsaplots import plot_pacf\n\n# PACF plot\nfig, ax = plt.subplots(figsize=(12,5))\nplt.xlabel('lag')\nplt.ylabel('partial correlation coefficient')\n_ = plot_pacf(series, lags=100, ax=ax)\nplt.show()",
"_____no_output_____"
]
],
[
[
"We read this in the same way as the ACF (auto-correlation) plot, but this time we plot aprtial auto-correlation. Notice how much the plot has changed? Now we see that the point with the most predictive power about a following point is at lag one. ",
"_____no_output_____"
],
[
"We have just covered a lot of jargon. As we move on through the course, we will build on the base concepts covered here. I encourage you to take a second and read back through this lesson to ensure that you are comfortable with the terms, definitions, and material. ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7455859f384a0a90e418c07beae46a61b33efd6 | 17,034 | ipynb | Jupyter Notebook | notebooks/query_prototyping.ipynb | parente/honey.data | 418fa282fdaf1941b91c364ee13b6cabc0e581ea | [
"MIT"
] | null | null | null | notebooks/query_prototyping.ipynb | parente/honey.data | 418fa282fdaf1941b91c364ee13b6cabc0e581ea | [
"MIT"
] | null | null | null | notebooks/query_prototyping.ipynb | parente/honey.data | 418fa282fdaf1941b91c364ee13b6cabc0e581ea | [
"MIT"
] | null | null | null | 27.429952 | 412 | 0.556945 | [
[
[
"# Query prototyping\n\nThis notebook computes interesting metrics from the raw wheel rotation data stored in S3. It's meant as a prototype for queries that'll run to produce summary data for use by a web frontend later on.\n\nAn earlier version of the notebook worked with the raw rotation CSV files locally using pandas. Ultimately, I want to keep the compute close to the data in S3 and avoid paying for extra compute elsewhere (e.g., Lambda, ECS). Therefore, this notebook uses Athena, under the assumption that the Raspberry Pi doing the data collection can also trigger Athena queries to produce the summary data in the future.",
"_____no_output_____"
]
],
[
[
"import datetime\nimport getpass\nimport math\nimport time\n\nimport boto3\n# import matplotlib.pyplot as plt\n# import pandas as pd\n# import pytz\nimport requests",
"_____no_output_____"
],
[
"%config InlineBackend.print_figure_kwargs={'facecolor' : \"w\"}",
"_____no_output_____"
]
],
[
[
"Constants to convert [wheel rotations](https://www.amazon.com/gp/product/B019RH7PPE/ref=ppx_yo_dt_b_asin_title_o04_s00?ie=UTF8&psc=1) into distances.",
"_____no_output_____"
]
],
[
[
"wheel_diameter = 8.5 # inches, not quite the 9\" advertised, I measured\nwheel_circumference = math.pi * wheel_diameter / 12 / 5280 # miles",
"_____no_output_____"
],
[
"athena = boto3.client('athena')\ns3 = boto3.client('s3')",
"_____no_output_____"
]
],
[
[
"## Utils",
"_____no_output_____"
],
[
"I'll execute Athena queries using boto3. I'm not using `pyathena` to keep demands on the Raspberry Pi light.",
"_____no_output_____"
]
],
[
[
"def q(query, max_checks=30):\n \"\"\"Executes an Athena query, waits for success or failure, and returns the first page\n of the query results.\n \n Waits up to max_checks * 10 seconds for the query to complete before raising.\n \"\"\"\n resp = athena.start_query_execution(\n QueryString=query,\n QueryExecutionContext={\n 'Database': 'honey_data'\n },\n WorkGroup='honey-data'\n )\n qid = resp['QueryExecutionId']\n for i in range(max_checks):\n resp = athena.get_query_execution(\n QueryExecutionId=qid\n )\n state = resp['QueryExecution']['Status']['State']\n if state == 'SUCCEEDED':\n return qid, athena.get_query_results(QueryExecutionId=qid)\n elif state == 'FAILED':\n return qid, resp\n time.sleep(10)\n else:\n raise RuntimeError('Reached max_checks')",
"_____no_output_____"
],
[
"def publish(qid, s3_key):\n \"\"\"Copies Athena results to the public bucket for client use.\"\"\"\n return s3.copy_object(\n CopySource=f'honey-data/athena-results/{qid}.csv',\n Bucket='honey-data-public',\n Key=s3_key,\n ACL=\"public-read\",\n )",
"_____no_output_____"
],
[
"def update_partitions():\n \"\"\"Update daily partitions.\"\"\"\n return q('msck repair table incoming_rotations')",
"_____no_output_____"
]
],
[
[
"## Interesting metrics",
"_____no_output_____"
]
],
[
[
"update_partitions()",
"_____no_output_____"
]
],
[
[
"### How far has Honey run since we started tracking?\n\nThis can serve as input to the geopoint API naming a city she could have reached if she traveled this far in straight line distance.",
"_____no_output_____"
]
],
[
[
"qid, resp = q('''\nselect sum(rotations) total\nfrom incoming_rotations\n''')",
"_____no_output_____"
]
],
[
[
"The web client will end up using published CSVs instead of results fetched using the API. Therefore, I'm not investing in data type parsing in this notebook.",
"_____no_output_____"
]
],
[
[
"total_miles = int(resp['ResultSet']['Rows'][1]['Data'][0]['VarCharValue']) * wheel_circumference\ntotal_miles",
"_____no_output_____"
]
],
[
[
"### What's the most recent activity available?\n\nNot sure this is useful.",
"_____no_output_____"
]
],
[
[
"q(f'''\nselect max(from_iso8601_timestamp(datetime)) as most_recent_activity\nfrom incoming_rotations \nwhere year = year(current_date) and month >= month(current_date)-1\n''')",
"_____no_output_____"
]
],
[
[
"### How has she progressed over the last 7 days? (cumulative plot, origin at sum before period)\n\nThis should be the total sum prior to the window of interest. The query scans everything instead of trying to skip scanning 7 day partitions--a drop in the bucket.",
"_____no_output_____"
]
],
[
[
"qid, resp = q('''\nselect sum(rotations) as prior_rotations\nfrom incoming_rotations\nwhere from_iso8601_timestamp(datetime) < (current_date - interval '7' day)\n''')",
"_____no_output_____"
],
[
"resp",
"_____no_output_____"
],
[
"publish(qid, 'prior-7-day-window.csv');",
"_____no_output_____"
]
],
[
[
"And this should be the sum of rotations by hour and the cumulative sum by hour within the window of interest. I'm trying to constrain the search space for the necessary data using partitions. I need a bit more data to make sure this is working properly.",
"_____no_output_____"
]
],
[
[
"qid, resp = q(f'''\nselect\n sum(rotations) as sum_rotations,\n to_iso8601(date_trunc('hour', from_iso8601_timestamp(datetime))) as datetime_hour,\n sum(sum(rotations)) over (\n order by date_trunc('hour', from_iso8601_timestamp(datetime)) asc \n rows between unbounded preceding and current row\n ) as cumsum_rotations\nfrom incoming_rotations\nwhere \n year >= year(current_date)-1 and\n from_iso8601_timestamp(datetime) >= (current_date - interval '7' day)\ngroup by date_trunc('hour', from_iso8601_timestamp(datetime))\norder by datetime_hour\n''')",
"_____no_output_____"
],
[
"publish(qid, '7-day-window.csv');",
"_____no_output_____"
]
],
[
[
"Let's work with the CSV forms of these metrics to create a plot.",
"_____no_output_____"
]
],
[
[
"tz = pytz.timezone('America/New_York')",
"_____no_output_____"
],
[
"resp = s3.get_object(Bucket='honey-data-public', Key='prior-7-day-window.csv')\nprior_df = pd.read_csv(resp['Body'])",
"_____no_output_____"
],
[
"try:\n offset = prior_df.iloc[0].iloc[0]\nexcept:\n offset = 0",
"_____no_output_____"
],
[
"resp = s3.get_object(Bucket='honey-data-public', Key='7-day-window.csv')\nweek_df = pd.read_csv(resp['Body'])",
"_____no_output_____"
],
[
"week_df['datetime_utc'] = pd.to_datetime(week_df.datetime_hour)\nweek_df['datetime'] = week_df.datetime_utc.dt.tz_convert(tz)\nweek_df.set_index('datetime', inplace=True)",
"_____no_output_____"
]
],
[
[
"Filling missing values is something I might want to do in Athena instead of relying on the frontend web client doing it if plot interpolation doesn't look pretty. Some techniques here: https://www.reddit.com/r/SQL/comments/80t1db/inserting_dates_between_a_start_date_and_enddate/",
"_____no_output_____"
]
],
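[
[
"One possible shape for doing that gap-filling inside Athena itself (just a sketch I have not run; it assumes the engine supports `sequence()` over timestamps and reuses the table and column names from the 7-day query above): generate the hourly grid with `sequence()`/`UNNEST` and left join the rotations onto it.\n\n```python\nqid, resp = q('''\nwith hours as (\n select hour\n from unnest(sequence(\n date_trunc('hour', current_timestamp - interval '7' day),\n date_trunc('hour', current_timestamp),\n interval '1' hour)) as t(hour)\n)\nselect h.hour, coalesce(sum(r.rotations), 0) as sum_rotations\nfrom hours h\nleft join incoming_rotations r\n on date_trunc('hour', from_iso8601_timestamp(r.datetime)) = h.hour\ngroup by h.hour\norder by h.hour\n''')\n```",
"_____no_output_____"
]
],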
[
[
"cumsum_df = week_df[['cumsum_rotations']] + offset\n#cumsum_df = cumsum_df.reindex(pd.date_range(week_df.index.min(), week_df.index.max(), freq='1h'), method='ffill')",
"_____no_output_____"
],
[
"cumsum_df.index.max() - cumsum_df.index.min()",
"_____no_output_____"
],
[
"_, ax = plt.subplots(figsize=(15, 5))\n(cumsum_df * wheel_circumference).rename(columns={'cumsum_rotations': 'miles'}).plot(ax=ax)\nax.set_facecolor('white')",
"_____no_output_____"
],
[
"week_df.loc['2020-09-05 12:00:00':'2020-09-06 12:00:00'].sum_rotations.sum() * wheel_circumference",
"_____no_output_____"
]
],
[
[
"### How far has she run each night for the past year?\n\nWe should subtract 12 hours to sum rotations for nocturnal sessions, plus a few hours more to account for the fact that the hamster is in EST/EDT. Then we add one day back to the date to align with end of session reporting used elsewhere. Don't bother getting precisely one year of data from the current date as the UI we're going to put on this wants to show a whole calendar year anyway.",
"_____no_output_____"
]
],
[
[
"qid, resp = q(f'''\nselect\n sum(rotations) as value,\n date(date_trunc('day', from_iso8601_timestamp(datetime) - interval '16' hour)) + interval '1' day as day\nfrom incoming_rotations\nwhere year >= year(current_date)-1\ngroup by date_trunc('day', from_iso8601_timestamp(datetime) - interval '16' hour)\norder by day\n''')",
"_____no_output_____"
],
[
"publish(qid, '1-year-window.csv');",
"_____no_output_____"
]
],
[
[
"### What city might she have reached by traveling this distance?\n\nhttps://rapidapi.com/wirefreethought/api/geodb-cities?endpoint=5aadab87e4b00687d35767b4 allows 1000 request per day. If the data upload / aggregation job runs every 10 minutes, I only need about a tenth of that.",
"_____no_output_____"
]
],
[
[
"rapid_key = getpass.getpass('Rapid API key:')",
"_____no_output_____"
],
[
"durham_lat = '35.994034'\ndurham_lon = '-78.898621'\nrapid_url = \"https://wft-geo-db.p.rapidapi.com\"",
"_____no_output_____"
],
[
"def furthest_poi(lat, lon, radius, api_key, base_url=rapid_url):\n path = f'/v1/geo/locations/{durham_lat_lon}/nearbyCities'\n\n # Results sort nearest to farthest\n resp = requests.get(\n f'{base_url}{path}',\n headers = {\n 'x-rapidapi-host': \"wft-geo-db.p.rapidapi.com\",\n 'x-rapidapi-key': api_key\n }, \n params={\"radius\": radius}\n )\n resp.raise_for_status()\n\n # Navigate to the last page\n for link in resp.json()['links']:\n if link['rel'] == 'last':\n path = link['href']\n break\n else:\n raise ValueError('Link to last result page not found')\n \n resp = requests.get(\n f'{base_url}{path}',\n headers = {\n 'x-rapidapi-host': \"wft-geo-db.p.rapidapi.com\",\n 'x-rapidapi-key': api_key\n }, \n )\n resp.raise_for_status()\n # Furthest point of interest within the given radius\n return resp.json()['data'][-1]",
"_____no_output_____"
],
[
"furthest_poi(durham_lat, durham_lon, total_miles, rapid_key)",
"_____no_output_____"
],
[
"resp = requests.get(\n f'{rapid_url}{path}',\n headers = {\n 'x-rapidapi-host': \"wft-geo-db.p.rapidapi.com\",\n 'x-rapidapi-key': rapid_key\n }, \n params={\"radius\": total_miles}\n)",
"_____no_output_____"
],
[
"for link in resp.json()['links']:\n if link['rel'] == 'last':\n path = link['href']\n break\nelse:\n raise ValueError('Link to last result page not found')",
"_____no_output_____"
],
[
"path",
"_____no_output_____"
],
[
"resp = requests.get(\n f'{rapid_url}{path}',\n headers = {\n 'x-rapidapi-host': \"wft-geo-db.p.rapidapi.com\",\n 'x-rapidapi-key': rapid_key\n }\n)",
"_____no_output_____"
],
[
"resp.json()['data'][-1]",
"_____no_output_____"
]
],
[
[
"I can turn the `wikiDataId` into a link to a Wikipedia page by querying the Wikidata API followed by the Wikipedia API.\n\n* https://www.wikidata.org/w/pi.php?action=wbgetentities&format=json&props=sitelinks&ids=Q1373463&sitefilter=enwiki\n* https://en.wikipedia.org/w/api.php?action=query&titles=Fries,%20Virginia&format=json\n* https://en.wikipedia.org/w/api.php?action=query&prop=info&pageids=137620&inprop=url&format=json\n* https://en.wikipedia.org/wiki/Fries,_Virginia",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7455c1395a771e02a99609bfe3c2e2a61fd457b | 33,694 | ipynb | Jupyter Notebook | Notebooks cidades/Manaus_Antes.ipynb | amandacaravieri/ProjetoFinal-COVID_Brasil | c10eb0d734e36e5bb888a7b5cd468420da86d066 | [
"MIT"
] | null | null | null | Notebooks cidades/Manaus_Antes.ipynb | amandacaravieri/ProjetoFinal-COVID_Brasil | c10eb0d734e36e5bb888a7b5cd468420da86d066 | [
"MIT"
] | null | null | null | Notebooks cidades/Manaus_Antes.ipynb | amandacaravieri/ProjetoFinal-COVID_Brasil | c10eb0d734e36e5bb888a7b5cd468420da86d066 | [
"MIT"
] | null | null | null | 32.968689 | 204 | 0.368582 | [
[
[
"# Manipulação e tratamento das bases\nimport pandas as pd\nimport numpy as np\n\n#Pré-Processamento das bases\n!pip install imblearn\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.model_selection import train_test_split\n\n#Modelagem de Dados\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n",
"Requirement already satisfied: imblearn in c:\\users\\windows10\\anaconda3\\lib\\site-packages (0.0)\nRequirement already satisfied: imbalanced-learn in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imblearn) (0.8.0)\nRequirement already satisfied: joblib>=0.11 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (1.0.1)\nRequirement already satisfied: scipy>=0.19.1 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (1.6.2)\nRequirement already satisfied: scikit-learn>=0.24 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (0.24.2)\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from imbalanced-learn->imblearn) (1.19.5)\nRequirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\windows10\\anaconda3\\lib\\site-packages (from scikit-learn>=0.24->imbalanced-learn->imblearn) (2.2.0)\n"
],
[
"Antes= pd.read_csv('Base_Tratada.csv', sep= ',')\nAntes= Antes.loc[Antes['CO_MUN_NOT'].isin([130260])]\nAntes=Antes[(Antes['Periodo']==1.0)]\nAntes= Antes.drop(columns=[\"CO_MUN_NOT\", \"Periodo\"])\nAntes.head()",
"C:\\Users\\Windows10\\anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3441: DtypeWarning: Columns (16) have mixed types.Specify dtype option on import or set low_memory=False.\n exec(code_obj, self.user_global_ns, self.user_ns)\n"
]
],
[
[
"# PRÉ-PROCESSAMENTO",
"_____no_output_____"
]
],
[
[
"Antes['CS_GESTANT'].replace({1.0: 1, 2.0: 1, 3.0 :1, 4.0 : 1}, inplace= True)\nAntes['CS_GESTANT'].replace({5.0: 0, 6.0:0, 9.0:0}, inplace= True)\nAntes['CS_RACA'].fillna(9,inplace= True)\nAntes['CS_ESCOL_N'].fillna(9,inplace= True)\nAntes['SURTO_SG'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['SURTO_SG'].fillna(0,inplace= True)\nAntes['NOSOCOMIAL'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['NOSOCOMIAL'].fillna(0,inplace= True)\nAntes['FEBRE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['FEBRE'].fillna(0,inplace= True)\nAntes['TOSSE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['TOSSE'].fillna(0,inplace= True)\nAntes['GARGANTA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['GARGANTA'].fillna(0,inplace= True)\nAntes['DISPNEIA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DISPNEIA'].fillna(0,inplace= True)\nAntes['DESC_RESP'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DESC_RESP'].fillna(0,inplace= True)\nAntes['SATURACAO'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['SATURACAO'].fillna(0,inplace= True)\nAntes['DIARREIA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DIARREIA'].fillna(0,inplace= True)\nAntes['VOMITO'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['VOMITO'].fillna(0,inplace= True)\nAntes['PUERPERA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['PUERPERA'].fillna(0,inplace= True)\nAntes['CARDIOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['CARDIOPATI'].fillna(0,inplace= True)\nAntes['HEMATOLOGI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['HEMATOLOGI'].fillna(0,inplace= True)\nAntes['SIND_DOWN'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['SIND_DOWN'].fillna(0,inplace= True)\nAntes['HEPATICA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['HEPATICA'].fillna(0,inplace= True)\nAntes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['ASMA'].fillna(0,inplace= True)\nAntes['DIABETES'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['DIABETES'].fillna(0,inplace= True)\nAntes['NEUROLOGIC'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['NEUROLOGIC'].fillna(0,inplace= True)\nAntes['PNEUMOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['PNEUMOPATI'].fillna(0,inplace= True)\nAntes['IMUNODEPRE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['IMUNODEPRE'].fillna(0,inplace= True)\nAntes['RENAL'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['RENAL'].fillna(0,inplace= True)\nAntes['OBESIDADE'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['OBESIDADE'].fillna(0,inplace= True)\nAntes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['ASMA'].fillna(0,inplace= True)\nAntes['ANTIVIRAL'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['ANTIVIRAL'].fillna(0,inplace= True)\nAntes['UTI'].replace({2.0: 0, 9.0: 0}, inplace= True)\nAntes['UTI'].fillna(0,inplace= True)\nAntes['SUPORT_VEN'].replace({3.0: 0, 9.0: 0}, inplace= True)\nAntes['SUPORT_VEN'].fillna(0,inplace= True)\nAntes['PCR_RESUL'].fillna(4,inplace= True)\nAntes['HISTO_VGM'].replace({0: 2}, inplace= True)\nAntes['DOR_ABD'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['DOR_ABD'].fillna(0,inplace= True)\nAntes['FADIGA'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['FADIGA'].fillna(0,inplace= True)\nAntes['PERD_OLFT'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['PERD_OLFT'].fillna(0,inplace= True)\nAntes['PERD_PALA'].replace({9.0: 0, 2.0 :0}, inplace= True)\nAntes['PERD_PALA'].fillna(0,inplace= True)\nAntes['VACINA'].fillna(0,inplace= True)\nAntes['FATOR_RISC'].replace({'S': 1, 'N':2, '1':1, '2':2}, inplace= 
True)\nAntes['FATOR_RISC'].fillna(0,inplace= True)",
"_____no_output_____"
]
],
[
[
"- Resetando o Index novamente.",
"_____no_output_____"
]
],
[
[
"Antes= Antes.reset_index(drop=True)\nAntes.head()",
"_____no_output_____"
]
],
[
[
"- Aplicação da Dummy nas Features Categóricas",
"_____no_output_____"
]
],
[
[
"Antes=pd.get_dummies(Antes, columns=['CS_SEXO', 'CS_GESTANT', 'CS_RACA', 'CS_ESCOL_N',\n 'SURTO_SG', 'NOSOCOMIAL', 'FEBRE', 'TOSSE', 'GARGANTA', 'DISPNEIA',\n 'DESC_RESP', 'SATURACAO', 'DIARREIA', 'VOMITO', 'PUERPERA',\n 'FATOR_RISC', 'CARDIOPATI', 'HEMATOLOGI', 'SIND_DOWN', 'HEPATICA',\n 'ASMA', 'DIABETES', 'NEUROLOGIC', 'PNEUMOPATI', 'IMUNODEPRE', 'RENAL',\n 'OBESIDADE', 'VACINA', 'ANTIVIRAL', 'UTI', 'SUPORT_VEN', 'PCR_RESUL',\n 'HISTO_VGM', 'DOR_ABD', 'FADIGA', 'PERD_OLFT', 'PERD_PALA'], drop_first=True)\nAntes.head()",
"_____no_output_____"
]
],
[
[
"# Verificando o Balanceamento",
"_____no_output_____"
]
],
[
[
"Antes[\"EVOLUCAO\"].value_counts(normalize=True)",
"_____no_output_____"
],
[
"X = Antes[['IDADE_ANOS','CS_SEXO_M','CS_RACA_4.0','FEBRE_1.0','DISPNEIA_1.0','SATURACAO_1.0','UTI_1.0',\n 'SUPORT_VEN_1.0', 'SUPORT_VEN_2.0', 'PCR_RESUL_2.0','TOSSE_1.0','DESC_RESP_1.0', 'FATOR_RISC_2']]\n\ny = Antes['EVOLUCAO']\n\nXtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=42)",
"_____no_output_____"
],
[
"Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape",
"_____no_output_____"
],
[
"smote = SMOTE(sampling_strategy = 'minority', random_state = 42)\nXtrain_over, ytrain_over = smote.fit_resample(Xtrain,ytrain)\n\nXtest_over, ytest_over = smote.fit_resample(Xtest,ytest)\nXtrain_over.shape, ytrain_over.shape, Xtest_over.shape, ytest_over.shape",
"_____no_output_____"
]
],
[
[
"# Aplicação do Modelo Escolhido",
"_____no_output_____"
]
],
[
[
"random_state=42\nRDF = RandomForestClassifier()\nRDF.fit(Xtrain_over, ytrain_over)",
"_____no_output_____"
],
[
"previsoes = RDF.predict(Xtest_over)\nprevisoes",
"_____no_output_____"
],
[
"accuracy_score(ytest_over, previsoes)",
"_____no_output_____"
],
[
"# Testar Modelo\n\nidade = 43.0\nsexo = 1\nraca = 0\nfebre = 1\ndispneia = 1\nsaturacao = 0\nuti = 1\nsuport1 = 1\nsuport2 = 0\npcr = 1\ntosse = 1\ndescresp = 0\nfrisc = 0\n\nprediction = RDF.predict(np.array([idade, sexo, raca, febre, dispneia, saturacao, uti, suport1, suport2, pcr, tosse, descresp, frisc]).reshape(1, -1))\n\nprint(prediction)\n",
"[2.]\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e745878c7fca96c9455a84e7ec6aec60f15997b9 | 79,010 | ipynb | Jupyter Notebook | Pipeline progression/13_variance-weighted-done.ipynb | animesh-singhal/Project-2 | d58a674eb0030fb5e998027341ef387570ac4742 | [
"MIT"
] | null | null | null | Pipeline progression/13_variance-weighted-done.ipynb | animesh-singhal/Project-2 | d58a674eb0030fb5e998027341ef387570ac4742 | [
"MIT"
] | null | null | null | Pipeline progression/13_variance-weighted-done.ipynb | animesh-singhal/Project-2 | d58a674eb0030fb5e998027341ef387570ac4742 | [
"MIT"
] | null | null | null | 37.641734 | 10,432 | 0.574725 | [
[
[
"Ye ab tak ka code:",
"_____no_output_____"
]
],
[
[
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n\ndef process_image(frame):\n \n def cal_undistort(img):\n # Reads mtx and dist matrices, peforms image distortion correction and returns the undistorted image\n\n import pickle\n\n # Read in the saved matrices\n my_dist_pickle = pickle.load( open( \"output_files/calib_pickle_files/dist_pickle.p\", \"rb\" ) )\n mtx = my_dist_pickle[\"mtx\"]\n dist = my_dist_pickle[\"dist\"]\n\n img_size = (img.shape[1], img.shape[0]) \n\n undistorted_img = cv2.undistort(img, mtx, dist, None, mtx)\n #undistorted_img = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2RGB) #Use if you use cv2 to import image. ax.imshow() needs RGB image\n return undistorted_img\n\n \n def yellow_threshold(img, sxbinary):\n # Convert to HLS color space and separate the S channel\n # Note: img is the undistorted image\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:,:,2]\n h_channel = hls[:,:,0]\n # Threshold color channel\n s_thresh_min = 100\n s_thresh_max = 255\n \n #for 360 degree, my value for yellow ranged between 35 and 50. So uska half kar diya\n h_thresh_min = 10 \n h_thresh_max = 25\n\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1\n\n h_binary = np.zeros_like(h_channel)\n h_binary[(h_channel >= h_thresh_min) & (h_channel <= h_thresh_max)] = 1\n\n # Combine the two binary thresholds\n yellow_binary = np.zeros_like(s_binary)\n yellow_binary[(((s_binary == 1) | (sxbinary == 1) ) & (h_binary ==1))] = 1\n return yellow_binary\n \n def xgrad_binary(img, thresh_min=30, thresh_max=100):\n # Grayscale image\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Sobel x\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n # Threshold x gradient\n #thresh_min = 30 #Already given above\n #thresh_max = 100\n\n sxbinary = np.zeros_like(scaled_sobel)\n sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1\n return sxbinary\n \n def white_threshold(img, sxbinary, lower_white_thresh = 170):\n r_channel = img[:,:,0]\n g_channel = img[:,:,1]\n b_channel = img[:,:,2]\n # Threshold color channel\n r_thresh_min = lower_white_thresh\n r_thresh_max = 255\n r_binary = np.zeros_like(r_channel)\n r_binary[(r_channel >= r_thresh_min) & (r_channel <= r_thresh_max)] = 1\n \n g_thresh_min = lower_white_thresh\n g_thresh_max = 255\n g_binary = np.zeros_like(g_channel)\n g_binary[(g_channel >= g_thresh_min) & (g_channel <= g_thresh_max)] = 1\n\n b_thresh_min = lower_white_thresh\n b_thresh_max = 255\n b_binary = np.zeros_like(b_channel)\n b_binary[(b_channel >= b_thresh_min) & (b_channel <= b_thresh_max)] = 1\n\n white_binary = np.zeros_like(r_channel)\n white_binary[((r_binary ==1) & (g_binary ==1) & (b_binary ==1) & (sxbinary==1))] = 1\n return white_binary\n \n def thresh_img(img):\n \n \n #sxbinary = xgrad_binary(img, thresh_min=30, thresh_max=100)\n sxbinary = xgrad_binary(img, thresh_min=25, thresh_max=130)\n yellow_binary = yellow_threshold(img, sxbinary) #(((s) | (sx)) & (h))\n white_binary = white_threshold(img, sxbinary, lower_white_thresh = 150)\n \n # Combine the two binary thresholds\n combined_binary = np.zeros_like(sxbinary)\n 
combined_binary[((yellow_binary == 1) | (white_binary == 1))] = 1\n \n out_img = np.dstack((combined_binary, combined_binary, combined_binary))*255\n \n return out_img\n \n def perspective_transform(img):\n \n # Define calibration box in source (original) and destination (desired or warped) coordinates\n\n img_size = (img.shape[1], img.shape[0])\n \"\"\"Notice the format used for img_size. Yaha bhi ulta hai. x axis aur fir y axis chahiye. \n Apne format mein rows(y axis) and columns (x axis) hain\"\"\"\n\n\n # Four source coordinates\n # Order of points: top left, top right, bottom right, bottom left\n \n src = np.array(\n [[435*img.shape[1]/960, 350*img.shape[0]/540],\n [530*img.shape[1]/960, 350*img.shape[0]/540],\n [885*img.shape[1]/960, img.shape[0]],\n [220*img.shape[1]/960, img.shape[0]]], dtype='f')\n \n\n # Next, we'll define a desired rectangle plane for the warped image.\n # We'll choose 4 points where we want source points to end up \n # This time we'll choose our points by eyeballing a rectangle\n\n dst = np.array(\n [[290*img.shape[1]/960, 0],\n [740*img.shape[1]/960, 0],\n [740*img.shape[1]/960, img.shape[0]],\n [290*img.shape[1]/960, img.shape[0]]], dtype='f')\n\n\n #Compute the perspective transform, M, given source and destination points:\n M = cv2.getPerspectiveTransform(src, dst)\n\n #Warp an image using the perspective transform, M; using linear interpolation \n #Interpolating points is just filling in missing points as it warps an image\n # The input image for this function can be a colored image too\n warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)\n \n return warped, src, dst \n\n def rev_perspective_transform(img, src, dst):\n\n img_size = (img.shape[1], img.shape[0])\n\n #Compute the perspective transform, M, given source and destination points:\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n #Warp an image using the perspective transform, M; using linear interpolation \n #Interpolating points is just filling in missing points as it warps an image\n # The input image for this function can be a colored image too\n un_warped = cv2.warpPerspective(img, Minv, img_size, flags=cv2.INTER_LINEAR)\n \n return un_warped \n\n \n def draw_polygon(img1, img2, src, dst):\n src = src.astype(int) #Very important step (Pixels cannot be in decimals)\n dst = dst.astype(int)\n cv2.polylines(img1, [src], True, (255,0,0), 3)\n cv2.polylines(img2, [dst], True, (255,0,0), 3)\n \n def histogram_bottom_peaks (warped_img):\n # This will detect the bottom point of our lane lines\n \n # Take a histogram of the bottom half of the image\n bottom_half = warped_img[((2*warped_img.shape[0])//5):,:,0] # Collecting all pixels in the bottom half\n histogram = np.sum(bottom_half, axis=0) # Summing them along y axis (or along columns)\n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram.shape[0]//2) # 1D array hai histogram toh uska bas 0th index filled hoga \n #print(np.shape(histogram)) #OUTPUT:(1280,)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n\n return leftx_base, rightx_base\n \n def find_lane_pixels(warped_img):\n \n leftx_base, rightx_base = histogram_bottom_peaks(warped_img)\n \n \n # HYPERPARAMETERS\n # Choose the number of sliding windows\n nwindows = 9\n # Set the width of the windows +/- margin. 
So width = 2*margin \n margin = 90\n # Set minimum number of pixels found to recenter window\n minpix = 1000 #I've changed this from 50 as given in lectures\n \n # Set height of windows - based on nwindows above and image shape\n window_height = np.int(warped_img.shape[0]//nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = warped_img.nonzero() #pixel ke coordinates dega 2 seperate arrays mein\n nonzeroy = np.array(nonzero[0]) # Y coordinates milenge 1D array mein. They will we arranged in the order of pixels\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated later for each window in nwindows\n leftx_current = leftx_base #initially set kar diya hai. For loop ke end mein change karenge\n rightx_current = rightx_base\n\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = [] # Ismein lane-pixels ke indices collect karenge. \n # 'nonzerox' array mein index daalke coordinate mil jaayega\n right_lane_inds = [] \n\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = warped_img.shape[0] - (window+1)*window_height\n win_y_high = warped_img.shape[0] - window*window_height\n \"\"\"### TO-DO: Find the four below boundaries of the window ###\"\"\"\n win_xleft_low = leftx_current - margin \n win_xleft_high = leftx_current + margin \n win_xright_low = rightx_current - margin \n win_xright_high = rightx_current + margin \n \n \"\"\"\n # Create an output image to draw on and visualize the result\n out_img = np.copy(warped_img)\n # Draw the windows on the visualization image\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),\n (win_xleft_high,win_y_high),(0,255,0), 2) \n cv2.rectangle(out_img,(win_xright_low,win_y_low),\n (win_xright_high,win_y_high),(0,255,0), 2) \n \"\"\"\n\n ### TO-DO: Identify the nonzero pixels in x and y within the window ###\n #Iska poora explanation seperate page mein likha hai\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & \n (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n\n\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n\n # If you found > minpix pixels, recenter next window on the mean position of the pixels in your current window (re-centre)\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n \n # Concatenate the arrays of indices (previously was a list of lists of pixels)\n try:\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n except ValueError:\n # Avoids an error if the above is not implemented fully\n pass\n \n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n \"\"\"return leftx, lefty, rightx, righty, out_img\"\"\" #agar rectangles bana rahe ho toh out_image rakhna\n return leftx, lefty, rightx, righty\n \n\n def fit_polynomial(warped_img, leftx, lefty, rightx, righty, right_fit_history, right_variance_history):\n \n #Fit a second order polynomial to each using `np.polyfit` 
###\n left_fit = np.polyfit(lefty,leftx,2)\n right_fit = np.polyfit(righty,rightx,2)\n\n # Generate x and y values for plotting. \n #NOTE: y is the independent variable. Refer \"fit polynomial\" notes for explanation\n # We'll plot x as a function of y\n ploty = np.linspace(0, warped_img.shape[0]-1, warped_img.shape[0])\n \n # Eqn of parabola: a(x**2) + bx + c. Where a and b denote the shape of parabola. Shape of parabola will be amost constant inn our case \n \n variance_new=0 #initializing the variable\n \n try: \n left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n \n if(right_fit_history == None):\n a2 = (0.6*left_fit[0] + 0.4*right_fit[0])\n b2 = (0.6*left_fit[1] + 0.4*right_fit[1])\n c2 = (warped_img.shape[1] - (left_fit[0]*(warped_img.shape[0]-1)**2 + left_fit[1]*(warped_img.shape[0]-1) + left_fit[2]))*0.1 + 0.9*right_fit[2]\n \n for index in range(len(rightx)):\n variance_new+= abs(rightx[index]-(a2*righty[index]**2 + b2*righty[index] + c2))\n variance_new=variance_new/len(rightx)\n print(\"variance_new\",variance_new)\n \n else:\n a2_new = (0.6*left_fit[0] + 0.4*right_fit[0])\n b2_new = (0.6*left_fit[1] + 0.4*right_fit[1]) \n c2_new = (warped_img.shape[1] - (left_fit[0]*(warped_img.shape[0]-1)**2 + left_fit[1]*(warped_img.shape[0]-1) + left_fit[2]))*0.1 + 0.9*right_fit[2]\n \n # Finding weighted average for the previous elements data within right_fit_history\n a2_old= sum([(0.2*(index+1)*element[0]) for index,element in enumerate(right_fit_history)])/sum([0.2*(index+1) for index in range(0,5)]) \n b2_old= sum([(0.2*(index+1)*element[1]) for index,element in enumerate(right_fit_history)])/sum([0.2*(index+1) for index in range(0,5)]) \n c2_old= sum([(0.2*(index+1)*element[2]) for index,element in enumerate(right_fit_history)])/sum([0.2*(index+1) for index in range(0,5)])\n \n \"\"\"Trying to find variance\"\"\"\n \n for index in range(len(rightx)):\n variance_new+= abs(rightx[index]-(a2_new*righty[index]**2 + b2_new*righty[index] + c2_new))\n variance_new=variance_new/len(rightx)\n print(\"variance_new\",variance_new)\n \n \n #variance_old = sum([(0.2*(index+1)*element) for index,element in enumerate(right_variance_history)])/sum([0.2*(index+1) for index in range(0,5)])\n variance_old = sum([(0.2*((5-index)**3)*element) for index,element in enumerate(right_variance_history)])/sum([0.2*((5-index)**3) for index in range(0,5)])\n #variance_old = right_variance_history[4]\n #variance_old = sum([element for element in right_variance_history])/5\n \"\"\"yaha ke coefficients variance se aa sakte hain\"\"\"\n coeff_new=variance_old/(variance_new+variance_old)\n coeff_old=variance_new/(variance_new+variance_old)\n \n a2= a2_new*coeff_new + a2_old*coeff_old\n b2= b2_new*coeff_new + b2_old*coeff_old\n c2= c2_new*coeff_new + c2_old*coeff_old\n \n \n right_fitx = a2*ploty**2 + b2*ploty + c2\n \n status = True\n \n #try:\n # left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n # right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n except TypeError:\n # Avoids an error if `left` and `right_fit` are still none or incorrect\n print('The function failed to fit a line!')\n left_fitx = 1*ploty**2 + 1*ploty\n right_fitx = 1*ploty**2 + 1*ploty\n status = False\n\n \n return left_fit, [a2,b2,c2], left_fitx, right_fitx, status, variance_new\n # out_img here has boxes drawn and the pixels are colored \n \n def color_pixels_and_curve(out_img, leftx, lefty, rightx, righty, left_fitx, right_fitx):\n ploty = np.linspace(0, warped_img.shape[0]-1, 
warped_img.shape[0])\n ## Visualization ##\n # Colors in the left and right lane regions\n out_img[lefty, leftx] = [255, 0, 0]\n out_img[righty, rightx] = [0, 0, 255]\n \n # Converting the coordinates of our line into integer values as index of the image can't take decimals\n left_fitx_int = left_fitx.astype(np.int32)\n right_fitx_int = right_fitx.astype(np.int32)\n ploty_int = ploty.astype(np.int32)\n \n # Coloring the curve as yellow\n out_img[ploty_int,left_fitx_int] = [255,255,0]\n out_img[ploty_int,right_fitx_int] = [255,255,0]\n \n # To thicken the curve\n out_img[ploty_int,left_fitx_int+1] = [255,255,0]\n out_img[ploty_int,right_fitx_int+1] = [255,255,0]\n out_img[ploty_int,left_fitx_int-1] = [255,255,0]\n out_img[ploty_int,right_fitx_int-1] = [255,255,0]\n out_img[ploty_int,left_fitx_int+2] = [255,255,0]\n out_img[ploty_int,right_fitx_int+2] = [255,255,0]\n out_img[ploty_int,left_fitx_int-2] = [255,255,0]\n out_img[ploty_int,right_fitx_int-2] = [255,255,0]\n \n \n \n def search_around_poly(warped_img, left_fit, right_fit):\n # HYPERPARAMETER\n # Choose the width of the margin around the previous polynomial to search\n # The quiz grader expects 100 here, but feel free to tune on your own!\n margin = 100\n\n # Grab activated pixels\n nonzero = warped_img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n\n ### TO-DO: Set the area of search based on activated x-values ###\n ### within the +/- margin of our polynomial function ###\n ### Hint: consider the window areas for the similarly named variables ###\n ### in the previous quiz, but change the windows to our new search area ###\n left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + \n left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + \n left_fit[1]*nonzeroy + left_fit[2] + margin)))\n right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + \n right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + \n right_fit[1]*nonzeroy + right_fit[2] + margin)))\n\n # Again, extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n return leftx, lefty, rightx, righty\n\n def modify_array(array, new_value):\n if len(array)!=5:\n for i in range(0,5):\n array.append(new_value)\n\n else:\n dump_var=array[0]\n array[0]=array[1]\n array[1]=array[2]\n array[2]=array[3]\n array[3]=array[4]\n array[4]=new_value\n return array\n\n undist_img = cal_undistort(frame)\n thresh_img = thresh_img(undist_img) # Note: This is not a binary iamge. 
It has been stacked already within the function\n warped_img, src, dst = perspective_transform(thresh_img)\n\n #draw_polygon(frame, warped_img, src, dst) #the first image is the original image that you import into the system\n \n print(\"starting count\",lane.count)\n if (lane.count == 0):\n leftx, lefty, rightx, righty = find_lane_pixels(warped_img) # Find our lane pixels first\n left_fit, right_fit, left_fitx, right_fitx, status, variance_new = fit_polynomial(warped_img, leftx, lefty, rightx, righty, right_fit_history=None, right_variance_history=None)\n print(\"First case mein variance ye hai\", variance_new)\n elif (lane.count > 0):\n \n left_fit_previous = [i[0] for i in lane.curve_fit] \n right_fit_previous = [i[1] for i in lane.curve_fit]\n \n #print(left_fit_previous)\n #print(right_fit_previous)\n leftx, lefty, rightx, righty = search_around_poly(warped_img, left_fit_previous[4], right_fit_previous[4])\n left_fit, right_fit, left_fitx, right_fitx, status, variance_new = fit_polynomial(warped_img, leftx, lefty, rightx, righty, right_fit_history=right_fit_previous, right_variance_history=lane.right_variance)\n \n color_pixels_and_curve(warped_img, leftx, lefty, rightx, righty, left_fitx, right_fitx)\n \n lane.detected = status\n lane.curve_fit = modify_array(lane.curve_fit,[left_fit, right_fit])\n lane.right_variance = modify_array(lane.right_variance, variance_new)\n print(lane.right_variance)\n \n #lane.current_xfitted.append([left_fitx, right_fitx])\n #lane.allx.append([leftx,rightx])\n #lane.ally.append([lefty, righty])\n #lane.image_output.append(warped_img)\n \n unwarped_img = rev_perspective_transform(warped_img, src, dst)\n \n lane.count = lane.count+1\n return unwarped_img\n \n ",
"_____no_output_____"
]
],
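[
[
"# Minimal standalone sketch of the variance-weighted blending used in fit_polynomial above:\n# the new right-lane fit and the smoothed historical fit are mixed with weights inversely\n# proportional to their variances, so the noisier estimate contributes less.\n# The coefficient and variance values below are illustrative placeholders, not taken from the video.\nimport numpy as np\n\nvariance_new, variance_old = 20.0, 15.0\nfit_new = np.array([1.2e-4, -0.30, 900.0])\nfit_old = np.array([1.0e-4, -0.25, 905.0])\n\ncoeff_new = variance_old / (variance_new + variance_old)\ncoeff_old = variance_new / (variance_new + variance_old)\nblended_fit = coeff_new * fit_new + coeff_old * fit_old\nprint(blended_fit)",
"_____no_output_____"
]
],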
[
[
"Let's try classes",
"_____no_output_____"
]
],
[
[
"# Define a class to receive the characteristics of each line detection\nclass Line():\n def __init__(self):\n #Let's count the number of consecutive frames\n self.count = 0\n # was the line detected in the last iteration?\n self.detected = False \n #polynomial coefficients for the most recent fit\n self.curve_fit = [] \n # Traking variance for the right lane\n self.right_variance = []\n # x values of the curve that we fit intially\n #self.current_xfitted = []\n # x values for detected line pixels\n #self.allx = [] \n # y values for detected line pixels\n #self.ally = []\n #store your image in this\n #self.image_output = []\n \n \n # x values of the last n fits of the line\n self.recent_xfitted = [] \n #average x values of the fitted line over the last n iterations\n self.bestx = None \n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None \n #radius of curvature of the line in some units\n self.radius_of_curvature = None \n #distance in meters of vehicle center from the line\n self.line_base_pos = None \n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float') \n \n \n \nlane=Line()\n\n\nframe1= mpimg.imread(\"my_test_images/Highway_snaps/image (1).jpg\")\nframe2= mpimg.imread(\"my_test_images/Highway_snaps/image (2).jpg\")\nframe3= mpimg.imread(\"my_test_images/Highway_snaps/image (3).jpg\")\n\n\nprint(\"starting count value\",lane.count)\n\n\n(process_image(frame1))\n(process_image(frame2))\nplt.imshow(process_image(frame3))\n",
"starting count value 0\nstarting count 0\nvariance_new 16.411728943874007\nFirst case mein variance ye hai 16.411728943874007\n[16.411728943874007, 16.411728943874007, 16.411728943874007, 16.411728943874007, 16.411728943874007]\nstarting count 1\nvariance_new 20.454135208135213\n[16.411728943874007, 16.411728943874007, 16.411728943874007, 16.411728943874007, 20.454135208135213]\nstarting count 2\nvariance_new 14.975480798140142\n[16.411728943874007, 16.411728943874007, 16.411728943874007, 20.454135208135213, 14.975480798140142]\n"
]
],
[
[
"Videoo test\n\n",
"_____no_output_____"
]
],
[
[
"# Define a class to receive the characteristics of each line detection\nclass Line():\n def __init__(self):\n #Let's count the number of consecutive frames\n self.count = 0\n # was the line detected in the last iteration?\n self.detected = False \n #polynomial coefficients for the most recent fit\n self.curve_fit = [] \n # Traking variance for the right lane\n self.right_variance = []\n # x values of the curve that we fit intially\n #self.current_xfitted = []\n # x values for detected line pixels\n #self.allx = [] \n # y values for detected line pixels\n #self.ally = []\n #store your image in this\n #self.image_output = []\n \n \n # x values of the last n fits of the line\n self.recent_xfitted = [] \n #average x values of the fitted line over the last n iterations\n self.bestx = None \n #polynomial coefficients averaged over the last n iterations\n self.best_fit = None \n #radius of curvature of the line in some units\n self.radius_of_curvature = None \n #distance in meters of vehicle center from the line\n self.line_base_pos = None \n #difference in fit coefficients between last and new fits\n self.diffs = np.array([0,0,0], dtype='float') \n \nlane=Line()\n\nproject_output = 'output_files/video_clips/project_video_with_history.mp4'\n\n#clip1 = VideoFileClip(\"project_video.mp4\")\nclip1 = VideoFileClip(\"project_video.mp4\").subclip(20,23)\n\nproject_clip = clip1.fl_image(process_image) #NOTE: this function expects color images! \n%time project_clip.write_videofile(project_output, audio=False)",
"starting count 0\n"
],
[
"HTML(\"\"\"\n<video width=\"960\" height=\"540\" controls>\n <source src=\"{0}\">\n</video>\n\"\"\".format(project_output))",
"_____no_output_____"
]
],
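[
[
"# Sketch for the (currently unused) radius_of_curvature field of Line: for a fit\n# x = A*y**2 + B*y + C, the curvature radius at y_eval is\n# R = (1 + (2*A*y_eval + B)**2)**1.5 / |2*A|.\n# The coefficients below are illustrative pixel-space values; for physical units the fit\n# would have to be redone with a meters-per-pixel scaling.\nimport numpy as np\n\nA, B, C = 1.5e-4, -0.28, 920.0\ny_eval = 719  # bottom row of a 720-pixel-high image\nradius = (1 + (2*A*y_eval + B)**2)**1.5 / np.abs(2*A)\nprint(radius)",
"_____no_output_____"
]
],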
[
[
".",
"_____no_output_____"
]
],
[
[
"import numpy as np\n\ndef modify_array(array, new_value):\n if len(array)!=5:\n for i in range(0,5):\n array.append(new_value)\n \n else:\n dump_var=array[0]\n array[0]=array[1]\n array[1]=array[2]\n array[2]=array[3]\n array[3]=array[4]\n array[4]=new_value\n return array\n \na=[]\nmodify_array(a,[4,2])\n \nmodify_array(a,[7,3])\nmodify_array(a,[2,1])\nmodify_array(a,[9,6])\n\nprint(a)\n\nAns = [i[0] for i in a]\nprint(Ans)\n\"\"\"a[:,0] \"\"\" # This wont work. TypeError: list indices must be integers or slices, not tuple\n\na = np.array(a)\n\nmodify_array(a,[1,4])\n\nprint(a)\na[:,0]\n\n\n",
"_____no_output_____"
],
[
"a=[[10,20,30],[30,60,80],[60,10,20], [100,20,10], [90,70,10]]\n\nans = sum([(0.2*(index+1)*element[0]) for index,element in enumerate(a)])/sum([0.2*(index+1) for index in range(0,5)])\n\n\nprint(ans)\n[(0.25*(index+1)*element[0]) for index,element in enumerate(a)]\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7458be06aceccda63ff0cca210b455c83a70571 | 322,831 | ipynb | Jupyter Notebook | day5_hyperopt_xgboost.ipynb | kowalcorp/dw_matrix_car | 11ef896b7ab5f2e249e103172be4f0742313ebdb | [
"MIT"
] | null | null | null | day5_hyperopt_xgboost.ipynb | kowalcorp/dw_matrix_car | 11ef896b7ab5f2e249e103172be4f0742313ebdb | [
"MIT"
] | null | null | null | day5_hyperopt_xgboost.ipynb | kowalcorp/dw_matrix_car | 11ef896b7ab5f2e249e103172be4f0742313ebdb | [
"MIT"
] | null | null | null | 322,831 | 322,831 | 0.853635 | [
[
[
"!pip install --upgrade tables\n!pip install eli5\n!pip install xgboost\n!pip install hyperopt",
"Collecting tables\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ed/c3/8fd9e3bb21872f9d69eb93b3014c86479864cca94e625fd03713ccacec80/tables-3.6.1-cp36-cp36m-manylinux1_x86_64.whl (4.3MB)\n\u001b[K |████████████████████████████████| 4.3MB 2.8MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nInstalling collected packages: tables\n Found existing installation: tables 3.4.4\n Uninstalling tables-3.4.4:\n Successfully uninstalled tables-3.4.4\nSuccessfully installed tables-3.6.1\nCollecting eli5\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/97/2f/c85c7d8f8548e460829971785347e14e45fa5c6617da374711dec8cb38cc/eli5-0.10.1-py2.py3-none-any.whl (105kB)\n\u001b[K |████████████████████████████████| 112kB 2.8MB/s \n\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5) (1.12.0)\nRequirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (19.3.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from eli5) (1.4.1)\nRequirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5) (0.10.1)\nRequirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.8.6)\nRequirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from eli5) (0.22.1)\nRequirement already satisfied: numpy>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from eli5) (1.17.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5) (2.11.1)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.18->eli5) (0.14.1)\nRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5) (1.1.1)\nInstalling collected packages: eli5\nSuccessfully installed eli5-0.10.1\nRequirement already satisfied: xgboost in /usr/local/lib/python3.6/dist-packages (0.90)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.17.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.4.1)\nRequirement already satisfied: hyperopt in /usr/local/lib/python3.6/dist-packages (0.1.2)\nRequirement already satisfied: pymongo in /usr/local/lib/python3.6/dist-packages (from hyperopt) (3.10.1)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from hyperopt) (0.16.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from hyperopt) (1.4.1)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from hyperopt) (4.28.1)\nRequirement already satisfied: networkx in /usr/local/lib/python3.6/dist-packages (from hyperopt) (2.4)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from hyperopt) (1.17.5)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from hyperopt) (1.12.0)\nRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx->hyperopt) (4.4.1)\n"
],
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"from sklearn.dummy import DummyRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\nimport xgboost as xgb\n\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.model_selection import cross_val_score, KFold\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance",
"/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.scorer module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.\n warnings.warn(message, FutureWarning)\n/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.feature_selection.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.feature_selection. Anything that cannot be imported from sklearn.feature_selection is now part of the private API.\n warnings.warn(message, FutureWarning)\nUsing TensorFlow backend.\n"
],
[
"from hyperopt import hp, fmin, tpe, STATUS_OK",
"_____no_output_____"
],
[
"pwd",
"_____no_output_____"
],
[
"cd \"drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car\"",
"/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car\n"
],
[
"!ls -l",
"total 60295\n-rw------- 1 root root 60525650 Mar 3 00:01 cat.h5\ndrwx------ 2 root root 4096 Mar 2 23:44 data\n-rw------- 1 root root 252463 Mar 4 22:58 day2_visualisation.ipynb\n-rw------- 1 root root 293508 Mar 6 22:17 day3_simple_model.ipynb\n-rw------- 1 root root 329838 Mar 6 22:15 day4_xgboost_features_engineering.ipynb\n-rw------- 1 root root 332609 Mar 8 11:23 day5_xgboost_features_engineering.ipynb\n-rw------- 1 root root 1066 Mar 2 23:43 LICENSE\n-rw------- 1 root root 32 Mar 2 23:43 README.md\n"
],
[
"df = pd.read_hdf(\"data/car.h5\")\ndf.shape",
"_____no_output_____"
],
[
"df.columns.values",
"_____no_output_____"
],
[
"df[\"price_value\"].hist(bins=100)",
"_____no_output_____"
],
[
"df[\"price_value\"].max()",
"_____no_output_____"
],
[
"df[\"price_value\"].describe()",
"_____no_output_____"
],
[
"df[\"param_marka-pojazdu\"].unique()",
"_____no_output_____"
],
[
"df.groupby('param_marka-pojazdu')['price_value'].mean()",
"_____no_output_____"
],
[
"df.groupby('param_marka-pojazdu')['price_value'].agg(np.mean).plot(kind='bar')",
"_____no_output_____"
],
[
"(\n df.groupby('param_marka-pojazdu')['price_value']\n .agg(np.median)\n .sort_values(ascending=False)\n .head(50)\n).plot(kind='bar', figsize=(15,5))",
"_____no_output_____"
],
[
"(\n df.groupby('param_marka-pojazdu')['price_value']\n .agg([np.mean, np.median, np.size])\n .sort_values(by='mean', ascending=False)\n .head(50)\n).plot(kind='bar', figsize=(15,5))",
"_____no_output_____"
],
[
"(\n df.groupby('param_marka-pojazdu')['price_value']\n .agg([np.mean, np.median, np.size])\n .sort_values(by='mean', ascending=False)\n .head(50)\n).plot(kind='bar', figsize=(15,5), subplots=True)",
"_____no_output_____"
],
[
"def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=[np.mean, np.median, np.size], feat_sort='mean', top=50, subplots=50):\n return (\n df\n .groupby(feat_groupby)[feat_agg]\n .agg(agg_funcs)\n .sort_values(by=feat_sort, ascending=False)\n .head(top)\n ).plot(kind='bar', figsize=(15,5), subplots=subplots)",
"_____no_output_____"
],
[
"group_and_barplot('param_marka-pojazdu');",
"_____no_output_____"
],
[
"group_and_barplot('param_kolor',feat_sort='mean');",
"_____no_output_____"
],
[
"#wczytywanie danych",
"_____no_output_____"
]
],
[
[
"Wczytywanie danych\n",
"_____no_output_____"
]
],
[
[
"pwd",
"_____no_output_____"
],
[
"df = pd.read_hdf(\"data/car.h5\")\ndf.shape",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
]
],
[
[
"Dummy model",
"_____no_output_____"
]
],
[
[
"df.select_dtypes(np.number).columns",
"_____no_output_____"
],
[
"feats = ['car_id'] \n\nX = df[ feats ].values\ny = df[ ['car_id'] ].values",
"_____no_output_____"
],
[
"model = DummyRegressor()\nmodel.fit(X,y)\ny_pred = model.predict(X)\n\nmae(y, y_pred)",
"_____no_output_____"
],
[
"[x for x in df.columns if 'price' in x]",
"_____no_output_____"
],
[
"df['price_currency'].value_counts()",
"_____no_output_____"
],
[
"df['price_currency'].value_counts(normalize=True)*100",
"_____no_output_____"
],
[
"print(df.shape)\ndf = df[ df['price_currency'] != 'EUR' ]\nprint(df.shape)",
"(106494, 155)\n(106290, 155)\n"
],
[
"df.head()",
"_____no_output_____"
],
[
"df['param_color'].factorize()",
"_____no_output_____"
],
[
"df['param_color'].value_counts()",
"_____no_output_____"
],
[
"SUFFIX_CAT = '__cat'\n\nfor feat in df.columns:\n\tif isinstance(df[feat][0],list): continue\n\t\n\tfactorized_values = df[feat].factorize()[0]\n\tif SUFFIX_CAT in feat:\n\t\tdf[feat] = factorized_value\n\telse:\n\t df[feat+ SUFFIX_CAT ] = factorized_values",
"_____no_output_____"
],
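[
"# Quick look at what factorize() returns: integer codes plus the unique values,\n# with missing values mapped to -1. This is what the *__cat columns above contain.\ndemo_codes, demo_uniques = pd.Series(['red', 'blue', 'red', None]).factorize()\nprint(demo_codes)\nprint(demo_uniques)",
"_____no_output_____"
],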
[
"cat_feats = [x for x in df.columns if SUFFIX_CAT in x]\nlen(cat_feats)",
"_____no_output_____"
],
[
"cat_feats = [x for x in df.columns if SUFFIX_CAT in x]\ncat_feats = [x for x in cat_feats if 'price' not in x]\n\nlen(cat_feats)",
"_____no_output_____"
],
[
"def run_model(model, feats):\n X = df[feats].values\n y = df['price_value'].values\n\n scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')\n return np.mean(scores) , np.std(scores)",
"_____no_output_____"
]
],
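[
[
"# Note on the scoring convention used in run_model: 'neg_mean_absolute_error' is the\n# negated MAE (sklearn conventions make higher better), so values closer to zero are better.\n# Tiny check on synthetic data:\ndemo_X = np.arange(30).reshape(-1, 1)\ndemo_y = np.arange(30) * 2.0\ndemo_scores = cross_val_score(DecisionTreeRegressor(), demo_X, demo_y, cv=3, scoring='neg_mean_absolute_error')\nprint(demo_scores, np.mean(demo_scores))",
"_____no_output_____"
]
],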
[
[
"Decision Tree",
"_____no_output_____"
]
],
[
[
"model = DecisionTreeRegressor(max_depth=5)\nrun_model(model,cat_feats)",
"_____no_output_____"
]
],
[
[
"Random Forest",
"_____no_output_____"
]
],
[
[
"model = RandomForestRegressor(max_depth=5 , n_estimators=50, random_state=0)\nrun_model(model,cat_feats)",
"_____no_output_____"
]
],
[
[
"XGBoost\n",
"_____no_output_____"
]
],
[
[
"xgb_params = {\n 'max_depth' : 5 , \n 'n_estimators' : 50, \n 'learning_rate' : 0.1,\n 'seed':0\n}\nmodel = xgb.XGBRegressor(**xgb_params)",
"_____no_output_____"
],
[
"\nrun_model(model, cat_feats)",
"[17:22:24] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[17:22:43] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[17:23:03] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"xgb_params = {\n 'max_depth' : 5 , \n 'n_estimators' : 50, \n 'learning_rate' : 0.1,\n 'seed':0\n}\nm = xgb.XGBRegressor(**xgb_params)",
"_____no_output_____"
],
[
"\n\nm.fit(X,y)\nimp = PermutationImportance(m, random_state=0).fit(X,y)\neli5.show_weights(imp, feature_names=cat_feats)",
"[18:52:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"feats=['param_napęd__cat', 'param_rok-produkcji__cat', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat','feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat','seller_name__cat','feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat','feature_system-start-stop__cat','feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat']\n# less features\nprint(len(feats))\nrun_model(model, feats)",
"20\n[22:00:11] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[22:00:15] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[22:00:19] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"df['param_rok-produkcji'].unique()",
"_____no_output_____"
],
[
"#year of production as category number\ndf['param_rok-produkcji__cat'].unique()",
"_____no_output_____"
],
[
"df['param_rok-produkcji']= df['param_rok-produkcji'].map(lambda x: -1 if str(x)=='None' else int(x) )",
"_____no_output_____"
],
[
"# param_rok-produkcji__cat changed to param_rok-produkcji\n\nfeats=['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat','feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat','seller_name__cat','feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat','feature_system-start-stop__cat','feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat']\n# less features\nprint(len(feats))\nrun_model(model, feats)",
"20\n[22:00:44] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[22:00:48] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[22:00:52] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"df['param_moc'].unique()",
"_____no_output_____"
],
[
"df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x)=='None' else int(x.split(' ')[0]))",
"_____no_output_____"
],
[
"df['param_moc'].unique()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# param_rok-produkcji__cat changed to param_rok-produkcji\n\nfeats=['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat','feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat','seller_name__cat','feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat','feature_system-start-stop__cat','feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat']\n# less features\nprint(len(feats))\nrun_model(model, feats)",
"20\n[22:05:21] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[22:05:25] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[22:05:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"df['param_pojemność-skokowa'].unique()",
"_____no_output_____"
],
[
"df['param_pojemność-skokowa']= df['param_pojemność-skokowa'].map(lambda x: -1 if str(x)=='None' else int(x.split('cm')[0].replace(' ','')))",
"_____no_output_____"
],
[
"df['param_pojemność-skokowa'].unique()",
"_____no_output_____"
],
[
"feats=['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat','feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa','seller_name__cat','feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat','feature_system-start-stop__cat','feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat']\n# less features\nprint(len(feats))\nrun_model(model, feats)",
"20\n[11:29:47] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[11:29:51] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[11:29:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"def obj_func(params):\n print(\"Training with params:\")\n print(params)\n\n mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)\n \n return {'loss': np. abs(mean_mae), 'status': STATUS_OK}\n\n #return {'loss': np. abs(mean_mae), 'status': STATUS_FAILED}\n #try:\n\t# mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)\n\t#except:\n\t# return {'loss': np. abs(mean_mae), 'status': STATUS_FAILED}\n\n#space\n\nxgb_reg_params = {\n\t'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),\n\t'max_depth':\thp.choice('max_depth', np.arange(5,16,1, dtype=int)),\n\t'subsample':\thp.quniform('subsample', 0.5, 1, 0.05),\n\t'colsample_bytree': hp.quniform('colsample_bytree', 0.5,1, 0.05),\n\t'objective':\t'reg:squarederror',\n\t'n_estimators': 100,\t\n\t'seed':0,\n}\nbest = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25)",
"Training with params:\n{'colsample_bytree': 0.55, 'learning_rate': 0.3, 'max_depth': 6, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params:\n{'colsample_bytree': 0.9, 'learning_rate': 0.25, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params:\n{'colsample_bytree': 0.7000000000000001, 'learning_rate': 0.2, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params:\n{'colsample_bytree': 0.75, 'learning_rate': 0.3, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.7000000000000001}\nTraining with params:\n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.25, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params:\n{'colsample_bytree': 0.8500000000000001, 'learning_rate': 0.15000000000000002, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\nTraining with params:\n{'colsample_bytree': 0.5, 'learning_rate': 0.05, 'max_depth': 11, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params:\n{'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.05, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params:\n{'colsample_bytree': 0.8500000000000001, 'learning_rate': 0.05, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params:\n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.2, 'max_depth': 5, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params:\n{'colsample_bytree': 0.75, 'learning_rate': 0.1, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params:\n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.15000000000000002, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.6000000000000001}\nTraining with params:\n{'colsample_bytree': 0.7000000000000001, 'learning_rate': 0.1, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.65}\nTraining with params:\n{'colsample_bytree': 0.8500000000000001, 'learning_rate': 0.1, 'max_depth': 6, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8500000000000001}\nTraining with params:\n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.2, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params:\n{'colsample_bytree': 0.55, 'learning_rate': 0.1, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params:\n{'colsample_bytree': 0.75, 'learning_rate': 0.05, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params:\n{'colsample_bytree': 0.9, 'learning_rate': 0.25, 'max_depth': 10, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.55}\nTraining with params:\n{'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.2, 'max_depth': 11, 'n_estimators': 100, 
'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.75}\nTraining with params:\n{'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.15000000000000002, 'max_depth': 8, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9}\nTraining with params:\n{'colsample_bytree': 1.0, 'learning_rate': 0.15000000000000002, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 1.0}\nTraining with params:\n{'colsample_bytree': 1.0, 'learning_rate': 0.05, 'max_depth': 7, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 1.0}\nTraining with params:\n{'colsample_bytree': 0.8500000000000001, 'learning_rate': 0.05, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params:\n{'colsample_bytree': 0.8, 'learning_rate': 0.15000000000000002, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.8}\nTraining with params:\n{'colsample_bytree': 0.8, 'learning_rate': 0.15000000000000002, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9500000000000001}\n100%|██████████| 25/25 [21:17<00:00, 66.72s/it, best loss: 7443.842182632055]\n"
],
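[
"# The hp.choice entries in `best` come back as indices into the choice lists, not the\n# values themselves; space_eval maps them back onto the actual parameter values\n# (reusing the xgb_reg_params search space and `best` from the cell above).\nfrom hyperopt import space_eval\n\nbest_params = space_eval(xgb_reg_params, best)\nprint(best_params)",
"_____no_output_____"
],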
[
"",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e745a369a223995bed133f3d17af5db186961615 | 4,017 | ipynb | Jupyter Notebook | index.ipynb | janbucher/normalverteilung | 051a026a3b50135d6c39f717cb04bdcfd972fb85 | [
"BSD-3-Clause"
] | null | null | null | index.ipynb | janbucher/normalverteilung | 051a026a3b50135d6c39f717cb04bdcfd972fb85 | [
"BSD-3-Clause"
] | null | null | null | index.ipynb | janbucher/normalverteilung | 051a026a3b50135d6c39f717cb04bdcfd972fb85 | [
"BSD-3-Clause"
] | null | null | null | 28.899281 | 134 | 0.585263 | [
[
[
"Dies ist bisher mehr eine Widget Demo als eine Erklärung der Normalverteilung. Aber was nicht ist, kann ja noch werden.\n\nIn this app we:\n* Plot the gaussian density for a specific $\\mu$ and $\\sigma$\n* Use the FloatSlider widget in ipywidgets to represent $\\mu$ and $\\sigma$ values\n* Stack the density plot along with the sliders into a nice layout using HBox and VBox layout objects available in ipywidgets\n* Link the sliders to the plot so that the plot gets updated when the values of $\\mu$ and $\\sigma$ change\n\nFind the code [here](https://github.com/pbugnion/voila-gallery/blob/master/gaussian-density/index.ipynb).\n\nThis example is taken from [ChakriCherukuri/mlviz](https://github.com/ChakriCherukuri/mlviz).",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom scipy.stats import norm\n\nfrom ipywidgets import FloatSlider, HBox, VBox\nimport bqplot.pyplot as plt",
"_____no_output_____"
],
[
"x = np.linspace(-10, 10, 200)\ny = norm.pdf(x)\n\n# plot the gaussian density\ntitle_tmpl = 'Gaussian Density (mu = {} and sigma = {})'\npdf_fig = plt.figure(title=title_tmpl.format(0, 1))\npdf_line = plt.plot(x, y, 'm', stroke_width=3)",
"_____no_output_____"
],
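[
"# Cross-check that norm.pdf matches the explicit standard-normal density\n# f(x) = exp(-x**2 / 2) / sqrt(2*pi), using the x and y arrays from the cell above.\nexplicit = np.exp(-x**2 / 2) / np.sqrt(2 * np.pi)\nprint(np.allclose(y, explicit))",
"_____no_output_____"
],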
[
"# use two sliders to represent mu and sigma\nmu_slider = FloatSlider(description='mu', value=0, min=-5, max=5, step=.1)\nsigma_slider = FloatSlider(description='sigma', value=1, min=0.1, max=5, step=.1)\n\nslider_layout = HBox([mu_slider, sigma_slider])",
"_____no_output_____"
],
[
"def update_density(change):\n new_mu = mu_slider.value\n new_sigma = sigma_slider.value\n # update the y attribute of the plot with the new pdf\n # computed using new mu and sigma values\n pdf_line.y = norm.pdf(x, new_mu, new_sigma)\n \n # also update the fig title\n pdf_fig.title = title_tmpl.format(new_mu, new_sigma)\n\n# register the above callback with the 'value' trait of the sliders\nmu_slider.observe(update_density, 'value')\nsigma_slider.observe(update_density, 'value')",
"_____no_output_____"
],
[
"# now put all the widgets together into a simple dashboard\n# the plot should update now when the slider values are updated!\nfinal_layout = VBox([pdf_fig, slider_layout])\nfinal_layout",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e745b763a769d1e0b64ba1b2ef74a9cbe02d2677 | 8,163 | ipynb | Jupyter Notebook | M2AA3/M2AA3-Polynomials/Lesson 02 - Newton Method/.ipynb_checkpoints/Newton Tableau-checkpoint.ipynb | ImperialCollegeLondon/Random-Stuff | 219bc0e26ea6f5ee7548009c849959b268f54821 | [
"MIT"
] | 2 | 2021-05-16T04:08:34.000Z | 2021-11-27T12:56:10.000Z | M2AA3/M2AA3-Polynomials/Lesson 02 - Newton Method/.ipynb_checkpoints/Newton Tableau-checkpoint.ipynb | ImperialCollegeLondon/Random-Stuff | 219bc0e26ea6f5ee7548009c849959b268f54821 | [
"MIT"
] | null | null | null | M2AA3/M2AA3-Polynomials/Lesson 02 - Newton Method/.ipynb_checkpoints/Newton Tableau-checkpoint.ipynb | ImperialCollegeLondon/Random-Stuff | 219bc0e26ea6f5ee7548009c849959b268f54821 | [
"MIT"
] | 3 | 2020-03-31T00:23:13.000Z | 2020-05-13T15:01:46.000Z | 25.996815 | 270 | 0.46723 | [
[
[
"import sympy as sp\nimport numpy as np",
"_____no_output_____"
],
[
"x = sp.symbols('x')\np = sp.Function('p')\nl = sp.Function('l')\npoly = sp.Function('poly')\np3 = sp.Function('p3')\np4 = sp.Function('p4')",
"_____no_output_____"
]
],
[
[
"# Introduction\n\nLast time we have used Lagrange basis to interpolate polynomial. However, it is not efficient to update the interpolating polynomial when a new data point is added. We look at an iterative approach.\n\nGiven points $\\{(z_i, f_i) \\}_{i=0}^{n-1}$, $z_i$ are distinct and $p_{n-1} \\in \\mathbb{C}[z]_{n-1}\\, , p_{n-1}(z_i) = f_i$. <br> We add a point $(z_n, f_n)$ and find a polynomial $p_n \\in \\mathbb{C}[x]_{n-1}$ which satisfies $\\{(z_i, f_i) \\}_{i=0}^{n}$. ",
"_____no_output_____"
],
[
"We assume $p_n(z)$ be the form\n\\begin{equation}\np_n(z) = p_{n-1}(z) + C\\prod_{i=0}^{n-1}(z - z_i)\n\\end{equation}\nso that the second term vanishes at $z = z_0,...,z_{n-1}$ and $p_n(z_i) = p_{n-1}(z_i), i = 0,...,n-1$. We also want $p_n(z_n) = f_n$ so we have\n\\begin{equation}\nf_n = p_{n-1}(z_n) + C\\prod_{i=0}^{n-1}(z_n - z_i) \\Rightarrow C = \\frac{f_n - p_{n-1}(z_n)}{\\prod_{i=0}^{n-1}(z_n - z_i)}\n\\end{equation}\nThus we may perform interpolation iteratively.",
"_____no_output_____"
],
[
"**Example:** Last time we have\n\\begin{equation}\n(z_0, f_0) = (-1,-3), \\quad\n(z_1, f_1) = (0,-1), \\quad\n(z_2, f_2) = (2,4), \\quad\n(z_3, f_3) = (5,1)\n\\end{equation}\nand \n\\begin{equation}\np_3(x) = \\frac{-13}{90}z^3 + \\frac{14}{45}z^2 + \\frac{221}{90}z - 1\n\\end{equation}",
"_____no_output_____"
]
],
[
[
"z0 = -1; f0 = -3; z1 = 0; f1 = -1; z2 = 2; f2 = 4; z3 = 5; f3 = 1; z4 = 1; f4 = 1\np3 = -13*x**3/90 + 14*x**2/45 + 221*x/90 - 1",
"_____no_output_____"
]
],
[
[
"We add a point $(z_4,f_4) = (1,1)$ and obtain $p_4(x)$",
"_____no_output_____"
]
],
[
[
"z4 = 1; f4 = 1",
"1 1\n"
],
[
"C = (f4 - p3.subs(x,z4))/((z4-z0)*(z4-z1)*(z4-z2)*(z4-z3))\nC",
"_____no_output_____"
],
[
"p4 = p3 + C*(x-z0)*(x-z1)*(x-z2)*(x-z3)\nsp.expand(p4)",
"_____no_output_____"
]
],
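[
[
"# Sanity check: the updated polynomial should still pass through all five data points,\n# i.e. evaluating p4 at z0,...,z4 should return -3, -1, 4, 1, 1.\nfor zi in [z0, z1, z2, z3, z4]:\n    print(zi, p4.subs(x, zi))",
"_____no_output_____"
]
],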
[
[
"**Remark:** the constant $C$ is usually written as $f[z_0,z_1,z_2,z_3,z_4]$. Moreover by iteration we have\n$$p_n(z) = \\sum_{i=0}^n f[z_0,...,z_n] \\prod_{j=0}^i (z - z_j)$$",
"_____no_output_____"
],
[
"# Newton Tableau",
"_____no_output_____"
],
[
"We look at efficient ways to compute $f[z_0,...,z_n]$, iteratively from $f[z_0,...,z_{n-1}]$ and $f[z_1,...,z_n]$. <br>\nWe may first construct $p_{n-1}$ and $q_{n-1}$ before constructing $p_n$ itself, where\n\\begin{gather}\np_{n-1}(z_i) = f_i \\quad i = 0,...,n-1\\\\\nq_{n-1}(z_i) = f_i \\quad i = 1,...,n\n\\end{gather}\n**Claim:** The following polynomial interpolate $\\{(z_i,f_i)\\}_{i=0}^n$\n\\begin{equation}\np_n(z_i) = \\frac{(z - z_n)p_{n-1}(z) - (z - z_0)q_{n-1}(z)}{z_0 - z_n}\n\\end{equation}\nSince interpolating polynomial is unique, by comparing coefficient of $z_n$, we have\n$$f[z_0,...,z_{n}] = \\frac{f[z_0,...,z_{n-1}]-f[z_1,...,z_{n}]}{z_0 - z_n}$$",
"_____no_output_____"
]
],
[
[
"def product(xs,key,i):\n \n #Key: Forward or Backward\n \n n = len(xs)-1\n l = 1\n \n for j in range(i):\n if key == 'forward':\n l *= (x - xs[j])\n else:\n l *= (x - xs[n-j])\n\n return l",
"_____no_output_____"
],
[
"def newton(xs,ys,key):\n \n # Key: Forward or Backward\n \n n = len(xs)-1\n # print(xs)\n print(ys)\n old_column = ys\n \n if key == 'forward':\n coeff = [fs[0]]\n elif key == 'backward':\n coeff = [fs[len(fs)-1]]\n else:\n return 'error'\n \n for i in range(1,n+1): # Column Index\n new_column = [(old_column[j+1] - old_column[j])/(xs[j+i] - xs[j]) for j in range(n-i+1)]\n print(new_column)\n if key == 'forward':\n coeff.append(new_column[0])\n else:\n coeff.append(new_column[len(new_column)-1])\n old_column = new_column\n \n # print(coeff)\n \n poly = 0\n for i in range(n+1):\n poly += coeff[i] * product(xs,key,i)\n \n return poly",
"_____no_output_____"
],
[
"zs = [1, 4/3, 5/3, 2]; fs = [np.sin(x) for x in zs]",
"_____no_output_____"
],
[
"p = newton(zs,fs,'forward')\nprint(p)\nprint(sp.simplify(p))",
"[0.8414709848078965, 0.9719379013633127, 0.9954079577517649, 0.9092974268256817]\n[0.3914007496662487, 0.07041016916535667, -0.25833159277824974]\n[-0.481485870751338, -0.4931126429154095]\n[-0.011626772164071542]\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e745ba0312717ac6655db3db45c614ae2c7d63e6 | 69,948 | ipynb | Jupyter Notebook | playground_classification.ipynb | mansourkheffache/intESN | 41581213b9a8d3e333d97b40d34a941f53d31dc1 | [
"MIT"
] | 2 | 2018-08-10T10:33:49.000Z | 2021-12-02T17:03:54.000Z | playground_classification.ipynb | mansourkheffache/intESN | 41581213b9a8d3e333d97b40d34a941f53d31dc1 | [
"MIT"
] | 1 | 2018-04-07T20:31:06.000Z | 2018-09-03T11:38:29.000Z | playground_classification.ipynb | mansourkheffache/intESN | 41581213b9a8d3e333d97b40d34a941f53d31dc1 | [
"MIT"
] | 2 | 2018-04-20T18:13:09.000Z | 2019-10-18T08:12:53.000Z | 208.8 | 62,740 | 0.910777 | [
[
[
"%reload_ext autoreload\n%autoreload 2\n\nimport numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\nfrom intESN import intESN",
"_____no_output_____"
],
[
"# dataset\n\ndomain = np.arange(1024)\n\nsine = np.sin(0.1 * domain)\nsquare = signal.square(0.1 * domain)\n\nX_train = np.zeros([2, domain.size, 1])\nX_train[0] = sine.reshape([domain.size, 1])\nX_train[1] = square.reshape([domain.size, 1])\n\ny_train = np.arange(2)\n\nprint(y_train)",
"[0 1]\n"
],
[
"# plot data\n\nf, (ax1, ax2) = plt.subplots(2, 1, figsize=(15,5), sharex=True)\nax1.plot(domain, sine)\nax2.plot(domain, square)\n\nplt.show()",
"_____no_output_____"
],
[
"# setup\n\n# dimensnions\n# K: inputs , N: neurons, L: outputs\nK = 1\nN = 100\nL = 2\n\n# quantization\nsensitivity = 200\nlevels = np.linspace(-1.2, 1.2, sensitivity)\nref_vectors = np.array([np.random.choice([-1.0, 0.0, 1.0], size=N) for i in range(2)])\n\ndef quantize(value):\n level = np.digitize(value, levels)\n section = int(level / float(sensitivity) * N)\n return np.append(ref_vectors[0][:section], ref_vectors[1][section:])\n\nmodel = intESN(N, K, L, quantize, clip=3)",
"_____no_output_____"
],
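[
"# Peek at the encoding: quantize() splices the two random reference vectors together,\n# switching from ref_vectors[0] to ref_vectors[1] at a position proportional to the\n# quantization level of the input value.\nsample = quantize(0.5)\nprint(sample.shape, np.unique(sample))",
"_____no_output_____"
],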
[
"# train\n\nmodel.fit(X_train, y_train, task='classification')",
"0.707106781187\n"
],
[
"# test\nsig = np.cos(0.1 * domain + 20)\npred = model.classify(sig, np.array([0]))",
"1.0\n"
],
[
"# display results\n\nprint(pred)",
"[0]\n"
],
[
"# test\n\nmodel.softmax(X_train, np.array([0, 1]))",
"0.676986926618\n0.250386075861\n0.141221692631\n0.0980961741281\n0.0746959129089\n0.0601484257121\n0.0502773609384\n0.0431601086593\n0.0377949421178\n0.0336108722122\n0.0302594782365\n0.0275164770666\n0.0252311435628\n0.0232985348443\n0.021643386315\n0.0202103428657\n0.0189578018892\n0.0178539032958\n0.0168738425011\n0.01599802452\n0.0152107677011\n0.0144993754708\n0.0138534598784\n0.0132644408105\n0.01272516993\n0.0122296445913\n0.0117727876096\n0.0113502758747\n0.0109584056355\n0.0105939856276\n0.0102542515612\n0.00993679715447\n0.0096395180988\n0.00936056621464\n0.00909831170268\n0.00885131187135\n0.00861828508194\n0.00839808892405\n0.00818970184211\n0.00799220759313\n0.00780478203995\n0.00762668188095\n0.00745723499316\n0.00729583212582\n0.00714191972927\n0.00699499374236\n0.0068545941922\n0.00672030048528\n0.00659172728862\n0.00646852091691\n0.0063503561543\n0.00623693345129\n0.00612797644606\n0.00602322976726\n0.00592245708173\n0.00582543935587\n0.00573197330397\n0.00564187000037\n0.00555495363571\n0.0054710604001\n0.00539003747837\n0.0053117421445\n0.00523604094403\n0.00516280895456\n0.00509192911589\n0.00502329162224\n0.00495679336986\n0.00489233745435\n0.00482983271252\n0.00476919330419\n0.00471033833003\n0.00465319148177\n0.00459768072174\n0.00454373798878\n0.00449129892816\n0.00444030264316\n0.00439069146633\n0.0043424107487\n0.0042954086652\n0.00424963603498\n0.00420504615524\n0.0041615946474\n0.00411923931462\n0.00407794000963\n0.00403765851204\n0.00399835841437\n0.00396000501605\n0.00392256522474\n0.00388600746448\n0.00385030158999\n0.0038154188067\n0.00378133159617\n0.00374801364627\n0.00371543978599\n0.00368358592436\n0.0036524289933\n0.003621946894\n0.00359211844666\n0.00356292334338\n0.00353434210378\n"
],
[
"# test\n# sig = np.cos(0.1 * domain + 20)\npred = model.predict(X_train, np.array([0, 1]))\nprint(pred)",
"3.02331715101\n[[ 2.71486765 -3.09810305]\n [-3.06594109 2.73113738]]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e745f50f85ab282aa509cc7e5b05b82b002fb549 | 235,853 | ipynb | Jupyter Notebook | ColorSegmentation.ipynb | gabilodeau/INF6804 | 126defd9397beafbf1c97ddeec6effe699da235d | [
"MIT"
] | 5 | 2019-06-03T21:17:07.000Z | 2022-01-30T19:43:56.000Z | ColorSegmentation.ipynb | gabilodeau/INF6804 | 126defd9397beafbf1c97ddeec6effe699da235d | [
"MIT"
] | null | null | null | ColorSegmentation.ipynb | gabilodeau/INF6804 | 126defd9397beafbf1c97ddeec6effe699da235d | [
"MIT"
] | 5 | 2018-10-22T20:43:07.000Z | 2022-01-19T03:29:15.000Z | 827.554386 | 148,810 | 0.948383 | [
[
[
"<a href=\"https://colab.research.google.com/github/gabilodeau/INF6804/blob/master/ColorSegmentation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"INF6804 Vision par ordinateur\n\nPolytechnique Montréal\n\nExemple de la segmentation d'une image",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport cv2\nimport os\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"Lecture d'une image.",
"_____no_output_____"
]
],
[
[
"image_name = 'bureau.jpg'\nif not os.path.exists(image_name):\n !gdown https://raw.githubusercontent.com/gabilodeau/INF6804/master/images/bureau.jpg",
"Downloading...\nFrom: https://raw.githubusercontent.com/gabilodeau/INF6804/master/images/bureau.jpg\nTo: /content/bureau.jpg\n\r 0% 0.00/21.9k [00:00<?, ?B/s]\r100% 21.9k/21.9k [00:00<00:00, 1.48MB/s]\n"
],
[
"image = cv2.imread(image_name)\nb,g,r = cv2.split(image) #OpenCV lit les images en BGR\nimage = cv2.merge([r,g,b])\n#ou image = image[:,:,::-1]\nplt.figure(figsize = (8,8))\nplt.imshow(image)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Segmentation avec K-means. Segmentation en incluant la position des pixels dans l'image. On fera le groupement de vecteurs [R,G,B,X,Y] La plupart des pixels d'un groupe seront connectés entre eux.\n\nCas 1: Sous-segmentation, pas assez de groupes",
"_____no_output_____"
]
],
[
[
"# Nombre de groupes\nK = 6 \n# Définition des critères d'arret. EPS est le déplacement des centres \ncriteresArret = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n\nRGBPix = image.reshape((-1,3))\n\n# Pour ajouter la position. Groupement de vecteur 1 par 5 plutôt que 1 par 3.\nRGBetPosPix= np.zeros((len(RGBPix),5),dtype=np.float32)\n\n# Création d'une matrice de coordonnées.\nx = np.linspace(0, image.shape[1]-1,image.shape[1])\ny = np.linspace(0, image.shape[0]-1,image.shape[0])\n# Pour obtenir les paires de coordonnées possibles.\nX,Y = np.meshgrid(x, y)\nX= X.reshape((-1),1)\nY= Y.reshape((-1),1)\n\nRGBetPosPix = np.hstack((RGBPix,X,Y))\nRGBetPosPix = np.float32(RGBetPosPix)\n\nret,etiquettes,centres=cv2.kmeans(RGBetPosPix,K,None,criteresArret,1,cv2.KMEANS_RANDOM_CENTERS)\n\ncentres = np.uint8(centres)\ncentressansPos= np.zeros((K,3),dtype=np.uint8) #Pour affichage des classes\nfor i in range(0,len(centres)):\n centressansPos[i]=centres[i][0:3]\n\nres = centressansPos[etiquettes] #On utilise la couleur de son centre pour représenter chaque segment.\nres2 = res.reshape((image.shape))\nplt.figure(figsize = (8,8))\nplt.imshow(res2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Cas 2: Sur-segmentation, trop de groupes",
"_____no_output_____"
]
],
[
[
"K = 100\n\nret,etiquettes,centres=cv2.kmeans(RGBetPosPix,K,None,criteresArret,1,cv2.KMEANS_RANDOM_CENTERS)\n\ncentres = np.uint8(centres)\ncentressansPos= np.zeros((K,3),dtype=np.uint8) #Pour affichage des classes\nfor i in range(0,len(centres)):\n centressansPos[i]=centres[i][0:3]\n\nres = centressansPos[etiquettes]\nres2 = res.reshape((image.shape))\nplt.figure(figsize = (8,8))\nplt.imshow(res2)\nplt.show()",
"_____no_output_____"
]
],
[
[
"Difficile d'avoir un résultat parfait.",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e745f871d98e8c72b1e5077c9440e0510a598056 | 53,089 | ipynb | Jupyter Notebook | Logistic_Regression_Multiclass_classification.ipynb | iambalakrishnan/Classification-models | 8285e8cbae38ba6528900cd82faf9ba3096332f3 | [
"MIT"
] | null | null | null | Logistic_Regression_Multiclass_classification.ipynb | iambalakrishnan/Classification-models | 8285e8cbae38ba6528900cd82faf9ba3096332f3 | [
"MIT"
] | null | null | null | Logistic_Regression_Multiclass_classification.ipynb | iambalakrishnan/Classification-models | 8285e8cbae38ba6528900cd82faf9ba3096332f3 | [
"MIT"
] | null | null | null | 85.078526 | 4,298 | 0.811844 | [
[
[
"import matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits #digits dataset",
"_____no_output_____"
],
[
"digits = load_digits()",
"_____no_output_____"
],
[
"dir(digits)",
"_____no_output_____"
],
[
"digits.data[0]",
"_____no_output_____"
],
[
"plt.gray()\nplt.matshow(digits.images[0])",
"_____no_output_____"
],
[
"plt.matshow(digits.images[3])",
"_____no_output_____"
],
[
"plt.matshow(digits.images[1])\n#images are in sequence ",
"_____no_output_____"
],
[
"plt.gray()\nfor i in range(5):\n plt.matshow(digits.images[i])",
"_____no_output_____"
],
[
"digits.target[0:5]",
"_____no_output_____"
],
[
"#lets import train test split\n\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size = 0.2)",
"_____no_output_____"
],
[
"len(X_train)",
"_____no_output_____"
],
[
"len(X_test)",
"_____no_output_____"
],
[
"#import logistic regression \nfrom sklearn.linear_model import LogisticRegression",
"_____no_output_____"
],
[
"model = LogisticRegression()\n",
"_____no_output_____"
],
[
"model.fit(X_train, y_train)",
"/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_logistic.py:818: ConvergenceWarning: lbfgs failed to converge (status=1):\nSTOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n\nIncrease the number of iterations (max_iter) or scale the data as shown in:\n https://scikit-learn.org/stable/modules/preprocessing.html\nPlease also refer to the documentation for alternative solver options:\n https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG,\n"
],
[
"#score \nmodel.score(X_test, y_test)",
"_____no_output_____"
],
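[
"# A confusion matrix shows which digits the model confuses on the held-out split\n# (minimal sketch using the model, X_test and y_test defined above).\nfrom sklearn.metrics import confusion_matrix\n\ny_predicted = model.predict(X_test)\nprint(confusion_matrix(y_test, y_predicted))",
"_____no_output_____"
],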
[
"plt.matshow(digits.images[67])",
"_____no_output_____"
],
[
"digits.target[67]",
"_____no_output_____"
],
[
"model.predict(digits.data[[67]])",
"_____no_output_____"
],
[
"model.predict(X_test[[67]])",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74605b87715d2f276a2ed18074638080cc5be06 | 3,007 | ipynb | Jupyter Notebook | 2/7_chainermn_result.ipynb | rymzt/aaic_gathering | 5ce8fa130257e6c53efb431ebd84c009c9d4b641 | [
"MIT"
] | 3 | 2017-09-05T10:29:26.000Z | 2019-02-08T08:01:38.000Z | 2/7_chainermn_result.ipynb | rymzt/aaic_gathering | 5ce8fa130257e6c53efb431ebd84c009c9d4b641 | [
"MIT"
] | null | null | null | 2/7_chainermn_result.ipynb | rymzt/aaic_gathering | 5ce8fa130257e6c53efb431ebd84c009c9d4b641 | [
"MIT"
] | 2 | 2018-07-09T01:04:06.000Z | 2018-08-09T09:46:24.000Z | 19.653595 | 133 | 0.53841 | [
[
[
"resultというディレクトリにtrain_mnist.pyの実行結果が出力されているとします。",
"_____no_output_____"
]
],
[
[
"ls result",
"_____no_output_____"
]
],
[
[
"cg.dotはDOT言語で記述されたネットワーク構造のファイル、logはJSONで記述された実行時間、エポック数、反復回数、精度などを記述したファイルになります。",
"_____no_output_____"
],
[
"cg.dotはdotコマンドによりpngなどの画像ファイルに変換することができます。",
"_____no_output_____"
]
],
[
[
"%%bash\ndot -Tpng result/cg.dot -o result/cg.png",
"_____no_output_____"
]
],
[
[
"result/cg.pngを表示してみます。<br>\nここではPythonスクリプトを使用して表示してみます。(eogコマンド等を使っても大丈夫です。)",
"_____no_output_____"
]
],
[
[
"from PIL import Image\nfrom matplotlib import pylab as plt\n\n%matplotlib inline\nplt.figure(figsize=(8, 8), dpi=400)\nplt.imshow(np.array(Image.open('result/cg.png')))",
"_____no_output_____"
]
],
[
[
"result/logを表示してみます。<br>\nPythonスクリプトを使ってファイルを読み込んでみましょう。",
"_____no_output_____"
]
],
[
[
"import json\n\nwith open('result/log', 'r') as f:\n data = json.load(f)\n print(data)",
"_____no_output_____"
]
],
[
[
"epoch毎にelapsed_time, iteration, main/loss, main/accuracy, validation/main/loss, validation/main/accuracyが出力されているのがわかります。<br>\nここでは、epoch毎のvalidation/main/accuracyを表示してみましょう。",
"_____no_output_____"
]
],
[
[
"x, y = [],[]\nxlabel, ylabel = 'epoch', 'validation/main/accuracy'\n\nfor d in data:\n x.append(d[xlabel])\n y.append(d[ylabel])\n\n%matplotlib inline\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nplt.plot(x,y)",
"_____no_output_____"
]
],
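[
[
"# The same pattern works for any other key in the log entries, e.g. the training loss.\nx2, y2 = [], []\nfor d in data:\n    x2.append(d['epoch'])\n    y2.append(d['main/loss'])\n\nplt.xlabel('epoch')\nplt.ylabel('main/loss')\nplt.plot(x2, y2)",
"_____no_output_____"
]
],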
[
[
"xlabelとylabelを変更して他のデータも表示してみましょう。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7460a42b26773e06740f0143d1b4ceaf65d42d1 | 483,577 | ipynb | Jupyter Notebook | HeroesOfPymoli_starter.ipynb | JarrodCasey/pandas-challenge | 7f457af18df461f8b61a0260ed7929d06c6703f6 | [
"Apache-2.0"
] | null | null | null | HeroesOfPymoli_starter.ipynb | JarrodCasey/pandas-challenge | 7f457af18df461f8b61a0260ed7929d06c6703f6 | [
"Apache-2.0"
] | null | null | null | HeroesOfPymoli_starter.ipynb | JarrodCasey/pandas-challenge | 7f457af18df461f8b61a0260ed7929d06c6703f6 | [
"Apache-2.0"
] | null | null | null | 68.709434 | 156 | 0.49472 | [
[
[
"### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport pandas as pd\n\n# File to Load (Remember to Change These)\nfile_to_load = \"Resources/purchase_data.csv\"\n\n# Read Purchasing File and store into Pandas data frame\npurchase_data = pd.read_csv(file_to_load)\npurchase_data.head()",
"_____no_output_____"
]
],
[
[
"## Player Count",
"_____no_output_____"
],
[
"* Display the total number of players\n",
"_____no_output_____"
]
],
[
[
"# Calculate the number of unique players in the DataFram\nPlayerCount = len(purchase_data[\"SN\"].unique())\n\n# Place data found into a DataFrame\nTotalPlayer_df = pd.DataFrame({\"Total Players\":[PlayerCount]})\nTotalPlayer_df",
"_____no_output_____"
]
],
[
[
"## Purchasing Analysis (Total)",
"_____no_output_____"
],
[
"* Run basic calculations to obtain number of unique items, average price, etc.\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display the summary data frame\n",
"_____no_output_____"
]
],
[
[
"#Calculate number of unique items\nTotUniqueItems = len(purchase_data[\"Item Name\"].unique())\n\n#Calculate average price\nAvePrice = purchase_data[\"Price\"].mean()\n\n#Calculate Number of Purchases\nTotalPurch = len(purchase_data[\"Item Name\"])\n\n#Calculate Total Revenue\nTotRev = purchase_data[\"Price\"].sum()\n\n#Create Data Frame\nPurchAnalTot_df = pd.DataFrame({\"Number of Unique Items\": [TotUniqueItems],\n \"Average Price\": AvePrice,\n \"Number of Purchases\": TotalPurch,\n \"Total Revenue\": TotRev})\n\n#Set correct numbering format\nPurchAnalTot_df.style.format({\"Average Price\":\"${:20,.2f}\",\n \"Total Revenue\":\"${:20,.2f}\"})",
"_____no_output_____"
]
],
[
[
"## Gender Demographics",
"_____no_output_____"
],
[
"* Percentage and Count of Male Players\n\n\n* Percentage and Count of Female Players\n\n\n* Percentage and Count of Other / Non-Disclosed\n\n\n",
"_____no_output_____"
]
],
[
[
"#Calculate No. male players\nMalePlayer_df = purchase_data[purchase_data['Gender']==\"Male\"]\nTotMalePlayers = len(MalePlayer_df[\"SN\"].unique())\n#Calculate Percentage of Male Players\nPercMale = TotMalePlayers/PlayerCount\n\n#Calculate No. female players\nFemalePlayer_df = purchase_data[purchase_data['Gender']==\"Female\"]\nTotFemalePlayers = len(FemalePlayer_df[\"SN\"].unique())\n#Calculate Percentage of Female Players\nPercFemale = TotFemalePlayers/PlayerCount\n\n#Calculate No. Other / Non-Disclosed players\nOtherPlayer_df = purchase_data[purchase_data['Gender']==\"Other / Non-Disclosed\"]\nTotOtherPlayers = len(OtherPlayer_df[\"SN\"].unique())\n#Calculate Percentage of Other / Non-Disclosed Players\nPercOther = TotOtherPlayers/PlayerCount\nPercOther\n\n#Create Data Frame for Gender Demographics\nGenderDemo_df = pd.DataFrame({\"Gender\":[\"Male\",\"Female\",\"Other / Non-Disclosed\"],\n \"Number of Players\":[TotMalePlayers,TotFemalePlayers,TotOtherPlayers],\n \"Percentage of Total Players\":[PercMale,PercFemale,PercOther]})\n\n#Set correct numbering format\nGenderDemo_df.style.format({\"Percentage of Total Players\":\"{:,.2%}\"})",
"_____no_output_____"
]
],
[
[
"\n## Purchasing Analysis (Gender)",
"_____no_output_____"
],
[
"* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender\n\n\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display the summary data frame",
"_____no_output_____"
]
],
[
[
"#Calculate No. purchases by females\nFemalePurch = len(FemalePlayer_df[\"SN\"])\n#Calculate Average Purchase Price by female\nFemaleAvePurch = FemalePlayer_df[\"Price\"].mean()\n#Calculate Total Purchase Value by Female\nFemaleTotPurch = FemalePlayer_df[\"Price\"].sum()\n#Calculate averae total purchase per person\nFemalePPAvePurch = FemaleTotPurch/TotFemalePlayers\n\n#Calculate No. purchases by males\nMalePurch = len(MalePlayer_df[\"SN\"])\n#Calculate Average Purchase Price by male\nMaleAvePurch = MalePlayer_df[\"Price\"].mean()\n#Calculate Total Purchase Value by male\nMaleTotPurch = MalePlayer_df[\"Price\"].sum()\n#Calculate averae total purchase per person\nMalePPAvePurch = MaleTotPurch/TotMalePlayers\n\n#Calculate No. purchases by other\nOtherPurch = len(OtherPlayer_df[\"SN\"])\n#Calculate Average Purchase Price by other\nOtherAvePurch = OtherPlayer_df[\"Price\"].mean()\n#Calculate Total Purchase Value by other\nOtherTotPurch = OtherPlayer_df[\"Price\"].sum()\n#Calculate averae total purchase per person\nOtherPPAvePurch = OtherTotPurch/TotOtherPlayers\n\n#Create Data Frame for Gender Demographics\nPurchAnalGender_df = pd.DataFrame({\"Gender\":[\"Female\",\"Male\",\"Other / Non-Disclosed\"],\n \"Purchase Count\":[FemalePurch,MalePurch,OtherPurch],\n \"Average Purchase Price\":[FemaleAvePurch,MaleAvePurch,OtherAvePurch],\n \"Total Purchase Value\":[FemaleTotPurch,MaleTotPurch,OtherTotPurch],\n \"Avg Total Purchases Per Person\":[FemalePPAvePurch,MalePPAvePurch,OtherPPAvePurch]})\n\n#Set correct numbering format\nPurchAnalGender_df.style.format({\"Average Purchase Price\":\"${:20,.2f}\",\n \"Total Purchase Value\":\"${:20,.2f}\",\n \"Avg Total Purchases Per Person\":\"${:20,.2f}\"})",
"_____no_output_____"
]
],
[
[
"## Age Demographics",
"_____no_output_____"
],
[
"* Establish bins for ages\n\n\n* Categorize the existing players using the age bins. Hint: use pd.cut()\n\n\n* Calculate the numbers and percentages by age group\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: round the percentage column to two decimal points\n\n\n* Display Age Demographics Table\n",
"_____no_output_____"
]
],
[
[
"#Create age range bins\nbins = [0,9,14,19,24,29,34,39,10000]\n\n#Create labels for bins\ngroup_labels = [\"<10\",\"10-14\",\"15-19\",\"20-24\",\"25-29\",\"30-34\",\"35-39\",\"40+\"]\n\n#Slice the data and place it into bins and Place data series into a new column inside of the DataFrame\npurchase_data[\"Age Range\"] = pd.cut(purchase_data[\"Age\"], bins, labels=group_labels)\n\n#Create a GroupBy object based on \"Age Range\"\nPurchByAgeRg_Group = purchase_data.groupby(\"Age Range\")[\"SN\"].agg([\"count\"]).reset_index()\nPurchByAgeRg_Group[\"Percent of Total Purchases\"] = PurchByAgeRg_Group[\"count\"]/purchase_data[\"SN\"].count()\nPurchByAgeRg_Group = PurchByAgeRg_Group.rename(columns={\"count\":\"No. Purchases\"})\n\n#Set correct numbering format\nformat_dict = {'Percent of Total Purchases':'{:.2%}'}\nPurchByAgeRg_Group.style.format(format_dict).hide_index()",
"_____no_output_____"
]
],
[
[
"## Purchasing Analysis (Age)",
"_____no_output_____"
],
[
"* Bin the purchase_data data frame by age\n\n\n* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below\n\n\n* Create a summary data frame to hold the results\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display the summary data frame",
"_____no_output_____"
]
],
[
[
"#Create a second GroupBy object based on \"Age Range\"\nPurchByAgeRg_Group2 = purchase_data.groupby(\"Age Range\")[\"Price\"].agg([\"count\",\"mean\",\"sum\"])\nPurchByAgeRg_Group3 = purchase_data.groupby(\"Age Range\")[\"SN\"].agg([\"nunique\"])\nPurchAnalAge_df= pd.concat([PurchByAgeRg_Group2,PurchByAgeRg_Group3],axis=1,join=\"inner\")\nPurchAnalAge_df[\"Avg Total Purchase per Person\"] = PurchAnalAge_df[\"sum\"]/PurchAnalAge_df[\"nunique\"]\n\n#Rename Columns\nPurchAnalAge_df=PurchAnalAge_df.rename(columns={\"count\":\"Purchase Count\",\"mean\":\"Average Purchase Price\",\"sum\":\"Total Purchase Value\"})\nPurchAnalAge_df\n\n#Drop column \"unique\"\ndel PurchAnalAge_df[\"nunique\"]\nPurchAnalAge_df\n\n#Set correct numbering format\nPurchAnalAge_df.style.format({\"Average Purchase Price\":\"${:20,.2f}\",\n \"Total Purchase Value\":\"${:20,.2f}\",\n \"Avg Total Purchase per Person\":\"${:20,.2f}\"})",
"_____no_output_____"
]
],
[
[
"## Top Spenders",
"_____no_output_____"
],
[
"* Run basic calculations to obtain the results in the table below\n\n\n* Create a summary data frame to hold the results\n\n\n* Sort the total purchase value column in descending order\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display a preview of the summary data frame\n\n",
"_____no_output_____"
]
],
[
[
"#Create a GroupBy object based on \"SN\"\nPurchBySN_gp = purchase_data.groupby(\"SN\")[\"Price\"].agg([\"count\",\"mean\",\"sum\"])\n\n#Sort the total purchase value column in descending order\nTotalSpender = PurchBySN_gp.sort_values(\"sum\", ascending=False)\n\n#Rename Columns\nTotalSpender=TotalSpender.rename(columns={\"count\":\"Purchase Count\",\"mean\":\"Average Purchase Price\",\"sum\":\"Total Purchase Value\"})\n\n#Set correct numbering format\nTotalSpender.style.format({\"Average Purchase Price\":\"${:20,.2f}\",\n \"Total Purchase Value\":\"${:20,.2f}\",\n \"Avg Total Purchase per Person\":\"${:20,.2f}\"})",
"_____no_output_____"
]
],
[
[
"## Most Popular Items",
"_____no_output_____"
],
[
"* Retrieve the Item ID, Item Name, and Item Price columns\n\n\n* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value\n\n\n* Create a summary data frame to hold the results\n\n\n* Sort the purchase count column in descending order\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display a preview of the summary data frame\n\n",
"_____no_output_____"
]
],
[
[
"#Create a GroupBy object based on \"SN\"\nItemsAnal_gp = purchase_data.groupby([\"Item ID\",\"Item Name\"])[\"Price\"].agg([\"count\",\"mean\",\"sum\"])\n\n#Sort the total purchase value column in descending order\nItemsAnalDesc_gp = ItemsAnal_gp.sort_values(\"count\", ascending=False)\n\n#Rename Columns\nItemsAnalDesc_gp=ItemsAnalDesc_gp.rename(columns={\"count\":\"Purchase Count\",\"mean\":\"Item Price\",\"sum\":\"Total Purchase Value\"})\n\n#Set correct numbering format\nItemsAnalDesc_gp.style.format({\"Item Price\":\"${:20,.2f}\",\n \"Total Purchase Value\":\"${:20,.2f}\"})",
"_____no_output_____"
]
],
[
[
"## Most Profitable Items",
"_____no_output_____"
],
[
"* Sort the above table by total purchase value in descending order\n\n\n* Optional: give the displayed data cleaner formatting\n\n\n* Display a preview of the data frame\n\n",
"_____no_output_____"
]
],
[
[
"#Sort the total purchase value column in descending order\nItemsAnalTotDesc_gp = ItemsAnalDesc_gp.sort_values(\"Total Purchase Value\", ascending=False)\n\n#Set correct numbering format\nItemsAnalTotDesc_gp.style.format({\"Item Price\":\"${:20,.2f}\",\n \"Total Purchase Value\":\"${:20,.2f}\"})",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e746315a2bd0a146d8cdaf1f6e8bf0c340f53700 | 92,279 | ipynb | Jupyter Notebook | tutorials/Tutorial5_Evaluation.ipynb | vchulski/haystack | bbfccf5cf639190e3b0b34feea444a1bf22f6027 | [
"Apache-2.0"
] | null | null | null | tutorials/Tutorial5_Evaluation.ipynb | vchulski/haystack | bbfccf5cf639190e3b0b34feea444a1bf22f6027 | [
"Apache-2.0"
] | null | null | null | tutorials/Tutorial5_Evaluation.ipynb | vchulski/haystack | bbfccf5cf639190e3b0b34feea444a1bf22f6027 | [
"Apache-2.0"
] | null | null | null | 74.538772 | 230 | 0.583231 | [
[
[
"# Evalutaion\nTo be able to make a statement about the performance of a question-asnwering system, it is important to evalute it. Furthermore, evaluation allows to determine which parts of the system can be improved.",
"_____no_output_____"
],
[
"## Start an Elasticsearch server\nYou can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (eg., in Colab notebooks), then you can manually download and execute Elasticsearch from source.",
"_____no_output_____"
]
],
[
[
"# Recommended: Start Elasticsearch using Docker\n! docker run -d -p 9200:9200 -e \"discovery.type=single-node\" elasticsearch:7.6.2",
"a844e3ec4f41b5d2b24fe3d562e8302896baea1d0a761295998434c2de490714\r\n"
],
[
"# In Colab / No Docker environments: Start Elasticsearch from source\n#! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz -q\n#! tar -xzf elasticsearch-7.6.2-linux-x86_64.tar.gz\n#! chown -R daemon:daemon elasticsearch-7.6.2\n\n#import os\n#from subprocess import Popen, PIPE, STDOUT\n#es_server = Popen(['elasticsearch-7.6.2/bin/elasticsearch'],\n stdout=PIPE, stderr=STDOUT,\n preexec_fn=lambda: os.setuid(1) # as daemon\n )\n# wait until ES has started\n#! sleep 30",
"_____no_output_____"
],
[
"from farm.utils import initialize_device_settings\n\ndevice, n_gpu = initialize_device_settings(use_cuda=True)",
"05/19/2020 09:03:25 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n"
],
[
"\nfrom haystack.indexing.io import fetch_archive_from_http\n\n# Download evaluation data, which is a subset of Natural Questions development set containing 50 documents\ndoc_dir = \"../data/nq\"\ns3_url = \"https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset.json.zip\"\nfetch_archive_from_http(url=s3_url, output_dir=doc_dir)",
"_____no_output_____"
],
[
"# Connect to Elasticsearch\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\n\ndocument_store = ElasticsearchDocumentStore(host=\"localhost\", username=\"\", password=\"\", create_index=False)",
"_____no_output_____"
],
[
"# Add evaluation data to Elasticsearch database\ndocument_store.add_eval_data(\"../data/natural_questions/dev_subset.json\")",
"05/19/2020 09:03:37 - INFO - elasticsearch - POST http://localhost:9200/_bulk [status:200 request:0.796s]\n05/19/2020 09:03:38 - INFO - elasticsearch - POST http://localhost:9200/_bulk [status:200 request:0.222s]\n"
]
],
[
[
"## Initialize components of QA-System",
"_____no_output_____"
]
],
[
[
"# Initialize Retriever\nfrom haystack.retriever.elasticsearch import ElasticsearchRetriever\n\nretriever = ElasticsearchRetriever(document_store=document_store)",
"_____no_output_____"
],
[
"# Initialize Reader\nfrom haystack.reader.farm import FARMReader\n\nreader = FARMReader(\"deepset/roberta-base-squad2\")",
"05/19/2020 09:03:46 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n05/19/2020 09:03:46 - INFO - farm.infer - Could not find `deepset/roberta-base-squad2` locally. Try to download from model hub ...\n05/19/2020 09:03:50 - WARNING - farm.modeling.language_model - Could not automatically detect from language model name what language it is. \n\t We guess it's an *ENGLISH* model ... \n\t If not: Init the language model by supplying the 'language' param.\n05/19/2020 09:03:56 - WARNING - farm.modeling.prediction_head - Some unused parameters are passed to the QuestionAnsweringHead. Might not be a problem. Params: {\"loss_ignore_index\": -1}\n05/19/2020 09:04:02 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n05/19/2020 09:04:02 - INFO - farm.infer - Got ya 7 parallel workers to do inference ...\n05/19/2020 09:04:02 - INFO - farm.infer - 0 0 0 0 0 0 0 \n05/19/2020 09:04:02 - INFO - farm.infer - /w\\ /w\\ /w\\ /w\\ /w\\ /w\\ /w\\\n05/19/2020 09:04:02 - INFO - farm.infer - /'\\ / \\ /'\\ /'\\ / \\ / \\ /'\\\n05/19/2020 09:04:02 - INFO - farm.infer - \n"
],
[
"# Initialize Finder which sticks together Reader and Retriever\nfrom haystack.finder import Finder\n\nfinder = Finder(reader, retriever)",
"_____no_output_____"
]
],
[
[
"## Evaluation of Retriever",
"_____no_output_____"
]
],
[
[
"# Evaluate Retriever on its own\nretriever_eval_results = retriever.eval()\n\n## Retriever Recall is the proportion of questions for which the correct document containing the answer is\n## among the correct documents\nprint(\"Retriever Recall:\", retriever_eval_results[\"recall\"])\n## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank\nprint(\"Retriever Mean Avg Precision:\", retriever_eval_results[\"mean avg precision\"])",
"05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.090s]\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.051s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 
request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:11 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n05/19/2020 09:04:11 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 
09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:12 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:12 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n"
]
],
[
[
"## Evaluation of Reader",
"_____no_output_____"
]
],
[
[
"# Evaluate Reader on its own\nreader_eval_results = reader.eval(document_store=document_store, device=device)\n\n# Evaluation of Reader can also be done directly on a SQuAD-formatted file \n# without passing the data to Elasticsearch\n#reader_eval_results = reader.eval_on_file(\"../data/natural_questions\", \"dev_subset.json\", device=device)\n\n## Reader Top-N-Recall is the proportion of predicted answers that overlap with their corresponding correct answer\nprint(\"Reader Top-N-Recall:\", reader_eval_results[\"top_n_recall\"])\n## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer\nprint(\"Reader Exact Match:\", reader_eval_results[\"EM\"])\n## Reader F1-Score is the average overlap between the predicted answers and the correct answers\nprint(\"Reader F1-Score:\", reader_eval_results[\"f1\"])",
"05/19/2020 09:04:22 - INFO - elasticsearch - GET http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.007s]\n05/19/2020 09:04:22 - INFO - elasticsearch - GET http://localhost:9200/_search/scroll [status:200 request:0.003s]\n05/19/2020 09:04:22 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.001s]\n05/19/2020 09:04:22 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search?scroll=5m&size=1000 [status:200 request:0.014s]\n05/19/2020 09:04:22 - INFO - elasticsearch - GET http://localhost:9200/_search/scroll [status:200 request:0.002s]\n05/19/2020 09:04:22 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.002s]\nEvaluating: 100%|██████████| 64/64 [00:14<00:00, 4.28it/s]"
]
],
[
[
"## Evaluation of Finder",
"_____no_output_____"
]
],
[
[
"# Evaluate combination of Reader and Retriever through Finder\nfinder_eval_results = finder.eval()\n\nprint(\"Retriever Recall in Finder:\", finder_eval_results[\"retriever_recall\"])\nprint(\"Retriever Mean Avg Precision in Finder:\", finder_eval_results[\"retriever_map\"])\n\n# Reader is only evaluated with those questions, where the correct document is among the retrieved ones\nprint(\"Reader Recall in Finder:\", finder_eval_results[\"reader_recall\"])\nprint(\"Reader Mean Avg Precision in Finder:\", finder_eval_results[\"reader_map\"])\nprint(\"Reader Exact Match in Finder:\", finder_eval_results[\"reader_em\"])\nprint(\"Reader F1-Score in Finder:\", finder_eval_results[\"reader_f1\"])",
"05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 
request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:57 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:57 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.005s]\n05/19/2020 
09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n05/19/2020 09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n05/19/2020 09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.007s]\n05/19/2020 09:04:58 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n05/19/2020 09:04:58 - INFO - elasticsearch - GET http://localhost:9200/eval_document/_search [status:200 request:0.006s]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7464c9f9061a1e12fd5cdfc6c8814e9c438e21a | 71,553 | ipynb | Jupyter Notebook | _notebooks/2021-01-22-US oil & gas production with EIA.ipynb | ujpradhan/blog | 6f53c62726590f3ca94570c44c737c72aca115e0 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021-01-22-US oil & gas production with EIA.ipynb | ujpradhan/blog | 6f53c62726590f3ca94570c44c737c72aca115e0 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021-01-22-US oil & gas production with EIA.ipynb | ujpradhan/blog | 6f53c62726590f3ca94570c44c737c72aca115e0 | [
"Apache-2.0"
] | 1 | 2021-01-23T02:14:19.000Z | 2021-01-23T02:14:19.000Z | 220.163077 | 45,761 | 0.623733 | [
[
[
"# \"US oil & gas production\"\n> \"Accessing EIA with Jupyter\"\n",
"_____no_output_____"
],
[
"In this post, we will plot a chart of (1) US oil production and (2) US gas production by accessing this data from EIA.\n\nFirst, install the python wrapper for Energy Information Administration (EIA) API using your Command Prompt",
"_____no_output_____"
],
[
"pip install EIA_python",
"_____no_output_____"
]
],
[
[
"#import customary packages\nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"#import EIA package\nimport eia",
"_____no_output_____"
]
],
[
[
"define key using your personal EIA API key <br>\nkey = eia.API(\"Peronal API KEY\")",
"_____no_output_____"
]
],
[
[
"#hide_input\nkey = eia.API('fae9d0bd7f4172e57a1876b2e5802392')",
"_____no_output_____"
],
[
"#Let's try quering EIA with our key. EIA has unique series ID for a variety of data. \n#Browse series ID here: https://www.eia.gov/opendata/qb.php?category=371\n\n#We'll first query for \"U.S. Field Production of Crude Oil, Annual\" with \"PET.MCRFPUS2.A\"\noil = key.data_by_series('PET.MCRFPUS2.A')\n\ntype(oil) #check type of data series we get back",
"_____no_output_____"
],
[
"#key.data_by_series(\"series_ID\") returns a dictionary\n#We'll convert it into a pandas dataframe to enhance display and functionality\n\noildf = pd.DataFrame(oil)\n\n#Observe US crude oil production over the past 10 year \noil10 = oildf[-10:]\n\noil10",
"_____no_output_____"
],
[
"#Conver the production to million barrels per day (mmbpd) by dividing by 1000\n\noil10 = oil10/1000\noil10",
"_____no_output_____"
],
[
"#Now lets do the same for US natural gas production\n#We'll query for \"U.S. Dry Natural Gas Production, Annual\" with \"NG.N9070US2.A\"\n\ngas = key.data_by_series('NG.N9070US2.A')\ngasdf = pd.DataFrame(gas)\ngas10 = gasdf[-10:]\n\ngas10\n",
"_____no_output_____"
],
[
"#Convert the production value to billion cubic feet per day (bcf/d) by dividing by (1000*365)\n\ngas10 = gas10/(1000*365)\n\ngas10",
"_____no_output_____"
],
[
"#Merge the two dataframes\n\nprod = pd.concat([oil10, gas10], axis = 1)\nprod.columns = [\"oil\", \"gas\"]\nprod",
"_____no_output_____"
],
[
"#Plot in bar chart\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax2 = ax.twinx()\n\n\nprod.oil.plot(kind='bar', ax = ax, width = 0.3, position = 0)\nprod.gas.plot(kind='bar', color = 'orange', ax = ax2, width = 0.3, position = 1)\n\nax.set_ylabel('oil production (mmbpd)')\nax2.set_ylabel('gas production (bcf/d)')\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74662a07fd1829fd6e1161cceeea680d61b7c28 | 34,067 | ipynb | Jupyter Notebook | house_prices/analysis11.ipynb | randat9/House_Prices | 3a7e51e1ac36aea0faabc61786652cf706b53c7e | [
"MIT"
] | null | null | null | house_prices/analysis11.ipynb | randat9/House_Prices | 3a7e51e1ac36aea0faabc61786652cf706b53c7e | [
"MIT"
] | null | null | null | house_prices/analysis11.ipynb | randat9/House_Prices | 3a7e51e1ac36aea0faabc61786652cf706b53c7e | [
"MIT"
] | null | null | null | 37.191048 | 93 | 0.338862 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas_profiling as pp\nimport seaborn as sns\n\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom functions.preprocessing import Imputer\n\nplt.style.use('ggplot')",
"_____no_output_____"
],
[
"def remove_empty_features(data, threshold):\n \"\"\"...\"\"\"\n cols_to_drop = [column for column in data.columns \n if data[column].isna().mean() > threshold]\n data = data.drop(columns = cols_to_drop)\n return data, cols_to_drop\n\ndef mapping_from_list(order):\n return {label: idx for idx, label in enumerate(order)}\n\ndef ordinal_feature(data: pd.DataFrame, dictionary: dict):\n \"\"\" Transform ordinal features\n\n Args:\n data (dataframe)\n dictionary (dict)\n\n Returns:\n data (dataframe): encoded dataframe\n \"\"\"\n data_copy = data.copy()\n for key,value in dictionary.items():\n data_copy[key] = data_copy[key].map(mapping_from_list(value))\n\n return data_copy",
"_____no_output_____"
],
[
"# Road raw training data\nraw_data = pd.read_csv('data/train.csv', index_col=0)\nraw_data.head(5)",
"_____no_output_____"
],
[
"options = {\n \"MSSubClass\": {\"strategy\": \"most_frequent\"},\n \"MSZoning\": {\"strategy\": \"most_frequent\"},\n \"LotFrontage\": {\"strategy\": \"mean\"},\n \"LotArea\": {\"strategy\": \"mean\"},\n \"Street\": {\"strategy\": \"most_frequent\"},\n \"Alley\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"LotShape\": {\"strategy\": \"most_frequent\"},\n \"LandContour\": {\"strategy\": \"most_frequent\"},\n \"Utilities\": {\"strategy\": \"most_frequent\"},\n \"LotConfig\": {\"strategy\": \"most_frequent\"},\n \"LandSlope\": {\"strategy\": \"most_frequent\"},\n \"Neighborhood\": {\"strategy\": \"most_frequent\"},\n \"Condition1\": {\"strategy\": \"most_frequent\"},\n \"Condition2\": {\"strategy\": \"most_frequent\"},\n \"Electrical\": {\"strategy\": \"most_frequent\"},\n \"1stFlrSF\": {\"strategy\": \"mean\"},\n \"2ndFlrSF\": {\"strategy\": \"mean\"},\n \"LowQualFinSF\": {\"strategy\": \"mean\"},\n \"GrLivArea\": {\"strategy\": \"mean\"},\n \"BsmtFullBath\": {\"strategy\": \"median\"},\n \"BsmtHalfBath\": {\"strategy\": \"median\"},\n \"FullBath\": {\"strategy\": \"median\"},\n \"HalfBath\": {\"strategy\": \"median\"},\n \"BedroomAbvGr\": {\"strategy\": \"median\"},\n \"KitchenAbvGr\": {\"strategy\": \"median\"},\n \"KitchenQual\": {\"strategy\": \"most_frequent\"},\n \"TotRmsAbvGrd\": {\"strategy\": \"median\"},\n \"BldgType\": {\"strategy\": \"most_frequent\"},\n \"HouseStyle\": {\"strategy\": \"most_frequent\"},\n \"OverallQual\": {\"strategy\": \"median\"},\n \"OverallCond\": {\"strategy\": \"median\"},\n \"YearBuilt\": {\"strategy\": \"median\"},\n \"YearRemodAdd\": {\"strategy\": \"median\"},\n \"RoofStyle\": {\"strategy\": \"most_frequent\"},\n \"RoofMatl\": {\"strategy\": \"most_frequent\"},\n \"Exterior1st\": {\"strategy\": \"most_frequent\"},\n \"Exterior2nd\": {\"strategy\": \"most_frequent\"},\n \"MasVnrType\": {\"strategy\": \"constant\", \"fill_value\": \"None\"},\n \"MasVnrArea\": {\"strategy\": \"mean\"},\n \"ExterQual\": {\"strategy\": \"most_frequent\"},\n \"ExterCond\": {\"strategy\": \"most_frequent\"},\n \"Foundation\": {\"strategy\": \"most_frequent\"},\n \"BsmtQual\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtCond\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtExposure\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtFinType1\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtFinSF1\": {\"strategy\": \"mean\"},\n \"BsmtFinType2\": {\"strategy\": \"constant\", \"fill_value\": \"NoBasement\"},\n \"BsmtFinSF2\": {\"strategy\": \"mean\"},\n \"BsmtUnfSF\": {\"strategy\": \"mean\"},\n \"TotalBsmtSF\": {\"strategy\": \"mean\"},\n \"Heating\": {\"strategy\": \"most_frequent\"},\n \"HeatingQC\": {\"strategy\": \"most_frequent\"},\n \"CentralAir\": {\"strategy\": \"most_frequent\"},\n \"ScreenPorch\": {\"strategy\": \"mean\"},\n \"PoolArea\": {\"strategy\": \"mean\"},\n \"PoolQC\": {\"strategy\": \"constant\", \"fill_value\": \"NoPool\"},\n \"Fence\": {\"strategy\": \"constant\", \"fill_value\": \"NoFence\"},\n \"MiscFeature\": {\"strategy\": \"constant\", \"fill_value\": \"None\"},\n \"MiscVal\": {\"strategy\": \"mean\"},\n \"MoSold\": {\"strategy\": \"median\"},\n \"YrSold\": {\"strategy\": \"median\"},\n \"SaleType\": {\"strategy\": \"most_frequent\"},\n \"SaleCondition\": {\"strategy\": \"most_frequent\"},\n \"Functional\": {\"strategy\": \"most_frequent\"},\n \"Fireplaces\": {\"strategy\": \"most_frequent\"},\n \"FireplaceQu\": {\"strategy\": 
\"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageType\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageYrBlt\": {\"strategy\": \"most_frequent\"},\n \"GarageFinish\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageCars\": {\"strategy\": \"most_frequent\"},\n \"GarageArea\": {\"strategy\": \"median\"},\n \"GarageQual\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"GarageCond\": {\"strategy\": \"constant\", \"fill_value\": \"NoAccess\"},\n \"PavedDrive\": {\"strategy\": \"most_frequent\"},\n \"WoodDeckSF\": {\"strategy\": \"most_frequent\"},\n \"OpenPorchSF\": {\"strategy\": \"most_frequent\"},\n \"EnclosedPorch\": {\"strategy\": \"mean\"},\n \"3SsnPorch\": {\"strategy\": \"most_frequent\"},\n}",
"_____no_output_____"
],
[
"params = {\n \"threshold_empty_features\": 0.3,\n}\n\ncols_to_drop = {\n \"remove_empty_features\": []\n}\n\ncategorical_colums = ['Exterior1st', 'Foundation', 'MasVnrType', 'Neighborhood', \n 'PavedDrive', 'Electrical', 'MSSubClass', 'SaleCondition',\n 'GarageType', 'Exterior2nd', 'MSZoning', 'CentralAir']\n\n# Ordinal features options\nordinal_columns = ['HeatingQC', 'GarageQual', 'BsmtFinType1', 'ExterQual', \n 'GarageFinish', 'BsmtExposure', 'LotShape', 'OverallQual',\n 'BsmtQual', 'KitchenQual']\n\nordinal_mapping = {\n 'BsmtExposure': ['None', 'No', 'Mn', 'Av', 'Gd'],\n 'BsmtFinType1': ['None', 'Unf', 'LwQ', 'Rec', 'BLQ', 'ALQ', 'GLQ'],\n 'GarageFinish': ['None', 'Unf', 'RFn', 'Fin'],\n 'LotShape': ['IR3', 'IR2', 'IR1', 'Reg']\n}\n\nordinal_common = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'PoolQC']\nfor column in ordinal_common:\n ordinal_mapping[column] = ['None', 'Po', 'Fa', 'TA', 'Gd']",
"_____no_output_____"
],
[
"# Removing features with a lot of missing values\ndata, cols_to_drop[\"remove_empty_features\"] = remove_empty_features(\n raw_data, \n params[\"threshold_empty_features\"]\n)\n\n# Impute missing values\nimp = Imputer(options=options)\ndata = imp.fit_transform(raw_data)\n\n# HOTFIX\nfor key in imp.options:\n if isinstance(imp.options[key]['_fill'], np.integer):\n imp.options[key]['_fill'] = int(imp.options[key]['_fill'])\nimp.save_options('imputer_options.json')\n\n# Encoding categorical features\nohe = OneHotEncoder(drop='first', sparse=False)\nohe.fit(data[categorical_colums])\ndata_category_transformed = pd.DataFrame(\n ohe.transform(data[categorical_colums]),\n columns=ohe.get_feature_names(input_features=categorical_colums),\n index = data.index)\ndata = pd.concat([data.drop(categorical_colums, axis=1), \n data_category_transformed],\n axis=1)\n\n# Encoding ordinal features\n# data = ordinal_feature(data, ordinal_mapping)\n\n# data\ndata",
"_____no_output_____"
]
],
[
[
"## TODO:\n\n- Krzysiek:\n - funkcje zwracają indeksy i kolumny\n \n- kbdev\n - Encoding ordinal features as a class\n - fix np.int64 bug in json serialization\n - \n \n- miri\n - nie będzie jej (na 50%)\n \n- Patryk\n - zapis do pliku Encoder, konstruktor z pliku\n - PR \n \n```python\nour_encoder = OurOneHotEncoder(columns=...)\ndata = our_encoder.fit(data)\nour_encoder.save(file.json)\n \nour_encoder.from_file(file.json)\nour_encoder.transform(other_data)\n```\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e74666bce60958bba5cb9e78d6b171a6ed1ddbb2 | 12,403 | ipynb | Jupyter Notebook | examples/cppcon_2020/04_oneTBB_concurrent_containers/oneTBB_concurrent_containers.ipynb | alvdd/oneTBB | d87e076d530007b48f619b2f58b53fb0d1a173b1 | [
"Apache-2.0"
] | null | null | null | examples/cppcon_2020/04_oneTBB_concurrent_containers/oneTBB_concurrent_containers.ipynb | alvdd/oneTBB | d87e076d530007b48f619b2f58b53fb0d1a173b1 | [
"Apache-2.0"
] | null | null | null | examples/cppcon_2020/04_oneTBB_concurrent_containers/oneTBB_concurrent_containers.ipynb | alvdd/oneTBB | d87e076d530007b48f619b2f58b53fb0d1a173b1 | [
"Apache-2.0"
] | null | null | null | 33.612466 | 187 | 0.5464 | [
[
[
"# oneTBB Concurrent Containers",
"_____no_output_____"
],
[
"##### Sections\n- [oneTBB Concurrent Containers](#oneTBB-Concurrent-Containers)\n- _Code_: [A Producer-Consumer Application with tbb::concurrent_queue](#A-Producer-Consumer-Application-with-tbb::concurrent_queue)",
"_____no_output_____"
],
[
"## Learning Objectives\n* Learn how thread-unsafe uses of a standard container might be addressed by using a oneTBB concurrent container",
"_____no_output_____"
],
[
"# oneTBB Concurrent Containers",
"_____no_output_____"
],
[
"The oneTBB library provides a number of \n[concurrent containers](https://spec.oneapi.com/versions/latest/elements/oneTBB/source/containers.html),\nincluding `concurrent_vector`, `concurrent_queue`, `concurrent_bounded_queue`, `concurrent_priority_queue`,\n`concurrent_hash_map`, `concurrent_unordered_map` and more. These classes provide optimized containers that \npermit multiple threads to simultaneously invoke certain functions on the same container.",
"_____no_output_____"
],
[
"## A Producer-Consumer Application with tbb::concurrent_priority_queue\n\nIn this section, we will implement a very simple producer-consumer application that uses a shared \npriority queue. A producer thread will generate 1000 items, putting them into a queue and a consumer \nthread will read the items from the queue, generating a sum of the values as a result.\n\n### Run the sequential baseline implementation\n\nIn our baseline implementation, the producer thread and the consumer thread are the same thread, the main thread.\nThere is no concurrency expressed in our serial code, but if there was, we would have a race condition since we\nuse a `std::priority_queue`. In the `while`-loop, we call `empty` before calling `pop`. If more than one thread\nwas simultaneously popping from this shared queue, the queue might appear non-empty at the test in the `while`-loop\nbut be empty by the time `pop` is called. This problem does not exist in our serial code since only one thread is\nused. In the next section, we will add concurrency and the potential for such a race condition.\n\nIn this section, just inspect the sequential code below - there are no modifications necessary. Run the first cell \nto create the file, then run the cell below it to compile and execute the code. This represents the baseline for \nour producer-consumer exercise.\n\n1. Inspect the code cell below, then click run ▶ to save the code to a file\n2. Run ▶ the cell in the __Build and Run the baseline__ section below the code snippet to compile and execute the code in the saved file",
"_____no_output_____"
]
],
[
[
"%%writefile lab/q-serial.cpp\n//==============================================================\n// Copyright (c) 2020 Intel Corporation\n//\n// SPDX-License-Identifier: Apache-2.0\n// =============================================================\n\n#include <iostream>\n#include <queue>\n\nint main() {\n int sum (0);\n int item;\n\n std::priority_queue<int> myPQ;\n \n for(int i=0; i<10001; i+=1) {\n myPQ.push(i);\n }\n \n while( !myPQ.empty() ) {\n sum += myPQ.top();\n myPQ.pop();\n }\n\n // prints \"total: 50005000\" (for 0,10001,1)\n std::cout << \"total: \" << sum << '\\n';\n return 0;\n}",
"_____no_output_____"
]
],
[
[
"### Build and Run the baseline\nSelect the cell below and click Run ▶ to compile and execute the code above:",
"_____no_output_____"
]
],
[
[
"! chmod 755 q; chmod 755 ./scripts/run_q-serial.sh; if [ -x \"$(command -v qsub)\" ]; then ./q scripts/run_q-serial.sh; else ./scripts/run_q-serial.sh; fi",
"_____no_output_____"
]
],
[
[
"### Implement a parallel version with tbb::concurrent_priority_queue\n\nIn this section, we modify the example to create one producer thread\nand two consumer threads that run concurrently. To eliminate the potential\nrace on the call to `empty` and `pop`, we replace the `std::priority_queue` \nwith `tbb::concurrent_priority_queue`. In fact, if you run the code below before\nmaking the required modifications, the output might include a segfault.\n\nThe key interfaces in `tbb::concurrent_priority_queue` needed for this exercise \nare reproduced below:\n\n```cpp\nnamespace tbb {\n template <typename T, typename Compare = std::less<T>,\n typename Allocator = cache_aligned_allocator<T>>\n class concurrent_priority_queue {\n public:\n concurrent_priority_queue();\n void push( const value_type& value );\n bool try_pop( value_type& value );\n };\n```\n\nYou can find detailed documentation for ``concurrent_queue`` [here](https://spec.oneapi.com/versions/latest/elements/oneTBB/source/containers/concurrent_priority_queue_cls.html).\n\nFor this exercise, complete the following steps:\n\n1. Inspect the code cell below and make the following modifications.\n 1. Replace the type `std::priority_queue` with `tbb::concurrent_priority_queue`.\n 2. Replace the invocation of `empty`, `top` and `pop` with a single call to `try_pop`.\n2. When the modifications are complete, click run ▶ to save the code to a file. \n3. Run ▶ the cell in the __Build and Run the modified code__ section below the code snippet to compile and execute the code in the saved file.",
"_____no_output_____"
]
],
[
[
"%%writefile lab/q-parallel.cpp\n//==============================================================\n// Copyright (c) 2020 Intel Corporation\n//\n// SPDX-License-Identifier: Apache-2.0\n// =============================================================\n\n#include <iostream>\n#include <queue>\n#include <thread>\n#include <tbb/tbb.h>\n\n#define INCORRECT_QUEUE_TYPE std::priority_queue<int>;\n\n// STEP A: Replace std::priority_queue with tbb::concurrent_priority_queue\nusing queue_type = INCORRECT_QUEUE_TYPE;\n \nint consume(queue_type &myPQ) {\n int local_sum = 0;\n int value = -1;\n \n bool consuming = true;\n while(consuming) {\n // STEP B: replace the pattern of empty, top and pop \n // with a single call to myPQ.try_pop(value)\n if (!myPQ.empty()) {\n value = myPQ.top();\n myPQ.pop();\n if (value == -1)\n consuming = false;\n else\n local_sum += value;\n }\n }\n return local_sum;\n}\n\nint main() {\n int sum (0);\n int item;\n\n queue_type myPQ;\n \n std::thread producer([&]() {\n for(int i=0; i<10001; i+=1) {\n myPQ.push(i);\n }\n // to signal the end to the two consumers\n myPQ.push(-1);\n myPQ.push(-1);\n });\n\n int local_sum1 = 0, local_sum2 = 0;\n std::thread consumer1([&]() { local_sum1 = consume(myPQ); });\n std::thread consumer2([&]() { local_sum2 = consume(myPQ); });\n\n producer.join();\n consumer1.join();\n consumer2.join();\n \n // prints \"total: 50005000\" (for 0,10001,1)\n std::cout << \"total: \" << local_sum1 + local_sum2 << '\\n';\n return 0;\n}",
"_____no_output_____"
]
],
[
[
"### Build and Run the modified code\n\nSelect the cell below and click Run ▶ to compile and execute the code that you modified above:",
"_____no_output_____"
]
],
[
[
"! chmod 755 q; chmod 755 ./scripts/run_q-parallel.sh; if [ -x \"$(command -v qsub)\" ]; then ./q scripts/run_q-parallel.sh; else ./scripts/run_q-parallel.sh; fi",
"_____no_output_____"
]
],
[
[
"## Producer-Consumer Solution (Don't peak unless you have to)",
"_____no_output_____"
]
],
[
[
"%%writefile solutions/q-parallel.cpp\n//==============================================================\n// Copyright (c) 2020 Intel Corporation\n//\n// SPDX-License-Identifier: Apache-2.0\n// =============================================================\n\n#include <iostream>\n#include <queue>\n#include <thread>\n#include <tbb/tbb.h>\n\n// STEP A: Replace std::priority_queue with tbb::concurrent_priority_queue\nusing queue_type = tbb::concurrent_priority_queue<int>;\n \nint consume(queue_type &myPQ) {\n int local_sum = 0;\n int value = -1;\n \n bool consuming = true;\n while(consuming) {\n // STEP B: replace the pattern of empty, top and pop \n // with a single call to myPQ.try_pop(value)\n if (myPQ.try_pop(value)) {\n if (value == -1)\n consuming = false;\n else\n local_sum += value;\n }\n }\n return local_sum;\n}\n\nint main() {\n int sum (0);\n int item;\n\n queue_type myPQ;\n \n std::thread producer([&]() {\n for(int i=0; i<10001; i+=1) {\n myPQ.push(i);\n }\n // to signal the end to the two consumers\n myPQ.push(-1);\n myPQ.push(-1);\n });\n\n int local_sum1 = 0, local_sum2 = 0;\n std::thread consumer1([&]() { local_sum1 = consume(myPQ); });\n std::thread consumer2([&]() { local_sum2 = consume(myPQ); });\n\n producer.join();\n consumer1.join();\n consumer2.join();\n \n // prints \"total: 50005000\" (for 0,10001,1)\n std::cout << \"total: \" << local_sum1 + local_sum2 << '\\n';\n return 0;\n}",
"_____no_output_____"
],
[
"! chmod 755 q; chmod 755 ./scripts/run_q-solution.sh; if [ -x \"$(command -v qsub)\" ]; then ./q scripts/run_q-solution.sh; else ./scripts/run_q-solution.sh; fi",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e746680bad451b93c4b4f5e278a6556589d7478d | 25,753 | ipynb | Jupyter Notebook | Research/Stack.ipynb | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | [
"MIT"
] | null | null | null | Research/Stack.ipynb | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | [
"MIT"
] | null | null | null | Research/Stack.ipynb | ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge | d8b06969c9393cfce6d9ac96b58c9d365ff4369d | [
"MIT"
] | null | null | null | 54.793617 | 6,608 | 0.67429 | [
[
[
"import os\nimport numpy as np \nimport pandas as pd \nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))",
"test.json\ntrain.json\ntrain_test.json\n\n"
],
[
"sub_path = \"../best_models/\"\nall_files = [name for name in os.listdir(sub_path)]\n\n# Read and concatenate submissions\nouts = [pd.read_csv(os.path.join(sub_path, f), index_col=0) for f in all_files]\nconcat_sub = pd.concat(outs, axis=1)\ncols = list(map(lambda x: \"is_iceberg_\" + str(x), range(len(concat_sub.columns))))\nconcat_sub.columns = cols\nconcat_sub.reset_index(inplace=True)\nconcat_sub.head()",
"_____no_output_____"
],
[
"np.sum(concat_sub.corr() > 0.6)",
"_____no_output_____"
],
[
"threshold_high = 0.8\nthreshold_low = 0.2",
"_____no_output_____"
],
[
"result = []\nscores = []\nundefined = []\n\nfor _, row in concat_sub.iterrows():\n name = row.id\n row = row.copy().drop('id')\n high_score_count = np.sum(row > threshold_high)\n low_score_count = np.sum(row < threshold_low)\n mid_score_count = len(row) - high_score_count - low_score_count\n \n if high_score_count == len(row):\n dist = 1\n elif low_score_count == len(row):\n dist = 0\n else:\n dist = 0.5\n undefined.extend(row)\n \n scores.append(dist)\n \n if dist == 1.:\n score = np.max(row)\n elif dist == 0:\n score = np.min(row)\n else:\n score = np.median(row)\n \n result.append({\n 'id': name,\n 'is_iceberg': score,\n })",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\n%matplotlib inline\n\nplt.hist(scores, bins=10)",
"_____no_output_____"
],
[
"plt.hist(undefined, bins=50)",
"_____no_output_____"
],
[
"df = pd.DataFrame.from_dict(result)\ndf.to_csv('stack.csv', index=False, float_format='%.15f')\npd.read_csv('stack.csv').head()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7466a12c6070485163ddef9ec817c62dd3b7016 | 10,061 | ipynb | Jupyter Notebook | NLP Exercise .ipynb | paritosh3006/NLP_Exercise | 3f742416d99d686e92436e48d4e6eb923c110727 | [
"MIT"
] | null | null | null | NLP Exercise .ipynb | paritosh3006/NLP_Exercise | 3f742416d99d686e92436e48d4e6eb923c110727 | [
"MIT"
] | null | null | null | NLP Exercise .ipynb | paritosh3006/NLP_Exercise | 3f742416d99d686e92436e48d4e6eb923c110727 | [
"MIT"
] | null | null | null | 20.407708 | 139 | 0.497764 | [
[
[
"import nltk",
"_____no_output_____"
],
[
"from nltk.tokenize import word_tokenize",
"_____no_output_____"
],
[
"from nltk.tokenize import sent_tokenize",
"_____no_output_____"
],
[
"text = \"Hello My name is Paritosh and I'm looking for a job in DataScience\"",
"_____no_output_____"
],
[
"text_token = word_tokenize(text)",
"_____no_output_____"
],
[
"#nltk.download('all')",
"_____no_output_____"
],
[
"#list(nltk.unigrams(text))",
"_____no_output_____"
],
[
"from nltk.stem import PorterStemmer",
"_____no_output_____"
],
[
"ps = PorterStemmer()",
"_____no_output_____"
],
[
"print (ps.stem(\"jumping\"))",
"jump\n"
],
[
"from nltk import PorterStemmer",
"_____no_output_____"
],
[
"PorterStemmer().stem(\"complications\")",
"_____no_output_____"
],
[
"from nltk.stem.wordnet import WordNetLemmatizer",
"_____no_output_____"
],
[
"len= WordNetLemmatizer()",
"_____no_output_____"
],
[
"print (len.lemmatize('leaves'))",
"leaf\n"
]
],
[
[
"Term Frequency: (No. of terms (t) Appears in a document ) / (Total no. of terms in the Documents )",
"_____no_output_____"
],
[
"IDF :- It means how important a term is",
"_____no_output_____"
],
[
"A document containing 100 words & the word 'cat' appears 3 times: \n\nTF: = 3/100 = 0.03",
"_____no_output_____"
],
[
"Now Assume that we have 10 million documents & the word 'cat' appears in 1000 times \n\nIDF:- log (10,000,000 / 1000) = 4",
"_____no_output_____"
],
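[
"# Illustrative sketch (added, not part of the original notes): reproducing the TF and IDF\n# numbers from the example above, assuming a base-10 logarithm for IDF.\nimport math\n\ntf = 3 / 100                          # the term appears 3 times in a 100-word document\nidf = math.log10(10_000_000 / 1_000)  # 10 million documents, the term appears in 1,000\nprint(tf, idf, tf * idf)              # 0.03 4.0 0.12",
"_____no_output_____"
],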
[
"NLP Pipeline:\n\n1. Text Gathering\n2. Text Cleaning (Stemming , Lemmetization)\n3. Feature Generation (Bag of Words)\n4. Embedding and Senetence Representation (Word2Vec)\n5. Training the model by leveraging neural nets or regression techniques\n6. Model evaluation\n7. Making adjustments to the model\n8. Deployment of the model.",
"_____no_output_____"
],
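[
"# Minimal sketch of step 3, feature generation with a bag of words (added for illustration;\n# assumes scikit-learn is available, which this notebook does not import elsewhere).\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ncorpus = ['I love NLP', 'NLP is fun', 'I love fun projects']\nvectorizer = CountVectorizer()\nbow = vectorizer.fit_transform(corpus)\nprint(vectorizer.get_feature_names_out())  # use get_feature_names() on older scikit-learn\nprint(bow.toarray())",
"_____no_output_____"
],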
[
"Named Entity Recognition(NER)?\n\nNamed entity recognition is a method to divide a sentence into categories.",
"_____no_output_____"
],
[
"Ex: Neil Armstong of the US had landed on the moon in 1969 will be categorized as\n\nNeil Armstong- name;The US – country;1969 – time(temporal token).\n",
"_____no_output_____"
],
[
"The idea behind NER is to enable the machine to pull out entities like people, places, things, locations, monetary figures, and more.",
"_____no_output_____"
],
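[
"# Rough NER sketch with NLTK (added for illustration; assumes the punkt,\n# averaged_perceptron_tagger, maxent_ne_chunker and words resources have been downloaded\n# with nltk.download()).\nimport nltk\n\nsentence = 'Neil Armstrong of the US had landed on the moon in 1969'\ntagged = nltk.pos_tag(nltk.word_tokenize(sentence))\nprint(nltk.ne_chunk(tagged))  # chunks entities such as PERSON for 'Neil Armstrong'",
"_____no_output_____"
],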
[
"How is feature extraction done in NLP\n",
"_____no_output_____"
],
[
"Q. Name some popular models other than Bag of words?\nAns: Latent semantic indexing, word2vec.",
"_____no_output_____"
],
[
"Q. Explain briefly about word2vec",
"_____no_output_____"
],
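[
"# Brief word2vec sketch (added for illustration; assumes the gensim package is installed,\n# which is not used elsewhere in this notebook; the parameter is called 'size' in gensim 3.x).\nfrom gensim.models import Word2Vec\n\nsentences = [['nlp', 'is', 'fun'], ['word', 'embeddings', 'capture', 'meaning']]\nmodel = Word2Vec(sentences, vector_size=50, window=2, min_count=1)\nprint(model.wv['nlp'][:5])                   # first values of the 50-dimensional vector\nprint(model.wv.most_similar('nlp', topn=2))",
"_____no_output_____"
],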
[
"Q. What is Latent Semantic Indexing?",
"_____no_output_____"
],
[
"Q. What are the metrics used to test an NLP model?",
"_____no_output_____"
],
[
"What are some popular Python libraries used for NLP:\n\nStanford’s CoreNLP, SpaCy , NLTK and TextBlob.",
"_____no_output_____"
]
],
[
[
"from nltk.stem import PorterStemmer \nfrom nltk.tokenize import word_tokenize \n \nps = PorterStemmer() \n \n# choose some words to be stemmed \nwords = [\"program\", \"programs\", \"programer\", \"programing\", \"programers\"] \n\nfor w in words:\n print (w , \":\" , ps.stem(w))",
"program : program\nprograms : program\nprogramer : program\nprograming : program\nprogramers : program\n"
],
[
"from nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\nps = PorterStemmer()\n\nwords = [\"jumping\", \"leaves\" , \"changes\", \"fasting\"]\n\nfor w in words:\n print (w, \":\" , ps.stem(w))",
"jumping : jump\nleaves : leav\nchanges : chang\nfasting : fast\n"
],
[
"from nltk.stem import WordNetLemmatizer\n\nlm = WordNetLemmatizer()\n\nwords = [\"jumping\", \"leaves\" , \"changes\", \"fasting\"]\n\n\nfor w in words:\n print (w, \":\", lm.lemmatize(w))\n\nprint (lm.lemmatize(\"better\"))",
"jumping : jumping\nleaves : leaf\nchanges : change\nfasting : fasting\nbetter\n"
],
[
"from nltk.stem import WordNetLemmatizer \n \nlemmatizer = WordNetLemmatizer() \n\nwords = [\"jumping\", \"leaves\" , \"changes\", \"fasting\", \"rocks\", \"better\"]\n\n# a denotes adjective in \"pos\" \nprint(\"better :\", lemmatizer.lemmatize(\"better\", pos =\"a\")) \n\n \nfor w in words:\n print (w, \":\", lemmatizer.lemmatize(w))",
"better : good\njumping : jumping\nleaves : leaf\nchanges : change\nfasting : fasting\nrocks : rock\nbetter : better\n"
]
],
[
[
"Normalization : \n\neliminating the punctuation , converting the entire text into lowercase or uppercase, converting numbers into words.",
"_____no_output_____"
]
],
[
[
"text = \"He ? Hmm, Why not!\"\n\npunct = \" ? !\"\n\nfor string in punct:\n \n text = text.replace (string , \" \")\n \nprint (text)",
"He Hmm, Why not \n"
]
],
[
[
"POS Tagging : Why we Use?\n\nWhen a word is being used twice in a sentence and has two different meaning.\n then we need to know which word is being used in order to pronounce the text correctly.\n \n \nExample : They refuse (it's verb (deny)) to permit us to obtain the refuse permit ( it's noun (trash)).",
"_____no_output_____"
],
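[
"# POS-tagging sketch for the 'refuse' example above (added for illustration; assumes the\n# punkt and averaged_perceptron_tagger NLTK resources are downloaded).\nimport nltk\n\nsentence = 'They refuse to permit us to obtain the refuse permit'\nprint(nltk.pos_tag(nltk.word_tokenize(sentence)))\n# the first 'refuse' comes back as a verb (VBP) and the second as a noun (NN)",
"_____no_output_____"
],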
[
"Types of POS:\n \n 1. Rule_Based POS\n 2. Stochastic POS",
"_____no_output_____"
]
]
] | [
"code",
"raw",
"code",
"raw",
"code",
"raw"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw",
"raw"
],
[
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"raw",
"raw"
]
] |
e7466e5bb72d4fc8c3ebc74d801e4b41d757e104 | 198,436 | ipynb | Jupyter Notebook | Shopify-DataScience-Intern-Challenge-S2022.ipynb | aniruddhashinde29/Shopify-Data-Science-Intern-Summer-2022-Challenge | 5fc8aa9972109412c867ee625be617d3db1539fa | [
"MIT"
] | null | null | null | Shopify-DataScience-Intern-Challenge-S2022.ipynb | aniruddhashinde29/Shopify-Data-Science-Intern-Summer-2022-Challenge | 5fc8aa9972109412c867ee625be617d3db1539fa | [
"MIT"
] | null | null | null | Shopify-DataScience-Intern-Challenge-S2022.ipynb | aniruddhashinde29/Shopify-Data-Science-Intern-Summer-2022-Challenge | 5fc8aa9972109412c867ee625be617d3db1539fa | [
"MIT"
] | null | null | null | 46.428638 | 25,340 | 0.541424 | [
[
[
"# Question 1: Given some sample data, write a program to answer the following:",
"_____no_output_____"
],
[
"On Shopify, we have exactly 100 sneaker shops, and each of these shops sells only one model of shoe. We want to do some analysis of the average order value (AOV). When we look at orders data over a 30 day window, we naively calculate an AOV of $3145.13. Given that we know these shops are selling sneakers, a relatively affordable item, something seems wrong with our analysis.\n\na. Think about what could be going wrong with our calculation.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Load dataset\n\nshopify_ds = pd.read_csv(\"2019 Winter Data Science Intern Challenge Data Set - Sheet1.csv\", parse_dates = ['created_at'])\nshopify_ds",
"_____no_output_____"
],
[
"shopify_ds.head()",
"_____no_output_____"
]
],
[
[
"## Exploratory Data Analysis and Descriptive Statistics",
"_____no_output_____"
]
],
[
[
"shopify_ds.describe()",
"_____no_output_____"
]
],
[
[
"- Mean order value ($3145.12) is crazy high for a sneaker store considering even if the most expensive sneakers are sold from the most sought after brands.",
"_____no_output_____"
]
],
[
[
"shopify_ds.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 5000 entries, 0 to 4999\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 order_id 5000 non-null int64 \n 1 shop_id 5000 non-null int64 \n 2 user_id 5000 non-null int64 \n 3 order_amount 5000 non-null int64 \n 4 total_items 5000 non-null int64 \n 5 payment_method 5000 non-null object \n 6 created_at 5000 non-null datetime64[ns]\ndtypes: datetime64[ns](1), int64(5), object(1)\nmemory usage: 273.6+ KB\n"
]
],
[
[
"- No nul values in the dataset!",
"_____no_output_____"
]
],
[
[
"shopify_ds['shop_id'].nunique()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize = (10, 10))\nsns.boxplot(shopify_ds['order_amount']);",
"_____no_output_____"
]
],
[
[
"- Looks like there are a lot of outliers that are affecting the average order value. Lets find out those.",
"_____no_output_____"
]
],
[
[
"data = shopify_ds['order_amount']\n\n# Function to Detect Outliers\ndef find_anomalies(data):\n #define a list to accumlate anomalies\n anomalies = []\n \n # Set upper and lower limit to 3 standard deviation\n data_std = np.std(data)\n data_mean = np.mean(data)\n anomaly_cut_off = data_std * 3\n \n lower_limit = data_mean - anomaly_cut_off \n upper_limit = data_mean + anomaly_cut_off\n print(\"Lower Limit:\", lower_limit)\n print(\"Upper Limit:\", upper_limit)\n # Generate outliers\n for outlier in data:\n if outlier > upper_limit or outlier < lower_limit:\n anomalies.append(outlier)\n return anomalies\n\noutliers = find_anomalies(data)\nprint(\"Outliers:\", outliers)",
"Lower Limit: -120690.10466525992\nUpper Limit: 126980.36066525991\nOutliers: [704000, 704000, 704000, 154350, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000, 704000]\n"
]
],
[
[
"- Any value that is beyond 3 standard deviations from the mean is an outlier. Above we have a list of order amounts that fall in the outlier region and because of them the AOV is high for an affordable product like shoes.",
"_____no_output_____"
]
],
[
[
"shopify_ds.loc[shopify_ds['order_amount'].isin(outliers)]",
"_____no_output_____"
]
],
[
[
"Interesting findings here.\n\n- All of the outliers amounts are transactions done by two specific users.\n- They are buying shoes in bulk; 2000 pairs of shoes by user_id 607 and 6 pairs of shoes by user_id 878 which is shooting up the AOV.\n- We can still consider user id 878's order as legitimate as it is just one transaction using debit card. However, the amount is still suspicious enough to spend in one go.\n- User id 607 has purchased sneakers in bulk using a credit card, which is a huge red flag. This is likely a case of fraud. Let's check the amount spent by this user.",
"_____no_output_____"
]
],
[
[
"shopify_ds_user_amount = pd.DataFrame({'mean_amount': shopify_ds.groupby('user_id')['order_amount'].mean()}).reset_index()\nshopify_ds_user_amount.sort_values(by = 'mean_amount', ascending = False).head(30)",
"_____no_output_____"
]
],
[
[
"- A massive mean amount of $704,000 just by one user using a credit card. That is definitley a case of credit card fraud.\n- Second and third highest spending by users 878 and 766 is also suspicious which we will take a look at later.",
"_____no_output_____"
]
],
[
[
"shopify_ds[shopify_ds['user_id'] == 607]",
"_____no_output_____"
]
],
[
[
"- User ID 607 made 17 transactions and each time purchased 2000 shoes from shop id 42 worth 70400 dollars, that is total expenditure of $11.96m with a credit card. Fraud alert.",
"_____no_output_____"
],
[
"Let's remove user Id 607 and plot again.",
"_____no_output_____"
]
],
[
[
"subset_df = shopify_ds_user_amount[shopify_ds_user_amount['user_id'] != 607]\nsubset_df.head()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize = (20, 10))\nplt.bar(subset_df['user_id'], subset_df['mean_amount']);",
"_____no_output_____"
],
[
"# Plot for users with mean amount greater tham $2000\n\nsubset_df = subset_df[subset_df['mean_amount'] > 2000]\nfig = plt.figure(figsize = (20, 10))\nplt.bar(subset_df['user_id'], subset_df['mean_amount']);",
"_____no_output_____"
]
],
[
[
"- User id 878 and 766's spending amount are the ones that stand out in this graph. Let's check.",
"_____no_output_____"
]
],
[
[
"subset_df[subset_df['user_id'] == 878]",
"_____no_output_____"
],
[
"shopify_ds[shopify_ds['user_id'] == 878]",
"_____no_output_____"
]
],
[
[
"- User id 878 has a very high one time spending which is through a debit card, shop id is 78.",
"_____no_output_____"
]
],
[
[
"shopify_ds[shopify_ds['user_id'] == 766]",
"_____no_output_____"
]
],
[
[
"- User 766 also has a one time high spending at shop id 78.\n- Shop id 78 clearly looks suspicious, let us check",
"_____no_output_____"
]
],
[
[
"shopify_ds[shopify_ds['shop_id'] == 78]",
"_____no_output_____"
],
[
"shopify_ds_shop = pd.DataFrame({'mean_amount': shopify_ds.groupby('shop_id')['order_amount'].mean()}).reset_index()\nshopify_ds_shop",
"_____no_output_____"
],
[
"fig = plt.figure(figsize = (30, 15))\nsns.barplot(x = 'shop_id', y = 'mean_amount', data = shopify_ds_shop);",
"_____no_output_____"
]
],
[
[
"- Another case of fraud at shop with id 42 along with 78.",
"_____no_output_____"
]
],
[
[
"shopify_ds[shopify_ds['shop_id'] == 42]",
"_____no_output_____"
]
],
[
[
"- Let's remove the 2 fraud cases: user id 607 and shop id 78 and observe the data again.",
"_____no_output_____"
]
],
[
[
"clean_df = shopify_ds[shopify_ds['user_id' ]!= 607]\nclean_df = clean_df[clean_df['shop_id'] != 78]\nclean_df",
"_____no_output_____"
],
[
"clean_df.describe()",
"_____no_output_____"
]
],
[
[
"b. What metric would you report for this dataset?\n- Mean can still be used because we have identified and isolated the fraud cases and outliers.\n\nc. What is its value?\n- $302.58",
"_____no_output_____"
],
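[
"# Quick numeric check of the answer above (added; not part of the original submission):\n# mean and median order value after dropping the two anomalous cases isolated earlier.\nprint(clean_df['order_amount'].mean())    # ~302.58\nprint(clean_df['order_amount'].median())",
"_____no_output_____"
],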
[
"## Question 2: Please use queries to answer the following questions. Paste your queries along with your final numerical answers below.",
"_____no_output_____"
],
[
"#### Q1. How many orders were shipped by Speedy Express in total?\n\n<br> SELECT COUNT(OrderID)\n<br> FROM Orders O, Shippers S\n<br> WHERE O.ShipperID = S.ShipperID AND S.ShipperName = 'Speedy Express'\n\n<br> Ans: 54",
"_____no_output_____"
],
[
"#### Q2. What is the last name of the employee with the most orders?\n\n<br> SELECT e.LastName, count(*) as Most_Orders\n<br> FROM Employees e, Orders o\n<br> WHERE e.EmployeeID = o.EmployeeID\n<br> GROUP BY e.LastName\n<br> ORDER BY Most_Orders DESC LIMIT 1\n\n<br> Ans: Peacock (40 orders)",
"_____no_output_____"
],
[
"#### Q3. What product was ordered the most by customers in Germany?\n\n<br> SELECT ProductName, MaxOrders \n<br> FROM (SELECT ProductID, MAX(NetOrders) as MaxOrders \n<br> FROM (Select *, Count(DISTINCT OrderID) AS NetOrders \n<br> FROM (SELECT *\n<br> FROM Orders o Inner Join OrderDetails od\n<br> ON o.OrderID = od.OrderID\n<br> WHERE CustomerID IN\n<br> (SELECT CustomerID \n<br> FROM Customers WHERE Country = 'Germany'))\n<br> GROUP BY ProductID\n<br> ORDER BY COUNT(DISTINCT OrderID) DESC)) t1 Inner Join Products p\n<br> WHERE t1.ProductID = p.ProductID\n\n\n\n<br> Ans: Gorgonzola Telino (number of orders are 5)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e74678894adef746a9e54eb3ca0a6a3ab5fc50fb | 9,721 | ipynb | Jupyter Notebook | TextProcessing/CountingWords.ipynb | AneteNordena/Digital_Discourse_ETH713_Fall_2020_CLONE | 34e232473a5d7e265305804143215aedcfa68111 | [
"MIT"
] | null | null | null | TextProcessing/CountingWords.ipynb | AneteNordena/Digital_Discourse_ETH713_Fall_2020_CLONE | 34e232473a5d7e265305804143215aedcfa68111 | [
"MIT"
] | 5 | 2020-11-13T18:53:55.000Z | 2022-02-10T01:58:38.000Z | TextProcessing/CountingWords.ipynb | AneteNordena/Digital_Discourse_ETH713_Fall_2020_CLONE | 34e232473a5d7e265305804143215aedcfa68111 | [
"MIT"
] | 2 | 2021-01-14T19:30:58.000Z | 2021-12-22T21:48:12.000Z | 21.554324 | 153 | 0.485752 | [
[
[
"# Assignment: Count Most Frequenly used words in Veidenbaums.txt",
"_____no_output_____"
],
[
"# Open File\n# Read Text\n# Split Text into word tokens\n# Count these tokens (we need to figure out how to)\n# Save/Print Results",
"_____no_output_____"
],
[
"# File is under /data/Veidenbaums.txt\n# we are under /TextProcessing/CountingWords.ipynb",
"_____no_output_____"
],
[
"# This means one level up and then again down into data\n# \"../data/Veidenbaums.txt\"\n# So called relative path\n",
"_____no_output_____"
],
[
"filePath = \"../data/Veidenbaums.txt\"\nwith open(filePath, encoding=\"utf-8\") as fstream:\n mytext = fstream.read()\nlen(mytext)",
"_____no_output_____"
],
[
"mytext[:120]",
"_____no_output_____"
],
[
"# we could try splitting already but we will get dirty data(words)\nmywords = mytext.split(\" \")\nlen(mywords)",
"_____no_output_____"
],
[
"mywords[:5]",
"_____no_output_____"
],
[
"# we will need to clean all lines which contain *** as ending characters\n# so lets try reading lines\nfilePath = \"../data/Veidenbaums.txt\"\nwith open(filePath, encoding=\"utf-8\") as fstream:\n mylines = fstream.readlines()\nlen(mylines)",
"_____no_output_____"
],
[
"mylines[:15]",
"_____no_output_____"
],
[
"cleanlines = [line for line in mylines if line[0]!='\\n']\nlen(cleanlines)",
"_____no_output_____"
],
[
"cleanlines[:5]",
"_____no_output_____"
],
[
"# we do not want the lines which end with ***\\n\nheadlines = [line for line in cleanlines if line.endswith(\"***\\n\")]\nheadlines[:5]\n# we do not need the headlines!",
"_____no_output_____"
],
[
"# we do not want the lines which end with ***\\n\nnoheadlines = [line for line in cleanlines if not line.endswith(\"***\\n\")]\nnoheadlines[:5]",
"_____no_output_____"
],
[
"# we could save the results \nsavePath = \"../data/noHeadVeidenbaums.txt\"\nwith open(savePath, mode=\"w\", encoding=\"utf-8\") as fstream:\n fstream.writelines(noheadlines)",
"_____no_output_____"
],
[
"stopChars = \"\"\"!?.,\"':;-\"\"\"",
"_____no_output_____"
],
[
"for char in stopChars:\n print(char)",
"!\n?\n.\n,\n\"\n'\n:\n;\n-\n"
],
[
"# One big text\ntextNoHead = \"\".join(noheadlines)\ntextNoHead[:55]",
"_____no_output_____"
],
[
"# take off new lines replace with space (why space ? :)\ntextNoHead = textNoHead.replace(\"\\n\", \" \")\ntextNoHead[:75]",
"_____no_output_____"
],
[
"for char in stopChars:\n print(f\"Replacing {char} with nothing\")\n textNoHead = textNoHead.replace(char, \"\")\ntextNoHead[:55]",
"Replacing ! with nothing\nReplacing ? with nothing\nReplacing . with nothing\nReplacing , with nothing\nReplacing \" with nothing\nReplacing ' with nothing\nReplacing : with nothing\nReplacing ; with nothing\nReplacing - with nothing\n"
],
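[
"# The counting step of the assignment (added sketch): tally the cleaned words in textNoHead\n# and show the most frequent ones; lower() makes the count case-insensitive.\nfrom collections import Counter\n\nword_counts = Counter(textNoHead.lower().split())\nprint(word_counts.most_common(10))",
"_____no_output_____"
],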
[
"savePath = \"../data/noHeadVeidenbaumsOneLine.txt\"\nwith open(savePath, mode=\"w\", encoding=\"utf-8\") as fstream:\n fstream.write(textNoHead)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7468783b530297b9d67ef3af2c03b911868d88b | 325,533 | ipynb | Jupyter Notebook | Yiwei/w and h work.ipynb | firasm/PIMS-BCFSA | 0000098cfa1e7d0662078def51516ea658579a55 | [
"MIT"
] | null | null | null | Yiwei/w and h work.ipynb | firasm/PIMS-BCFSA | 0000098cfa1e7d0662078def51516ea658579a55 | [
"MIT"
] | null | null | null | Yiwei/w and h work.ipynb | firasm/PIMS-BCFSA | 0000098cfa1e7d0662078def51516ea658579a55 | [
"MIT"
] | 3 | 2020-08-15T01:37:30.000Z | 2020-08-20T18:24:04.000Z | 325,533 | 325,533 | 0.81945 | [
[
[
"import os\nos.system('pip install pandas')\nos.system('pip install numpy')\nos.system('pip install pydotplus')\nos.system('pip install io')\nos.system('pip install pydotplus')\nos.system('pip install sklearn')\nos.system('pip install matplotlib')\nos.system('pip install seaborn')\nos.system('pip install scipy')\nos.system('pip install xgboost')\n\n\n\nimport pandas as pd\nimport numpy as np\nimport pydotplus \nimport io\nfrom sklearn import tree\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import export_graphviz\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom scipy import misc\nimport xgboost as xgb\nfrom sklearn.metrics import balanced_accuracy_score, roc_auc_score, make_scorer\n#from sklearn.model_selection import CridSearchCV\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import plot_confusion_matrix\n\n\n\n\n%matplotlib inline\n\n\nurl = 'https://raw.githubusercontent.com/firasm/PIMS-BCFSA/master/New_Sample_Clean_latest.csv'\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (16,10)\nimport pandas as pd\nurl = \"https://raw.githubusercontent.com/neha0812/PIMS-BCFSA/master/New_Sample_Clean_latest.csv\"\ndf = pd.read_csv(url, error_bad_lines=False)\ndf = df.dropna()\ndf=df.drop(['Unnamed: 0'],axis=1)\ndf = df.replace(',','', regex=True)\n\n\n\n\n\n\n\n\n\n#url ='https://raw.githubusercontent.com/shughestr/PIMS_2020_Real_Estate_data/master/sample_clean.csv'\n \n#df = pd.read_csv(url, error_bad_lines=False)\n#df=df.dropna()\n# in this step, we removed 2 columns \nfor col in df.columns:\n if len(df[col].unique())==1:\n df.drop(col,inplace=True,axis=1)\n\ndf.columns",
"_____no_output_____"
],
[
"df['pct_change_bin']=pd.cut(df['pct_change'], bins=np.array([-0.12,-0.06, 0,0.06,0.12]),labels=range(0,4))\ndf=df.dropna()",
"_____no_output_____"
],
[
"from itertools import product\n\ndef check_cardinality(df):\n\n combinations_lst = list(product(df.columns, df.columns))\n relations = ['one_to_one', 'one_to_many', 'many_to_one']\n\n output = []\n for col1, col2 in combinations_lst:\n for relation in relations:\n try:\n pd.merge(df[[col1]], df[[col2]], left_on=col1, right_on=col2, validate=relation)\n output.append([col1, col2, relation])\n except:\n continue\n\n return output",
"_____no_output_____"
],
[
"def get_relation(df, col1, col2): \n first_max = df[[col1, col2]].groupby(col1).count().max()[0]\n second_max = df[[col1, col2]].groupby(col2).count().max()[0]\n if first_max==1:\n if second_max==1:\n return 'one-to-one'\n else:\n return 'one-to-many'\n else:\n if second_max==1:\n return 'many-to-one'\n else:\n return 'many-to-many'\n\nfrom itertools import product\nfor col_i, col_j in product(df.columns, df.columns):\n if col_i == col_j:\n continue\n print(col_i, col_j, get_relation(df, col_i, col_j))",
"ADDRESS ASSESSED_VALUE many-to-many\nADDRESS COMM_CODE many-to-many\nADDRESS COMM_NAME many-to-many\nADDRESS LATITUDE many-to-many\nADDRESS LONGITUDE many-to-many\nADDRESS ROLL_NUMBER many-to-many\nADDRESS Year many-to-many\nADDRESS location many-to-many\nADDRESS fsa many-to-many\nADDRESS pct_change many-to-many\nADDRESS YEAR_OF_CONSTRUCTION many-to-many\nADDRESS saf1 many-to-many\nADDRESS saf2 many-to-many\nADDRESS saf3 many-to-many\nADDRESS saf4 many-to-many\nADDRESS saf5 many-to-many\nADDRESS saf6 many-to-many\nADDRESS saf7 many-to-many\nADDRESS saf8 many-to-many\nADDRESS mr5y many-to-many\nADDRESS Inflation many-to-many\nADDRESS pop1 many-to-many\nADDRESS pop2 many-to-many\nADDRESS pop3 many-to-many\nADDRESS pop4 many-to-many\nADDRESS pop5 many-to-many\nADDRESS pop6 many-to-many\nADDRESS lan1 many-to-many\nADDRESS lan2 many-to-many\nADDRESS inc1 many-to-many\nADDRESS inc2 many-to-many\nADDRESS inc3 many-to-many\nADDRESS inc4 many-to-many\nADDRESS own1 many-to-many\nADDRESS own2 many-to-many\nADDRESS own3 many-to-many\nADDRESS own4 many-to-many\nADDRESS lab1 many-to-many\nADDRESS lab2 many-to-many\nADDRESS lab3 many-to-many\nADDRESS vacancy_rate many-to-many\nADDRESS walk_score_comm many-to-many\nADDRESS transit_score_comm many-to-many\nADDRESS bike_score_comm many-to-many\nADDRESS comm_polulation many-to-many\nADDRESS pct_change_bin many-to-many\nASSESSED_VALUE ADDRESS many-to-many\nASSESSED_VALUE COMM_CODE many-to-many\nASSESSED_VALUE COMM_NAME many-to-many\nASSESSED_VALUE LATITUDE many-to-many\nASSESSED_VALUE LONGITUDE many-to-many\nASSESSED_VALUE ROLL_NUMBER many-to-many\nASSESSED_VALUE Year many-to-many\nASSESSED_VALUE location many-to-many\nASSESSED_VALUE fsa many-to-many\nASSESSED_VALUE pct_change many-to-many\nASSESSED_VALUE YEAR_OF_CONSTRUCTION many-to-many\nASSESSED_VALUE saf1 many-to-many\nASSESSED_VALUE saf2 many-to-many\nASSESSED_VALUE saf3 many-to-many\nASSESSED_VALUE saf4 many-to-many\nASSESSED_VALUE saf5 many-to-many\nASSESSED_VALUE saf6 many-to-many\nASSESSED_VALUE saf7 many-to-many\nASSESSED_VALUE saf8 many-to-many\nASSESSED_VALUE mr5y many-to-many\nASSESSED_VALUE Inflation many-to-many\nASSESSED_VALUE pop1 many-to-many\nASSESSED_VALUE pop2 many-to-many\nASSESSED_VALUE pop3 many-to-many\nASSESSED_VALUE pop4 many-to-many\nASSESSED_VALUE pop5 many-to-many\nASSESSED_VALUE pop6 many-to-many\nASSESSED_VALUE lan1 many-to-many\nASSESSED_VALUE lan2 many-to-many\nASSESSED_VALUE inc1 many-to-many\nASSESSED_VALUE inc2 many-to-many\nASSESSED_VALUE inc3 many-to-many\nASSESSED_VALUE inc4 many-to-many\nASSESSED_VALUE own1 many-to-many\nASSESSED_VALUE own2 many-to-many\nASSESSED_VALUE own3 many-to-many\nASSESSED_VALUE own4 many-to-many\nASSESSED_VALUE lab1 many-to-many\nASSESSED_VALUE lab2 many-to-many\nASSESSED_VALUE lab3 many-to-many\nASSESSED_VALUE vacancy_rate many-to-many\nASSESSED_VALUE walk_score_comm many-to-many\nASSESSED_VALUE transit_score_comm many-to-many\nASSESSED_VALUE bike_score_comm many-to-many\nASSESSED_VALUE comm_polulation many-to-many\nASSESSED_VALUE pct_change_bin many-to-many\nCOMM_CODE ADDRESS many-to-many\nCOMM_CODE ASSESSED_VALUE many-to-many\nCOMM_CODE COMM_NAME many-to-many\nCOMM_CODE LATITUDE many-to-many\nCOMM_CODE LONGITUDE many-to-many\nCOMM_CODE ROLL_NUMBER many-to-many\nCOMM_CODE Year many-to-many\nCOMM_CODE location many-to-many\nCOMM_CODE fsa many-to-many\nCOMM_CODE pct_change many-to-many\nCOMM_CODE YEAR_OF_CONSTRUCTION many-to-many\nCOMM_CODE saf1 many-to-many\nCOMM_CODE saf2 many-to-many\nCOMM_CODE saf3 many-to-many\nCOMM_CODE saf4 
many-to-many\nCOMM_CODE saf5 many-to-many\nCOMM_CODE saf6 many-to-many\nCOMM_CODE saf7 many-to-many\nCOMM_CODE saf8 many-to-many\nCOMM_CODE mr5y many-to-many\nCOMM_CODE Inflation many-to-many\nCOMM_CODE pop1 many-to-many\nCOMM_CODE pop2 many-to-many\nCOMM_CODE pop3 many-to-many\nCOMM_CODE pop4 many-to-many\nCOMM_CODE pop5 many-to-many\nCOMM_CODE pop6 many-to-many\nCOMM_CODE lan1 many-to-many\nCOMM_CODE lan2 many-to-many\nCOMM_CODE inc1 many-to-many\nCOMM_CODE inc2 many-to-many\nCOMM_CODE inc3 many-to-many\nCOMM_CODE inc4 many-to-many\nCOMM_CODE own1 many-to-many\nCOMM_CODE own2 many-to-many\nCOMM_CODE own3 many-to-many\nCOMM_CODE own4 many-to-many\nCOMM_CODE lab1 many-to-many\nCOMM_CODE lab2 many-to-many\nCOMM_CODE lab3 many-to-many\nCOMM_CODE vacancy_rate many-to-many\nCOMM_CODE walk_score_comm many-to-many\nCOMM_CODE transit_score_comm many-to-many\nCOMM_CODE bike_score_comm many-to-many\nCOMM_CODE comm_polulation many-to-many\nCOMM_CODE pct_change_bin many-to-many\nCOMM_NAME ADDRESS many-to-many\nCOMM_NAME ASSESSED_VALUE many-to-many\nCOMM_NAME COMM_CODE many-to-many\nCOMM_NAME LATITUDE many-to-many\nCOMM_NAME LONGITUDE many-to-many\nCOMM_NAME ROLL_NUMBER many-to-many\nCOMM_NAME Year many-to-many\nCOMM_NAME location many-to-many\nCOMM_NAME fsa many-to-many\nCOMM_NAME pct_change many-to-many\nCOMM_NAME YEAR_OF_CONSTRUCTION many-to-many\nCOMM_NAME saf1 many-to-many\nCOMM_NAME saf2 many-to-many\nCOMM_NAME saf3 many-to-many\nCOMM_NAME saf4 many-to-many\nCOMM_NAME saf5 many-to-many\nCOMM_NAME saf6 many-to-many\nCOMM_NAME saf7 many-to-many\nCOMM_NAME saf8 many-to-many\nCOMM_NAME mr5y many-to-many\nCOMM_NAME Inflation many-to-many\nCOMM_NAME pop1 many-to-many\nCOMM_NAME pop2 many-to-many\nCOMM_NAME pop3 many-to-many\nCOMM_NAME pop4 many-to-many\nCOMM_NAME pop5 many-to-many\nCOMM_NAME pop6 many-to-many\nCOMM_NAME lan1 many-to-many\nCOMM_NAME lan2 many-to-many\nCOMM_NAME inc1 many-to-many\nCOMM_NAME inc2 many-to-many\nCOMM_NAME inc3 many-to-many\nCOMM_NAME inc4 many-to-many\nCOMM_NAME own1 many-to-many\nCOMM_NAME own2 many-to-many\nCOMM_NAME own3 many-to-many\nCOMM_NAME own4 many-to-many\nCOMM_NAME lab1 many-to-many\nCOMM_NAME lab2 many-to-many\nCOMM_NAME lab3 many-to-many\nCOMM_NAME vacancy_rate many-to-many\nCOMM_NAME walk_score_comm many-to-many\nCOMM_NAME transit_score_comm many-to-many\nCOMM_NAME bike_score_comm many-to-many\nCOMM_NAME comm_polulation many-to-many\nCOMM_NAME pct_change_bin many-to-many\nLATITUDE ADDRESS many-to-many\nLATITUDE ASSESSED_VALUE many-to-many\nLATITUDE COMM_CODE many-to-many\nLATITUDE COMM_NAME many-to-many\nLATITUDE LONGITUDE many-to-many\nLATITUDE ROLL_NUMBER many-to-many\nLATITUDE Year many-to-many\nLATITUDE location many-to-many\nLATITUDE fsa many-to-many\nLATITUDE pct_change many-to-many\nLATITUDE YEAR_OF_CONSTRUCTION many-to-many\nLATITUDE saf1 many-to-many\nLATITUDE saf2 many-to-many\nLATITUDE saf3 many-to-many\nLATITUDE saf4 many-to-many\nLATITUDE saf5 many-to-many\nLATITUDE saf6 many-to-many\nLATITUDE saf7 many-to-many\nLATITUDE saf8 many-to-many\nLATITUDE mr5y many-to-many\nLATITUDE Inflation many-to-many\nLATITUDE pop1 many-to-many\nLATITUDE pop2 many-to-many\nLATITUDE pop3 many-to-many\nLATITUDE pop4 many-to-many\nLATITUDE pop5 many-to-many\nLATITUDE pop6 many-to-many\nLATITUDE lan1 many-to-many\nLATITUDE lan2 many-to-many\nLATITUDE inc1 many-to-many\nLATITUDE inc2 many-to-many\nLATITUDE inc3 many-to-many\nLATITUDE inc4 many-to-many\nLATITUDE own1 many-to-many\nLATITUDE own2 many-to-many\nLATITUDE own3 many-to-many\nLATITUDE own4 
many-to-many\nLATITUDE lab1 many-to-many\nLATITUDE lab2 many-to-many\nLATITUDE lab3 many-to-many\nLATITUDE vacancy_rate many-to-many\nLATITUDE walk_score_comm many-to-many\nLATITUDE transit_score_comm many-to-many\nLATITUDE bike_score_comm many-to-many\nLATITUDE comm_polulation many-to-many\nLATITUDE pct_change_bin many-to-many\nLONGITUDE ADDRESS many-to-many\nLONGITUDE ASSESSED_VALUE many-to-many\nLONGITUDE COMM_CODE many-to-many\nLONGITUDE COMM_NAME many-to-many\nLONGITUDE LATITUDE many-to-many\nLONGITUDE ROLL_NUMBER many-to-many\nLONGITUDE Year many-to-many\nLONGITUDE location many-to-many\nLONGITUDE fsa many-to-many\nLONGITUDE pct_change many-to-many\nLONGITUDE YEAR_OF_CONSTRUCTION many-to-many\nLONGITUDE saf1 many-to-many\nLONGITUDE saf2 many-to-many\nLONGITUDE saf3 many-to-many\nLONGITUDE saf4 many-to-many\nLONGITUDE saf5 many-to-many\nLONGITUDE saf6 many-to-many\nLONGITUDE saf7 many-to-many\nLONGITUDE saf8 many-to-many\nLONGITUDE mr5y many-to-many\nLONGITUDE Inflation many-to-many\nLONGITUDE pop1 many-to-many\nLONGITUDE pop2 many-to-many\nLONGITUDE pop3 many-to-many\nLONGITUDE pop4 many-to-many\nLONGITUDE pop5 many-to-many\nLONGITUDE pop6 many-to-many\nLONGITUDE lan1 many-to-many\nLONGITUDE lan2 many-to-many\nLONGITUDE inc1 many-to-many\nLONGITUDE inc2 many-to-many\nLONGITUDE inc3 many-to-many\nLONGITUDE inc4 many-to-many\nLONGITUDE own1 many-to-many\nLONGITUDE own2 many-to-many\nLONGITUDE own3 many-to-many\nLONGITUDE own4 many-to-many\nLONGITUDE lab1 many-to-many\nLONGITUDE lab2 many-to-many\nLONGITUDE lab3 many-to-many\nLONGITUDE vacancy_rate many-to-many\nLONGITUDE walk_score_comm many-to-many\nLONGITUDE transit_score_comm many-to-many\nLONGITUDE bike_score_comm many-to-many\n"
],
[
"\ndf_num = df.drop(['ADDRESS','COMM_CODE','COMM_NAME','location','fsa','comm_polulation'],axis=1)\ndf_num.columns\ndf_num.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 14548 entries, 0 to 15999\nData columns (total 41 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ASSESSED_VALUE 14548 non-null float64 \n 1 LATITUDE 14548 non-null float64 \n 2 LONGITUDE 14548 non-null float64 \n 3 ROLL_NUMBER 14548 non-null int64 \n 4 Year 14548 non-null int64 \n 5 pct_change 14548 non-null float64 \n 6 YEAR_OF_CONSTRUCTION 14548 non-null float64 \n 7 saf1 14548 non-null float64 \n 8 saf2 14548 non-null float64 \n 9 saf3 14548 non-null float64 \n 10 saf4 14548 non-null float64 \n 11 saf5 14548 non-null float64 \n 12 saf6 14548 non-null float64 \n 13 saf7 14548 non-null float64 \n 14 saf8 14548 non-null float64 \n 15 mr5y 14548 non-null float64 \n 16 Inflation 14548 non-null float64 \n 17 pop1 14548 non-null int64 \n 18 pop2 14548 non-null int64 \n 19 pop3 14548 non-null int64 \n 20 pop4 14548 non-null float64 \n 21 pop5 14548 non-null float64 \n 22 pop6 14548 non-null float64 \n 23 lan1 14548 non-null float64 \n 24 lan2 14548 non-null float64 \n 25 inc1 14548 non-null int64 \n 26 inc2 14548 non-null float64 \n 27 inc3 14548 non-null int64 \n 28 inc4 14548 non-null int64 \n 29 own1 14548 non-null float64 \n 30 own2 14548 non-null float64 \n 31 own3 14548 non-null float64 \n 32 own4 14548 non-null float64 \n 33 lab1 14548 non-null float64 \n 34 lab2 14548 non-null float64 \n 35 lab3 14548 non-null float64 \n 36 vacancy_rate 14548 non-null float64 \n 37 walk_score_comm 14548 non-null float64 \n 38 transit_score_comm 14548 non-null float64 \n 39 bike_score_comm 14548 non-null float64 \n 40 pct_change_bin 14548 non-null category\ndtypes: category(1), float64(32), int64(8)\nmemory usage: 4.6 MB\n"
],
[
"from sklearn import preprocessing\nx=df_num.drop(['ASSESSED_VALUE','pct_change','Year','ROLL_NUMBER','pct_change_bin'],axis=1).values\n#min_max_scaler=preprocessing.MinMaxScaler()\n#x_scaled=min_max_scaler.fit_transform(x)\n\n\nstandard_scaler=preprocessing.StandardScaler()\nx_scaled=standard_scaler.fit_transform(x)\n\n\ndf_scaled=pd.DataFrame(x_scaled,columns=df_num.drop(['ASSESSED_VALUE','pct_change_bin','pct_change','Year','ROLL_NUMBER'],axis=1).columns)\n#df_scaled['ASSESSED_VALUE']=df_num['ASSESSED_VALUE']\n\ndf_scaled[['ASSESSED_VALUE','pct_change','pct_change_bin']]=df_num[['ASSESSED_VALUE','pct_change','pct_change_bin']]\n\ndf_scaled=df_scaled.dropna()",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 14548 entries, 0 to 15999\nData columns (total 47 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ADDRESS 14548 non-null object \n 1 ASSESSED_VALUE 14548 non-null float64 \n 2 COMM_CODE 14548 non-null object \n 3 COMM_NAME 14548 non-null object \n 4 LATITUDE 14548 non-null float64 \n 5 LONGITUDE 14548 non-null float64 \n 6 ROLL_NUMBER 14548 non-null int64 \n 7 Year 14548 non-null int64 \n 8 location 14548 non-null object \n 9 fsa 14548 non-null object \n 10 pct_change 14548 non-null float64 \n 11 YEAR_OF_CONSTRUCTION 14548 non-null float64 \n 12 saf1 14548 non-null float64 \n 13 saf2 14548 non-null float64 \n 14 saf3 14548 non-null float64 \n 15 saf4 14548 non-null float64 \n 16 saf5 14548 non-null float64 \n 17 saf6 14548 non-null float64 \n 18 saf7 14548 non-null float64 \n 19 saf8 14548 non-null float64 \n 20 mr5y 14548 non-null float64 \n 21 Inflation 14548 non-null float64 \n 22 pop1 14548 non-null int64 \n 23 pop2 14548 non-null int64 \n 24 pop3 14548 non-null int64 \n 25 pop4 14548 non-null float64 \n 26 pop5 14548 non-null float64 \n 27 pop6 14548 non-null float64 \n 28 lan1 14548 non-null float64 \n 29 lan2 14548 non-null float64 \n 30 inc1 14548 non-null int64 \n 31 inc2 14548 non-null float64 \n 32 inc3 14548 non-null int64 \n 33 inc4 14548 non-null int64 \n 34 own1 14548 non-null float64 \n 35 own2 14548 non-null float64 \n 36 own3 14548 non-null float64 \n 37 own4 14548 non-null float64 \n 38 lab1 14548 non-null float64 \n 39 lab2 14548 non-null float64 \n 40 lab3 14548 non-null float64 \n 41 vacancy_rate 14548 non-null float64 \n 42 walk_score_comm 14548 non-null float64 \n 43 transit_score_comm 14548 non-null float64 \n 44 bike_score_comm 14548 non-null float64 \n 45 comm_polulation 14548 non-null object \n 46 pct_change_bin 14548 non-null category\ndtypes: category(1), float64(32), int64(8), object(6)\nmemory usage: 5.9+ MB\n"
],
[
"df_scaled1=df_num.drop(['Year','ROLL_NUMBER'],axis=1)",
"_____no_output_____"
],
[
"df_scaled1['Age']=2020-df_scaled1['YEAR_OF_CONSTRUCTION']\ndf_scaled=df_scaled1.drop(['YEAR_OF_CONSTRUCTION'],axis=1)",
"_____no_output_____"
],
[
"x_train, x_test, y_train, y_test=train_test_split(df_scaled.drop(['ASSESSED_VALUE','pct_change'],axis=1),df_scaled['pct_change'],test_size=0.2,random_state=0)",
"_____no_output_____"
],
[
"df_scaled1.info()\n#x_train['pct_change_bin']",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 14548 entries, 0 to 15999\nData columns (total 40 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ASSESSED_VALUE 14548 non-null float64 \n 1 LATITUDE 14548 non-null float64 \n 2 LONGITUDE 14548 non-null float64 \n 3 pct_change 14548 non-null float64 \n 4 YEAR_OF_CONSTRUCTION 14548 non-null float64 \n 5 saf1 14548 non-null float64 \n 6 saf2 14548 non-null float64 \n 7 saf3 14548 non-null float64 \n 8 saf4 14548 non-null float64 \n 9 saf5 14548 non-null float64 \n 10 saf6 14548 non-null float64 \n 11 saf7 14548 non-null float64 \n 12 saf8 14548 non-null float64 \n 13 mr5y 14548 non-null float64 \n 14 Inflation 14548 non-null float64 \n 15 pop1 14548 non-null int64 \n 16 pop2 14548 non-null int64 \n 17 pop3 14548 non-null int64 \n 18 pop4 14548 non-null float64 \n 19 pop5 14548 non-null float64 \n 20 pop6 14548 non-null float64 \n 21 lan1 14548 non-null float64 \n 22 lan2 14548 non-null float64 \n 23 inc1 14548 non-null int64 \n 24 inc2 14548 non-null float64 \n 25 inc3 14548 non-null int64 \n 26 inc4 14548 non-null int64 \n 27 own1 14548 non-null float64 \n 28 own2 14548 non-null float64 \n 29 own3 14548 non-null float64 \n 30 own4 14548 non-null float64 \n 31 lab1 14548 non-null float64 \n 32 lab2 14548 non-null float64 \n 33 lab3 14548 non-null float64 \n 34 vacancy_rate 14548 non-null float64 \n 35 walk_score_comm 14548 non-null float64 \n 36 transit_score_comm 14548 non-null float64 \n 37 bike_score_comm 14548 non-null float64 \n 38 pct_change_bin 14548 non-null category\n 39 Age 14548 non-null float64 \ndtypes: category(1), float64(33), int64(6)\nmemory usage: 5.1 MB\n"
],
[
"x_test.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 2910 entries, 11228 to 1254\nData columns (total 37 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 LATITUDE 2910 non-null float64 \n 1 LONGITUDE 2910 non-null float64 \n 2 saf1 2910 non-null float64 \n 3 saf2 2910 non-null float64 \n 4 saf3 2910 non-null float64 \n 5 saf4 2910 non-null float64 \n 6 saf5 2910 non-null float64 \n 7 saf6 2910 non-null float64 \n 8 saf7 2910 non-null float64 \n 9 saf8 2910 non-null float64 \n 10 mr5y 2910 non-null float64 \n 11 Inflation 2910 non-null float64 \n 12 pop1 2910 non-null int64 \n 13 pop2 2910 non-null int64 \n 14 pop3 2910 non-null int64 \n 15 pop4 2910 non-null float64 \n 16 pop5 2910 non-null float64 \n 17 pop6 2910 non-null float64 \n 18 lan1 2910 non-null float64 \n 19 lan2 2910 non-null float64 \n 20 inc1 2910 non-null int64 \n 21 inc2 2910 non-null float64 \n 22 inc3 2910 non-null int64 \n 23 inc4 2910 non-null int64 \n 24 own1 2910 non-null float64 \n 25 own2 2910 non-null float64 \n 26 own3 2910 non-null float64 \n 27 own4 2910 non-null float64 \n 28 lab1 2910 non-null float64 \n 29 lab2 2910 non-null float64 \n 30 lab3 2910 non-null float64 \n 31 vacancy_rate 2910 non-null float64 \n 32 walk_score_comm 2910 non-null float64 \n 33 transit_score_comm 2910 non-null float64 \n 34 bike_score_comm 2910 non-null float64 \n 35 pct_change_bin 2910 non-null category\n 36 Age 2910 non-null float64 \ndtypes: category(1), float64(30), int64(6)\nmemory usage: 844.1 KB\n"
],
[
"feature_selected=['saf4','saf5','mr5y','Inflation','pop1','pop2','inc3','own3','lab1','walk_score_comm','Age','saf2','saf3','pop3','pop4','inc1','inc2','own2','lab2','vacancy_rate']\n\nx_train2=x_train[feature_selected]\n\n\nx_test2=x_test[feature_selected]\ny_train=x_train['pct_change_bin']\n#x_train2.info()\n#x_train.info()\ny_test=x_test['pct_change_bin']\n\nprint(x_train.shape[0],x_train2.shape[0],x_test.shape[0],y_test.shape[0])\n",
"11638 11638 2910 2910\n"
],
[
"xgb_classifier=xgb.XGBClassifier(objective='multi:softprob')\n\n\nxgb_classifier.fit(x_train2, y_train)\ny_pred_x=xgb_classifier.predict(x_test2)\nprint(y_pred_x.shape[0])\ny_pred_x=list(y_pred_x)\ny_test=list(y_test)\n\n\nl1=[]\nfor index in range(2910):\n if (y_pred_x[index]==1) & (y_test[index]==0):\n l1.append(index)\n \nl2=[] \nfor index in range(2910):\n if (y_pred_x[index]==1) & (y_test[index]==2):\n l2.append(index)\n\nl3=[] \nfor index in range(2910):\n if (y_pred_x[index]==2) & (y_test[index]==1):\n l3.append(index) \nl0_1=l1\nl2_1=l2\nl1_2=l3\n\n#d0_1=x_train2[l0_1]\n#d2_1=x_train2[l2_1]\n#d0_1\n#d2_1\n\nprint(len(l0_1),len(l2_1))",
"2910\n256 213\n"
],
[
"x0_1=x_test2.iloc[l0_1]\nx2_1=x_test2.iloc[l2_1]\nx0_1\nx2_1",
"_____no_output_____"
],
[
"Correlation=df_scaled.corr()\nCorrelation\ninformation=x_test2.describe()\nCorrelation",
"_____no_output_____"
],
[
"di1={'saf4':0,'saf5':0,'mr5y':0,'Inflation':0,'pop1':0,'pop2':0,'inc3':0,'own3':0,'lab1':0,'walk_score_comm':0,'Age':0,'saf2':0,'saf3':0,'pop3':0,'pop4':0,'inc1':0,'inc2':0,'own2':0,'lab2':0,'vacancy_rate':0}\ndef reason0_1(df0_1):\n for index in range(256):\n for value in x_train2.columns:\n if (Correlation.iloc[4][value]>0) & (x_test2.iloc[index][value]>information[value][-2]):\n print(str(index)+':'+value+' '+'is too high')\n di1[value]+=1\n elif (Correlation.iloc[4][value]<0) & (x_test2.iloc[index][value]<information[value][-4]):\n print(str(index)+':'+value+' '+'is too low')\n di1[value]+=1\n else:\n pass\n return print(di1)\n\n\ndi2={'Age':0,'saf4':0,'saf5':0,'mr5y':0,'Inflation':0,'pop1':0,'pop2':0,'inc3':0,'own3':0,'lab1':0,'walk_score_comm':0,'saf3':0,'saf2':0,'pop3':0,'pop4':0,'inc1':0,'inc2':0,'own2':0,'lab2':0,'vacancy_rate':0}\ndef reason2_1(df2_to_1):\n for index in range(213):\n for value in x_train2.columns:\n if (Correlation.iloc[0][value]>0) & (x2_1.iloc[index][value]<information[value][-4]):\n print(str(index)+':'+value+' '+'is too high')\n di2[value]+=1\n elif (Correlation.iloc[4][value]<0) & (x_test2.iloc[index][value]>information[value][-2]):\n print(str(index)+':'+value+' '+'is too low')\n di2[value]+=1\n else:\n pass\n return print(di2)\nprint('First')\nreason0_1(x0_1)\nprint('Second')\nreason2_1(x2_1)\n",
"First\n0:saf3 is too high\n1:inc3 is too low\n1:inc1 is too low\n1:own2 is too high\n1:vacancy_rate is too high\n2:mr5y is too high\n2:Inflation is too high\n3:saf4 is too high\n3:saf5 is too high\n3:mr5y is too high\n3:Inflation is too high\n3:inc3 is too low\n3:walk_score_comm is too high\n3:inc1 is too low\n3:own2 is too high\n4:saf3 is too high\n5:mr5y is too high\n5:Inflation is too high\n5:lab2 is too low\n6:saf4 is too high\n6:saf5 is too high\n6:own3 is too high\n6:lab1 is too high\n6:walk_score_comm is too high\n6:Age is too high\n6:saf2 is too high\n6:saf3 is too high\n6:pop4 is too low\n6:inc2 is too high\n7:lab2 is too low\n8:saf4 is too high\n8:saf5 is too high\n8:own3 is too high\n8:lab1 is too high\n8:walk_score_comm is too high\n8:saf2 is too high\n8:saf3 is too high\n8:pop4 is too low\n8:inc2 is too high\n8:vacancy_rate is too high\n10:saf3 is too high\n11:saf4 is too high\n11:saf5 is too high\n11:pop1 is too low\n11:pop2 is too low\n11:inc3 is too low\n11:own3 is too high\n11:saf2 is too high\n11:pop3 is too low\n11:inc1 is too low\n11:own2 is too high\n12:saf5 is too high\n12:pop1 is too low\n12:pop2 is too low\n12:inc3 is too low\n12:walk_score_comm is too high\n12:Age is too high\n12:pop3 is too low\n12:inc1 is too low\n12:vacancy_rate is too high\n14:pop1 is too low\n14:pop2 is too low\n14:own3 is too high\n14:pop3 is too low\n14:pop4 is too low\n14:inc2 is too high\n14:lab2 is too low\n15:saf5 is too high\n15:pop1 is too low\n15:pop2 is too low\n15:walk_score_comm is too high\n15:saf2 is too high\n15:pop3 is too low\n15:pop4 is too low\n15:inc2 is too high\n15:own2 is too high\n15:lab2 is too low\n15:vacancy_rate is too high\n16:saf4 is too high\n16:mr5y is too high\n16:Inflation is too high\n16:inc3 is too low\n16:Age is too high\n16:saf3 is too high\n16:inc1 is too low\n17:mr5y is too high\n17:Inflation is too high\n17:lab2 is too low\n18:saf4 is too high\n18:mr5y is too high\n18:Inflation is too high\n18:pop1 is too low\n18:lab1 is too high\n18:walk_score_comm is too high\n18:Age is too high\n18:saf2 is too high\n18:saf3 is too high\n18:pop3 is too low\n18:pop4 is too low\n18:inc2 is too high\n18:lab2 is too low\n18:vacancy_rate is too high\n19:saf4 is too high\n19:pop1 is too low\n19:pop2 is too low\n19:lab1 is too high\n19:Age is too high\n19:saf2 is too high\n19:pop3 is too low\n19:pop4 is too low\n19:inc2 is too high\n19:vacancy_rate is too high\n20:Age is too high\n21:inc3 is too low\n21:walk_score_comm is too high\n21:inc1 is too low\n21:own2 is too high\n22:saf4 is too high\n22:own3 is too high\n22:saf2 is too high\n24:saf4 is too high\n24:mr5y is too high\n24:Inflation is too high\n24:pop1 is too low\n24:pop2 is too low\n24:inc3 is too low\n24:own3 is too high\n24:walk_score_comm is too high\n24:Age is too high\n24:saf2 is too high\n24:saf3 is too high\n24:pop3 is too low\n24:inc1 is too low\n26:saf4 is too high\n26:mr5y is too high\n26:Inflation is too high\n26:own3 is too high\n26:vacancy_rate is too high\n28:saf5 is too high\n28:pop1 is too low\n28:pop2 is too low\n28:walk_score_comm is too high\n28:saf2 is too high\n28:pop3 is too low\n28:pop4 is too low\n28:inc2 is too high\n28:own2 is too high\n28:lab2 is too low\n28:vacancy_rate is too high\n29:saf4 is too high\n29:saf5 is too high\n29:pop1 is too low\n29:pop2 is too low\n29:own3 is too high\n29:lab1 is too high\n29:saf2 is too high\n29:pop3 is too low\n29:pop4 is too low\n29:own2 is too high\n29:lab2 is too low\n29:vacancy_rate is too high\n30:inc2 is too high\n31:saf4 is too high\n31:inc3 is too 
low\n31:own3 is too high\n31:inc1 is too low\n31:own2 is too high\n32:saf4 is too high\n32:saf5 is too high\n32:mr5y is too high\n32:Inflation is too high\n32:pop1 is too low\n32:pop2 is too low\n32:lab1 is too high\n32:walk_score_comm is too high\n32:Age is too high\n32:saf2 is too high\n32:pop3 is too low\n32:pop4 is too low\n32:lab2 is too low\n33:saf3 is too high\n34:inc3 is too low\n34:walk_score_comm is too high\n34:inc1 is too low\n34:own2 is too high\n35:saf4 is too high\n35:saf5 is too high\n35:inc3 is too low\n35:Age is too high\n35:inc1 is too low\n36:mr5y is too high\n36:Inflation is too high\n36:walk_score_comm is too high\n36:Age is too high\n36:saf2 is too high\n36:saf3 is too high\n36:vacancy_rate is too high\n38:saf3 is too high\n40:saf3 is too high\n41:mr5y is too high\n41:Inflation is too high\n42:pop1 is too low\n42:pop2 is too low\n42:own3 is too high\n42:saf2 is too high\n42:pop3 is too low\n42:pop4 is too low\n42:inc2 is too high\n42:lab2 is too low\n44:inc3 is too low\n44:inc1 is too low\n44:own2 is too high\n45:inc3 is too low\n45:own3 is too high\n45:walk_score_comm is too high\n45:inc1 is too low\n45:own2 is too high\n46:mr5y is too high\n46:Inflation is too high\n46:lab1 is too high\n46:pop4 is too low\n46:inc2 is too high\n46:lab2 is too low\n49:saf5 is too high\n49:mr5y is too high\n49:Inflation is too high\n49:inc3 is too low\n49:inc1 is too low\n49:own2 is too high\n49:vacancy_rate is too high\n50:saf4 is too high\n50:saf5 is too high\n50:inc3 is too low\n50:walk_score_comm is too high\n50:Age is too high\n50:saf2 is too high\n50:saf3 is too high\n50:inc1 is too low\n50:own2 is too high\n51:inc3 is too low\n51:inc1 is too low\n51:own2 is too high\n54:inc3 is too low\n54:own3 is too high\n54:walk_score_comm is too high\n54:inc1 is too low\n54:own2 is too high\n55:lab2 is too low\n56:saf5 is too high\n56:own3 is too high\n57:lab2 is too low\n58:inc2 is too high\n61:saf4 is too high\n61:saf5 is too high\n61:mr5y is too high\n61:Inflation is too high\n61:pop1 is too low\n61:pop2 is too low\n61:own3 is too high\n61:lab1 is too high\n61:Age is too high\n61:saf2 is too high\n61:pop3 is too low\n61:pop4 is too low\n61:own2 is too high\n61:lab2 is too low\n61:vacancy_rate is too high\n62:saf4 is too high\n62:saf5 is too high\n62:inc3 is too low\n62:inc1 is too low\n64:saf4 is too high\n64:saf5 is too high\n64:mr5y is too high\n64:Inflation is too high\n64:pop1 is too low\n64:pop2 is too low\n64:lab1 is too high\n64:walk_score_comm is too high\n64:Age is too high\n64:saf2 is too high\n64:pop3 is too low\n64:pop4 is too low\n64:lab2 is too low\n64:vacancy_rate is too high\n65:inc3 is too low\n65:Age is too high\n65:inc1 is too low\n66:saf5 is too high\n66:mr5y is too high\n66:Inflation is too high\n66:inc3 is too low\n66:inc1 is too low\n66:own2 is too high\n67:mr5y is too high\n67:Inflation is too high\n67:lab1 is too high\n67:pop4 is too low\n67:inc2 is too high\n67:lab2 is too low\n68:walk_score_comm is too high\n68:saf3 is too high\n68:vacancy_rate is too high\n69:mr5y is too high\n69:Inflation is too high\n69:pop1 is too low\n69:pop2 is too low\n69:lab1 is too high\n69:saf3 is too high\n69:pop3 is too low\n69:pop4 is too low\n69:own2 is too high\n69:lab2 is too low\n70:saf4 is too high\n70:saf5 is too high\n70:mr5y is too high\n70:Inflation is too high\n70:inc3 is too low\n70:Age is too high\n70:inc1 is too low\n70:own2 is too high\n70:vacancy_rate is too high\n71:saf3 is too high\n72:saf4 is too high\n72:saf5 is too high\n72:mr5y is too high\n72:Inflation is too 
high\n72:pop1 is too low\n72:pop2 is too low\n72:inc3 is too low\n72:Age is too high\n72:saf2 is too high\n72:pop3 is too low\n72:inc1 is too low\n73:saf4 is too high\n73:saf5 is too high\n73:mr5y is too high\n73:Inflation is too high\n73:pop1 is too low\n73:pop2 is too low\n73:inc3 is too low\n73:walk_score_comm is too high\n73:Age is too high\n73:saf2 is too high\n73:pop3 is too low\n73:inc1 is too low\n73:vacancy_rate is too high\n74:inc2 is too high\n75:lab2 is too low\n76:saf5 is too high\n76:mr5y is too high\n76:Inflation is too high\n76:inc3 is too low\n76:inc1 is too low\n76:own2 is too high\n76:vacancy_rate is too high\n77:saf4 is too high\n77:inc3 is too low\n77:own3 is too high\n77:walk_score_comm is too high\n77:inc1 is too low\n77:own2 is too high\n78:inc3 is too low\n78:walk_score_comm is too high\n78:inc1 is too low\n78:own2 is too high\n79:lab2 is too low\n80:pop1 is too low\n80:pop2 is too low\n80:inc3 is too low\n80:own3 is too high\n80:Age is too high\n80:pop3 is too low\n80:inc1 is too low\n80:own2 is too high\n80:vacancy_rate is too high\n81:inc3 is too low\n81:own3 is too high\n81:walk_score_comm is too high\n81:inc1 is too low\n81:own2 is too high\n82:inc3 is too low\n82:Age is too high\n82:inc1 is too low\n83:saf3 is too high\n85:saf3 is too high\n86:mr5y is too high\n86:Inflation is too high\n86:inc3 is too low\n86:own3 is too high\n86:saf3 is too high\n86:inc1 is too low\n86:own2 is too high\n86:vacancy_rate is too high\n87:mr5y is too high\n87:Inflation is too high\n87:inc2 is too high\n88:mr5y is too high\n88:Inflation is too high\n88:saf3 is too high\n89:lab2 is too low\n90:saf4 is too high\n90:mr5y is too high\n90:Inflation is too high\n90:inc3 is too low\n90:saf3 is too high\n90:inc1 is too low\n91:inc3 is too low\n91:inc1 is too low\n91:own2 is too high\n92:saf5 is too high\n92:pop1 is too low\n92:pop2 is too low\n92:inc3 is too low\n92:own3 is too high\n92:walk_score_comm is too high\n92:Age is too high\n92:saf2 is too high\n92:pop3 is too low\n92:inc1 is too low\n92:own2 is too high\n93:saf3 is too high\n94:inc2 is too high\n95:Age is too high\n96:lab1 is too high\n96:saf2 is too high\n96:pop4 is too low\n96:inc2 is too high\n96:lab2 is too low\n97:mr5y is too high\n97:Inflation is too high\n97:lab1 is too high\n97:pop4 is too low\n97:inc2 is too high\n97:lab2 is too low\n98:saf5 is too high\n98:mr5y is too high\n98:Inflation is too high\n98:inc3 is too low\n98:inc1 is too low\n98:own2 is too high\n98:vacancy_rate is too high\n99:inc3 is too low\n99:inc1 is too low\n99:own2 is too high\n99:vacancy_rate is too high\n100:saf4 is too high\n100:saf5 is too high\n100:inc3 is too low\n100:Age is too high\n100:inc1 is too low\n"
],
[
"##0 to 1",
"_____no_output_____"
],
[
"plt.style.use('fivethirtyeight')\nfig, ax = plt.subplots()\n\n# Example data\npeople = ('Age','saf4','saf5','mr5y','Inflation','pop1','pop2','inc3','own3','lab1','walk_score_comm','saf3','saf2','pop3','pop4','inc1','inc2','own2','lab2','vacancy_rate')\ny_pos = np.arange(len(people))\nperformance = list(di1.values())\n\n\nbars=ax.barh(y_pos, performance,align='center')\nlabels=ax.get_xticklabels()\n#for bar in bars:\n # yval = bar.get_height()\n #ax.text(bar.get_x()+0.08, yval + .05, xval,fontsize=14)\nax.set_yticks(y_pos)\nax.set_yticklabels(people)\nax.invert_yaxis() # labels read top-to-bottom\nax.set_xlabel('Frequency')\nax.set_title('Reasons for misclassified from group 0 to group 1')",
"_____no_output_____"
],
[
"bars=ax.barh(y_pos, performance,align='center')\nperformance=di2.values()\nplt.style.use('fivethirtyeight')\nfig, ax = plt.subplots()\n\n# Example data\npeople = ('Age','saf4','saf5','mr5y','Inflation','pop1','pop2','inc3','own3','lab1','walk_score_comm','saf3','saf2','pop3','pop4','inc1','inc2','own2','lab2','vacancy_rate')\ny_pos = np.arange(len(people))\n\n\n\nbars=ax.barh(y_pos, performance,align='center')\nlabels=ax.get_xticklabels()\n#for bar in bars:\n # yval = bar.get_height()\n #ax.text(bar.get_x()+0.08, yval + .05, xval,fontsize=14)\nax.set_yticks(y_pos)\nax.set_yticklabels(people)\nax.invert_yaxis() # labels read top-to-bottom\nax.set_xlabel('Frequency')\nax.set_title('Reasons for misclassified from group 2 to group 1')",
"_____no_output_____"
],
[
"plotdata = pd.DataFrame({\n \"c_0_to_1\":list(di1.values()),\n \"c_2_to_1\":list(di2.values()),\n }, \n index=list(people)\n)\nplotdata.plot(kind=\"barh\",width=0.8)\nplt.title(\"Reasons why outliers appear\")\nplt.xlabel(\"Frequency\")\nplt.ylabel(\"Reasons\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74688d60b33fd4441b99903524ab003c3b2ce15 | 134,053 | ipynb | Jupyter Notebook | 1 Supervised Learning/Classification/Circle-No Circle/Deep Learning Circle-No Circle.ipynb | HJJ256/Machine-Learning-2018-present- | f01d2f13025b02792612ee42e2e58f13f461b999 | [
"MIT"
] | 1 | 2020-09-26T20:02:57.000Z | 2020-09-26T20:02:57.000Z | 1 Supervised Learning/Classification/Circle-No Circle/Deep Learning Circle-No Circle.ipynb | HJJ256/Machine-Learning-2018-present- | f01d2f13025b02792612ee42e2e58f13f461b999 | [
"MIT"
] | null | null | null | 1 Supervised Learning/Classification/Circle-No Circle/Deep Learning Circle-No Circle.ipynb | HJJ256/Machine-Learning-2018-present- | f01d2f13025b02792612ee42e2e58f13f461b999 | [
"MIT"
] | null | null | null | 48.870944 | 6,696 | 0.673845 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom scipy import misc\nimport pandas as pd\n%matplotlib inline",
"_____no_output_____"
],
[
"train_labels = open('Train/labels.txt')\ndata = np.loadtxt(train_labels,delimiter = ' ',dtype=str)",
"_____no_output_____"
],
[
"train_imname = data[0:,0]\ntrain_y = np.array([[int(x) for x in data[0:,1]]])",
"_____no_output_____"
],
[
"train_x_orig = []\nfor name in train_imname:\n s = \"Train/\"+name\n train_x_orig.append(Image.open(s).convert('L')) #Accuracy with L more than LA",
"_____no_output_____"
],
[
"plt.imshow(train_x_orig[0])",
"_____no_output_____"
],
[
"test_labels = open('Test/labels.txt')\ndata = np.loadtxt(test_labels,delimiter = ' ',dtype=str)\ntest_imname = data[0:,0]\ntest_y = np.array([[int(x) for x in data[0:,1]]])\ntest_x_orig = []\nfor name in test_imname:\n s = \"Test/\"+name\n test_x_orig.append(Image.open(s).convert('L'))",
"_____no_output_____"
],
[
"plt.imshow(test_x_orig[-2])",
"_____no_output_____"
],
[
"train_set_x_orig = np.array([np.array(i) for i in train_x_orig])\nplt.imshow(train_set_x_orig[0],cmap='gray')",
"_____no_output_____"
],
[
"test_set_x_orig = np.array([np.array(i) for i in test_x_orig])",
"_____no_output_____"
],
[
"m_train = train_y.shape[0]\nm_test = test_y.shape[0]",
"_____no_output_____"
],
[
"train_set_x_orig.shape",
"_____no_output_____"
],
[
"train_set_x_orig.shape",
"_____no_output_____"
],
[
"train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T\ntest_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T",
"_____no_output_____"
],
[
"train_set_x_flatten",
"_____no_output_____"
],
[
"test_set_x_flatten.shape",
"_____no_output_____"
],
[
"train_set_x = train_set_x_flatten/255\ntest_set_x = test_set_x_flatten/255",
"_____no_output_____"
],
[
"train_set_x",
"_____no_output_____"
],
[
"def sigmoid(z):\n return (1/(1+np.exp(-z)))\n\nprint(sigmoid(0))",
"0.5\n"
],
[
"def init_with_zeros(dim):\n w = np.zeros(shape = (dim,1))\n b= 0\n return w,b",
"_____no_output_____"
],
[
"dim = 160000\nw,b = init_with_zeros(dim)\nprint(w)\nprint(b)",
"[[0.]\n [0.]\n [0.]\n ...\n [0.]\n [0.]\n [0.]]\n0\n"
],
[
"def propagate(w,b,X,Y):\n m = X.shape[1]\n A = sigmoid(np.dot(w.T,X)+b)\n #print(A)\n cost = (-1/m)*np.sum(Y*np.log(A)+(1-Y)*(np.log(1-A)))\n dw = (1/m)*np.dot(X,(A-Y).T)\n db = (1/m)*np.sum(A-Y)\n grads = {\"dw\":dw,\n \"db\":db}\n cost = np.squeeze(cost)\n return grads,cost",
"_____no_output_____"
],
[
"w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1,2], [3,4]]), np.array([[1, 0]])\ngrads, cost = propagate(w, b, X, Y)\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\nprint (\"cost = \" + str(cost))",
"dw = [[0.99993216]\n [1.99980262]]\ndb = 0.49993523062470574\ncost = 6.000064773192205\n"
],
[
"def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs = []\n \n for i in range(num_iterations):\n grads,cost = propagate(w,b,X,Y)\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n w = w - learning_rate*dw\n b = b - learning_rate*db\n \n if i%1 == 0:\n costs.append(cost)\n \n if print_cost and i%1==0:\n print (\"Cost after iteration %i: %f\" % (i, cost))\n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs",
"_____no_output_____"
],
[
"params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\n\nprint (\"w = \" + str(params[\"w\"]))\nprint (\"b = \" + str(params[\"b\"]))\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))",
"w = [[0.1124579 ]\n [0.23106775]]\nb = 1.5593049248448891\ndw = [[0.90158428]\n [1.76250842]]\ndb = 0.4304620716786828\n"
],
[
"def predict(w,b,X):\n m = X.shape[1]\n Y_pred = np.zeros((1,m))\n w = w.reshape(X.shape[0],1)\n A = sigmoid(np.dot(w.T,X)+b)\n for i in range(A.shape[1]):\n Y_pred[0,i] = 1 if A[0,i] > 0.5 else 0\n return Y_pred",
"_____no_output_____"
],
[
"def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):\n w,b = init_with_zeros(X_train.shape[0])\n print(X_train.shape[0])\n parameters,grads,costs = optimize(w,b,X_train,Y_train,num_iterations, learning_rate, print_cost)\n \n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n Y_prediction_train = predict(w, b, X_train)\n Y_prediction_test = predict(w, b, X_test)\n \n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d",
"_____no_output_____"
],
[
"d = model(train_set_x, train_y, test_set_x, test_y, num_iterations = 2000, learning_rate = 0.0005, print_cost = True)",
"160000\nCost after iteration 0: 0.693147\nCost after iteration 1: 0.959775\nCost after iteration 2: 9.744965\nCost after iteration 3: 6.865258\nCost after iteration 4: 7.963958\nCost after iteration 5: 8.418714\nCost after iteration 6: 6.186803\nCost after iteration 7: 9.964616\nCost after iteration 8: 4.418181\nCost after iteration 9: 11.481781\nCost after iteration 10: 2.690335\nCost after iteration 11: 12.794696\nCost after iteration 12: 1.263776\nCost after iteration 13: 11.977247\nCost after iteration 14: 2.051944\nCost after iteration 15: 12.945243\nCost after iteration 16: 1.040220\nCost after iteration 17: 10.938935\nCost after iteration 18: 3.084976\nCost after iteration 19: 12.358032\nCost after iteration 20: 1.507766\nCost after iteration 21: 12.506867\nCost after iteration 22: 1.313783\nCost after iteration 23: 12.075085\nCost after iteration 24: 1.704331\nCost after iteration 25: 12.653989\nCost after iteration 26: 1.088858\nCost after iteration 27: 11.199941\nCost after iteration 28: 2.547828\nCost after iteration 29: 12.498840\nCost after iteration 30: 1.146688\nCost after iteration 31: 11.400068\nCost after iteration 32: 2.231044\nCost after iteration 33: 12.551146\nCost after iteration 34: 1.017899\nCost after iteration 35: 10.721464\nCost after iteration 36: 2.875658\nCost after iteration 37: 12.118977\nCost after iteration 38: 1.337244\nCost after iteration 39: 11.824053\nCost after iteration 40: 1.589483\nCost after iteration 41: 12.201463\nCost after iteration 42: 1.172710\nCost after iteration 43: 11.231686\nCost after iteration 44: 2.122239\nCost after iteration 45: 12.307722\nCost after iteration 46: 0.997817\nCost after iteration 47: 10.335815\nCost after iteration 48: 3.006011\nCost after iteration 49: 11.763343\nCost after iteration 50: 1.426305\nCost after iteration 51: 11.685583\nCost after iteration 52: 1.460282\nCost after iteration 53: 11.689351\nCost after iteration 54: 1.412919\nCost after iteration 55: 11.537923\nCost after iteration 56: 1.521098\nCost after iteration 57: 11.670004\nCost after iteration 58: 1.346559\nCost after iteration 59: 11.259875\nCost after iteration 60: 1.717236\nCost after iteration 61: 11.783702\nCost after iteration 62: 1.157867\nCost after iteration 63: 10.534835\nCost after iteration 64: 2.399837\nCost after iteration 65: 11.762344\nCost after iteration 66: 1.100757\nCost after iteration 67: 10.155754\nCost after iteration 68: 2.719305\nCost after iteration 69: 11.511503\nCost after iteration 70: 1.252827\nCost after iteration 71: 10.564319\nCost after iteration 72: 2.178397\nCost after iteration 73: 11.615306\nCost after iteration 74: 1.083110\nCost after iteration 75: 9.766661\nCost after iteration 76: 2.955791\nCost after iteration 77: 11.179126\nCost after iteration 78: 1.408606\nCost after iteration 79: 10.691330\nCost after iteration 80: 1.859160\nCost after iteration 81: 11.295232\nCost after iteration 82: 1.220782\nCost after iteration 83: 9.979902\nCost after iteration 84: 2.530592\nCost after iteration 85: 11.231690\nCost after iteration 86: 1.203621\nCost after iteration 87: 9.752692\nCost after iteration 88: 2.684665\nCost after iteration 89: 11.066767\nCost after iteration 90: 1.278624\nCost after iteration 91: 9.841033\nCost after iteration 92: 2.493648\nCost after iteration 93: 11.047320\nCost after iteration 94: 1.222166\nCost after iteration 95: 9.480305\nCost after iteration 96: 2.794201\nCost after iteration 97: 10.820921\nCost after iteration 98: 1.353735\nCost after iteration 99: 9.735491\nCost after 
iteration 100: 2.421510\nCost after iteration 101: 10.858606\nCost after iteration 102: 1.245739\nCost after iteration 103: 9.218972\nCost after iteration 104: 2.892302\nCost after iteration 105: 10.578761\nCost after iteration 106: 1.426633\nCost after iteration 107: 9.603259\nCost after iteration 108: 2.379483\nCost after iteration 109: 10.646981\nCost after iteration 110: 1.290565\nCost after iteration 111: 9.023147\nCost after iteration 112: 2.918864\nCost after iteration 113: 10.375813\nCost after iteration 114: 1.463782\nCost after iteration 115: 9.363273\nCost after iteration 116: 2.454424\nCost after iteration 117: 10.426361\nCost after iteration 118: 1.344145\nCost after iteration 119: 8.846085\nCost after iteration 120: 2.925167\nCost after iteration 121: 10.182392\nCost after iteration 122: 1.492758\nCost after iteration 123: 9.097812\nCost after iteration 124: 2.557309\nCost after iteration 125: 10.202217\nCost after iteration 126: 1.401532\nCost after iteration 127: 8.671141\nCost after iteration 128: 2.929239\nCost after iteration 129: 9.987724\nCost after iteration 130: 1.523524\nCost after iteration 131: 8.834667\nCost after iteration 132: 2.657784\nCost after iteration 133: 9.974999\nCost after iteration 134: 1.462232\nCost after iteration 135: 8.497280\nCost after iteration 136: 2.931417\nCost after iteration 137: 9.791155\nCost after iteration 138: 1.556553\nCost after iteration 139: 8.575770\nCost after iteration 140: 2.752496\nCost after iteration 141: 9.746310\nCost after iteration 142: 1.524685\nCost after iteration 143: 8.320912\nCost after iteration 144: 2.933206\nCost after iteration 145: 9.591634\nCost after iteration 146: 1.592611\nCost after iteration 147: 8.323676\nCost after iteration 148: 2.834379\nCost after iteration 149: 9.519012\nCost after iteration 150: 1.586039\nCost after iteration 151: 8.136171\nCost after iteration 152: 2.933271\nCost after iteration 153: 9.389447\nCost after iteration 154: 1.630996\nCost after iteration 155: 8.077639\nCost after iteration 156: 2.889584\nCost after iteration 157: 9.297959\nCost after iteration 158: 1.641174\nCost after iteration 159: 7.933752\nCost after iteration 160: 2.916377\nCost after iteration 161: 9.188257\nCost after iteration 162: 1.667238\nCost after iteration 163: 7.830031\nCost after iteration 164: 2.878616\nCost after iteration 165: 9.090545\nCost after iteration 166: 1.681504\nCost after iteration 167: 7.701167\nCost after iteration 168: 2.817639\nCost after iteration 169: 8.988852\nCost after iteration 170: 1.697610\nCost after iteration 171: 7.582161\nCost after iteration 172: 2.688932\nCost after iteration 173: 8.863621\nCost after iteration 174: 1.730692\nCost after iteration 175: 7.509078\nCost after iteration 176: 2.495180\nCost after iteration 177: 8.654136\nCost after iteration 178: 1.823859\nCost after iteration 179: 7.571834\nCost after iteration 180: 2.283909\nCost after iteration 181: 8.318620\nCost after iteration 182: 1.967989\nCost after iteration 183: 7.731126\nCost after iteration 184: 2.087943\nCost after iteration 185: 7.891455\nCost after iteration 186: 1.995536\nCost after iteration 187: 7.662010\nCost after iteration 188: 1.913736\nCost after iteration 189: 7.450035\nCost after iteration 190: 1.817686\nCost after iteration 191: 7.210526\nCost after iteration 192: 1.759985\nCost after iteration 193: 7.051088\nCost after iteration 194: 1.754260\nCost after iteration 195: 7.000189\nCost after iteration 196: 1.743927\nCost after iteration 197: 6.940395\nCost after iteration 198: 
1.742048\nCost after iteration 199: 6.898588\nCost after iteration 200: 1.737701\nCost after iteration 201: 6.851992\nCost after iteration 202: 1.738380\nCost after iteration 203: 6.816101\nCost after iteration 204: 1.736326\nCost after iteration 205: 6.774707\nCost after iteration 206: 1.738912\nCost after iteration 207: 6.743149\nCost after iteration 208: 1.737617\nCost after iteration 209: 6.703590\nCost after iteration 210: 1.742158\nCost after iteration 211: 6.676357\nCost after iteration 212: 1.740309\nCost after iteration 213: 6.635766\nCost after iteration 214: 1.747707\nCost after iteration 215: 6.614706\nCost after iteration 216: 1.743217\nCost after iteration 217: 6.568583\nCost after iteration 218: 1.756117\nCost after iteration 219: 6.559295\nCost after iteration 220: 1.744378\nCost after iteration 221: 6.497740\nCost after iteration 222: 1.769853\nCost after iteration 223: 6.515344\nCost after iteration 224: 1.739170\nCost after iteration 225: 6.413167\nCost after iteration 226: 1.796610\nCost after iteration 227: 6.499293\nCost after iteration 228: 1.715217\nCost after iteration 229: 6.287712\nCost after iteration 230: 1.860282\nCost after iteration 231: 6.561674\nCost after iteration 232: 1.640364\nCost after iteration 233: 6.049467\nCost after iteration 234: 2.035605\nCost after iteration 235: 6.849466\nCost after iteration 236: 1.472542\nCost after iteration 237: 5.597941\nCost after iteration 238: 2.467278\nCost after iteration 239: 7.481727\nCost after iteration 240: 1.408870\nCost after iteration 241: 5.367574\nCost after iteration 242: 2.683146\nCost after iteration 243: 7.611994\nCost after iteration 244: 1.367650\nCost after iteration 245: 5.186925\nCost after iteration 246: 2.846361\nCost after iteration 247: 7.629529\nCost after iteration 248: 1.311836\nCost after iteration 249: 4.974226\nCost after iteration 250: 3.052477\nCost after iteration 251: 7.608877\nCost after iteration 252: 1.255221\n"
],
[
"d[\"Y_prediction_test\"]",
"_____no_output_____"
],
[
"i = Image.open('Test/circle1.jpg').convert('L')\nim = np.array(i)\nplt.imshow(im,cmap='gray')\nim = im.reshape(160000,1)\n\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], im)",
"_____no_output_____"
],
[
"my_predicted_image",
"_____no_output_____"
],
[
"print(im)",
"[[0]\n [0]\n [0]\n ...\n [0]\n [0]\n [0]]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7468a06c0f4460fdb87c2ec379080fa57eb82fc | 130 | ipynb | Jupyter Notebook | Notebooks/IMDB/notebook.ipynb | surajsjain/social-media-analytics-app | 1f310dcf2f79c9f80edee80dd59d8c63f827f04a | [
"MIT"
] | null | null | null | Notebooks/IMDB/notebook.ipynb | surajsjain/social-media-analytics-app | 1f310dcf2f79c9f80edee80dd59d8c63f827f04a | [
"MIT"
] | 8 | 2020-06-05T20:49:10.000Z | 2022-02-10T00:37:59.000Z | Notebooks/IMDB/notebook.ipynb | surajsjain/social-media-analytics-app | 1f310dcf2f79c9f80edee80dd59d8c63f827f04a | [
"MIT"
] | 3 | 2020-01-26T10:48:25.000Z | 2020-08-25T17:47:54.000Z | 32.5 | 75 | 0.884615 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7468a795d37259f39307028cf88481dfd3e0c3e | 24,205 | ipynb | Jupyter Notebook | notebook/Unit3-1-Classes.ipynb | Py03013050/Home-Py03013050 | ee61b90207e3c67a65d1c122a87df93152aa26d1 | [
"MIT"
] | null | null | null | notebook/Unit3-1-Classes.ipynb | Py03013050/Home-Py03013050 | ee61b90207e3c67a65d1c122a87df93152aa26d1 | [
"MIT"
] | null | null | null | notebook/Unit3-1-Classes.ipynb | Py03013050/Home-Py03013050 | ee61b90207e3c67a65d1c122a87df93152aa26d1 | [
"MIT"
] | null | null | null | 26.598901 | 290 | 0.530428 | [
[
[
"# CLASSES",
"_____no_output_____"
],
[
"## 1 Object-oriented programming \n\nWe now turn our attention to the **object-oriented programming** topic. \n\n\nThe OOP is a programming paradigm based on the concept of **objects**: contain \n\n* **attributes(data)** and \n\n* the **methods(operations)** that operate on that attributes.\n\n>Formally, an object is a collection of data and associated behaviors. \n\nThe object-oriented programming focus on components that the **user** perceives, with **objects** as the basic unit. \n\nWe figure out all the objects by putting all the data and operations that describe the user's interaction with the data.\n\n\n\nAs an example, suppose you wish to write a computer **soccer** games (which I consider as a complex application). \n\nUsing OOP languages, We can model the program accordingly to the **\"real things\"** appear in the soccer games.\n\n* Player: attributes include name, number, x and y location in the field, and etc; operations include run, jump, kick-the-ball, and etc.\n* Ball: attributes include x, y, z position in the field, radius, weight, etc.\n* Referee:\n* Field:\n* Audience:\n* Weather:\n\n\nMost importantly, some of these classes (such as `Ball and Audience`) can be **reused** in another application, e.g., computer basketball game, with little or no modification.\n\n\n\n**Benefits of the Object-Oriented programming**\n\nConcepts in the object-oriented approach enable analysts to **break a complex system into smaller, more-manageable modules**, work on the modules individually, and easily piece the modules back together to form an information system.\n\nThis modularity makes systems development easier to grasp, easier to share among members of a project team, and easier to communicate to users, who are needed to provide requirements and confirm how well the system meets the requirements throughout the systems development process.\n\nBy modularizing systems development, the project team actually is creating reusable pieces that can be plugged into other systems efforts or used as starting points for other projects. \n\nUltimately, this can save time because new projects don’t have to start completely from scratch\n\n",
"_____no_output_____"
],
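[
"**A minimal sketch (added for illustration):** the `Player` and `Ball` bullets above can be written directly as Python classes. The attribute and method names below are illustrative only, not part of a real game engine.\n\n```python\nclass Ball:\n    def __init__(self, x, y, z, radius, weight):\n        # data (attributes) that describe the ball\n        self.x, self.y, self.z = x, y, z\n        self.radius = radius\n        self.weight = weight\n\nclass Player:\n    def __init__(self, name, number):\n        self.name = name\n        self.number = number\n        self.x, self.y = 0.0, 0.0   # location in the field\n\n    def run(self, dx, dy):\n        # an operation (method) that changes the player's state\n        self.x += dx\n        self.y += dy\n\n    def kick(self, ball, dx, dy):\n        # interacting with another object\n        ball.x += dx\n        ball.y += dy\n```",
"_____no_output_____"
],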
[
"## 2 Class in Python\n\n\nIn Python, using **class** to organize programs in the context of **object-oriented programming**.",
"_____no_output_____"
]
],
[
[
"class Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n def myfunc(self):\n print(\"Hello my name is \" + self.name)\n",
"_____no_output_____"
],
[
"p1 = Person(\"Li\",56)\np1.myfunc()",
"_____no_output_____"
],
[
"p2=p1\np2.age",
"_____no_output_____"
],
[
"p2.age=44\np1.age",
"_____no_output_____"
]
],
[
[
"### 2.1 Create an `object` of `class` type \nuse the `class` keyword to define a new type class:`Circle`\n\n* a subclass of `object`\n\n```python\nclass Person: \n``` ",
"_____no_output_____"
]
],
[
[
"print(type(Person))",
"_____no_output_____"
]
],
[
[
"### 2.2 Creates a set of `attributes` and `methods`\n\nAn class contains \n\n* **attributes**\n\n * think of data as other objects that make up the class \n\n* **methods**\n\n * think of methods as functions that only work with this class\n \n * how to interact with the object\n\n\n**Access any attribute**\n\nThe dot **“`.`”** operator is used to access any attribute\n\n* a data attribute of an object\n\n* a method of an object\n\n#### 2.2.1 attributes\n\nclass attributes: <b style=\"color:blue\">Instance variable</b>\n\n```python\nself.name\nself.age\n```\n* Every <b style=\"color:blue\">Instance variable</b> begin with <b style=\"color:blue\">self.</b> \n\n* One Instance variable can be **defined in any method** `as you need`,begined with <b style=\"color:blue\">self.</b>:\n\n \n>* <b style=\"color:blue\">self</b>: the instance of the class\n\n",
"_____no_output_____"
],
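[
"**A small sketch (added for illustration):** an instance variable does not have to be created in `__init__`; any method can attach one to `self`, and it exists only after that method has run. The class and attribute names below are illustrative.\n\n```python\nclass Counter:\n    def __init__(self):\n        self.count = 0          # instance variable defined in __init__\n\n    def start_log(self):\n        self.log = []           # instance variable defined in another method\n\nc = Counter()\nprint(hasattr(c, 'log'))        # False: not defined yet\nc.start_log()\nprint(hasattr(c, 'log'))        # True: created by start_log()\n```",
"_____no_output_____"
],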
[
"#### 2.2.2 Methods\n\n```python\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def myfunc(self):\n print(\"Hello my name is \" + self.name)\n\n```\nEvery method uses <b style=\"color:blue\">self</b> as the name of <b style=\"color:blue\">the first argument</b> of all methods\n\n>* Python always passes the **object** as the `first` argument.\n\n##### 2.2.2.1 The Magic method `__init__` \n\nThe magic method names that start and end with **two** underscores <b style=\"color:blue\">__</b>. \n\n**Constructor `__init__()`** : create instances of the class.\n\n* Whenever a class is **instantiated**, a call is made to the `__init__` method defined in that class.\n\n```python\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n```",
"_____no_output_____"
],
[
"##### 2.2.2.2 The methods to output string \n\n```python\n def myfunc(self):\n print(\"Hello my name is \" + self.name)\n\n``` ",
"_____no_output_____"
]
],
[
[
"p1.myfunc()",
"_____no_output_____"
]
],
[
[
"## 3 The Magic Method `__str__` \n\nAdd the Magic Method `__str__` to the class Circle\n\n>**Magic Method:**\n>\n>One of the design goals for Python was to allow programmers to use classes to define new types that are as easy to **use as the `built-in` types** of Python. \n>\n>Using magic methods to provide **class-specific definitions of `built-in` functions** such as `str` plays an important role in achieving this goal.",
"_____no_output_____"
]
],
[
[
"class Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n \n def myfunc(self):\n print(\"Hello my name is \" + self.name)\n \n def __str__(self):\n \"\"\"Returns a string representation of Person\"\"\"\n return f\"The age of {self.name} is {self.age}\" ",
"_____no_output_____"
]
],
[
[
"### 3.1 the `print` command\n\nthe **`__str__`** function associated with the object to be `printed` is **automatically invoked**",
"_____no_output_____"
]
],
[
[
"p1=Person(\"zhang shan\",21)\n#p1.__str__()\nprint(p1)",
"_____no_output_____"
]
],
[
[
"### 3.2 calling `str`\n\nthe `__str__` function is automatically invoked to convert a instance of that class a string",
"_____no_output_____"
]
],
[
[
"str(p1)",
"_____no_output_____"
],
[
"p1.__str__()",
"_____no_output_____"
]
],
[
[
"### 3.3 Build-in `__str__`\n\n* List,dict,tuple",
"_____no_output_____"
]
],
[
[
"l=[1,2,3]\nprint(l)\nstr(l)",
"_____no_output_____"
],
[
"l.__str__()",
"_____no_output_____"
],
[
"d={'a':1,'b':2}\nprint(d)\nstr(d)",
"_____no_output_____"
],
[
"d.__str__()",
"_____no_output_____"
],
[
"t=('a',1,'c')\nprint(t)\nstr(t)",
"_____no_output_____"
],
[
"t.__str__()",
"_____no_output_____"
]
],
[
[
"## 4 Inheritance\n\n**Inheritance** provides a convenient mechanism for building **groups of `related` abstractions**\n\nIt allows programmers to create <b>a type hierarchy</b> in which each type inherits attributes from the types above it in the hierarchy.\n\n```python\nclass subclass(superclass):\n```\n\n### 4.1 The class Student`\n\n We shall define a **Student** class, as a subclass of **Person**. \n ```python\n class Student(Person):\n ```\n",
"_____no_output_____"
]
],
[
[
"class Student(Person):\n \n next_id_num = 0 #identification number\n \n def __init__(self, name,age):\n super().__init__(name,age)\n self.id_num = Student.next_id_num\n Student.next_id_num += 1\n \n def __str__(self):\n \"\"\"Returns a string representation of Student\"\"\"\n return f\"The Student {self.id_num} : name is {self.name},age is {self.age}\" \n",
"_____no_output_____"
]
],
[
[
"\n",
"_____no_output_____"
],
[
"The subclass Student add **new** attributes: \n\n* **the class variable(类变量)**: \n\n * `_next_id_num`, belongs to the class` Student`, rather than to instances of the class.\n>* belongs to the class\n>* shared by all instance of the classs\n\n* **the instance variable(实例变量)**:\n * `_id_num`: id of each cyclinder instance\n\noverride methods: \n\n* `__str__(self)`:\n",
"_____no_output_____"
],
[
"**s1 = Student(name=\"Li Shi\",age=22)**",
"_____no_output_____"
]
],
[
[
"s1 = Student(name=\"Li Shi\",age=22)\nprint(s1) ",
"_____no_output_____"
]
],
[
[
"**Class variable**\n\n* belongs to the class\n\n* shared by all instance of the classs",
"_____no_output_____"
]
],
[
[
"print(Student.next_id_num) # belongs to the class",
"_____no_output_____"
],
[
"print(s1.next_id_num) # shared by all instance of the classs",
"_____no_output_____"
]
],
[
[
"**s2 = Student(name=\"wang yi\", age=20)**",
"_____no_output_____"
]
],
[
[
"s2 =Student(\"wang yi\", 20)\nprint(s2) ",
"_____no_output_____"
]
],
[
[
"## 5 Private Variables and Methods in Python\n\nPython does not have keywords to access control.\n\nIn other words,\n\n* All attributes and methods are <b style=\"color:blue\">PUBLIC</b> by default in Python\n\nBy convention, \n\n* Names begin with **double underscores (`__`)** and **not end with double underscores** are further hidden from direct access \n",
"_____no_output_____"
]
],
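[
[
"**How the hiding works (added note):** Python implements this convention by *name mangling*: inside the `Student` class defined below, a name such as `__min_grade` is stored as `_Student__min_grade`. Direct access through the original name fails from outside the class, but the mangled name is still reachable, so this is a convention rather than true access control. A minimal sketch (the `Secret` class is illustrative only):\n\n```python\nclass Secret:\n    def __init__(self):\n        self.__hidden = 42\n\ns = Secret()\n# s.__hidden               # would raise AttributeError\nprint(s._Secret__hidden)   # 42 -- the mangled name is still reachable\n```",
"_____no_output_____"
]
],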
[
[
"class Student(Person):\n \n next_id_num = 0 #identification number\n \n def __init__(self, name,age):\n super().__init__(name,age)\n self.id_num = Student.next_id_num\n Student.next_id_num += 1\n \n def add_grades(self,grades):\n self.grades=grades\n self.avg_grade=self.__get_average_grade()\n self.__min_grade=min(self.grades)\n \n def __get_average_grade(self):\n self.ave_grade= sum(self.grades)/len(self.grades)\n \n def __str__(self):\n \"\"\"Returns a string representation of Student\"\"\"\n if self.__min_grade>=60.0:\n return f\"The Student {self._id_num} : name is {self.name},age is {self.age},ave grade {self.ave_grade}\" \n else:\n return \"The Student ...\" \n \n\n",
"_____no_output_____"
],
[
"s3 =Student(\"wang yi\", 20)",
"_____no_output_____"
]
],
[
[
"**Public**",
"_____no_output_____"
]
],
[
[
"s3.add_grades([100,90,80,50])",
"_____no_output_____"
],
[
"s3.ave_grade",
"_____no_output_____"
]
],
[
[
"**Private**",
"_____no_output_____"
]
],
[
[
"s3.__get_average_grade()",
"_____no_output_____"
],
[
"s3.__min_grade",
"_____no_output_____"
]
],
[
[
"## 6 The UML class diagram\n\nThe [Unified Modeling Language(UML统一建模语言)](https://en.wikipedia.org/wiki/Unified_Modeling_Language) is a general-purpose, developmental, modeling language in the field of software engineering that is intended to provide a standard way to visualize the design of a system.\n\n[A class diagram](https://en.wikipedia.org/wiki/Class_diagram) in UML is a type of static structure diagram that describes the structure of a system by showing the system's classes:\n\n* **attributes**, **operations (or methods)**, and the **relationships** among objects.\n\nIn the diagram, **classes** are represented with **boxes** that contain **three compartments**:\n\n* The **top** compartment contains the **name** of the class\n\n* The **middle** compartment contains the **attributes** of the class\n\n* The **bottom** compartment contains the **operations** the class can execute. \n\n\n\n### 6.1 The **[UML class diagram](https://en.wikipedia.org/wiki/Class_diagram) of Person**\n\nUML provides mechanisms to represent class members, such as \n\n* **attributes** and **methods**, and\n\n* additional information about them like **constructors**\n\n\n\n#### 6.1.1 Visibility\n\nTo specify the visibility of a class member (i.e. any attribute or method), these notations must be placed before the member's name:\n\n\n| Visibility | UML Character |Icon for field in PlantUML |Icon for method in PlantUML |\n|:---------:|:--------: |:------------------------------------: |:--------------------------: |\n| private | - | □ | ■ |\n| public | + | ○ | ● |\n\n#### 6.1.2 Scope\n\nThe UML specifies two types of scope for members:\n\n* **instance**\n* **class**, is represented by <u>**underlined**</u> names.\n\n**Instance members** are scoped to a **specific** instance.\n \n* Attribute values may vary between instances\n* Method invocation may affect the instance’s state (i.e. change instance’s attributes)\n\n**Class members** are commonly recognized as **“static”** in many programming languages. The scope is the class itself.\n \n* Attribute values are equal for **all** instances\n* Method invocation does not affect the classifier’s state\n\n**Constructors** are shown like **static** methods in the Class Diagrams form `class(arguments)` and are underlined\n",
"_____no_output_____"
],
[
"#### 6.1.3 PlantUML\n\n[PlantUML is a component that allows to quickly write: Class Diagram](https://plantuml.com/zh/class-diagram)\n\n\n\n>\n>python -m pip install iplantuml\n>",
"_____no_output_____"
]
],
[
[
"import iplantuml",
"_____no_output_____"
],
[
"%%plantuml\nclass Person {\n + name: str\n + age: int\n + {static} Person(name:str.age:int)\n + myfunc()\n + __str()__:str\n}",
"_____no_output_____"
]
],
[
[
"\n\n### 6.2 The UML class Inheritance\n\n#### 6.2.1 The Class-level(类) relationship: Inheritance\n\nIf two classes are in an **inheritance** relation, \n\n* the subclass inherits its attributes and methods from the superclass.\n\nThe UML graphical representation of **an inheritance relation** is **a hollow triangle shape** on the **superclass end** of the line (or tree of lines) that connects it to one or more subclasses.\n\n\n\n#### 6.2.2 The UML Inheritance\n\n\n\n",
"_____no_output_____"
],
[
"**PlatUML**\n\n\n",
"_____no_output_____"
]
],
[
[
"%%plantuml\nclass Person {\n + name: str\n + age: int\n + {static} Person(name:str.age:int)\n + myfunc()\n + __str()__:str\n}\n\nclass Student{\n + {static} next_id_num:int\n + id_num:int\n + grades: float [1..*]\n + ave_grade:float\n - __min_grade:float\n + {static} Student(name:str,age:int)\n + add_grades(grades:float [1..*]):void\n - __get_average_grade():void\n + __str__():str\n}\n\nPerson <|-- Student\n",
"_____no_output_____"
]
],
[
[
"### 6.3 Reverse Python Code to UML\n\nThe **Reverse** is a process to produce UML class model from a given input of **source code**.\n\n**逆向工程**\n\nBy bringing **code** content into visual **UML** model, this helps programmers or software engineers to \n\n**review an implementation, identify potential bugs or deficiency and look for possible improvements**.\n\n**Tools**\n\n* pyrevers [Creating UML diagrams for Python code with pyrevers](https://gitee.com/thermalogic/sees/blob/S2019/guide/UMLPython.md)\n\n* pynsource [Reverse engineer Python source code into UML diagrams](https://www.pynsource.com/)",
"_____no_output_____"
],
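[
"**A typical invocation (added for illustration; it assumes `pylint` is installed, which provides `pyreverse`, and `your_module.py` is a placeholder for the file holding the `Person` and `Student` classes):**\n\n>\n>pip install pylint\n>\n>pyreverse -o png -p Person your_module.py\n>\n\nThis writes a `classes_Person.png` class diagram generated from the source code (the `png` output may additionally require Graphviz to be installed).",
"_____no_output_____"
],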
[
"## Further Reading: \n\n\n* [Python Object Oriented Programming (OOP)](http://www3.ntu.edu.sg/home/ehchua/programming/webprogramming/Python1a_OOP.html)\n* [Unified Modeling Language](https://en.wikipedia.org/wiki/Unified_Modeling_Language)\n* [The UML Class diagram](https://en.wikipedia.org/wiki/Class_diagram)\n\n**PlantUML**\n\n * [PlantUML:Class Diagram](https://plantuml.com/zh/class-diagram)\n \n * [The Class Diagrams of VCCE in PlantUML](https://gitee.com/thermalogic/simvcce/tree/B2022/uml/)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7468cf7030336c0ccdfcd17305d0cd506b95c7f | 2,389 | ipynb | Jupyter Notebook | excel.ipynb | nooshhub/pycv-sample | d49c5588032b8ba27e5891d4dfa0238989ab5e64 | [
"Apache-2.0"
] | null | null | null | excel.ipynb | nooshhub/pycv-sample | d49c5588032b8ba27e5891d4dfa0238989ab5e64 | [
"Apache-2.0"
] | null | null | null | excel.ipynb | nooshhub/pycv-sample | d49c5588032b8ba27e5891d4dfa0238989ab5e64 | [
"Apache-2.0"
] | 1 | 2021-06-23T07:01:44.000Z | 2021-06-23T07:01:44.000Z | 27.77907 | 270 | 0.456676 | [
[
[
"from openpyxl import Workbook, load_workbook\nwb= load_workbook(\"D:\\opencv\\mtcv\\李春宝-色块分类表V1.3 2021-06-21.xlsx\")\nws=wb[\"色块分类\"] # 也可以使用wb.get_sheet_by_name(\"Sheet1\") 获取工作表\n\n# 读取数据,把excel中的一个table按行读取出来,存入一个二维的list\ntotal_list=[]\nfor row in ws.rows:\n row_list=[]\n for cell in row:\n row_list.append(cell.value)\n total_list.append(row_list)\n \n# 利用字典进行数据统计\nbgr_list = []\nfor term in total_list:\n if term[1] == None or term[1] == \"颜色(RGB)\": # 颜色(RGB)\n continue\n else:\n rgb = term[1].split(\",\")\n# print(rgb)\n bgr = [int(rgb[2]),int(rgb[1]),int(rgb[0])]\n bgr_list.append(bgr)\n print(bgr, '=', term[2])\n\nprint(bgr_list)\n\n",
"[0, 255, 255] = 居住\n[0, 0, 255] = 商场\n[255, 0, 255] = 办公\n[0, 127, 255] = 酒店\n[127, 127, 255] = 医疗\n[191, 127, 255] = 教育\n[38, 57, 76] = 传统产业工业\n[76, 114, 153] = 新兴产业工业\n[0, 255, 0] = 绿地\n[255, 255, 127] = 湖泊\n[128, 128, 128] = 交通枢纽\n[192, 192, 192] = 其他\n[38, 153, 0] = 绿地\n[204, 102, 153] = 物流仓储\n[0, 38, 76] = 矿场\n[153, 114, 0] = 电厂\n[22, 114, 20] = 港口\n[[0, 255, 255], [0, 0, 255], [255, 0, 255], [0, 127, 255], [127, 127, 255], [191, 127, 255], [38, 57, 76], [76, 114, 153], [0, 255, 0], [255, 255, 127], [128, 128, 128], [192, 192, 192], [38, 153, 0], [204, 102, 153], [0, 38, 76], [153, 114, 0], [22, 114, 20]]\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e746aaf0ee14534a1bd3a7125dcbacef15a3785b | 58,006 | ipynb | Jupyter Notebook | mlp_v2.ipynb | marquis08/dacon_wafer | 479fe580b3518b8c7aaa02444f787e88b10ccf12 | [
"Apache-2.0"
] | null | null | null | mlp_v2.ipynb | marquis08/dacon_wafer | 479fe580b3518b8c7aaa02444f787e88b10ccf12 | [
"Apache-2.0"
] | null | null | null | mlp_v2.ipynb | marquis08/dacon_wafer | 479fe580b3518b8c7aaa02444f787e88b10ccf12 | [
"Apache-2.0"
] | null | null | null | 60.739267 | 1,753 | 0.559752 | [
[
[
"import os\nimport gc\nimport numpy as np\nimport pandas as pd\nimport math\nimport random\nfrom time import time\nfrom tqdm import tqdm\nfrom pathlib import Path\n\nimport torch\nfrom torch import nn, cuda\nimport torchvision.models as models\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom utils import CosineAnnealingWithRestartsLR\n\nfrom torch.optim import Adam, SGD, Optimizer\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"def seed_everything(seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\nclass Semi_dataset(Dataset):\n def __init__(self, X, Y):\n self.X = X\n self.Y = Y\n self.X_dataset = []\n self.Y_dataset = []\n for x in X:\n self.X_dataset.append(torch.FloatTensor(x))\n try:\n for y in Y.values:\n self.Y_dataset.append(torch.tensor(y))\n except:\n print(\"no label\")\n \n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n data = self.X_dataset[index]\n try:\n target = self.Y_dataset[index]\n return data, target\n except:\n return data\n\n\ndef build_dataloader(X, Y, batch_size, shuffle=False):\n \n dataset = Semi_dataset(X, Y)\n dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=8\n )\n return dataloader\n\ndef mean_absolute_error(y_true, y_pred,\n sample_weight=None,\n multioutput='uniform_average'):\n \n output_errors = np.average(np.abs(y_pred - y_true),\n weights=sample_weight, axis=0)\n if isinstance(multioutput, str):\n if multioutput == 'raw_values':\n return output_errors\n elif multioutput == 'uniform_average':\n multioutput = None\n\n return np.average(output_errors, weights=multioutput)\n\nclass AdamW(Optimizer):\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay)\n super(AdamW, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('AdamW does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n if group['weight_decay'] != 0:\n p.data.add_(-group['weight_decay'], p.data)\n\n return loss\n\nclass MLP_only_flatfeatures(nn.Module):\n def __init__(self, num_classes=1):\n super(MLP_only_flatfeatures, self).__init__()\n self.num_classes = num_classes \n self.fc_layers = nn.Sequential(\n #nn.Linear(226, 1000),\n nn.Linear(226, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n ####### Block 1 #######\n nn.Linear(256, 64),\n nn.BatchNorm1d(64),\n nn.ReLU(),\n \n nn.Linear(64, 64),\n nn.BatchNorm1d(64),\n nn.ReLU(),\n \n nn.Linear(64, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 2 #######\n nn.Linear(256, 64),\n 
nn.BatchNorm1d(64),\n nn.ReLU(),\n \n nn.Linear(64, 64),\n nn.BatchNorm1d(64),\n nn.ReLU(),\n \n nn.Linear(64, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 3 #######\n nn.Linear(256, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n \n nn.Linear(128, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n \n nn.Linear(128, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 4 #######\n nn.Linear(512, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n \n nn.Linear(128, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n \n nn.Linear(128, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 5 #######\n nn.Linear(512, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n \n nn.Linear(128, 128),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n \n nn.Linear(128, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 6 #######\n nn.Linear(512, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n \n nn.Linear(256, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n \n nn.Linear(256, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 7 #######\n nn.Linear(1024, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n \n nn.Linear(256, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n \n nn.Linear(256, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Dropout(0.1),\n ####### Block 8 #######\n nn.Linear(1024, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n \n nn.Linear(256, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n \n nn.Linear(256, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Dropout(0.1),\n ######### LAST ##########\n \n nn.Linear(1024, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n \n nn.Dropout(0.1),\n nn.Linear(512, self.num_classes)\n ) \n self._initialize_weights()\n\n def forward(self, x):\n out = self.fc_layers(x)\n return out\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\ndef build_model(device, model_name='mlp', weight_path=None):\n\n if model_name == 'mlp':\n model = MLP_only_flatfeatures(4)\n \n return model\n\ndef validation(model, criterion, valid_loader, device):\n \n model.eval()\n valid_preds = np.zeros((len(valid_loader.dataset), 4))\n valid_targets = np.zeros((len(valid_loader.dataset), 4))\n val_loss = 0.\n \n with torch.no_grad():\n for i, (data, target) in enumerate(valid_loader):\n \n valid_targets[i * batch_size: (i+1) * batch_size] = target.float().numpy().copy()\n\n data = data.to(device)\n target = target.float().to(device)\n \n output = model(data)\n loss = criterion(output, target)\n \n valid_preds[i * batch_size: (i+1) * batch_size] = output.detach().cpu().numpy()\n \n val_loss += loss.item() / len(valid_loader)\n \n val_score = mean_absolute_error(valid_preds, valid_targets)\n\n return val_loss, val_score\n\nseed = 42\nseed_everything(seed)\n\nif cuda.is_available:\n device = torch.device(\"cuda:0\")\nelse:\n device = torch.device(\"cpu\")",
"_____no_output_____"
],
[
"%%time\nDATASET_PATH = '../wafer'\ntrain_df = pd.read_csv(os.path.join(DATASET_PATH, 'train.csv'))\ntest_df = pd.read_csv(os.path.join(DATASET_PATH, 'test.csv'))\nsubmission_df = pd.read_csv(os.path.join(DATASET_PATH, 'sample_submission.csv'))\n\nX_train, X_val, y_train, y_val = train_test_split(train_df.iloc[:, 4:], train_df.iloc[:, :4], test_size=0.1, random_state=42, shuffle=True)\n\nX_train = X_train.reset_index(drop=True)\nX_val = X_val.reset_index(drop=True)\ny_train = y_train.reset_index(drop=True)\ny_val = y_val.reset_index(drop=True)\n\nscaler = StandardScaler()\nscaler.fit(X_train)\nX_train = scaler.transform(X_train)\nX_val = scaler.transform(X_val)\n\nbatch_size = 2048\ntrain_loader = build_dataloader(X_train, y_train, batch_size, shuffle=True)\nvalid_loader = build_dataloader(X_val, y_val, batch_size, shuffle=False)\n\ntest_df.iloc[:, 1:] = scaler.transform(test_df.iloc[:, 1:])",
"CPU times: user 35.5 s, sys: 4.86 s, total: 40.4 s\nWall time: 23.3 s\n"
],
[
"# output path\n#output_dir = Path('./', 'output')\n#output_dir.mkdir(exist_ok=True, parents=True)\n\nnum_epochs = 20000\ncriterion = nn.L1Loss()\n\nbest_epoch_list = []\nbest_valid_score_list = []\n\n# build model\nmodel = build_model(device, model_name='mlp')\nmodel.to(device)\n\nlr = 0.001\noptimizer = AdamW(model.parameters(), lr)\n################### Scheduler ################\n#eta_min = 0.000001\n#T_max = 10\n#T_mult = 1\n#restart_decay = 0.97\n#scheduler = CosineAnnealingWithRestartsLR(optimizer, T_max=T_max, eta_min=eta_min, T_mult=T_mult, restart_decay=restart_decay)\n###############################################\n\nstart_time = time()\n\nbest_epoch = 0\nbest_train_loss = 1000\nbest_valid_score = 1000\n\nfor epoch in range(num_epochs):\n\n model.train()\n optimizer.zero_grad()\n train_loss = 0.0\n\n for batch_idx, (data, target) in enumerate(train_loader):\n\n if device:\n data = data.to(device)\n target = target.float().to(device)\n else:\n target = target.float()\n\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n train_loss += loss.item() / len(train_loader)\n\n val_loss, val_score = validation(model, criterion, valid_loader, device)\n\n elapsed = time() - start_time\n\n lr = [_['lr'] for _ in optimizer.param_groups]\n\n #scheduler.step(val_score)\n \n print('Epoch {} / {} train Loss: {:.4f} val_loss: {:.4f} val_score: {:.4f} lr: {:.5f} elapsed: {:.0f}m {:.0f}s' \\\n .format(epoch, num_epochs - 1, train_loss, val_loss, val_score, lr[0], elapsed // 60, elapsed % 60))\n \n #model_path = output_dir / 'best_model.pt'\n model_path = '../wafer/mlp_weights/mlp_v2.pt'\n\n if val_score < best_valid_score:\n best_valid_score = val_score\n best_epoch = epoch\n torch.save(model.state_dict(), model_path)\n print('----------------------------------------------------------------------->> loss improved to {:.5f}'.format(best_valid_score))\n\n best_epoch_list.append(best_epoch)\n best_valid_score_list.append(best_valid_score)\nprint(\"==================== mlp - Best val_loss - {:.5f} =================\".format(best_valid_score))",
"532 val_loss: 3.0597 val_score: 3.0602 lr: 0.00100 elapsed: 49m 40s\nEpoch 320 / 19999 train Loss: 6.2484 val_loss: 3.1612 val_score: 3.1610 lr: 0.00100 elapsed: 49m 49s\nEpoch 321 / 19999 train Loss: 6.2549 val_loss: 2.9586 val_score: 2.9595 lr: 0.00100 elapsed: 49m 59s\nEpoch 322 / 19999 train Loss: 6.2093 val_loss: 3.0348 val_score: 3.0352 lr: 0.00100 elapsed: 50m 8s\nEpoch 323 / 19999 train Loss: 6.2340 val_loss: 3.0129 val_score: 3.0144 lr: 0.00100 elapsed: 50m 17s\nEpoch 324 / 19999 train Loss: 6.2639 val_loss: 3.2287 val_score: 3.2285 lr: 0.00100 elapsed: 50m 27s\nEpoch 325 / 19999 train Loss: 6.2270 val_loss: 3.0607 val_score: 3.0608 lr: 0.00100 elapsed: 50m 36s\nEpoch 326 / 19999 train Loss: 6.1415 val_loss: 2.8638 val_score: 2.8646 lr: 0.00100 elapsed: 50m 45s\nEpoch 327 / 19999 train Loss: 6.1642 val_loss: 3.1133 val_score: 3.1144 lr: 0.00100 elapsed: 50m 54s\nEpoch 328 / 19999 train Loss: 6.2314 val_loss: 3.0220 val_score: 3.0232 lr: 0.00100 elapsed: 51m 4s\nEpoch 329 / 19999 train Loss: 6.1998 val_loss: 2.8327 val_score: 2.8328 lr: 0.00100 elapsed: 51m 13s\n----------------------------------------------------------------------->> loss improved to 2.83281\nEpoch 330 / 19999 train Loss: 6.1979 val_loss: 3.0113 val_score: 3.0117 lr: 0.00100 elapsed: 51m 22s\nEpoch 331 / 19999 train Loss: 6.2105 val_loss: 3.1539 val_score: 3.1533 lr: 0.00100 elapsed: 51m 31s\nEpoch 332 / 19999 train Loss: 6.2476 val_loss: 3.2366 val_score: 3.2353 lr: 0.00100 elapsed: 51m 41s\nEpoch 333 / 19999 train Loss: 6.1803 val_loss: 2.9205 val_score: 2.9219 lr: 0.00100 elapsed: 51m 50s\nEpoch 334 / 19999 train Loss: 6.1711 val_loss: 2.9614 val_score: 2.9618 lr: 0.00100 elapsed: 51m 60s\nEpoch 335 / 19999 train Loss: 6.1952 val_loss: 2.8688 val_score: 2.8688 lr: 0.00100 elapsed: 52m 9s\nEpoch 336 / 19999 train Loss: 6.1677 val_loss: 2.9656 val_score: 2.9665 lr: 0.00100 elapsed: 52m 18s\nEpoch 337 / 19999 train Loss: 6.1683 val_loss: 2.8961 val_score: 2.8974 lr: 0.00100 elapsed: 52m 28s\nEpoch 338 / 19999 train Loss: 6.1549 val_loss: 3.1306 val_score: 3.1321 lr: 0.00100 elapsed: 52m 37s\nEpoch 339 / 19999 train Loss: 6.2153 val_loss: 3.1214 val_score: 3.1226 lr: 0.00100 elapsed: 52m 46s\nEpoch 340 / 19999 train Loss: 6.2074 val_loss: 3.0632 val_score: 3.0627 lr: 0.00100 elapsed: 52m 56s\nEpoch 341 / 19999 train Loss: 6.2123 val_loss: 2.8695 val_score: 2.8711 lr: 0.00100 elapsed: 53m 5s\nEpoch 342 / 19999 train Loss: 6.1266 val_loss: 2.9341 val_score: 2.9352 lr: 0.00100 elapsed: 53m 14s\nEpoch 343 / 19999 train Loss: 6.1276 val_loss: 3.0509 val_score: 3.0507 lr: 0.00100 elapsed: 53m 24s\nEpoch 344 / 19999 train Loss: 6.1218 val_loss: 2.8270 val_score: 2.8276 lr: 0.00100 elapsed: 53m 33s\n----------------------------------------------------------------------->> loss improved to 2.82764\nEpoch 345 / 19999 train Loss: 6.1393 val_loss: 2.9067 val_score: 2.9085 lr: 0.00100 elapsed: 53m 43s\nEpoch 346 / 19999 train Loss: 6.1779 val_loss: 3.2484 val_score: 3.2507 lr: 0.00100 elapsed: 53m 52s\nEpoch 347 / 19999 train Loss: 6.1584 val_loss: 2.9175 val_score: 2.9183 lr: 0.00100 elapsed: 54m 1s\nEpoch 348 / 19999 train Loss: 6.1463 val_loss: 2.8545 val_score: 2.8555 lr: 0.00100 elapsed: 54m 11s\nEpoch 349 / 19999 train Loss: 6.1014 val_loss: 2.9539 val_score: 2.9539 lr: 0.00100 elapsed: 54m 20s\nEpoch 350 / 19999 train Loss: 6.0997 val_loss: 3.1085 val_score: 3.1090 lr: 0.00100 elapsed: 54m 30s\nEpoch 351 / 19999 train Loss: 6.0668 val_loss: 2.9901 val_score: 2.9910 lr: 0.00100 elapsed: 54m 39s\nEpoch 352 / 19999 train 
Loss: 6.1616 val_loss: 2.9785 val_score: 2.9788 lr: 0.00100 elapsed: 54m 48s\nEpoch 353 / 19999 train Loss: 6.1469 val_loss: 3.0949 val_score: 3.0945 lr: 0.00100 elapsed: 54m 58s\nEpoch 354 / 19999 train Loss: 6.1788 val_loss: 2.8787 val_score: 2.8794 lr: 0.00100 elapsed: 55m 7s\nEpoch 355 / 19999 train Loss: 6.0697 val_loss: 2.9433 val_score: 2.9442 lr: 0.00100 elapsed: 55m 16s\nEpoch 356 / 19999 train Loss: 6.0978 val_loss: 2.8947 val_score: 2.8955 lr: 0.00100 elapsed: 55m 26s\nEpoch 357 / 19999 train Loss: 6.1104 val_loss: 2.9273 val_score: 2.9276 lr: 0.00100 elapsed: 55m 35s\nEpoch 358 / 19999 train Loss: 6.0831 val_loss: 2.9865 val_score: 2.9867 lr: 0.00100 elapsed: 55m 45s\nEpoch 359 / 19999 train Loss: 6.1229 val_loss: 3.0937 val_score: 3.0944 lr: 0.00100 elapsed: 55m 54s\nEpoch 360 / 19999 train Loss: 6.0467 val_loss: 2.9066 val_score: 2.9065 lr: 0.00100 elapsed: 56m 3s\nEpoch 361 / 19999 train Loss: 6.0603 val_loss: 2.8671 val_score: 2.8675 lr: 0.00100 elapsed: 56m 13s\nEpoch 362 / 19999 train Loss: 6.1144 val_loss: 2.8985 val_score: 2.8999 lr: 0.00100 elapsed: 56m 22s\nEpoch 363 / 19999 train Loss: 6.0874 val_loss: 3.0226 val_score: 3.0239 lr: 0.00100 elapsed: 56m 31s\nEpoch 364 / 19999 train Loss: 6.0992 val_loss: 2.9130 val_score: 2.9146 lr: 0.00100 elapsed: 56m 41s\nEpoch 365 / 19999 train Loss: 6.1073 val_loss: 3.1250 val_score: 3.1263 lr: 0.00100 elapsed: 56m 50s\nEpoch 366 / 19999 train Loss: 6.0342 val_loss: 2.7254 val_score: 2.7257 lr: 0.00100 elapsed: 56m 59s\n----------------------------------------------------------------------->> loss improved to 2.72574\nEpoch 367 / 19999 train Loss: 6.0446 val_loss: 2.7092 val_score: 2.7102 lr: 0.00100 elapsed: 57m 9s\n----------------------------------------------------------------------->> loss improved to 2.71017\nEpoch 368 / 19999 train Loss: 6.0014 val_loss: 2.9597 val_score: 2.9606 lr: 0.00100 elapsed: 57m 18s\nEpoch 369 / 19999 train Loss: 6.1271 val_loss: 2.9423 val_score: 2.9433 lr: 0.00100 elapsed: 57m 28s\nEpoch 370 / 19999 train Loss: 6.0750 val_loss: 2.8258 val_score: 2.8259 lr: 0.00100 elapsed: 57m 37s\nEpoch 371 / 19999 train Loss: 6.0792 val_loss: 2.7007 val_score: 2.7015 lr: 0.00100 elapsed: 57m 46s\n----------------------------------------------------------------------->> loss improved to 2.70149\nEpoch 372 / 19999 train Loss: 5.9958 val_loss: 2.7988 val_score: 2.7995 lr: 0.00100 elapsed: 57m 55s\nEpoch 373 / 19999 train Loss: 6.0034 val_loss: 2.8952 val_score: 2.8938 lr: 0.00100 elapsed: 58m 5s\nEpoch 374 / 19999 train Loss: 6.0829 val_loss: 2.7771 val_score: 2.7784 lr: 0.00100 elapsed: 58m 14s\nEpoch 375 / 19999 train Loss: 6.0663 val_loss: 3.0487 val_score: 3.0498 lr: 0.00100 elapsed: 58m 23s\nEpoch 376 / 19999 train Loss: 6.0670 val_loss: 2.8808 val_score: 2.8820 lr: 0.00100 elapsed: 58m 33s\nEpoch 377 / 19999 train Loss: 6.0295 val_loss: 3.0317 val_score: 3.0330 lr: 0.00100 elapsed: 58m 42s\nEpoch 378 / 19999 train Loss: 6.0489 val_loss: 2.8442 val_score: 2.8448 lr: 0.00100 elapsed: 58m 51s\nEpoch 379 / 19999 train Loss: 5.9508 val_loss: 2.8387 val_score: 2.8394 lr: 0.00100 elapsed: 59m 1s\nEpoch 380 / 19999 train Loss: 5.9541 val_loss: 2.9239 val_score: 2.9241 lr: 0.00100 elapsed: 59m 10s\nEpoch 381 / 19999 train Loss: 6.0275 val_loss: 2.8657 val_score: 2.8658 lr: 0.00100 elapsed: 59m 19s\nEpoch 382 / 19999 train Loss: 6.0226 val_loss: 2.6385 val_score: 2.6393 lr: 0.00100 elapsed: 59m 29s\n----------------------------------------------------------------------->> loss improved to 2.63931\nEpoch 383 / 19999 
train Loss: 6.0156 val_loss: 2.9353 val_score: 2.9352 lr: 0.00100 elapsed: 59m 38s\nEpoch 384 / 19999 train Loss: 6.0199 val_loss: 2.8376 val_score: 2.8383 lr: 0.00100 elapsed: 59m 47s\nEpoch 385 / 19999 train Loss: 6.0016 val_loss: 2.6730 val_score: 2.6745 lr: 0.00100 elapsed: 59m 57s\nEpoch 386 / 19999 train Loss: 6.0381 val_loss: 2.8569 val_score: 2.8577 lr: 0.00100 elapsed: 60m 6s\nEpoch 387 / 19999 train Loss: 5.9869 val_loss: 2.8065 val_score: 2.8077 lr: 0.00100 elapsed: 60m 16s\nEpoch 388 / 19999 train Loss: 5.9730 val_loss: 2.7291 val_score: 2.7305 lr: 0.00100 elapsed: 60m 25s\nEpoch 389 / 19999 train Loss: 5.9587 val_loss: 2.7685 val_score: 2.7705 lr: 0.00100 elapsed: 60m 34s\nEpoch 390 / 19999 train Loss: 6.0011 val_loss: 2.9387 val_score: 2.9412 lr: 0.00100 elapsed: 60m 44s\nEpoch 391 / 19999 train Loss: 6.0916 val_loss: 2.9252 val_score: 2.9260 lr: 0.00100 elapsed: 60m 53s\nEpoch 392 / 19999 train Loss: 5.9616 val_loss: 2.9987 val_score: 2.9990 lr: 0.00100 elapsed: 61m 2s\nEpoch 393 / 19999 train Loss: 6.0019 val_loss: 2.8255 val_score: 2.8264 lr: 0.00100 elapsed: 61m 12s\nEpoch 394 / 19999 train Loss: 5.9772 val_loss: 2.7077 val_score: 2.7088 lr: 0.00100 elapsed: 61m 21s\nEpoch 395 / 19999 train Loss: 6.0437 val_loss: 2.8215 val_score: 2.8234 lr: 0.00100 elapsed: 61m 30s\nEpoch 396 / 19999 train Loss: 5.9122 val_loss: 2.5911 val_score: 2.5909 lr: 0.00100 elapsed: 61m 40s\n----------------------------------------------------------------------->> loss improved to 2.59087\nEpoch 397 / 19999 train Loss: 5.9513 val_loss: 2.7415 val_score: 2.7428 lr: 0.00100 elapsed: 61m 49s\nEpoch 398 / 19999 train Loss: 5.9705 val_loss: 2.8265 val_score: 2.8275 lr: 0.00100 elapsed: 61m 58s\nEpoch 399 / 19999 train Loss: 5.9671 val_loss: 2.7792 val_score: 2.7789 lr: 0.00100 elapsed: 62m 8s\nEpoch 400 / 19999 train Loss: 5.9330 val_loss: 2.8474 val_score: 2.8484 lr: 0.00100 elapsed: 62m 17s\nEpoch 401 / 19999 train Loss: 6.0331 val_loss: 2.9109 val_score: 2.9114 lr: 0.00100 elapsed: 62m 26s\nEpoch 402 / 19999 train Loss: 5.9122 val_loss: 2.8825 val_score: 2.8825 lr: 0.00100 elapsed: 62m 35s\nEpoch 403 / 19999 train Loss: 5.9299 val_loss: 2.7487 val_score: 2.7473 lr: 0.00100 elapsed: 62m 44s\nEpoch 404 / 19999 train Loss: 5.9472 val_loss: 2.8578 val_score: 2.8585 lr: 0.00100 elapsed: 62m 54s\nEpoch 405 / 19999 train Loss: 5.8811 val_loss: 2.7274 val_score: 2.7270 lr: 0.00100 elapsed: 63m 3s\nEpoch 406 / 19999 train Loss: 6.0210 val_loss: 2.9645 val_score: 2.9620 lr: 0.00100 elapsed: 63m 12s\nEpoch 407 / 19999 train Loss: 5.9788 val_loss: 2.8582 val_score: 2.8572 lr: 0.00100 elapsed: 63m 22s\nEpoch 408 / 19999 train Loss: 5.9267 val_loss: 2.7440 val_score: 2.7447 lr: 0.00100 elapsed: 63m 31s\nEpoch 409 / 19999 train Loss: 6.0150 val_loss: 2.8833 val_score: 2.8842 lr: 0.00100 elapsed: 63m 40s\nEpoch 410 / 19999 train Loss: 5.9105 val_loss: 2.8155 val_score: 2.8140 lr: 0.00100 elapsed: 63m 50s\nEpoch 411 / 19999 train Loss: 5.9187 val_loss: 2.8011 val_score: 2.8011 lr: 0.00100 elapsed: 63m 59s\nEpoch 412 / 19999 train Loss: 5.8959 val_loss: 2.7229 val_score: 2.7234 lr: 0.00100 elapsed: 64m 8s\nEpoch 413 / 19999 train Loss: 5.9161 val_loss: 2.6338 val_score: 2.6344 lr: 0.00100 elapsed: 64m 18s\nEpoch 414 / 19999 train Loss: 5.8779 val_loss: 2.7695 val_score: 2.7705 lr: 0.00100 elapsed: 64m 27s\nEpoch 415 / 19999 train Loss: 5.8768 val_loss: 2.9475 val_score: 2.9487 lr: 0.00100 elapsed: 64m 37s\nEpoch 416 / 19999 train Loss: 5.8903 val_loss: 2.7013 val_score: 2.7024 lr: 0.00100 elapsed: 64m 46s\nEpoch 417 
/ 19999 train Loss: 5.9124 val_loss: 2.8692 val_score: 2.8707 lr: 0.00100 elapsed: 64m 55s\nEpoch 418 / 19999 train Loss: 5.8840 val_loss: 2.7474 val_score: 2.7481 lr: 0.00100 elapsed: 65m 5s\nEpoch 419 / 19999 train Loss: 5.9178 val_loss: 2.8860 val_score: 2.8854 lr: 0.00100 elapsed: 65m 14s\nEpoch 420 / 19999 train Loss: 5.9056 val_loss: 2.7318 val_score: 2.7313 lr: 0.00100 elapsed: 65m 23s\nEpoch 421 / 19999 train Loss: 5.8454 val_loss: 2.7603 val_score: 2.7611 lr: 0.00100 elapsed: 65m 33s\nEpoch 422 / 19999 train Loss: 5.8798 val_loss: 2.7376 val_score: 2.7381 lr: 0.00100 elapsed: 65m 42s\nEpoch 423 / 19999 train Loss: 5.9434 val_loss: 2.9586 val_score: 2.9588 lr: 0.00100 elapsed: 65m 51s\nEpoch 424 / 19999 train Loss: 5.9367 val_loss: 2.7782 val_score: 2.7795 lr: 0.00100 elapsed: 66m 1s\nEpoch 425 / 19999 train Loss: 5.8869 val_loss: 2.9043 val_score: 2.9051 lr: 0.00100 elapsed: 66m 10s\nEpoch 426 / 19999 train Loss: 5.8485 val_loss: 2.6236 val_score: 2.6230 lr: 0.00100 elapsed: 66m 19s\nEpoch 427 / 19999 train Loss: 5.8306 val_loss: 2.6239 val_score: 2.6242 lr: 0.00100 elapsed: 66m 29s\nEpoch 428 / 19999 train Loss: 5.8716 val_loss: 2.6729 val_score: 2.6741 lr: 0.00100 elapsed: 66m 38s\nEpoch 429 / 19999 train Loss: 5.8503 val_loss: 2.6906 val_score: 2.6912 lr: 0.00100 elapsed: 66m 47s\nEpoch 430 / 19999 train Loss: 5.8591 val_loss: 2.6260 val_score: 2.6268 lr: 0.00100 elapsed: 66m 56s\nEpoch 431 / 19999 train Loss: 5.8135 val_loss: 2.6123 val_score: 2.6127 lr: 0.00100 elapsed: 67m 5s\nEpoch 432 / 19999 train Loss: 5.8582 val_loss: 2.7601 val_score: 2.7608 lr: 0.00100 elapsed: 67m 15s\nEpoch 433 / 19999 train Loss: 5.8327 val_loss: 2.6560 val_score: 2.6574 lr: 0.00100 elapsed: 67m 24s\nEpoch 434 / 19999 train Loss: 5.8897 val_loss: 2.7126 val_score: 2.7130 lr: 0.00100 elapsed: 67m 34s\nEpoch 435 / 19999 train Loss: 5.8558 val_loss: 2.6712 val_score: 2.6719 lr: 0.00100 elapsed: 67m 43s\nEpoch 436 / 19999 train Loss: 5.8308 val_loss: 2.6307 val_score: 2.6295 lr: 0.00100 elapsed: 67m 52s\nEpoch 437 / 19999 train Loss: 5.8484 val_loss: 2.9107 val_score: 2.9111 lr: 0.00100 elapsed: 68m 2s\nEpoch 438 / 19999 train Loss: 5.8111 val_loss: 2.6620 val_score: 2.6634 lr: 0.00100 elapsed: 68m 11s\nEpoch 439 / 19999 train Loss: 5.7857 val_loss: 2.6723 val_score: 2.6733 lr: 0.00100 elapsed: 68m 21s\nEpoch 440 / 19999 train Loss: 5.8223 val_loss: 2.7523 val_score: 2.7528 lr: 0.00100 elapsed: 68m 30s\nEpoch 441 / 19999 train Loss: 5.8797 val_loss: 2.7215 val_score: 2.7219 lr: 0.00100 elapsed: 68m 39s\nEpoch 442 / 19999 train Loss: 5.8760 val_loss: 2.7974 val_score: 2.7991 lr: 0.00100 elapsed: 68m 48s\nEpoch 443 / 19999 train Loss: 5.8534 val_loss: 2.7439 val_score: 2.7447 lr: 0.00100 elapsed: 68m 58s\nEpoch 444 / 19999 train Loss: 5.8010 val_loss: 2.7214 val_score: 2.7228 lr: 0.00100 elapsed: 69m 7s\nEpoch 445 / 19999 train Loss: 5.8362 val_loss: 2.6792 val_score: 2.6790 lr: 0.00100 elapsed: 69m 16s\nEpoch 446 / 19999 train Loss: 5.7693 val_loss: 2.6060 val_score: 2.6061 lr: 0.00100 elapsed: 69m 26s\nEpoch 447 / 19999 train Loss: 5.7941 val_loss: 2.7743 val_score: 2.7749 lr: 0.00100 elapsed: 69m 35s\nEpoch 448 / 19999 train Loss: 5.8322 val_loss: 2.5606 val_score: 2.5612 lr: 0.00100 elapsed: 69m 44s\n----------------------------------------------------------------------->> loss improved to 2.56122\nEpoch 449 / 19999 train Loss: 5.7524 val_loss: 2.5867 val_score: 2.5872 lr: 0.00100 elapsed: 69m 54s\nEpoch 450 / 19999 train Loss: 5.7898 val_loss: 2.7490 val_score: 2.7498 lr: 0.00100 elapsed: 70m 
3s\nEpoch 451 / 19999 train Loss: 5.8553 val_loss: 2.6920 val_score: 2.6919 lr: 0.00100 elapsed: 70m 12s\nEpoch 452 / 19999 train Loss: 5.8176 val_loss: 2.6751 val_score: 2.6752 lr: 0.00100 elapsed: 70m 22s\nEpoch 453 / 19999 train Loss: 5.8328 val_loss: 2.6842 val_score: 2.6846 lr: 0.00100 elapsed: 70m 31s\nEpoch 454 / 19999 train Loss: 5.8777 val_loss: 2.6891 val_score: 2.6901 lr: 0.00100 elapsed: 70m 40s\nEpoch 455 / 19999 train Loss: 5.8566 val_loss: 2.6969 val_score: 2.6976 lr: 0.00100 elapsed: 70m 50s\nEpoch 456 / 19999 train Loss: 5.7404 val_loss: 2.6417 val_score: 2.6424 lr: 0.00100 elapsed: 70m 59s\nEpoch 457 / 19999 train Loss: 5.8005 val_loss: 2.9136 val_score: 2.9122 lr: 0.00100 elapsed: 71m 8s\nEpoch 458 / 19999 train Loss: 5.7286 val_loss: 2.6263 val_score: 2.6253 lr: 0.00100 elapsed: 71m 18s\nEpoch 459 / 19999 train Loss: 5.7970 val_loss: 2.5751 val_score: 2.5752 lr: 0.00100 elapsed: 71m 27s\nEpoch 460 / 19999 train Loss: 5.7022 val_loss: 2.5890 val_score: 2.5894 lr: 0.00100 elapsed: 71m 36s\nEpoch 461 / 19999 train Loss: 5.7588 val_loss: 2.6247 val_score: 2.6246 lr: 0.00100 elapsed: 71m 46s\nEpoch 462 / 19999 train Loss: 5.7987 val_loss: 2.5205 val_score: 2.5210 lr: 0.00100 elapsed: 71m 55s\n----------------------------------------------------------------------->> loss improved to 2.52099\nEpoch 463 / 19999 train Loss: 5.7815 val_loss: 2.6330 val_score: 2.6332 lr: 0.00100 elapsed: 72m 4s\nEpoch 464 / 19999 train Loss: 5.7593 val_loss: 2.5942 val_score: 2.5954 lr: 0.00100 elapsed: 72m 13s\nEpoch 465 / 19999 train Loss: 5.7742 val_loss: 2.8263 val_score: 2.8260 lr: 0.00100 elapsed: 72m 23s\nEpoch 466 / 19999 train Loss: 5.8016 val_loss: 2.8364 val_score: 2.8362 lr: 0.00100 elapsed: 72m 32s\nEpoch 467 / 19999 train Loss: 5.7497 val_loss: 2.5094 val_score: 2.5100 lr: 0.00100 elapsed: 72m 41s\n----------------------------------------------------------------------->> loss improved to 2.50999\nEpoch 468 / 19999 train Loss: 5.7392 val_loss: 2.7292 val_score: 2.7297 lr: 0.00100 elapsed: 72m 51s\nEpoch 469 / 19999 train Loss: 5.7542 val_loss: 2.6880 val_score: 2.6885 lr: 0.00100 elapsed: 73m 0s\nEpoch 470 / 19999 train Loss: 5.7676 val_loss: 2.9074 val_score: 2.9073 lr: 0.00100 elapsed: 73m 9s\nEpoch 471 / 19999 train Loss: 5.7714 val_loss: 2.6242 val_score: 2.6237 lr: 0.00100 elapsed: 73m 19s\nEpoch 472 / 19999 train Loss: 5.7257 val_loss: 2.5769 val_score: 2.5774 lr: 0.00100 elapsed: 73m 28s\nEpoch 473 / 19999 train Loss: 5.6943 val_loss: 2.6224 val_score: 2.6223 lr: 0.00100 elapsed: 73m 38s\nEpoch 474 / 19999 train Loss: 5.7154 val_loss: 3.2993 val_score: 3.2991 lr: 0.00100 elapsed: 73m 47s\nEpoch 475 / 19999 train Loss: 5.7783 val_loss: 2.6784 val_score: 2.6784 lr: 0.00100 elapsed: 73m 56s\nEpoch 476 / 19999 train Loss: 5.7643 val_loss: 2.6626 val_score: 2.6616 lr: 0.00100 elapsed: 74m 5s\nEpoch 477 / 19999 train Loss: 5.7472 val_loss: 2.6903 val_score: 2.6902 lr: 0.00100 elapsed: 74m 15s\nEpoch 478 / 19999 train Loss: 5.7509 val_loss: 2.5454 val_score: 2.5455 lr: 0.00100 elapsed: 74m 24s\nEpoch 479 / 19999 train Loss: 5.8245 val_loss: 2.6686 val_score: 2.6691 lr: 0.00100 elapsed: 74m 33s\nEpoch 480 / 19999 train Loss: 5.6830 val_loss: 2.7228 val_score: 2.7227 lr: 0.00100 elapsed: 74m 43s\nEpoch 481 / 19999 train Loss: 5.7193 val_loss: 2.6375 val_score: 2.6377 lr: 0.00100 elapsed: 74m 52s\nEpoch 482 / 19999 train Loss: 5.7287 val_loss: 2.6853 val_score: 2.6866 lr: 0.00100 elapsed: 75m 1s\nEpoch 483 / 19999 train Loss: 5.7211 val_loss: 2.5611 val_score: 2.5616 lr: 0.00100 elapsed: 
75m 11s\nEpoch 484 / 19999 train Loss: 5.7012 val_loss: 2.6339 val_score: 2.6346 lr: 0.00100 elapsed: 75m 20s\nEpoch 485 / 19999 train Loss: 5.7341 val_loss: 2.6566 val_score: 2.6571 lr: 0.00100 elapsed: 75m 29s\nEpoch 486 / 19999 train Loss: 5.7136 val_loss: 2.6010 val_score: 2.6021 lr: 0.00100 elapsed: 75m 39s\nEpoch 487 / 19999 train Loss: 5.7098 val_loss: 2.5602 val_score: 2.5602 lr: 0.00100 elapsed: 75m 48s\nEpoch 488 / 19999 train Loss: 5.6926 val_loss: 2.6252 val_score: 2.6256 lr: 0.00100 elapsed: 75m 57s\nEpoch 489 / 19999 train Loss: 5.7531 val_loss: 2.7364 val_score: 2.7370 lr: 0.00100 elapsed: 76m 7s\nEpoch 490 / 19999 train Loss: 5.6658 val_loss: 2.5434 val_score: 2.5425 lr: 0.00100 elapsed: 76m 16s\nEpoch 491 / 19999 train Loss: 5.8743 val_loss: 2.6859 val_score: 2.6863 lr: 0.00100 elapsed: 76m 25s\nEpoch 492 / 19999 train Loss: 5.7086 val_loss: 2.6438 val_score: 2.6438 lr: 0.00100 elapsed: 76m 35s\nEpoch 493 / 19999 train Loss: 5.7015 val_loss: 2.5897 val_score: 2.5908 lr: 0.00100 elapsed: 76m 44s\nEpoch 494 / 19999 train Loss: 5.6764 val_loss: 2.6466 val_score: 2.6460 lr: 0.00100 elapsed: 76m 53s\nEpoch 495 / 19999 train Loss: 5.6656 val_loss: 2.5711 val_score: 2.5713 lr: 0.00100 elapsed: 77m 3s\nEpoch 496 / 19999 train Loss: 5.6527 val_loss: 2.4893 val_score: 2.4894 lr: 0.00100 elapsed: 77m 12s\n----------------------------------------------------------------------->> loss improved to 2.48935\nEpoch 497 / 19999 train Loss: 5.6939 val_loss: 2.6161 val_score: 2.6170 lr: 0.00100 elapsed: 77m 21s\n"
],
[
"epoch_df = pd.DataFrame()\nepoch_df['epoch'] = best_epoch_list\nepoch_df['val_score'] = best_valid_score_list",
"_____no_output_____"
],
[
"epoch_df.sort_values('val_score').head()",
"_____no_output_____"
],
[
"score_to = round(min(best_valid_score_list),6)\nscore_to",
"_____no_output_____"
],
[
"%%time\nbatch_size = 2048\ntest_loader = build_dataloader(test_df.iloc[:, 1:].values, Y=None, batch_size=batch_size, shuffle=False)\n\nmodel = build_model(device, model_name='mlp')\nmodel.to(device)\n\nmodel.eval()\nmodel.load_state_dict(torch.load(model_path))\n\ntest_preds = np.zeros((len(test_loader.dataset), 4))\n\nwith torch.no_grad():\n for batch_idx, data in enumerate(test_loader):\n if device:\n data = data.to(device)\n outputs = model(data)\n test_preds[batch_idx * batch_size:(batch_idx+1) * batch_size] = outputs.detach().cpu().numpy()",
"no label\nCPU times: user 125 ms, sys: 364 ms, total: 489 ms\nWall time: 539 ms\n"
],
[
"submission = pd.DataFrame({'id': submission_df['id'],\n 'layer_1':test_preds.transpose()[0],\n 'layer_2':test_preds.transpose()[1],\n 'layer_3':test_preds.transpose()[2],\n 'layer_4':test_preds.transpose()[3]})\nsubmission.to_csv('../wafer/mlp_submission/mlp_v2_3610e_{}_submission.csv'.format(score_to), index=False)\n\nsubmission.head()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e746b02f41ae552c1a3923a8a2b1d8ed2190b477 | 73,425 | ipynb | Jupyter Notebook | Homework/2018-hw2-python-solutions.ipynb | U2NG/win2020-gps-python | e28a99ae5c8733c7b202b1e2d708d48e58f65938 | [
"CC-BY-4.0"
] | 2 | 2019-01-31T02:18:33.000Z | 2022-01-03T19:21:14.000Z | Homework/2018-hw2-python-solutions.ipynb | U2NG/win2020-gps-python | e28a99ae5c8733c7b202b1e2d708d48e58f65938 | [
"CC-BY-4.0"
] | null | null | null | Homework/2018-hw2-python-solutions.ipynb | U2NG/win2020-gps-python | e28a99ae5c8733c7b202b1e2d708d48e58f65938 | [
"CC-BY-4.0"
] | 1 | 2019-02-01T15:09:33.000Z | 2019-02-01T15:09:33.000Z | 196.323529 | 33,132 | 0.902172 | [
[
[
"**Instructions:**\n\n1. Rename the notebook (click on the title at the top of the page next to 'jupyter') to your A number, name and HW2Py:\n example (replace spaces with underscores -- this seems to work better): \"Reid-Otsuji-A1234567-HW2Py\"\n2. Add your code or markdown answers under the question. \n3. `File>Download as` a 'notebook' NOTE: Jupyter notebook is constantly autosaving to whatever directory you have opened it in.\n4. Upload to TritonED in Assignment 2 upload. ",
"_____no_output_____"
],
[
"# Working with lists",
"_____no_output_____"
],
[
"Create a `list` called `me` that contains the following elements in this order:\n\n* Your name\n* Your favorite color\n* You sudent ID number\n\nPerform the following on this list:\n\n## Loop through the list and print out the items. ",
"_____no_output_____"
]
],
[
[
"me = [\"reid otsuji\", 'blue', 'a23456789']",
"_____no_output_____"
]
],
[
[
"## Print just your name and student ID. You can use a slicing operator or a loop. ",
"_____no_output_____"
]
],
[
[
"me[0::2]",
"_____no_output_____"
]
],
[
[
"## Using the join method, join the list into a string separated by a comma ",
"_____no_output_____"
]
],
[
[
"print('join the list:', ','.join(me))",
"join the list: reid otsuji,blue,a23456789\n"
]
],
[
[
"# Writing a loop to generate a list of letters.\n\n### Use a for-loop to convert the string \"global\" into a list of letters:\n[\"g\", \"l\", \"o\", \"b\", \"a\", \"l\"]\n\nHint: You can create an empty list like this:",
"_____no_output_____"
]
],
[
[
"my_list = []",
"_____no_output_____"
],
[
"my_list = []\nfor char in \"global\":\n my_list.append(char)\nprint(my_list)",
"['g', 'l', 'o', 'b', 'a', 'l']\n"
]
],
[
[
"# Write a program that reads in the regional gapminder data sets and plots the average GDP per capita for each region over time in a single chart.\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport glob\nimport pandas",
"_____no_output_____"
]
],
[
[
"Fill in the blanks for the following code. Remember you want to:\n\n1. Loop thru a list of the contintent gdp files in the data/ directory\n2. Read each of those files in as data.frames\n3. Get the mean for each\n4. Plot each \n5. Add a legend for each. Notice we are doing this by using a list of the countries in alpha order\n\nNote: Your graph should look like the one below. ",
"_____no_output_____"
]
],
[
[
"for filename in glob.____('data/______.csv'):\n df = pandas._______(filename)\n cont = df.____()\n cont.____()\n plt.____(rotation=90)\n plt.____(['Africa', 'Asia', 'Americas', 'Europe', 'Oceania'])\n plt.style.use('seaborn-muted')",
"_____no_output_____"
],
[
"for filename in glob.glob('data/gapminder_gdp*.csv'):\n df = pandas.read_csv(filename)\n df.mean().plot()\n plt.xticks(rotation=90)\n plt.legend(['africa','asia','americas','europe','oceania'])\n plt.style.use('ggplot')",
"_____no_output_____"
]
],
[
[
"# Encapsulating Data Analysis\n\nRun the following code. This code reads in the gapminder file for asia and then subsets the Japan data into a new dataframe called `japan`. Remember: Use the correct file path for the .csv saved on your computer.\n",
"_____no_output_____"
]
],
[
[
"import pandas\n\ndf = pandas.read_csv('data/gapminder_gdp_asia.csv', index_col=0) #note: use the file path for your saved data location\njapan = df.ix['Japan']\njapan.tail(6)",
"_____no_output_____"
]
],
[
[
"Using the japan data frame we created above, the below code is one way to get the average for Japan in the 80s. We create a year base (198) using floor division and then add the strings 2 and 7 to that to index for the 1982 and 1987 years (the two years we have in the 80s). We add those together and then divide by 2 to get the average. If the below code doesn't make sense, run the parts individually to see how it works. ",
"_____no_output_____"
]
],
[
[
"year = 1983 \ngdp_decade = 'gdpPercap_' + str(year // 10) #`//` is Floor division gives us 198\navg = (japan.ix[gdp_decade + '2'] + japan.ix[gdp_decade + '7']) / 2 # we want to add 1982 and 1987 and divide by 2 \nprint(avg)",
"20880.0238\n"
]
],
[
[
"Given the code above, abstract the code into a function named `avg_gdp_in_decade` that will take a `country`, `continent` and `year` as parameters and return the average. We should be able to call your function like this: \n\n```{python}\navg_gdp_in_decade('Algeria','Africa', 1970) #1970 for the 70s, 1980 would be 80s\n```\n\nI started the function for you below:",
"_____no_output_____"
]
],
[
[
"def avg_dgp_in_decade(country, continent, year):\n #read the data in\n #create a new data.frame for the country\n #get the decade base three numbers 198 for 80s\n #subset and calculate avg\n #return average\n pass # pass tells python to pass on the function, remove it when you want to run your function",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e746c75d4b80d5691754eda94a91f63261946922 | 8,659 | ipynb | Jupyter Notebook | doc/de.ipynb | kyodocn/deplacy | 5e6edbcc23151e6625abbaf018f513778ac1d3da | [
"MIT"
] | 1 | 2021-06-21T14:02:24.000Z | 2021-06-21T14:02:24.000Z | doc/de.ipynb | kyodocn/deplacy | 5e6edbcc23151e6625abbaf018f513778ac1d3da | [
"MIT"
] | null | null | null | doc/de.ipynb | kyodocn/deplacy | 5e6edbcc23151e6625abbaf018f513778ac1d3da | [
"MIT"
] | null | null | null | 31.717949 | 241 | 0.528237 | [
[
[
"# Syntaxanalyse durch [Deplacy](https://koichiyasuoka.github.io/deplacy/)\n",
"_____no_output_____"
],
[
"## mit [Camphr-Udify](https://camphr.readthedocs.io/en/latest/notes/udify.html)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy camphr 'unofficial-udify>=0.3.0' en-udify@https://github.com/PKSHATechnology-Research/camphr_models/releases/download/0.7.0/en_udify-0.7.tar.gz\nimport pkg_resources,imp\nimp.reload(pkg_resources)\nimport spacy\nnlp=spacy.load(\"en_udify\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [Stanza](https://stanfordnlp.github.io/stanza)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy stanza\nimport stanza\nstanza.download(\"de\")\nnlp=stanza.Pipeline(\"de\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [COMBO-pytorch](https://gitlab.clarin-pl.eu/syntactic-tools/combo)\n",
"_____no_output_____"
]
],
[
[
"!pip install --index-url https://pypi.clarin-pl.eu/simple deplacy combo\nimport combo.predict\nnlp=combo.predict.COMBO.from_pretrained(\"german-ud27\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [UDPipe 2](http://ufal.mff.cuni.cz/udpipe/2)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy\ndef nlp(t):\n import urllib.request,urllib.parse,json\n with urllib.request.urlopen(\"https://lindat.mff.cuni.cz/services/udpipe/api/process?model=de&tokenizer&tagger&parser&data=\"+urllib.parse.quote(t)) as r:\n return json.loads(r.read())[\"result\"]\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [SpaCy-jPTDP](https://github.com/KoichiYasuoka/spaCy-jPTDP)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy spacy_jptdp\nimport spacy_jptdp\nnlp=spacy_jptdp.load(\"de_gsd\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [Turku-neural-parser-pipeline](https://turkunlp.org/Turku-neural-parser-pipeline/)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy ufal.udpipe configargparse 'tensorflow<2' torch==0.4.1 torchtext==0.3.1 torchvision==0.2.1\n!test -d Turku-neural-parser-pipeline || git clone --depth=1 https://github.com/TurkuNLP/Turku-neural-parser-pipeline\n!cd Turku-neural-parser-pipeline && git submodule update --init --recursive && test -d models_de_gsd || python fetch_models.py de_gsd\nimport sys,subprocess\nnlp=lambda t:subprocess.run([sys.executable,\"full_pipeline_stream.py\",\"--gpu\",\"-1\",\"--conf\",\"models_de_gsd/pipelines.yaml\"],cwd=\"Turku-neural-parser-pipeline\",input=t,encoding=\"utf-8\",stdout=subprocess.PIPE).stdout\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [NLP-Cube](https://github.com/Adobe/NLP-Cube)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy nlpcube\nfrom cube.api import Cube\nnlp=Cube()\nnlp.load(\"de\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [Spacy-udpipe](https://github.com/TakeLab/spacy-udpipe)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy spacy-udpipe\nimport spacy_udpipe\nspacy_udpipe.download(\"de\")\nnlp=spacy_udpipe.load(\"de\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [SpaCy-COMBO](https://github.com/KoichiYasuoka/spaCy-COMBO)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy spacy_combo\nimport spacy_combo\nnlp=spacy_combo.load(\"de_gsd\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [Trankit](https://github.com/nlp-uoregon/trankit)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy trankit transformers\nimport trankit\nnlp=trankit.Pipeline(\"german\")\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
],
[
[
"## mit [Spacy](https://spacy.io/)\n",
"_____no_output_____"
]
],
[
[
"!pip install deplacy\n!python -m spacy download de_core_news_sm\nimport de_core_news_sm\nnlp=de_core_news_sm.load()\ndoc=nlp(\"Er sieht sehr jung aus.\")\nimport deplacy\ndeplacy.render(doc)\ndeplacy.serve(doc,port=None)\n# import graphviz\n# graphviz.Source(deplacy.dot(doc))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e746e0d7abbcdd164f37807d0007d3d562b2940f | 2,504 | ipynb | Jupyter Notebook | Tree/0830/590. N-ary Tree Postorder Traversal.ipynb | YuHe0108/Leetcode | 90d904dde125dd35ee256a7f383961786f1ada5d | [
"Apache-2.0"
] | 1 | 2020-08-05T11:47:47.000Z | 2020-08-05T11:47:47.000Z | Tree/0830/590. N-ary Tree Postorder Traversal.ipynb | YuHe0108/LeetCode | b9e5de69b4e4d794aff89497624f558343e362ad | [
"Apache-2.0"
] | null | null | null | Tree/0830/590. N-ary Tree Postorder Traversal.ipynb | YuHe0108/LeetCode | b9e5de69b4e4d794aff89497624f558343e362ad | [
"Apache-2.0"
] | null | null | null | 21.042017 | 58 | 0.442891 | [
[
[
"说明:\n 给定一个n元树,返回其节点值的PostOrder: left->right->node。 \n Nary-Tree输入序列化以其级别顺序遍历表示,每组子级由空值分隔(请参见示例)。",
"_____no_output_____"
]
],
[
[
"<img src='590.jpg' width=550>",
"_____no_output_____"
]
],
[
[
"class Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children",
"_____no_output_____"
]
],
[
[
"### 1、递归的方式",
"_____no_output_____"
]
],
[
[
"class Solution:\n def postorder(self, root: 'Node'):\n if not root:\n return []\n self.res = []\n self.helper(root)\n return self.res\n \n def helper(self, root):\n if not root:\n return\n for child in root.children:\n self.helper(child)\n self.res.append(root.val)",
"_____no_output_____"
]
],
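[
[
"# A minimal usage sketch (example values assumed for illustration): build a small n-ary tree\n# with the Node class above and check the recursive Solution's postorder output.\nroot = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])\nprint(Solution().postorder(root))  # expected: [5, 6, 3, 2, 4, 1]",
"_____no_output_____"
]
],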
[
[
"### 2、循环的方式",
"_____no_output_____"
]
],
[
[
"# 循环的方式:Left->Right->Node\nclass Solution:\n def postorder(self, root: 'Node'):\n if not root:\n return []\n res = []\n nodes = [root]\n while nodes:\n n = nodes.pop()\n if n:\n for child in n.children:\n nodes.append(child)\n res.insert(0, n.val)\n return res",
"_____no_output_____"
]
]
] | [
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e746e8e1dc7aea0eed52d91011c2324684791fe1 | 587 | ipynb | Jupyter Notebook | Work in Progress/Dividendinfo.ipynb | ryanhhogan3/financialintel | 0a15e2cd8bc692b83ef62d754b0e6f151cd304f1 | [
"MIT"
] | null | null | null | Work in Progress/Dividendinfo.ipynb | ryanhhogan3/financialintel | 0a15e2cd8bc692b83ef62d754b0e6f151cd304f1 | [
"MIT"
] | null | null | null | Work in Progress/Dividendinfo.ipynb | ryanhhogan3/financialintel | 0a15e2cd8bc692b83ef62d754b0e6f151cd304f1 | [
"MIT"
] | null | null | null | 17.264706 | 34 | 0.531516 | [] | [] | [] |
e746f8c7194593546cac9047eb6d1dd0868c4aa4 | 8,951 | ipynb | Jupyter Notebook | Chapter05/.ipynb_checkpoints/Exercise 5.04-checkpoint.ipynb | arifmudi/The-Data-Wrangling-Workshop | c325f6fa1c6daf8dd22e9705df48ce2644217a73 | [
"MIT"
] | 22 | 2020-06-27T04:21:49.000Z | 2022-03-08T04:39:44.000Z | Chapter05/.ipynb_checkpoints/Exercise 5.04-checkpoint.ipynb | arifmudi/The-Data-Wrangling-Workshop | c325f6fa1c6daf8dd22e9705df48ce2644217a73 | [
"MIT"
] | 2 | 2021-02-02T22:49:16.000Z | 2021-06-02T02:09:21.000Z | Chapter05/.ipynb_checkpoints/Exercise 5.04-checkpoint.ipynb | Hubertus444/The-Data-Wrangling-Workshop | ddad20f8676602ac6624e72e802769fcaff45b0f | [
"MIT"
] | 46 | 2020-04-20T13:04:11.000Z | 2022-03-22T05:23:52.000Z | 27.626543 | 75 | 0.325215 | [
[
[
"import pandas as pd\ndf5 = pd.read_csv(\"CSV_EX_skiprows.csv\")\ndf5\n",
"_____no_output_____"
],
[
"df5 = pd.read_csv(\"CSV_EX_skiprows.csv\",skiprows=2)\ndf5\n",
"_____no_output_____"
],
[
"df6 = pd.read_csv(\"CSV_EX_skipfooter.csv\",skiprows=2,\nskipfooter=1,engine='python')\ndf6\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e746ffbf434c1623a2ae533d0ac3a18a59f1d3e8 | 86,374 | ipynb | Jupyter Notebook | tutorials/FEniCS/00-StefanProblem.ipynb | yoczhang/phaseflow-fenics | 85e9e3478cba0d21c3c1dbcad4f4ce2b75d8c2d8 | [
"MIT"
] | 2 | 2021-01-03T14:57:31.000Z | 2021-01-16T08:11:48.000Z | tutorials/FEniCS/00-StefanProblem.ipynb | yoczhang/phaseflow-fenics | 85e9e3478cba0d21c3c1dbcad4f4ce2b75d8c2d8 | [
"MIT"
] | null | null | null | tutorials/FEniCS/00-StefanProblem.ipynb | yoczhang/phaseflow-fenics | 85e9e3478cba0d21c3c1dbcad4f4ce2b75d8c2d8 | [
"MIT"
] | null | null | null | 98.93929 | 16,056 | 0.863431 | [
[
[
"# Solving the Stefan problem with finite elements",
"_____no_output_____"
],
[
"This Jupyter notebook shows how to solve the Stefan problem with finite elements and goal-oriented adaptive mesh refinement (AMR) using FEniCS.",
"_____no_output_____"
],
[
"## Python packages\n\nImport the Python packages for use in this notebook.",
"_____no_output_____"
],
[
"We use the finite element method library FEniCS.",
"_____no_output_____"
]
],
[
[
"import fenics",
"_____no_output_____"
]
],
[
[
"|Note|\n|----|\n| This Jupyter notebook server is using FEniCS 2017.2.0 from ppa:fenics-packages/fenics, installed via `apt` on Ubuntu 16.04.|\n\nFEniCS has convenient plotting features that don't require us to import `matplotlib`; but using `matplotlib` directly will allow us to annotate the plots.",
"_____no_output_____"
]
],
[
[
"import matplotlib",
"_____no_output_____"
]
],
[
[
"Tell this notebook to embed graphical outputs from `matplotlib`, includings those made by `fenics.plot`.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"We will also use numpy.",
"_____no_output_____"
]
],
[
[
"import numpy",
"_____no_output_____"
]
],
[
[
"## Nomenclature\n|||\n|-|-|\n|$\\mathbf{x}$| point in the spatial domain|\n|$t$| time |\n|$T = T(\\mathbf{x},t)$| temperature field |\n|$\\phi$ | solid volume fraction |\n|$()_t = \\frac{\\partial}{\\partial t}()$| time derivative |\n|$T_r$| central temperature of the regularization |\n|$r$| smoothing parameter of the regularization |\n|$\\mathrm{Ste}$| Stefan number|\n|$\\Omega$| spatial domain |\n|$\\mathbf{V}$| finite element function space |\n|$\\psi$| test function |\n|$T_h$| hot boundary temperature |\n|$T_c$| cold boundary temperature |\n|$\\Delta t$| time step size |",
"_____no_output_____"
],
[
"## Governing equations",
"_____no_output_____"
],
[
"To model the Stefan problem with a single domain, consider the enthalpy balance from [4] with zero velocity and unit Prandtl number.\n\n\\begin{align*}\n T_t - \\nabla \\cdot (\\nabla T) - \\frac{1}{\\mathrm{Ste}}\\phi_t &= 0\n\\end{align*}\n\nwhere the regularized semi-phase-field (representing the solid volume fraction) is \n\n\\begin{align*}\n \\phi(T) = \\frac{1}{2}\\left(1 + \\tanh{\\frac{T_r - T}{r}} \\right)\n\\end{align*}",
"_____no_output_____"
],
[
"This is essentially a smoothed heaviside function, which approaches the exact heaviside function as $r$ approaches zero. Let's visualize this.",
"_____no_output_____"
]
],
[
[
"def semi_phase_field(T, T_r, r):\n \n return 0.5*(1. + numpy.tanh((T_r - T)/r))\n\n\nregularization_central_temperature = 0.\n\ntemperatures = numpy.linspace(\n regularization_central_temperature - 0.5,\n regularization_central_temperature + 0.5,\n 1000)\n\nlegend_strings = []\n\nfor regluarization_smoothing_parameter in (0.1, 0.05, 0.025):\n \n matplotlib.pyplot.plot(\n temperatures, \n semi_phase_field(\n T = temperatures, \n T_r = regularization_central_temperature, \n r = regluarization_smoothing_parameter))\n \n legend_strings.append(\n \"$r = \" + str(regluarization_smoothing_parameter) + \"$\")\n \nmatplotlib.pyplot.xlabel(\"$T$\")\n\nmatplotlib.pyplot.ylabel(\"$\\phi$\")\n\nmatplotlib.pyplot.legend(legend_strings)",
"_____no_output_____"
]
],
[
[
"## Mesh",
"_____no_output_____"
],
[
"Define a fine mesh to capture the rapid variation in $\\phi(T)$.",
"_____no_output_____"
]
],
[
[
"N = 1000\n\nmesh = fenics.UnitIntervalMesh(N)",
"_____no_output_____"
]
],
[
[
"## Finite element function space, test function, and solution function",
"_____no_output_____"
],
[
"Lets use piece-wise linear elements.",
"_____no_output_____"
]
],
[
[
"P1 = fenics.FiniteElement('P', mesh.ufl_cell(), 1)",
"_____no_output_____"
]
],
[
[
"|Note|\n|----|\n|`fenics.FiniteElement` requires the `mesh.ufl_cell()` argument to determine some aspects of the domain (e.g. that the spatial domain is two-dimensional).|",
"_____no_output_____"
],
[
"Make the finite element function space $V$, which enumerates the finite element basis functions on each cell of the mesh.",
"_____no_output_____"
]
],
[
[
"V = fenics.FunctionSpace(mesh, P1)",
"_____no_output_____"
]
],
[
[
"Make the test function $\\psi \\in \\mathbf{V}$.",
"_____no_output_____"
]
],
[
[
"psi = fenics.TestFunction(V)",
"_____no_output_____"
]
],
[
[
"Make the solution function $T \\in \\mathbf{V}$.",
"_____no_output_____"
]
],
[
[
"T = fenics.Function(V)",
"_____no_output_____"
]
],
[
[
"### Benchmark parameters",
"_____no_output_____"
],
[
"Set the Stefan number, density, specific heat capacity, and thermal diffusivity. For each we define a `fenics.Constant` for use in the variational form so that FEniCS can more efficiently compile the finite element code.",
"_____no_output_____"
]
],
[
[
"stefan_number = 0.045\n\nSte = fenics.Constant(stefan_number)",
"_____no_output_____"
]
],
[
[
"Define the regularized semi-phase-field for use with FEniCS.",
"_____no_output_____"
]
],
[
[
"regularization_central_temperature = 0.\n\nT_r = fenics.Constant(regularization_central_temperature)\n\nregularization_smoothing_parameter = 0.005\n\nr = fenics.Constant(regularization_smoothing_parameter)\n\ntanh = fenics.tanh\n\ndef phi(T):\n \n return 0.5*(1. + fenics.tanh((T_r - T)/r))",
"_____no_output_____"
]
],
[
[
"Furthermore the benchmark problem involves hot and cold walls with constant temperatures $T_h$ and $T_c$, respectively.",
"_____no_output_____"
]
],
[
[
"hot_wall_temperature = 1.\n\nT_h = fenics.Constant(hot_wall_temperature)\n\ncold_wall_temperature = -0.01\n\nT_c = fenics.Constant(cold_wall_temperature)",
"_____no_output_____"
]
],
[
[
"### Time discretization",
"_____no_output_____"
],
[
"To solve the initial value problem, we will prescribe the initial values, and then take discrete steps forward in time which solve the governing equations.\n\nWe set the initial values such that a small layer of melt already exists touching the hot wall.\n\n\\begin{align*}\n T^0 = \n \\begin{cases}\n T_h, && x_0 < x_{m,0} \\\\\n T_c, && \\mathrm{otherwise}\n \\end{cases}\n\\end{align*}",
"_____no_output_____"
],
[
"Interpolate these values to create the initial solution function.",
"_____no_output_____"
]
],
[
[
"initial_melt_thickness = 10./float(N)\n\nT_n = fenics.interpolate(\n fenics.Expression(\n \"(T_h - T_c)*(x[0] < x_m0) + T_c\",\n T_h = hot_wall_temperature, \n T_c = cold_wall_temperature,\n x_m0 = initial_melt_thickness,\n element = P1),\n V)",
"_____no_output_____"
]
],
[
[
"Let's look at the initial values now.",
"_____no_output_____"
]
],
[
[
"fenics.plot(T_n)\n\nmatplotlib.pyplot.title(r\"$T^0$\")\n\nmatplotlib.pyplot.xlabel(\"$x$\")\n\nmatplotlib.pyplot.show()\n\nfenics.plot(phi(T_n))\n\nmatplotlib.pyplot.title(r\"$\\phi(T^0)$\")\n\nmatplotlib.pyplot.xlabel(\"$x$\")\n\nmatplotlib.pyplot.show()",
"_____no_output_____"
]
],
[
[
"|Note|\n|----|\n|$\\phi$ undershoots and overshoots the expected minimum and maximum values near the rapid change. This is a common feature of interior layers in finite element solutions. Here, `fenics.plot` projected $phi(T^0)$ onto a piece-wise linear basis for plotting. This could suggest we will encounter numerical issues. We'll see what happens.|",
"_____no_output_____"
],
[
"For the time derivative terms, we apply the first-order implicit Euler finite difference time discretization, i.e.\n\n\\begin{align*}\n T_t = \\frac{T^{n+1} - T^n}{\\Delta t} \\\\\n \\phi_t = \\frac{\\phi\\left(T^{n+1}\\right) - \\phi\\left(T^n\\right)}{\\Delta t} \n\\end{align*}\n\n|Note|\n|----|\n|We will use the shorthand $T = T^{n+1}$, since we will always be solving for the latest discrete time.|\n\nChoose a time step size and set the discrete time derivatives.",
"_____no_output_____"
]
],
[
[
"timestep_size = 1.e-2\n\nDelta_t = fenics.Constant(timestep_size)\n\nT_t = (T - T_n)/Delta_t\n\nphi_t = (phi(T) - phi(T_n))/Delta_t",
"_____no_output_____"
]
],
[
[
"## Variational form",
"_____no_output_____"
],
[
"To obtain the finite element weak form, we follow the standard Ritz-Galerkin method. Therefore, we multiply the strong form *from the left* by the test function $\\psi$ from the finite element function space $V$ and integrate over the spatial domain $\\Omega$. This gives us the variational problem: Find $T \\in V$ such that\n\n\\begin{align*}\n (\\psi,T_t - \\frac{1}{\\mathrm{Ste}}\\phi_t) + (\\nabla \\psi, \\nabla T) = 0 \\quad \\forall \\psi \\in V\n\\end{align*}\n\n|Note| \n|----|\n|We denote integrating inner products over the domain as $(v,u) = \\int_\\Omega v u d \\mathbf{x}$.|",
"_____no_output_____"
],
[
"Define the nonlinear variational form for FEniCS.\n\n|Note|\n|----|\n|The term $\\phi(T)$ is nonlinear.|",
"_____no_output_____"
]
],
[
[
"dot, grad = fenics.dot, fenics.grad\n \nF = (psi*(T_t - 1./Ste*phi_t) + dot(grad(psi), grad(T)))*fenics.dx",
"_____no_output_____"
]
],
[
[
"## Linearization\nNotice that $\\mathcal{F}$ is a *nonlinear* variational form. FEniCS will solve the nonlinear problem using Newton's method. This requires computing the Jacobian (formally the Gâteaux derivative) of the nonlinear variational form, yielding a a sequence of linearized problems whose solutions may converge to approximate the nonlinear solution.\n\nWe could manually define the Jacobian; but thankfully FEniCS can do this for us.\n\n|Note|\n|----|\n|When solving linear variational problems in FEniCS, one defines the linear variational form using `fenics.TrialFunction` instead of `fenics.Function` (while both approaches will need `fenics.TestFunction`). When solving nonlinear variational problems with FEniCS, we only need `fenics.TrialFunction` to define the linearized problem, since it is the linearized problem which will be assembled into a linear system and solved.|",
"_____no_output_____"
]
],
[
[
"JF = fenics.derivative(F, T, fenics.TrialFunction(V))",
"_____no_output_____"
]
],
[
[
"## Boundary conditions",
"_____no_output_____"
],
[
"We need boundary conditions before we can define a variational *problem* (i.e. in this case a boundary value problem).\n\nWe consider a constant hot temperature on the left wall, a constant cold temperature on the right wall. Because the problem's geometry is simple, we can identify the boundaries with the following piece-wise function.\n\n\\begin{align*}\n T(\\mathbf{x}) &= \n \\begin{cases}\n T_h , && x_0 = 0 \\\\\n T_c , && x_0 = 1 \n \\end{cases}\n\\end{align*}",
"_____no_output_____"
]
],
[
[
"hot_wall = \"near(x[0], 0.)\"\n\ncold_wall = \"near(x[0], 1.)\"",
"_____no_output_____"
]
],
[
[
"Define the boundary conditions for FEniCS.",
"_____no_output_____"
]
],
[
[
"boundary_conditions = [\n fenics.DirichletBC(V, hot_wall_temperature, hot_wall),\n fenics.DirichletBC(V, cold_wall_temperature, cold_wall)]",
"_____no_output_____"
]
],
[
[
"## The variational problem",
"_____no_output_____"
],
[
"Now we have everything we need to define the variational problem for FEniCS.",
"_____no_output_____"
]
],
[
[
"problem = fenics.NonlinearVariationalProblem(F, T, boundary_conditions, JF)",
"_____no_output_____"
]
],
[
[
"## The benchmark solution",
"_____no_output_____"
],
[
"Finally we instantiate the adaptive solver with our problem and goal",
"_____no_output_____"
]
],
[
[
"solver = fenics.NonlinearVariationalSolver(problem)",
"_____no_output_____"
]
],
[
[
"and solve the problem to the prescribed tolerance.",
"_____no_output_____"
]
],
[
[
"solver.solve()",
"_____no_output_____"
]
],
[
[
"|Note|\n|----|\n|`solver.solve` will modify the solution `w`, which means that `u` and `p` will also be modified.|",
"_____no_output_____"
],
[
"Now plot the temperature and solid volume fraction.",
"_____no_output_____"
]
],
[
[
"def plot(T):\n\n fenics.plot(T)\n\n matplotlib.pyplot.title(\"Temperature\")\n\n matplotlib.pyplot.xlabel(\"$x$\")\n\n matplotlib.pyplot.ylabel(\"$T$\")\n \n matplotlib.pyplot.show()\n \n \n fenics.plot(phi(T))\n\n matplotlib.pyplot.title(\"Solid volume fraction\")\n\n matplotlib.pyplot.xlabel(\"$x$\")\n\n matplotlib.pyplot.ylabel(\"$\\phi$\")\n \n matplotlib.pyplot.show()\n \n \nplot(T)",
"_____no_output_____"
]
],
[
[
"Let's run further.",
"_____no_output_____"
]
],
[
[
"for timestep in range(10):\n \n T_n.vector()[:] = T.vector()\n \n solver.solve()\n \nplot(T)",
"_____no_output_____"
]
],
[
[
"## References\n[1] W Bangerth and R Rannacher. **Adaptive Finite Element Methods for Differential Equations**. *Springer Science & Business Media*, 2003.\n\n[2] A Logg, KA Mardal, and GN Wells. **Automated Solution of Differential Equations by the Finite Element Method**, *Springer*, 2012.\n\n[3] VR Voller, CR Swaminathan, and BG Thomas. **Fixed grid techniques for phase change problems: a review.** *International Journal of Numerical Methods and Engineering*, 30(4):875–898, 1990.\n\n[4] AG Zimmerman and J Kowalski. **Monolithic simulation of convection-coupled phase-change - verification and reproducibility.** *arXiv:1801.03429 [physics.flu-dyn]*, 2018.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7474e3c16748715b0a7b90949831579c5c4fa64 | 83,793 | ipynb | Jupyter Notebook | week1/maithili/Q3 - Q/Attempt1_filesubmission_bicycle_model.ipynb | naveenmoto/lablet102 | 24de9daa4ae75cbde93567a3239ede43c735cf03 | [
"MIT"
] | 1 | 2021-07-09T16:48:44.000Z | 2021-07-09T16:48:44.000Z | week1/maithili/Q3 - Q/Attempt1_filesubmission_bicycle_model.ipynb | naveenmoto/lablet102 | 24de9daa4ae75cbde93567a3239ede43c735cf03 | [
"MIT"
] | null | null | null | week1/maithili/Q3 - Q/Attempt1_filesubmission_bicycle_model.ipynb | naveenmoto/lablet102 | 24de9daa4ae75cbde93567a3239ede43c735cf03 | [
"MIT"
] | null | null | null | 108.540155 | 13,162 | 0.823935 | [
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport doctest\nimport copy",
"_____no_output_____"
],
[
"#https://stackoverflow.com/questions/40137950/possible-to-run-python-doctest-on-a-jupyter-cell-function\ndef test(func):\n '''\n Use test as a decorator to a function with doctests in Jupyter notebook. \n Run the cell to see the results of the doctests.\n '''\n globs = copy.copy(globals())\n globs.update({func.__name__:func})\n doctest.run_docstring_examples(func, globs, verbose=True, name=func.__name__)\n return func",
"_____no_output_____"
]
],
[
[
"### Simulate straight line and circular movements with Bicycle model\n\nRobot is at the origin (0, 0) and facing North, i.e, $\\theta = \\pi/2$. Assume the wheelbase of the vehicle $L$ = 0.9 m",
"_____no_output_____"
]
],
[
[
"\n#uncomment this decorator to test your code\n@test \ndef bicycle_model(curr_pose, v, delta, dt=1.0):\n '''\n >>> bicycle_model((0.0,0.0,0.0), 1.0, 0.0)\n (1.0, 0.0, 0.0)\n >>> bicycle_model((0.0,0.0,0.0), 0.0, np.pi/4)\n (0.0, 0.0, 0.0)\n >>> bicycle_model((0.0, 0.0, 0.0), 1.0, np.pi/4)\n (1.0, 0.0, 1.11)\n '''\n # write code to calculate next_pose\n # refer to the kinematic equations of a unicycle model\n L = 0.9\n x,y,theta = curr_pose\n x = x + v*np.cos(theta)*dt\n y = y + v*np.sin(theta)*dt\n theta = theta + round(v/L*np.tan(delta),2)*dt\n \n # Keep theta bounded between [-pi, pi]\n theta = np.arctan2(np.sin(theta), np.cos(theta))\n\n return (x, y, theta)\n #return ",
"Finding tests in bicycle_model\nTrying:\n bicycle_model((0.0,0.0,0.0), 1.0, 0.0)\nExpecting:\n (1.0, 0.0, 0.0)\nok\nTrying:\n bicycle_model((0.0,0.0,0.0), 0.0, np.pi/4)\nExpecting:\n (0.0, 0.0, 0.0)\nok\nTrying:\n bicycle_model((0.0, 0.0, 0.0), 1.0, np.pi/4)\nExpecting:\n (1.0, 0.0, 1.11)\nok\n"
],
[
"#straight line\nstraight_trajectory = []\npose = (0, 0, np.pi/2)\nsteps = 10\n#fill in v and delta values\nall_v = np.ones(steps)\nall_delta = np.zeros(steps)\nfor v,delta in zip(all_v, all_delta):\n #instruction to take v, w and compute new pose \n straight_trajectory.append(pose) \n pose = bicycle_model(pose,v,delta)\n # store new pose\ns_trajectory = np.array(straight_trajectory) \nprint(s_trajectory)\n\n ",
"[[0.00000000e+00 0.00000000e+00 1.57079633e+00]\n [6.12323400e-17 1.00000000e+00 1.57079633e+00]\n [1.22464680e-16 2.00000000e+00 1.57079633e+00]\n [1.83697020e-16 3.00000000e+00 1.57079633e+00]\n [2.44929360e-16 4.00000000e+00 1.57079633e+00]\n [3.06161700e-16 5.00000000e+00 1.57079633e+00]\n [3.67394040e-16 6.00000000e+00 1.57079633e+00]\n [4.28626380e-16 7.00000000e+00 1.57079633e+00]\n [4.89858720e-16 8.00000000e+00 1.57079633e+00]\n [5.51091060e-16 9.00000000e+00 1.57079633e+00]]\n"
],
[
"#circle\ncircle_trajectory = []\npose = (0, 0, np.pi/2)\nsteps = 10\n#fill in v and omega values\nall_v = np.ones(steps*100)\nall_delta = np.ones(steps*100)\nfor v,delta in zip(all_v, all_delta):\n #instruction to take v, delta and compute new pose \n circle_trajectory.append(pose) \n pose = bicycle_model(pose,v,delta,0.1)\n # store new pose\ncirclular_trajectory = np.array(circle_trajectory) \n # store new pose\nprint(circlular_trajectory)\n \n",
"[[ 0.00000000e+00 0.00000000e+00 1.57079633e+00]\n [ 6.12323400e-18 1.00000000e-01 1.74379633e+00]\n [-1.72138338e-02 1.98507279e-01 1.91679633e+00]\n ...\n [-1.11120531e+00 2.71692575e-01 -1.87739227e+00]\n [-1.14138682e+00 1.76355927e-01 -1.70439227e+00]\n [-1.15470671e+00 7.72469940e-02 -1.53139227e+00]]\n"
],
[
"### Plot straight and circular trajectories\nplt.figure()\nplt.axes().set_aspect(\"equal\",\"datalim\")\nplt.plot(s_trajectory[:,0], s_trajectory[:,1])\n\nplt.figure()\nplt.axes().set_aspect(\"equal\",\"datalim\")\nplt.plot(circlular_trajectory[:,0], circlular_trajectory[:,1])\n",
"_____no_output_____"
]
],
[
[
"### Simulate Bicycle model with Open Loop control\n\nWe want the robot to follow these instructions\n\n**straight 10m, right turn, straight 5m, left turn, straight 8m, right turn**\n\nIt is in open loop; control commands have to be calculated upfront. How do we do it?\n\nTo keep things simple in the first iteration, we can fix $v = v_c$ and change only $\\delta$. To make it even simpler, $\\delta$ can take only 2 values \n+ 0 when the vehicle is going straight \n+ $\\delta = \\delta_c$ when turning\n\nThis leaves only 2 questions to be answered\n* What should be $v_c$ and $\\delta_c$?\n* When should $\\delta$ change from 0 and back?",
"_____no_output_____"
]
],
[
[
"v_c = 1 # m/s\ndelta_c = np.pi/6 # rad/s\n\n#calculate time taken to finish a quarter turn (pi/2)\n# unlike you would need to take into account v_c and L of the vehicle as well\nt_turn = int(np.pi/2/delta_c)\n\n#calculate the time taken to finish straight segments\n# omega array is to be padded with equivalent zeros\n\nt_straight1, t_straight2, t_straight3 = int(10/v_c), int(5/v_c), int(8/v_c)\n\nall_delta = [0]*t_straight1 + [delta_c]*t_turn + \\\n [0]*t_straight2 + [delta_c]*t_turn + \\\n [0]*t_straight3 + [-delta_c]*t_turn\nall_v = v_c*np.ones_like(all_delta)",
"_____no_output_____"
]
],
[
[
"Let us make a cool function out of this!\n\nTake in as input a generic route and convert it into open-loop commands\n\nInput format: [(\"straight\", 5), (\"right\", 90), (\"straight\", 6), (\"left\", 85)]\n\nOutput: all_v, all_delta",
"_____no_output_____"
]
],
[
[
"def get_open_loop_commands(route, vc=1, deltac=np.pi/12):\n all_delta = []\n for dir, command in route: \n if dir == 'straight':\n t_straight = np.ceil(command/vc).astype('int')\n all_delta += [0]*t_straight\n elif dir == 'right':\n all_delta += [-deltac]*np.ceil(np.deg2rad(command)/deltac).astype('int')\n else:\n all_delta += [deltac]*np.ceil(np.deg2rad(command)/deltac).astype('int')\n all_v = vc*np.ones_like(all_delta)\n print(len(all_delta))\n print(len(all_v))\n return all_v, all_delta",
"_____no_output_____"
]
],
[
[
"### Unit test your function with the following inputs\n\n+ [(\"straight\", 5), (\"right\", 90), (\"straight\", 6), (\"left\", 85)]\n+ $v_c = 1$\n+ $delta_c = \\pi/12$",
"_____no_output_____"
]
],
[
[
"v, delta = get_open_loop_commands([(\"straight\", 5), (\"right\", 90), (\"straight\", 6), (\"left\", 85)], 1, np.pi/12)",
"23\n23\n"
],
[
"robot_trajectory = []\nall_v, all_delta = get_open_loop_commands([(\"straight\", 5), (\"right\", 90), (\"straight\", 6), (\"left\", 85)])\npose = np.array([0, 0, np.pi/2])\nfor v, delta in zip(all_v, all_delta):\n #instruction to take v, w and compute new pose \n robot_trajectory.append(pose) \n pose = bicycle_model(pose,v,delta)\n # store new pose\nprint(robot_trajectory)\nprint(len(robot_trajectory))\nrobot_trajectory = np.array(robot_trajectory)",
"23\n23\n[array([0. , 0. , 1.57079633]), (6.123233995736766e-17, 1.0, 1.5707963267948966), (1.2246467991473532e-16, 2.0, 1.5707963267948966), (1.8369701987210297e-16, 3.0, 1.5707963267948966), (2.4492935982947064e-16, 4.0, 1.5707963267948966), (3.061616997868383e-16, 5.0, 1.5707963267948966), (3.67394039744206e-16, 6.0, 1.2707963267948965), (0.29552020666134005, 6.955336489125606, 0.9707963267948965), (0.8601626800563755, 7.780672104035284, 0.6707963267948964), (1.643489589683859, 8.40228207230595, 0.37079632679489644), (2.5755286756510856, 8.764639826782624, 0.07079632679489645), (3.57302366225514, 8.835377028450326, -0.22920367320510354), (4.546871293133336, 8.608174933757239, -0.22920367320510354), (5.520718924011531, 8.380972839064151, -0.22920367320510354), (6.494566554889726, 8.153770744371064, -0.22920367320510354), (7.468414185767921, 7.926568649677977, -0.22920367320510354), (8.442261816646116, 7.6993665549848895, -0.22920367320510354), (9.416109447524311, 7.472164460291802, -0.22920367320510354), (10.389957078402507, 7.244962365598715, 0.07079632679489645), (11.387452065006562, 7.315699567266417, 0.37079632679489644), (12.319491150973787, 7.678057321743091, 0.6707963267948964), (13.10281806060127, 8.299667290013755, 0.9707963267948965), (13.667460533996307, 9.125002904923432, 1.2707963267948965)]\n23\n"
],
[
"# plot robot trajectory\nplt.figure()\nplt.grid()\nxi, yi, _ = robot_trajectory[0]\nxf, yf, _ = robot_trajectory[-1]\nplt.axes().set_aspect(\"equal\",\"datalim\")\nplt.plot(robot_trajectory[:,0], robot_trajectory[:,1],'black')\n\n#show first and last robot positions with + markers\nplt.plot(xi, yi, 'r+', ms=10)\nplt.plot(xf, yf, 'r+', ms=10)\n",
"/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:6: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.\n \n"
]
],
[
[
"### Shape the turn\nLet us try something cooler than before (though a bit tricky in open loop). Instead of boring circular arcs, change the steering angle so that the robot orientation changes as shown in the equation below\n\n$\\theta = (\\theta_i - \\theta_f) * (1 - 3x^2 + 2\\theta^3) + \\theta_f \\thinspace \\vee x \\in [0,1]$\n\nFirst let us plot this",
"_____no_output_____"
]
],
[
[
"def poly_turn(theta_i, theta_f, n=10):\n x = np.linspace(0, 1, num=n)\n return (theta_i-theta_f) * (1 - 3 * x * x + 2 * (x**3)) + theta_f",
"_____no_output_____"
]
],
[
[
"How does a right turn look? ",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.plot(poly_turn(np.pi/2, 0),'.')\nplt.plot(poly_turn(np.pi/2, 0))",
"_____no_output_____"
]
],
[
[
"\nNow plot a right turn (North to East)",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.plot(poly_turn(np.pi/2, np.pi),'.')\nplt.plot(poly_turn(np.pi/2, np.pi))",
"_____no_output_____"
]
],
[
[
"How does $\\theta$ change when we had constant $\\delta$? Plot it",
"_____no_output_____"
]
],
[
[
"theta_change = np.diff(poly_turn(np.pi/2, np.pi))\nplt.plot(theta_change,'.')\nplt.plot(theta_change)",
"_____no_output_____"
]
],
[
[
"We know the rate of change of $\\theta$ is proportional to $\\delta$. Can you work out the sequence of $\\delta$ to change $\\theta$ as in the cubic polynomial shown above?",
"_____no_output_____"
]
],
[
[
"L = 0.9\nv = 1\ntheta_change = np.diff(poly_turn(np.pi/2, np.pi))\ndelta = np.arctan((L/v)*theta_change)\nprint(delta)",
"[0.04844344 0.12920623 0.18593495 0.21942362 0.23048008 0.21942362\n 0.18593495 0.12920623 0.04844344]\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7475061977e4e8495c4e2e681801181dbf50516 | 846 | ipynb | Jupyter Notebook | notebooks/advanced_topics/1-start_here.ipynb | MLDERES/misbootcamp | 1e965dd1c91651d1b5b7aefc302075ce1cee294f | [
"BSD-3-Clause"
] | 1 | 2020-08-09T16:11:41.000Z | 2020-08-09T16:11:41.000Z | notebooks/advanced_topics/1-start_here.ipynb | MLDERES/misbootcamp | 1e965dd1c91651d1b5b7aefc302075ce1cee294f | [
"BSD-3-Clause"
] | 9 | 2020-08-09T16:12:53.000Z | 2021-08-20T21:35:39.000Z | notebooks/advanced_topics/1-start_here.ipynb | MLDERES/misbootcamp | 1e965dd1c91651d1b5b7aefc302075ce1cee294f | [
"BSD-3-Clause"
] | null | null | null | 22.864865 | 283 | 0.600473 | [
[
[
"# Advanced Topics\n\nIf you have some experience programming in Python, or another langauge, it makes sense to spend a bit of time learning some deeper topics. Understanding how the tools are meant to be used and how other developers think is a valuable way to become a more proficient programmer.",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e747570c8394ceba7192be92d7c2bbecbdf51f16 | 166,066 | ipynb | Jupyter Notebook | Jan_2020/a03_jan13/a01_cleancat15_gc0_gm0.ipynb | bpRsh/shear_analysis_after_dmstack | bfe8cffbf36c2adfb4c6db79f46e7d0949ba148c | [
"Apache-2.0"
] | null | null | null | Jan_2020/a03_jan13/a01_cleancat15_gc0_gm0.ipynb | bpRsh/shear_analysis_after_dmstack | bfe8cffbf36c2adfb4c6db79f46e7d0949ba148c | [
"Apache-2.0"
] | null | null | null | Jan_2020/a03_jan13/a01_cleancat15_gc0_gm0.ipynb | bpRsh/shear_analysis_after_dmstack | bfe8cffbf36c2adfb4c6db79f46e7d0949ba148c | [
"Apache-2.0"
] | 2 | 2019-06-20T18:18:33.000Z | 2020-01-22T18:40:31.000Z | 145.799824 | 69,468 | 0.817344 | [
[
[
"# Table of Contents\n <p><div class=\"lev1 toc-item\"><a href=\"#Introduction\" data-toc-modified-id=\"Introduction-1\"><span class=\"toc-item-num\">1 </span>Introduction</a></div><div class=\"lev1 toc-item\"><a href=\"#Imports\" data-toc-modified-id=\"Imports-2\"><span class=\"toc-item-num\">2 </span>Imports</a></div><div class=\"lev1 toc-item\"><a href=\"#Load-the-final-text-cleancat15-data\" data-toc-modified-id=\"Load-the-final-text-cleancat15-data-3\"><span class=\"toc-item-num\">3 </span>Load the final text cleancat15 data</a></div><div class=\"lev1 toc-item\"><a href=\"#Plot-g00-vs-g20\" data-toc-modified-id=\"Plot-g00-vs-g20-4\"><span class=\"toc-item-num\">4 </span>Plot g00 vs g20</a></div>",
"_____no_output_____"
],
[
"<div class=\"alert alert-block alert-success\">\n<b>Kernel Author:</b> <br>\n<a href=\"https://bhishanpdl.github.io/\" , target=\"_blank\">Bhishan Poudel, Ph.D Contd. Astrophysics</a>\n \n Date: Jan 10, 2020 \n Update: Jan 13, 2020\n</div>",
"_____no_output_____"
],
[
"# Introduction\nDate: Dec 10, 2019 Mon\n\n**Update** \n1. Looked at gm0 vs gc0 (and gm1 vs gc1) 45 degree line and removed outliers.\n2. Find the weights for g_sq for given magnitude bins using smooth fitting curve.\n\n\n**Usual Filtering** \n```python\ndf = df.query('calib_psfCandidate == 0.0')\ndf = df.query('deblend_nChild == 0.0')\ndf['ellip'] = np.hypot( df['ext_shapeHSM_HsmShapeRegauss_e1'] ,\n df['ext_shapeHSM_HsmShapeRegauss_e2'] )\ndf = df.query('ellip < 2.0') # it was 1.5 before\n\n#select only few columns after filtering:\ncols_select = ['base_SdssCentroid_x', 'base_SdssCentroid_y',\n 'base_SdssCentroid_xSigma','base_SdssCentroid_ySigma',\n 'ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2',\n 'base_SdssShape_flux']\ndf = df[cols_select] \n\n# drop all nans\ndf = df.dropna()\n\n# additional columns\ndf['radius'] = df.eval(\"\"\" ( (ext_shapeHSM_HsmSourceMoments_xx * ext_shapeHSM_HsmSourceMoments_yy) \\\n - (ext_shapeHSM_HsmSourceMoments_xy**2 ) )**0.25 \"\"\")\n```\n\n**Shape filtering** \nhttps://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb\n```python\ndf = df.query('ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3')\ndf = df.query('ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4')\ndf = df.query('ext_shapeHSM_HsmShapeRegauss_flag== 0.0')\n```\n\n**Filter strongly lensed objects** \n- Take the objects with centroids >154 pixels (remove strong lens objects).\n```python\n# exclude strong lens objects <=154 distance\n# The shape of lsst.fits file is 3998,3998 and center is 1699,1699.\ndf['x_center'] = 1699\ndf['y_center'] = 1699\ndf['distance'] = ( (df['x[0]'] - df['x_center'])**2 + (df['x[1]'] - df['y_center'])**2 )**0.5\ndf = df[df.distance > 154]\n```\n\n**Imcat script** \n```bash\n\n# create new columns and cleaning (four files)\nlc -C -n fN -n id -N '1 2 x' -N '1 2 errx' -N '1 2 g' -n ellip -n flux -n radius < \"${M9T}\".txt | lc +all 'mag = %flux log10 -2.5 *' | cleancat 15 | lc +all -r 'mag' > \"${M9C}\".cat\n\n\n# merge 4 catalogs\nmergecats 5 \"${MC}\".cat \"${M9C}\".cat \"${LC}\".cat \"${L9C}\".cat > ${catalogs}/merge.cat &&\n \n \nlc -b +all \n'x = %x[0][0] %x[1][0] + %x[2][0] + %x[3][0] + 4 / %x[0][1] %x[1][1] + %x[2][1] + %x[3][1] + 4 / 2 vector'\n'gm = %g[0][0] %g[1][0] + 2 / %g[0][1] %g[1][1] + 2 / 2 vector' \n'gc = %g[2][0] %g[3][0] + 2 / %g[2][1] %g[3][1] + 2 / 2 vector' \n'gmd = %g[0][0] %g[1][0] - 2 / %g[0][1] %g[1][1] - 2 / 2 vector' \n'gcd = %g[2][0] %g[3][0] - 2 / %g[2][1] %g[3][1] - 2 / 2 vector' \n< ${catalogs}/merge.cat > ${final}/final_${i}.cat\n```\n**Notes** \n\nfinal_text.txt is created by imcat program after merging four lsst files (m,m9,l,l9) after cleaning.",
"_____no_output_____"
],
[
"# Imports",
"_____no_output_____"
]
],
[
[
"import json, os,sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nsns.set(color_codes=True)\n\nimport plotly\nimport ipywidgets\n\npd.set_option('display.max_columns',200)\n\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n%matplotlib inline\n\nprint([(x.__name__, x.__version__) for x in [np,pd,sns,plotly,ipywidgets]])",
"[('numpy', '1.17.4'), ('pandas', '0.24.2'), ('seaborn', '0.9.0'), ('plotly', '4.1.1'), ('ipywidgets', '6.0.0')]\n"
],
[
"%%javascript\nIPython.OutputArea.auto_scroll_threshold = 9999;",
"_____no_output_____"
]
],
[
[
"# Load the final text cleancat15 data\n\n```\ng_sq = g00 g00 + g10 g10\ngmd_sq = gmd0**2 + gmd1**2\n\n```",
"_____no_output_____"
]
],
[
[
"!head -2 ../data/cleancat/final_text_cleancat15_000_167.txt",
"# fN[0][0] fN[1][0] fN[2][0] fN[3][0] id[0][0] id[1][0] id[2][0] id[3][0] x[0] x[1] errx[0][0] errx[0][1] errx[1][0] errx[1][1] errx[2][0] errx[2][1] errx[3][0] errx[3][1] g[0][0] g[0][1] g[1][0] g[1][1] g[2][0] g[2][1] g[3][0] g[3][1] ellip[0][0] ellip[1][0] ellip[2][0] ellip[3][0] flux[0][0] flux[1][0] flux[2][0] flux[3][0] radius[0][0] radius[1][0] radius[2][0] radius[3][0] mag[0][0] mag[1][0] mag[2][0] mag[3][0] gm[0] gm[1] gc[0] gc[1] gmd[0] gmd[1] gcd[0] gcd[1]\r\n 0 0 0 0 5301 5314 5231 5117 88.17075 1847.1934 0.0196 0.0249 0.0227 0.0216 0.02 0.0256 0.0231 0.022 -0.4253 0.1855 0.273 -0.3021 -0.4257 0.1904 0.2778 -0.3155 0.4639939 0.40717737 0.46633963 0.42037256 79841.47 82737.354 80303.923 83923.908 5.1869534 5.2938582 5.2678266 5.390682 -12.255571 -12.294254 -12.261842 -12.309714 -0.07615 -0.0583 -0.07395 -0.06255 -0.34915 0.2438 -0.35175 0.25295\r\n"
],
[
"names = \"fN[0][0] fN[1][0] fN[2][0] fN[3][0] id[0][0] id[1][0] id[2][0] id[3][0] x[0] x[1] errx[0][0] errx[0][1] errx[1][0] errx[1][1] errx[2][0] errx[2][1] errx[3][0] errx[3][1] g[0][0] g[0][1] g[1][0] g[1][1] g[2][0] g[2][1] g[3][0] g[3][1] ellip[0][0] ellip[1][0] ellip[2][0] ellip[3][0] flux[0][0] flux[1][0] flux[2][0] flux[3][0] radius[0][0] radius[1][0] radius[2][0] radius[3][0] mag[0][0] mag[1][0] mag[2][0] mag[3][0] gm[0] gm[1] gc[0] gc[1] gmd[0] gmd[1] gcd[0] gcd[1]\"\nprint(names)",
"fN[0][0] fN[1][0] fN[2][0] fN[3][0] id[0][0] id[1][0] id[2][0] id[3][0] x[0] x[1] errx[0][0] errx[0][1] errx[1][0] errx[1][1] errx[2][0] errx[2][1] errx[3][0] errx[3][1] g[0][0] g[0][1] g[1][0] g[1][1] g[2][0] g[2][1] g[3][0] g[3][1] ellip[0][0] ellip[1][0] ellip[2][0] ellip[3][0] flux[0][0] flux[1][0] flux[2][0] flux[3][0] radius[0][0] radius[1][0] radius[2][0] radius[3][0] mag[0][0] mag[1][0] mag[2][0] mag[3][0] gm[0] gm[1] gc[0] gc[1] gmd[0] gmd[1] gcd[0] gcd[1]\n"
],
[
"names = ['fN[0][0]','fN[1][0]','fN[2][0]','fN[3][0]',\n 'id[0][0]','id[1][0]','id[2][0]','id[3][0]',\n 'x[0]','x[1]',\n 'errx[0][0]','errx[0][1]','errx[1][0]','errx[1][1]','errx[2][0]',\n 'errx[2][1]','errx[3][0]','errx[3][1]',\n 'g[0][0]','g[0][1]','g[1][0]','g[1][1]','g[2][0]','g[2][1]','g[3][0]','g[3][1]',\n 'ellip[0][0]','ellip[1][0]','ellip[2][0]','ellip[3][0]',\n 'flux[0][0]','flux[1][0]','flux[2][0]','flux[3][0]',\n 'radius[0][0]','radius[1][0]','radius[2][0]','radius[3][0]',\n 'mag[0][0]','mag[1][0]','mag[2][0]','mag[3][0]',\n 'gm[0]','gm[1]','gc[0]', 'gc[1]',\n 'gmd[0]','gmd[1]','gcd[0]','gcd[1]']",
"_____no_output_____"
],
[
"def read_data(ifile):\n df = pd.read_csv(ifile,comment='#',engine='python',sep=r'\\s\\s+',\n header=None,names=names)\n\n print(df.shape)\n\n # new columns\n # df['g_sq'] = df['g[0][0]'] **2 + df['g[1][0]']**2 # only for imcat 00 and 10\n # df['gmd_sq'] = df['gmd[0]'] **2 + df['gmd[1]']**2\n\n df['g_sq'] = df['g[0][0]'] **2 + df['g[0][1]']**2\n df['gmd_sq'] = df['gmd[0]'] **2 + df['gmd[1]']**2\n\n df['gm_sq'] = df['gm[0]']**2 + df['gm[1]']**2\n df['gc_sq'] = df['gc[0]']**2 + df['gc[1]']**2\n\n df['mag_mono'] = (df['mag[0][0]'] + df['mag[1][0]'] ) / 2\n df['mag_chro'] = (df['mag[2][0]'] + df['mag[3][0]'] ) / 2\n \n return df",
"_____no_output_____"
],
[
"file_path = f'../data/cleancat/final_text_cleancat15_000_167.txt'\ndf = read_data(file_path)\ndf.head()",
"(90623, 50)\n"
]
],
[
[
"# Plot g00 vs g20",
"_____no_output_____"
]
],
[
[
"df.head(2)",
"_____no_output_____"
],
[
"def plot_g00_20(df,start,end):\n fig,ax = plt.subplots(1,2,figsize=(12,8))\n x = df['gm[0]']\n y = df['gc[0]']-df['gm[0]']\n \n xx = df['g[0][0]']\n yy = df['g[2][0]']-df['g[0][0]']\n\n ax[0].scatter(x,y)\n ax[1].scatter(xx,yy)\n \n ax[0].set_ylabel('gc0-gm0')\n ax[0].set_xlabel('gm0')\n \n ax[1].set_ylabel('g20-g00')\n ax[1].set_xlabel('g00')\n\n plt.suptitle(f'gm vs gc plot from {start} to {end}',weight='bold',fontsize=24);",
"_____no_output_____"
],
[
"start = 0\nend = 99\nfile_path = f'../data/cleancat/final_text_cleancat15_{start:03d}_{end:03d}.txt'\ndf = read_data(file_path)\n\nplot_g00_20(df,start,end)",
"(56861, 50)\n"
],
[
"start = 0\nend = 167\nfile_path = f'../data/cleancat/final_text_cleancat15_{start:03d}_{end:03d}.txt'\ndf = read_data(file_path)\n\nplot_g00_20(df,start,end)",
"(90623, 50)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e747602e402a2170d9412b339f4bc2933e2a258f | 107,184 | ipynb | Jupyter Notebook | VacationPy/VacationPy.ipynb | mpadgett004/pythonAPI-challenge | f3ce97c3ec8834b14684bfae2853ff690217b53b | [
"ADSL"
] | null | null | null | VacationPy/VacationPy.ipynb | mpadgett004/pythonAPI-challenge | f3ce97c3ec8834b14684bfae2853ff690217b53b | [
"ADSL"
] | null | null | null | VacationPy/VacationPy.ipynb | mpadgett004/pythonAPI-challenge | f3ce97c3ec8834b14684bfae2853ff690217b53b | [
"ADSL"
] | null | null | null | 37.887593 | 167 | 0.492685 | [
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n\n# Import API key\nfrom api_keys import g_key",
"_____no_output_____"
],
[
"# Trying to get map to show\n!jupyter nbextension enable --py --sys-prefix widgetsnbextension\n!jupyter nbextension enable --py --sys-prefix gmaps",
"Config option `kernel_spec_manager_class` not recognized by `EnableNBExtensionApp`.\nEnabling notebook extension jupyter-js-widgets/extension...\n - Validating: \u001b[32mOK\u001b[0m\nConfig option `kernel_spec_manager_class` not recognized by `EnableNBExtensionApp`.\nEnabling notebook extension jupyter-gmaps/extension...\n - Validating: \u001b[32mOK\u001b[0m\n"
],
[
"# Reading in CSV file created from WeatherPy data and turning into a data frame\nfile = \"WeatherData.csv\"\n\nfile_df = pd.read_csv(file)\nfile_df = file_df.drop(columns=[\"Unnamed: 0\"])\nfile_df",
"_____no_output_____"
],
[
"# Configuring gmaps\ngmaps.configure(api_key=g_key)",
"_____no_output_____"
],
[
"# Setting variable to the values of the cordinates\nlocations = file_df[[\"Latitude\", \"Longitude\"]].astype(float)\nlocations\n\n# Dropping null values\nfile_df = file_df.dropna()",
"_____no_output_____"
],
[
"# Setting variable to the humidity values\nhumidity = file_df[\"Humidity\"].astype(float)\n\n# Creating layer and displaying map\nm = gmaps.figure()\nlayer = gmaps.heatmap_layer(locations, weights=humidity,\n dissipating=False, max_intensity=max(file_df[\"Humidity\"]),\n point_radius=2)\nm.add_layer(layer)\nm",
"_____no_output_____"
],
[
"# Using loc and conditionals to filter and find the ideal cities\n# A max temperature lower than 80 degrees but higher than 70.\n# Wind speed less than 10 mph.\n# Cloudiness below 30%\nideal_cities_df = file_df.loc[((file_df[\"Max Temp\"] < 80) & (file_df[\"Max Temp\"] > 70)) & (file_df[\"Wind Speed\"] < 10) & (file_df[\"Cloudiness\"] < 30)]\n\nideal_cities_df",
"_____no_output_____"
],
[
"# Creating hotel data frame that looks at just the city, latitude, longitude, and country\nhotel_df = file_df[[\"City\", \"Latitude\", \"Longitude\", \"Country\"]]\nhotel_df\n\n# Adding column for the hotels and setting to a empty string\nhotel_df[\"Hotel\"] = \"\"\nhotel_df",
"/Users/matthewpadgett/opt/anaconda3/envs/PythonData/lib/python3.6/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n after removing the cwd from sys.path.\n"
],
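The SettingWithCopyWarning above appears because `hotel_df` is a slice of `file_df`. A minimal sketch of the same step that avoids the warning is to take an explicit copy before adding the new column:

```python
# Copy the slice so the new column is added to an independent DataFrame
hotel_df = file_df[["City", "Latitude", "Longitude", "Country"]].copy()

# No SettingWithCopyWarning when assigning on a copy
hotel_df["Hotel"] = ""
```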
[
"# Dictionary to hold the parameters needed when searching\nparams = {\n \"radius\": 5000,\n \"types\": \"lodging\",\n \"key\": g_key\n}\n\n# For loop to iterate through the row of the hotel data frame\nfor index, row in hotel_df.iterrows():\n lat = row[\"Latitude\"]\n lng = row[\"Longitude\"]\n \n # Adding to the params dictionary\n params[\"location\"] = f\"{lat},{lng}\"\n \n # URL used to search with\n base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\n # Make API request to JSON\n print(f\"Retrieving Results for Index {index}: {row['City']}.\")\n response = requests.get(base_url, params=params).json()\n \n \n # Setting variable to hold the results\n results = response[\"results\"]\n \n # Try statement to use the for loop goes through each city\n try:\n print(f\"Closest hotel is {results[0]['name']}.\")\n hotel_df.loc[index, \"Hotel\"] = results[0][\"name\"]\n \n except (KeyError, IndexError):\n print(\"Missing field/result... skipping.\")\n \n print(\"------------\")",
"Retrieving Results for Index 0: Bilibino.\nClosest hotel is Gostinitsa Severyanka.\n------------\nRetrieving Results for Index 1: Cherskiy.\nClosest hotel is Gostinitsa.\n------------\nRetrieving Results for Index 2: San Cristobal.\nClosest hotel is Pirineos.\n------------\nRetrieving Results for Index 3: Tegul'det.\nClosest hotel is Gostinitsa Kedr.\n------------\nRetrieving Results for Index 4: Dingli.\nClosest hotel is The Xara Palace Relais & Chateaux.\n------------\nRetrieving Results for Index 5: Ushuaia.\nClosest hotel is Albatros Hotel.\n------------\nRetrieving Results for Index 6: Busselton.\nClosest hotel is Observatory Guest House.\n------------\nRetrieving Results for Index 7: Castro.\nClosest hotel is CHACARA BAILLY.\n------------\nRetrieving Results for Index 8: Vila Velha.\nClosest hotel is Hotel Vitória Palace.\n------------\nRetrieving Results for Index 9: East London.\nClosest hotel is Tu Casa.\n------------\nRetrieving Results for Index 10: Severo-Kuril'sk.\nClosest hotel is Kak Doma.\n------------\nRetrieving Results for Index 11: Saint-Pierre.\nClosest hotel is Lindsey Hôtel.\n------------\nRetrieving Results for Index 12: Lompoc.\nClosest hotel is Holiday Inn Express Lompoc.\n------------\nRetrieving Results for Index 13: Carnarvon.\nClosest hotel is Hospitality Carnarvon.\n------------\nRetrieving Results for Index 14: Atuona.\nClosest hotel is Villa Enata.\n------------\nRetrieving Results for Index 15: Punta Arenas.\nClosest hotel is Hotel Dreams Del Estrecho.\n------------\nRetrieving Results for Index 16: Grindavik.\nClosest hotel is Northern Light Inn & Max's Restaurant.\n------------\nRetrieving Results for Index 17: Ponta do Sol.\nClosest hotel is Hotel do Campo.\n------------\nRetrieving Results for Index 18: Butaritari.\nClosest hotel is Isles Sunset Lodge.\n------------\nRetrieving Results for Index 19: Sheltozero.\nClosest hotel is The village of Vanino.\n------------\nRetrieving Results for Index 20: Albany.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 21: Irbeyskoye.\nClosest hotel is Gostinitsa \"Agul\".\n------------\nRetrieving Results for Index 22: Rawson.\nClosest hotel is Hosteria Sampedro.\n------------\nRetrieving Results for Index 23: Puerto Ayora.\nClosest hotel is Finch Bay Galapagos Hotel.\n------------\nRetrieving Results for Index 24: Jinchang.\nClosest hotel is Mingshihui Jinchang International Hotel.\n------------\nRetrieving Results for Index 25: Saint-Philippe.\nClosest hotel is Chambres d'hôte \"La Trinité\".\n------------\nRetrieving Results for Index 26: Kapaa.\nClosest hotel is Sheraton Kauai Resort at Coconut Beach.\n------------\nRetrieving Results for Index 27: Awjilah.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 28: Qaanaaq.\nClosest hotel is Qaanaaq Hotel.\n------------\nRetrieving Results for Index 29: Cabo San Lucas.\nClosest hotel is Hotel Tesoro Los Cabos.\n------------\nRetrieving Results for Index 30: Taoudenni.\nMissing field/result... 
skipping.\n------------\nRetrieving Results for Index 31: George Town.\nClosest hotel is Cititel Penang.\n------------\nRetrieving Results for Index 32: Montes Claros.\nClosest hotel is ibis Montes Claros Shopping.\n------------\nRetrieving Results for Index 33: Mahébourg.\nClosest hotel is Shandrani Beachcomber Resort & Spa.\n------------\nRetrieving Results for Index 34: Mataura.\nClosest hotel is Ellie's Villa.\n------------\nRetrieving Results for Index 35: Rio Grande.\nClosest hotel is Hotel Atlântico Rio Grande.\n------------\nRetrieving Results for Index 36: Bluff.\nClosest hotel is Bluff Homestead - Guesthouse & Campervan Park.\n------------\nRetrieving Results for Index 37: Yellowknife.\nClosest hotel is The Explorer Hotel.\n------------\nRetrieving Results for Index 38: Litovko.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 39: Rikitea.\nClosest hotel is Pension Maro'i.\n------------\nRetrieving Results for Index 40: Kahului.\nClosest hotel is Maui Seaside Hotel.\n------------\nRetrieving Results for Index 41: Den Helder.\nClosest hotel is Hotel Den Helder.\n------------\nRetrieving Results for Index 42: Makakilo City.\nClosest hotel is Marriott's Ko Olina Beach Club.\n------------\nRetrieving Results for Index 43: Tual.\nClosest hotel is Grand Vilia Hotel.\n------------\nRetrieving Results for Index 44: Saraland.\nClosest hotel is Microtel Inn & Suites by Wyndham Saraland/North Mobile.\n------------\nRetrieving Results for Index 45: Antofagasta.\nClosest hotel is Holiday Inn Express Antofagasta.\n------------\nRetrieving Results for Index 46: Mehamn.\nClosest hotel is Mehamn Arctic Hotel AS.\n------------\nRetrieving Results for Index 47: Dhilwan.\nClosest hotel is Janj Ghar.\n------------\nRetrieving Results for Index 48: San Joaquin.\nClosest hotel is University Plaza Waterfront Hotel.\n------------\nRetrieving Results for Index 49: Vaini.\nClosest hotel is Keleti Beach Resort.\n------------\nRetrieving Results for Index 50: Kindu.\nClosest hotel is Résidence Okanda.\n------------\nRetrieving Results for Index 52: Dikson.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 53: Nikolskoye.\nClosest hotel is Tourist House - Sablino.\n------------\nRetrieving Results for Index 54: Rocha.\nClosest hotel is Beleza Pura B&B.\n------------\nRetrieving Results for Index 55: Camabatela.\nClosest hotel is Pensão E Restaurantes.\n------------\nRetrieving Results for Index 56: Pevek.\nClosest hotel is Severnoye Zoloto.\n------------\nRetrieving Results for Index 57: Belaya Gora.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 58: Port Alfred.\nClosest hotel is The Halyards Hotel.\n------------\nRetrieving Results for Index 59: Chokurdakh.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 60: Hermanus.\nClosest hotel is Misty Waves Boutique Hotel.\n------------\nRetrieving Results for Index 61: Hobart.\nClosest hotel is St Ives Hobart Accommodation Tasmania.\n------------\nRetrieving Results for Index 62: Srednekolymsk.\nMissing field/result... 
skipping.\n------------\nRetrieving Results for Index 63: Haukipudas.\nClosest hotel is Best Western Hotel Samantta.\n------------\nRetrieving Results for Index 64: Pitimbu.\nClosest hotel is Reserva do Abiaí.\n------------\nRetrieving Results for Index 65: Kushima.\nClosest hotel is Hanaoka.\n------------\nRetrieving Results for Index 66: Alofi.\nClosest hotel is Taloa Heights.\n------------\nRetrieving Results for Index 67: Alamogordo.\nClosest hotel is Fairfield Inn & Suites by Marriott Alamogordo.\n------------\nRetrieving Results for Index 68: Havelock.\nClosest hotel is Sherwood Motel.\n------------\nRetrieving Results for Index 69: Katsuura.\nClosest hotel is Katsuura Hotel Mikazuki.\n------------\nRetrieving Results for Index 70: Yar-Sale.\nClosest hotel is Yalemd.\n------------\nRetrieving Results for Index 71: Mar del Plata.\nClosest hotel is Gran Hotel Mar del Plata.\n------------\nRetrieving Results for Index 72: Sumbe.\nClosest hotel is Hotel Ritz Sumbe.\n------------\nRetrieving Results for Index 73: Nago.\nClosest hotel is Hotel Yugaf Inn Okinawa.\n------------\nRetrieving Results for Index 74: São Miguel do Araguaia.\nClosest hotel is Hotel Executivo Palace.\n------------\nRetrieving Results for Index 75: Sinnamary.\nClosest hotel is ALETHEIA Appartement Sinnamary.\n------------\nRetrieving Results for Index 76: São Filipe.\nClosest hotel is Tortuga B&B.\n------------\nRetrieving Results for Index 77: Chui.\nClosest hotel is Nuevo Hotel Plaza.\n------------\nRetrieving Results for Index 78: Galle.\nClosest hotel is Amangalla.\n------------\nRetrieving Results for Index 79: Cartagena del Chairá.\nClosest hotel is HOTEL LA ROCA.\n------------\nRetrieving Results for Index 80: Lebu.\nClosest hotel is Hostal Las Lilas.\n------------\nRetrieving Results for Index 81: Santa Rosa.\nClosest hotel is Erlyn’s Sta Rosa City Home.\n------------\nRetrieving Results for Index 82: Marsá Maţrūḩ.\nClosest hotel is Beau Site.\n------------\nRetrieving Results for Index 83: Tiksi.\nClosest hotel is Arktika.\n------------\nRetrieving Results for Index 84: Lagoa.\nClosest hotel is Casa Das Faias.\n------------\nRetrieving Results for Index 85: Prachuap Khiri Khan.\nClosest hotel is สมบัติยาย.\n------------\nRetrieving Results for Index 86: Westport.\nClosest hotel is Norwalk Inn.\n------------\nRetrieving Results for Index 87: Hithadhoo.\n"
],
[
"hotel_df",
"_____no_output_____"
],
[
"# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\nhotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]\nlocations = hotel_df[[\"Latitude\", \"Longitude\"]]\nmarkers = gmaps.marker_layer(locations)\n\n# Create hotel symbol layer\nhotel_layer = gmaps.symbol_layer(\n locations, fill_color='rgba(0, 150, 0, 0.4)',\n stroke_color='rgba(0, 0, 150, 0.4)', scale=2,\n info_box_content=hotel_info\n)",
"_____no_output_____"
],
[
"# Adding layer to map and display figure\nm.add_layer(markers)\nm.add_layer(hotel_layer)\nm.add_layer(layer)\nm",
"_____no_output_____"
],
[
"# Creating new data frame to just look at the ideal city data\nhotel2_df = ideal_cities_df[[\"City\", \"Latitude\", \"Longitude\", \"Country\"]]\nhotel2_df\n\n# Adding column for the hotels and setting to a empty string\nhotel2_df[\"Hotel\"] = \"\"\nhotel2_df",
"/Users/matthewpadgett/opt/anaconda3/envs/PythonData/lib/python3.6/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n"
],
[
"# Dictionary to hold the parameters needed when searching\nparams = {\n \"radius\": 5000,\n \"types\": \"lodging\",\n \"key\": g_key\n}\n\n# For loop to iterate through the row of the second hotel data frame\nfor index, row in hotel2_df.iterrows():\n lat = row[\"Latitude\"]\n lng = row[\"Longitude\"]\n \n # Adding to the params dictionary\n params[\"location\"] = f\"{lat},{lng}\"\n \n # URL used to search with\n base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n\n # Make API request to JSON\n print(f\"Retrieving Results for Index {index}: {row['City']}.\")\n response = requests.get(base_url, params=params).json()\n \n \n # Setting variable to hold the results\n results = response[\"results\"]\n \n # Try statement to use the for loop goes through each city\n try:\n print(f\"Closest hotel is {results[0]['name']}.\")\n hotel2_df.loc[index, \"Hotel\"] = results[0][\"name\"]\n \n except (KeyError, IndexError):\n print(\"Missing field/result... skipping.\")\n \n print(\"------------\")",
"Retrieving Results for Index 171: Dinguiraye.\nMissing field/result... skipping.\n------------\nRetrieving Results for Index 426: Arlit.\nClosest hotel is Hôtel Telwa Bungalow.\n------------\nRetrieving Results for Index 538: Solenzo.\nMissing field/result... skipping.\n------------\n"
],
[
"# Using the template add the hotel marks to the heatmap\ninfo_box_template2 = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\nideal_hotel_info = [info_box_template2.format(**row) for index, row in hotel2_df.iterrows()]\nlocations = hotel2_df[[\"Latitude\", \"Longitude\"]]\nmarkers = gmaps.marker_layer(locations)\n\n# Create hotel symbol layer\nhotel_layer2 = gmaps.symbol_layer(\n locations, fill_color='rgba(0, 150, 0, 0.4)',\n stroke_color='rgba(0, 0, 150, 0.4)', scale=2,\n info_box_content=ideal_hotel_info\n)",
"_____no_output_____"
],
[
"# Adding layers that are specifc to the hotel found in the ideal cities\n# Also add the humdity layer\nm2 = gmaps.figure()\nm2.add_layer(markers)\nm2.add_layer(hotel_layer2)\nm2.add_layer(layer)\nm2",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e74768ea31bf28e885425b0242c17153f5763472 | 9,240 | ipynb | Jupyter Notebook | deep-learning/tensor-flow-examples/notebooks/3_neural_networks/alexnet.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | deep-learning/tensor-flow-examples/notebooks/3_neural_networks/alexnet.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | deep-learning/tensor-flow-examples/notebooks/3_neural_networks/alexnet.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | 31.972318 | 185 | 0.538745 | [
[
[
"# AlexNet in TensorFlow\n\nCredits: Forked from [TensorFlow-Examples](https://github.com/aymericdamien/TensorFlow-Examples) by Aymeric Damien\n\n## Setup\n\nRefer to the [setup instructions](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/Setup_TensorFlow.md)",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)",
"Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\nExtracting /tmp/data/train-images-idx3-ubyte.gz\nSuccessfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nSuccessfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nSuccessfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\n"
],
[
"# Parameters\nlearning_rate = 0.001\ntraining_iters = 300000\nbatch_size = 64\ndisplay_step = 100",
"_____no_output_____"
],
[
"# Network Parameters\nn_input = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\ndropout = 0.8 # Dropout, probability to keep units",
"_____no_output_____"
],
[
"# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_input])\ny = tf.placeholder(tf.float32, [None, n_classes])\nkeep_prob = tf.placeholder(tf.float32) # dropout (keep probability)",
"_____no_output_____"
],
[
"# Create AlexNet model\ndef conv2d(name, l_input, w, b):\n return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], \n padding='SAME'),b), name=name)\n\ndef max_pool(name, l_input, k):\n return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1], \n padding='SAME', name=name)\n\ndef norm(name, l_input, lsize=4):\n return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)\n\ndef alex_net(_X, _weights, _biases, _dropout):\n # Reshape input picture\n _X = tf.reshape(_X, shape=[-1, 28, 28, 1])\n\n # Convolution Layer\n conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])\n # Max Pooling (down-sampling)\n pool1 = max_pool('pool1', conv1, k=2)\n # Apply Normalization\n norm1 = norm('norm1', pool1, lsize=4)\n # Apply Dropout\n norm1 = tf.nn.dropout(norm1, _dropout)\n\n # Convolution Layer\n conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])\n # Max Pooling (down-sampling)\n pool2 = max_pool('pool2', conv2, k=2)\n # Apply Normalization\n norm2 = norm('norm2', pool2, lsize=4)\n # Apply Dropout\n norm2 = tf.nn.dropout(norm2, _dropout)\n\n # Convolution Layer\n conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])\n # Max Pooling (down-sampling)\n pool3 = max_pool('pool3', conv3, k=2)\n # Apply Normalization\n norm3 = norm('norm3', pool3, lsize=4)\n # Apply Dropout\n norm3 = tf.nn.dropout(norm3, _dropout)\n\n # Fully connected layer\n # Reshape conv3 output to fit dense layer input\n dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]]) \n # Relu activation\n dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')\n \n # Relu activation\n dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2') \n\n # Output, class prediction\n out = tf.matmul(dense2, _weights['out']) + _biases['out']\n return out",
"_____no_output_____"
],
[
"# Store layers weight & bias\nweights = {\n 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),\n 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),\n 'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),\n 'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),\n 'wd2': tf.Variable(tf.random_normal([1024, 1024])),\n 'out': tf.Variable(tf.random_normal([1024, 10]))\n}\nbiases = {\n 'bc1': tf.Variable(tf.random_normal([64])),\n 'bc2': tf.Variable(tf.random_normal([128])),\n 'bc3': tf.Variable(tf.random_normal([256])),\n 'bd1': tf.Variable(tf.random_normal([1024])),\n 'bd2': tf.Variable(tf.random_normal([1024])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n}",
"_____no_output_____"
],
[
"# Construct model\npred = alex_net(x, weights, biases, keep_prob)",
"_____no_output_____"
],
[
"# Define loss and optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)",
"_____no_output_____"
],
[
"# Evaluate model\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))",
"_____no_output_____"
],
[
"# Initializing the variables\ninit = tf.global_variables_initializer()",
"_____no_output_____"
],
[
"# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n step = 1\n # Keep training until reach max iterations\n while step * batch_size < training_iters:\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Fit training using batch data\n sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})\n if step % display_step == 0:\n # Calculate batch accuracy\n acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n # Calculate batch loss\n loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n print (\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" \\\n + \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \"{:.5f}\".format(acc))\n step += 1\n print (\"Optimization Finished!\")\n # Calculate accuracy for 256 mnist test images\n print (\"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], \n y: mnist.test.labels[:256], \n keep_prob: 1.}))",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7478142c812bc051cd2b87425282e69682557cb | 204,888 | ipynb | Jupyter Notebook | t81_558_class_06_2_cnn.ipynb | sanjayssane/t81_558_deep_learning | dd186c240f9d0faeda70e81648d439a0f63ea8cc | [
"Apache-2.0"
] | 1 | 2020-12-15T19:35:48.000Z | 2020-12-15T19:35:48.000Z | t81_558_class_06_2_cnn.ipynb | sanjayssane/t81_558_deep_learning | dd186c240f9d0faeda70e81648d439a0f63ea8cc | [
"Apache-2.0"
] | null | null | null | t81_558_class_06_2_cnn.ipynb | sanjayssane/t81_558_deep_learning | dd186c240f9d0faeda70e81648d439a0f63ea8cc | [
"Apache-2.0"
] | null | null | null | 64.920152 | 59,446 | 0.665486 | [
[
[
"# T81-558: Applications of Deep Neural Networks\n**Module 6: Convolutional Neural Networks (CNN) for Computer Vision**\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).",
"_____no_output_____"
],
[
"# Module 6 Material\n\n* Part 6.1: Image Processing in Python [[Video]](https://www.youtube.com/watch?v=Oe0-hX4KSZQ&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=18) [[Notebook]](t81_558_class_06_1_python_images.ipynb)\n* **Part 6.2: Keras Neural Networks for Digits and Fashion MINST** [[Video]](https://www.youtube.com/watch?v=GDlI-3O5r6I&index=19&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_06_2_cnn.ipynb)\n* Part 6.3: Implementing a ResNet in Keras [[Video]](https://www.youtube.com/watch?v=zUZRUTJbYm8&index=20&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_06_3_resnet.ipynb)\n* Part 6.4: Using Your Own Images with Keras [[Video]](https://www.youtube.com/watch?v=zUZRUTJbYm8&index=20&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_06_4_keras_images.ipynb)\n* Part 6.5: Recognizing Multiple Images with YOLO Darknet [[Video]](https://www.youtube.com/watch?v=zUZRUTJbYm8&index=20&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_06_5_yolo.ipynb)",
"_____no_output_____"
]
],
[
[
"# Nicely formatted time string\ndef hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60\n return f\"{h}:{m:>02}:{s:>05.2f}\"",
"_____no_output_____"
]
],
[
[
"# Part 6.2: Keras Neural Networks for Digits and Fashion MINST\n\n# Computer Vision\n\nThis class will focus on computer vision. There are some important differences and similarities with previous neural networks.\n\n* We will usually use classification, though regression is still an option.\n* The input to the neural network is now 3D (height, width, color)\n* Data are not transformed, no z-scores or dummy variables.\n* Processing time is much longer.\n* We now have different layer times: dense layers (just like before), convolution layers and max pooling layers.\n* Data will no longer arrive as CSV files. TensorFlow provides some utilities for going directly from image to the input for a neural network.\n\n\n# Computer Vision Data Sets\n\nThere are many data sets for computer vision. Two of the most popular are the MNIST digits data set and the CIFAR image data sets.\n\n### MNIST Digits Data Set\n\nThe [MNIST Digits Data Set](http://yann.lecun.com/exdb/mnist/) is very popular in the neural network research community. A sample of it can be seen here:\n\n\n\nThis data set was generated from scanned forms.\n\n\n\n### MNIST Fashion Data Set\n\n[Fashion-MNIST](https://www.kaggle.com/zalando-research/fashionmnist) is a dataset of [Zalando](https://jobs.zalando.com/tech/)'s article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. Fashion-MNIST is intended to serve as a direct **drop-in replacement** for the original [MNIST dataset](http://yann.lecun.com/exdb/mnist/) for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits.\n\n\n\n### CIFAR Data Set\n\nThe [CIFAR-10 and CIFAR-100](https://www.cs.toronto.edu/~kriz/cifar.html) datasets are also frequently used by the neural network research community.\n\n\n\nThe CIFAR-10 data set contains low-rez images that are divided into 10 classes. The CIFAR-100 data set contains 100 classes in a hierarchy. \n\n# Other Resources\n\n* [Imagenet:Large Scale Visual Recognition Challenge 2014](http://image-net.org/challenges/LSVRC/2014/index)\n* [Andrej Karpathy](http://cs.stanford.edu/people/karpathy/) - PhD student/instructor at Stanford.\n * [CS231n Convolutional Neural Networks for Visual Recognition](http://cs231n.stanford.edu/) - Stanford course on computer vision/CNN's.\n * [CS231n - GitHub](http://cs231n.github.io/)\n * [ConvNetJS](http://cs.stanford.edu/people/karpathy/convnetjs/) - JavaScript library for deep learning.\n \n\n# Convolutional Neural Networks (CNNs)\n\nThe convolutional neural network (CNN) is a neural network technology that has profoundly impacted the area of computer vision (CV). Fukushima (1980) introduced the original concept of a convolutional neural network, and LeCun, Bottou, Bengio & Haffner (1998) greatly improved this work. From this research, Yan LeCun introduced the famous LeNet-5 neural network architecture. This class follows the LeNet-5 style of convolutional neural network.\n\n**A LeNET-5 Network (LeCun, 1998)**\n\n\nSo far we have only seen one layer type (dense layers). By the end of this course we will have seen:\n\n* **Dense Layers** - Fully connected layers. (introduced previously)\n* **Convolution Layers** - Used to scan across images. (introduced this class)\n* **Max Pooling Layers** - Used to downsample images. (introduced this class)\n* **Dropout Layer** - Used to add regularization. 
(introduced next class)\n\n## Convolution Layers\n\nThe first layer that we will examine is the convolutional layer. We will begin by looking at the hyper-parameters that you must specify for a convolutional layer in most neural network frameworks that support the CNN:\n\n* Number of filters\n* Filter Size\n* Stride\n* Padding\n* Activation Function/Non-Linearity\n\nThe primary purpose for a convolutional layer is to detect features such as edges, lines, blobs of color, and other visual elements. The filters can detect these features. The more filters that we give to a convolutional layer, the more features it can detect.\n\nA filter is a square-shaped object that scans over the image. A grid can represent the individual pixels of a grid. You can think of the convolutional layer as a smaller grid that sweeps left to right over each row of the image. There is also a hyper parameter that specifies both the width and height of the square-shaped filter. Figure 10.1 shows this configuration in which you see the six convolutional filters sweeping over the image grid:\n\nA convolutional layer has weights between it and the previous layer or image grid. Each pixel on each convolutional layer is a weight. Therefore, the number of weights between a convolutional layer and its predecessor layer or image field is the following:\n\n```\n[FilterSize] * [FilterSize] * [# of Filters]\n```\n\nFor example, if the filter size were 5 (5x4) for 10 filters, there would be 250 weights.\n\nYou need to understand how the convolutional filters sweep across the previous layer’s output or image grid. Figure 10.2 illustrates the sweep:\n\n\n\nThe above figure shows a convolutional filter with a size of 4 and a padding size of 1. The padding size is responsible for the boarder of zeros in the area that the filter sweeps. Even though the image is actually 8x7, the extra padding provides a virtual image size of 9x8 for the filter to sweep across. The stride specifies the number of positions at which the convolutional filters will stop. The convolutional filters move to the right, advancing by the number of cells specified in the stride. Once the far right is reached, the convolutional filter moves back to the far left, then it moves down by the stride amount and\ncontinues to the right again.\n\nSome constraints exist in relation to the size of the stride. Obviously, the stride cannot be 0. The convolutional filter would never move if the stride were set to 0. Furthermore, neither the stride, nor the convolutional filter size can be larger than the previous grid. There are additional constraints on the stride (s), padding (p) and the filter width (f) for an image of width (w). Specifically, the convolutional filter must be able to start at the far left or top boarder, move a certain number of strides, and land on the far right or bottom boarder. The following equation shows the number of steps a convolutional operator\nmust take to cross the image:\n\n$ steps = \\frac{w - f + 2p}{s+1} $\n\nThe number of steps must be an integer. In other words, it cannot have decimal places. The purpose of the padding (p) is to be adjusted to make this equation become an integer value.\n\n## Max Pooling Layers\n\nMax-pool layers downsample a 3D box to a new one with smaller dimensions. Typically, you can always place a max-pool layer immediately following convolutional layer. The LENET shows the max-pool layer immediately after layers C1 and C3. 
These max-pool layers progressively decrease the size of the dimensions of the 3D boxes passing through them. This technique can avoid overfitting (Krizhevsky, Sutskever & Hinton, 2012).\n\nA pooling layer has the following hyper-parameters:\n\n* Spatial Extent (f )\n* Stride (s)\n\nUnlike convolutional layers, max-pool layers do not use padding. Additionally, max-pool layers have no weights, so training does not affect them. These layers simply downsample their 3D box input. The 3D box output by a max-pool layer will have a width equal to this equation:\n\n$ w_2 = \\frac{w_1 - f}{s + 1} $\n\nThe height of the 3D box produced by the max-pool layer is calculated similarly with this equation:\n\n$ h_2 = \\frac{h_1 - f}{s + 1} $\n\nThe depth of the 3D box produced by the max-pool layer is equal to the depth the 3D box received as input. The most common setting for the hyper-parameters of a max-pool layer are f =2 and s=2. The spatial extent (f) specifies that boxes of 2x2 will be scaled down to single pixels. Of these four pixels, the pixel with the maximum value will represent the 2x2 pixel in the new grid. Because squares of size 4 are replaced with size 1, 75% of the pixel information is lost. The following figure shows this transformation as a 6x6 grid becomes a 3x3:\n\n\n\nOf course, the above diagram shows each pixel as a single number. A grayscale image would have this characteristic. For an RGB image, we usually take the average of the three numbers to determine which pixel has the maximum value.\n\n[More information on CNN's](http://cs231n.github.io/convolutional-networks/)\n\n# TensorFlow with CNNs\n\nThe following sections describe how to use TensorFlow/Keras with CNNs.",
"_____no_output_____"
],
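A small sanity-check sketch (plain Python, nothing Keras-specific) of the formulas above. The weight count follows the simplified [FilterSize] x [FilterSize] x [# of Filters] rule from the text, which ignores input channels and bias terms; the size helpers assume padding and stride are chosen so the filter fits exactly.

```python
def conv_output_size(w, f, s=1, p=0):
    """Convolution output width: (w - f + 2p)/s + 1."""
    size = (w - f + 2 * p) / s + 1
    assert size == int(size), "padding/stride must make this an integer"
    return int(size)


def pool_output_size(w, f=2, s=2):
    """Max-pool output width: (w - f)/s + 1 (no padding)."""
    return (w - f) // s + 1


# 5x5 filters, 10 filters -> 250 weights (per the simplified rule above)
print(5 * 5 * 10)

# The MNIST model used later: 28x28 input, two 3x3 convolutions, one 2x2 pool
print(conv_output_size(28, 3))                        # 26
print(conv_output_size(conv_output_size(28, 3), 3))   # 24
print(pool_output_size(24))                           # 12
```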
[
"### Access to Data Sets - DIGITS\n\nKeras provides built in access classes for MNIST. It is important to note that MNIST data arrives already separated into two sets:\n\n* **train** - Neural network will be trained with this.\n* **test** - Used for validation.",
"_____no_output_____"
]
],
[
[
"import tensorflow.keras\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.datasets import mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nprint(\"Shape of x_train: {}\".format(x_train.shape))\nprint(\"Shape of y_train: {}\".format(y_train.shape))\nprint()\nprint(\"Shape of x_test: {}\".format(x_test.shape))\nprint(\"Shape of y_test: {}\".format(y_test.shape))",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 0s 0us/step\nShape of x_train: (60000, 28, 28)\nShape of y_train: (60000,)\n\nShape of x_test: (10000, 28, 28)\nShape of y_test: (10000,)\n"
]
],
[
[
"### Display the Digits \n\nThe following code shows what the MNIST files contain.",
"_____no_output_____"
]
],
[
[
"# Display as text\nfrom IPython.display import display\nimport pandas as pd\n\nprint(\"Shape for dataset: {}\".format(x_train.shape))\nprint(\"Labels: {}\".format(y_train))\n\n# Single MNIST digit\nsingle = x_train[0]\nprint(\"Shape for single: {}\".format(single.shape))\n\ndisplay(pd.DataFrame(single.reshape(28,28)))",
"Shape for dataset: (60000, 28, 28)\nLabels: [5 0 4 ... 5 6 8]\nShape for single: (28, 28)\n"
],
[
"# Display as image\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\ndigit = 105 # Change to choose new digit\na = x_train[digit]\nplt.imshow(a, cmap='gray', interpolation='nearest')\nprint(\"Image (#{}): Which is digit '{}'\".format(digit,y_train[digit]))",
"Image (#105): Which is digit '1'\n"
],
[
"import random\n\nROWS = 6\nrandom_indices = random.sample(range(x_train.shape[0]), ROWS*ROWS)\n\nsample_images = x_train[random_indices, :]\n\nplt.clf()\n\nfig, axes = plt.subplots(ROWS,ROWS, \n figsize=(ROWS,ROWS),\n sharex=True, sharey=True) \n\nfor i in range(ROWS*ROWS):\n subplot_row = i//ROWS \n subplot_col = i%ROWS\n ax = axes[subplot_row, subplot_col]\n\n plottable_image = np.reshape(sample_images[i,:], (28,28))\n ax.imshow(plottable_image, cmap='gray_r')\n \n ax.set_xbound([0,28])\n\nplt.tight_layout()\nplt.show()",
"_____no_output_____"
],
[
"import tensorflow.keras\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras import backend as K\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n# input image dimensions\nimg_rows, img_cols = 28, 28\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(\"Training samples: {}\".format(x_train.shape[0]))\nprint(\"Test samples: {}\".format(x_test.shape[0]))\n# convert class vectors to binary class matrices\ny_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)\ny_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n",
"x_train shape: (60000, 28, 28, 1)\nTraining samples: 60000\nTest samples: 10000\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/layers/core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
]
],
[
[
"### Training/Fitting CNN - DIGITS\n\nThe following code will train the CNN for 20,000 steps. This can take awhile, you might want to scale the step count back. GPU training can help. My results:\n\n* CPU Training Time: Elapsed time: 1:50:13.10\n* GPU Training Time: Elapsed time: 0:13:43.06",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\nimport time\n\nstart_time = time.time()\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=2,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss: {}'.format(score[0]))\nprint('Test accuracy: {}'.format(score[1]))\n\nelapsed_time = time.time() - start_time\nprint(\"Elapsed time: {}\".format(hms_string(elapsed_time)))",
"Train on 60000 samples, validate on 10000 samples\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\nEpoch 1/12\n - 8s - loss: 0.2385 - acc: 0.9278 - val_loss: 0.0541 - val_acc: 0.9832\nEpoch 2/12\n - 4s - loss: 0.0831 - acc: 0.9750 - val_loss: 0.0398 - val_acc: 0.9863\nEpoch 3/12\n - 4s - loss: 0.0634 - acc: 0.9808 - val_loss: 0.0324 - val_acc: 0.9891\nEpoch 4/12\n - 4s - loss: 0.0504 - acc: 0.9848 - val_loss: 0.0280 - val_acc: 0.9913\nEpoch 5/12\n - 4s - loss: 0.0437 - acc: 0.9865 - val_loss: 0.0297 - val_acc: 0.9895\nEpoch 6/12\n - 4s - loss: 0.0373 - acc: 0.9882 - val_loss: 0.0353 - val_acc: 0.9897\nEpoch 7/12\n - 4s - loss: 0.0327 - acc: 0.9894 - val_loss: 0.0310 - val_acc: 0.9906\nEpoch 8/12\n - 4s - loss: 0.0298 - acc: 0.9906 - val_loss: 0.0288 - val_acc: 0.9906\nEpoch 9/12\n - 4s - loss: 0.0282 - acc: 0.9908 - val_loss: 0.0284 - val_acc: 0.9925\nEpoch 10/12\n - 4s - loss: 0.0259 - acc: 0.9914 - val_loss: 0.0295 - val_acc: 0.9910\nEpoch 11/12\n - 4s - loss: 0.0231 - acc: 0.9923 - val_loss: 0.0277 - val_acc: 0.9914\nEpoch 12/12\n - 4s - loss: 0.0210 - acc: 0.9930 - val_loss: 0.0277 - val_acc: 0.9920\nTest loss: 0.027716550575438943\nTest accuracy: 0.9919999837875366\nElapsed time: 0:00:53.44\n"
]
],
[
[
"### Evaluate Accuracy - DIGITS\n\nNote, if you are using a GPU you might get the **ResourceExhaustedError**. This occurs because the GPU might not have enough ram to predict the entire data set at once. ",
"_____no_output_____"
]
],
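If the full-dataset evaluation below does run out of GPU memory, one common workaround (a sketch; the batch size of 256 is arbitrary) is to let Keras feed the data through in batches:

```python
# Evaluate and predict in batches so only one batch is resident on the GPU at a time
score = model.evaluate(x_test, y_test, batch_size=256, verbose=0)
pred = model.predict(x_test, batch_size=256)
```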
[
[
"# Predict using either GPU or CPU, send the entire dataset. This might not work on the GPU.\n# Set the desired TensorFlow output level for this example\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss: {}'.format(score[0]))\nprint('Test accuracy: {}'.format(score[1]))",
"Test loss: 0.027716550575438943\nTest accuracy: 0.9919999837875366\n"
]
],
[
[
"GPUs are most often used for training rather than prediction. For prediction either disable the GPU or just predict on a smaller sample. If your GPU has enough memory, the above prediction code may work just fine. If not, just prediction on a sample with the following code:",
"_____no_output_____"
]
],
[
[
"from sklearn import metrics\n\n# For GPU just grab the first 100 images\nsmall_x = x_test[1:100]\nsmall_y = y_test[1:100]\nsmall_y2 = np.argmax(small_y,axis=1)\npred = model.predict(small_x)\npred = np.argmax(pred,axis=1)\nscore = metrics.accuracy_score(small_y2, pred)\nprint('Accuracy: {}'.format(score))",
"Accuracy: 0.98989898989899\n"
]
],
[
[
"### MINST Fashion",
"_____no_output_____"
]
],
[
[
"import tensorflow.keras\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.datasets import fashion_mnist\n\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\nprint(\"Shape of x_train: {}\".format(x_train.shape))\nprint(\"Shape of y_train: {}\".format(y_train.shape))\nprint()\nprint(\"Shape of x_test: {}\".format(x_test.shape))\nprint(\"Shape of y_test: {}\".format(y_test.shape))",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n32768/29515 [=================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n26427392/26421880 [==============================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n8192/5148 [===============================================] - 0s 0us/step\nDownloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n4423680/4422102 [==============================] - 0s 0us/step\nShape of x_train: (60000, 28, 28)\nShape of y_train: (60000,)\n\nShape of x_test: (10000, 28, 28)\nShape of y_test: (10000,)\n"
]
],
[
[
"### Display the Apparel \n\nThe following code shows what the Fashion MNIST files contain.",
"_____no_output_____"
]
],
[
[
"# Display as text\nfrom IPython.display import display\nimport pandas as pd\n\nprint(\"Shape for dataset: {}\".format(x_train.shape))\nprint(\"Labels: {}\".format(y_train))\n\n# Single MNIST digit\nsingle = x_train[0]\nprint(\"Shape for single: {}\".format(single.shape))\n\ndisplay(pd.DataFrame(single.reshape(28,28)))",
"Shape for dataset: (60000, 28, 28)\nLabels: [9 0 0 ... 3 0 5]\nShape for single: (28, 28)\n"
],
[
"# Display as image\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\ndigit = 90 # Change to choose new article\na = x_train[digit]\nplt.imshow(a, cmap='gray', interpolation='nearest')\nprint(\"Image (#{}): Which is digit '{}'\".format(digit,y_train[digit]))",
"Image (#90): Which is digit '9'\n"
],
[
"import random\n\nROWS = 6\nrandom_indices = random.sample(range(x_train.shape[0]), ROWS*ROWS)\n\nsample_images = x_train[random_indices, :]\n\nplt.clf()\n\nfig, axes = plt.subplots(ROWS,ROWS, \n figsize=(ROWS,ROWS),\n sharex=True, sharey=True) \n\nfor i in range(ROWS*ROWS):\n subplot_row = i//ROWS \n subplot_col = i%ROWS\n ax = axes[subplot_row, subplot_col]\n\n plottable_image = np.reshape(sample_images[i,:], (28,28))\n ax.imshow(plottable_image, cmap='gray_r')\n \n ax.set_xbound([0,28])\n\nplt.tight_layout()\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"### Training/Fitting CNN - Fashion\n\nThe following code will train the CNN for 20,000 steps. This can take awhile, you might want to scale the step count back. GPU training can help. My results:\n\n* CPU Training Time: Elapsed time: 1:50:13.10\n* GPU Training Time: Elapsed time: 0:13:43.06",
"_____no_output_____"
]
],
[
[
"import tensorflow.keras\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras import backend as K\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n# input image dimensions\nimg_rows, img_cols = 28, 28\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(\"Training samples: {}\".format(x_train.shape[0]))\nprint(\"Test samples: {}\".format(x_test.shape[0]))\n# convert class vectors to binary class matrices\ny_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)\ny_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n",
"x_train shape: (60000, 28, 28, 1)\nTraining samples: 60000\nTest samples: 10000\n"
],
[
"import tensorflow as tf\nimport time\n\nstart_time = time.time()\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=2,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss: {}'.format(score[0]))\nprint('Test accuracy: {}'.format(score[1]))\n\nelapsed_time = time.time() - start_time\nprint(\"Elapsed time: {}\".format(hms_string(elapsed_time)))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/12\n - 4s - loss: 0.5299 - acc: 0.8125 - val_loss: 0.3500 - val_acc: 0.8740\nEpoch 2/12\n - 4s - loss: 0.3456 - acc: 0.8774 - val_loss: 0.2875 - val_acc: 0.8960\nEpoch 3/12\n - 4s - loss: 0.2973 - acc: 0.8927 - val_loss: 0.2736 - val_acc: 0.9018\nEpoch 4/12\n - 4s - loss: 0.2670 - acc: 0.9042 - val_loss: 0.2457 - val_acc: 0.9088\nEpoch 5/12\n - 4s - loss: 0.2415 - acc: 0.9118 - val_loss: 0.2376 - val_acc: 0.9149\nEpoch 6/12\n - 4s - loss: 0.2222 - acc: 0.9192 - val_loss: 0.2335 - val_acc: 0.9113\nEpoch 7/12\n - 4s - loss: 0.2073 - acc: 0.9240 - val_loss: 0.2258 - val_acc: 0.9191\nEpoch 8/12\n - 4s - loss: 0.1915 - acc: 0.9289 - val_loss: 0.2175 - val_acc: 0.9201\nEpoch 9/12\n - 4s - loss: 0.1778 - acc: 0.9339 - val_loss: 0.2223 - val_acc: 0.9201\nEpoch 10/12\n - 4s - loss: 0.1656 - acc: 0.9375 - val_loss: 0.2234 - val_acc: 0.9222\nEpoch 11/12\n - 4s - loss: 0.1583 - acc: 0.9401 - val_loss: 0.2197 - val_acc: 0.9239\nEpoch 12/12\n - 4s - loss: 0.1464 - acc: 0.9456 - val_loss: 0.2218 - val_acc: 0.9261\nTest loss: 0.221764595079422\nTest accuracy: 0.9261000156402588\nElapsed time: 0:00:49.85\n"
]
],
[
[
"# Module 6 Assignment\n\nYou can find the first assignment here: [assignment 6](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e747838060ee0a69be68b9f96000ac2a2c58dc3c | 1,990 | ipynb | Jupyter Notebook | Linear_Transformation.ipynb | espinili/Linear-Algebra-58020 | aae6369a014ed12c3eb8bdfb49e355f183b5ca3d | [
"Apache-2.0"
] | null | null | null | Linear_Transformation.ipynb | espinili/Linear-Algebra-58020 | aae6369a014ed12c3eb8bdfb49e355f183b5ca3d | [
"Apache-2.0"
] | null | null | null | Linear_Transformation.ipynb | espinili/Linear-Algebra-58020 | aae6369a014ed12c3eb8bdfb49e355f183b5ca3d | [
"Apache-2.0"
] | null | null | null | 26.533333 | 247 | 0.428643 | [
[
[
"<a href=\"https://colab.research.google.com/github/espinili/Linear-Algebra-58020/blob/main/Linear_Transformation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nA = np.array([[4,10,8],[10,26,26],[8,26,61]])\nprint(A)\ninv_A=np.linalg.inv(A)\nprint(inv_A)\nB = np.array([[44],[128],[214]])\nprint(B)\n\n#AA^-1X = B.A^-1\nX = np.dot(inv_A,B)\nprint(X)\n",
"[[ 4 10 8]\n [10 26 26]\n [ 8 26 61]]\n[[ 25.27777778 -11.16666667 1.44444444]\n [-11.16666667 5. -0.66666667]\n [ 1.44444444 -0.66666667 0.11111111]]\n[[ 44]\n [128]\n [214]]\n[[-8.]\n [ 6.]\n [ 2.]]\n"
]
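For reference, a sketch of the same solve without forming the inverse explicitly; np.linalg.solve is generally more accurate and faster for a single right-hand side:

```python
import numpy as np

A = np.array([[4, 10, 8], [10, 26, 26], [8, 26, 61]])
B = np.array([[44], [128], [214]])

# Solve A X = B directly via factorization instead of computing inv(A)
X = np.linalg.solve(A, B)
print(X)  # [[-8.] [ 6.] [ 2.]]
```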
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
]
] |
e7478ff79efea5c19ebb310f9af129cce96b5b6d | 1,822 | ipynb | Jupyter Notebook | Machine Learning Cookbook/Chapter 2 Loading Data.ipynb | sonwanesuresh95/Books-to-notebooks | 7e56d31395cfda258baefa93d5181839d1a829dc | [
"MIT"
] | 1 | 2021-03-09T06:22:46.000Z | 2021-03-09T06:22:46.000Z | Machine Learning Cookbook/Chapter 2 Loading Data.ipynb | sonwanesuresh95/Books-to-notebooks | 7e56d31395cfda258baefa93d5181839d1a829dc | [
"MIT"
] | null | null | null | Machine Learning Cookbook/Chapter 2 Loading Data.ipynb | sonwanesuresh95/Books-to-notebooks | 7e56d31395cfda258baefa93d5181839d1a829dc | [
"MIT"
] | null | null | null | 17.862745 | 46 | 0.510428 | [
[
[
"import numpy as np\nimport sklearn\nimport matplotlib.pyplot as plt\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"# Load datasets",
"_____no_output_____"
]
],
[
[
"from sklearn import datasets",
"_____no_output_____"
],
[
"digits = datasets.load_digits()",
"_____no_output_____"
],
[
"features = digits.data\ntarget = digits.target",
"_____no_output_____"
]
],
[
[
"Another datasets are:\n1. load_boston\n2. load_iris\n3. load_digits",
"_____no_output_____"
],
[
"# Reading data",
"_____no_output_____"
],
[
"1. from csv<br>\n<code> pd.read_csv(filename)</code>\n2. from excel<br>\n<code> pd.read_excel(filename)</code>\n3. from sql database<br>\n<code> pd.read_sql_query(query)</code>",
"_____no_output_____"
]
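A minimal sketch of the three readers listed above. The file names and the SQLite connection are illustrative placeholders, not files that ship with this notebook:

```python
import sqlite3
import pandas as pd

# CSV and Excel readers take a path (or URL)
df_csv = pd.read_csv('data.csv')
df_xls = pd.read_excel('data.xlsx')

# The SQL reader needs a query plus an open database connection
conn = sqlite3.connect('data.db')
df_sql = pd.read_sql_query('SELECT * FROM my_table', conn)
```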
]
] | [
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e747b47655cc49d1ef8c5448e5fb9d572ddc15e4 | 71,303 | ipynb | Jupyter Notebook | Untitled.ipynb | mkirby1995/PyISRU | 46338999e70308604692bbd3c20b4f770c27dc6d | [
"MIT"
] | null | null | null | Untitled.ipynb | mkirby1995/PyISRU | 46338999e70308604692bbd3c20b4f770c27dc6d | [
"MIT"
] | null | null | null | Untitled.ipynb | mkirby1995/PyISRU | 46338999e70308604692bbd3c20b4f770c27dc6d | [
"MIT"
] | null | null | null | 272.148855 | 66,172 | 0.933411 | [
[
[
"Need 240 metric tons of methane. \n\n2.4e^8 grams of methane",
"_____no_output_____"
]
],
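A quick back-of-the-envelope sketch of what that target means in moles and molecules. It uses an approximate molar mass of 16.04 g/mol for CH4 rather than the value computed from periodic.json below:

```python
AVOGADRO = 6.0221409e23        # molecules per mole
target_mass_g = 2.4e8          # 240 metric tons of CH4, in grams
molar_mass_ch4 = 16.04         # g/mol, approximate

moles_needed = target_mass_g / molar_mass_ch4
molecules_needed = moles_needed * AVOGADRO

print(f"{moles_needed:.3e} mol of CH4")            # ~1.496e+07 mol
print(f"{molecules_needed:.3e} molecules of CH4")  # ~9.01e+30 molecules
```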
[
[
"from ISRU import Atmospheric_processing_unit\nimport random\nimport json\nimport matplotlib.pyplot as plt\n\nperidoic = json.load(open(\"periodic.json\"))\navogadro = 6.0221409E23",
"_____no_output_____"
],
[
"H = peridoic['H']\nC = peridoic['C']",
"_____no_output_____"
],
[
"CH_4 = {}\n\nCH_4['mass'] = (1 * C['atomic_mass']) + (4 * H['atomic_mass'])",
"_____no_output_____"
],
[
"CH_4['mass']",
"_____no_output_____"
],
[
"apu = Atmospheric_processing_unit()\n\napu.hydrogen_addition(500)\napu.atmosphere_intake(50)\n\n\n\nCO2_record = [apu.co2_stock]\nH2O_record = [apu.h2o_stock]\nCO_record = [apu.co_stock]\nCH4_record = [apu.ch4_stock]\nH2_record = [apu.h2_stock]\nO2_record = [apu.o2_stock]",
"_____no_output_____"
],
[
"while apu.h2_stock >= 2:\n if apu.co2_stock < 10:\n apu.atmosphere_intake(25)\n\n apu.sabatier_rwgs()\n\n CO2_record.append(apu.co2_stock)\n H2O_record.append(apu.h2o_stock)\n CO_record.append(apu.co_stock)\n CH4_record.append(apu.ch4_stock)\n H2_record.append(apu.h2_stock)\n O2_record.append(apu.o2_stock)",
"_____no_output_____"
],
[
"plt.figure(num=None, figsize=(15, 6), dpi=80, facecolor='w', edgecolor='k')\n\nx = range(0, len(CH4_record))\n\nplt.plot(x, CO2_record, label = 'CO2')\nplt.plot(x, H2O_record, label = 'H2O')\nplt.plot(x, CO_record, label = 'CO')\nplt.plot(x, CH4_record, label = 'CH4')\nplt.plot(x, H2_record, label = 'H2')\nplt.plot(x, O2_record, label = 'O2')\n\nplt.legend()\nplt.show();",
"_____no_output_____"
],
[
"CH4_record[-1] * CH_4['mass']",
"_____no_output_____"
],
[
"1.66e-24 * (CH4_record[-1] * CH_4['mass'])",
"_____no_output_____"
],
[
"2.4e8 / (1.66e-24 * (CH4_record[-1] * CH_4['mass']))",
"_____no_output_____"
],
[
"(1.66e-24 * (CH4_record[-1] * CH_4['mass'])) / 2.4e8 ",
"_____no_output_____"
],
[
"from chem_help import moles_to_mass",
"_____no_output_____"
],
[
"moles_to_mass()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e747b7f3799d90f1ca6da414911e45522abf0922 | 14,069 | ipynb | Jupyter Notebook | python/syntax-variables-and-numbers.ipynb | saiyugandharsingamaneni/kaggel-courses | ea9189a3c9aa7020c2cc8a7cf146ef7ba7bcf774 | [
"MIT"
] | null | null | null | python/syntax-variables-and-numbers.ipynb | saiyugandharsingamaneni/kaggel-courses | ea9189a3c9aa7020c2cc8a7cf146ef7ba7bcf774 | [
"MIT"
] | null | null | null | python/syntax-variables-and-numbers.ipynb | saiyugandharsingamaneni/kaggel-courses | ea9189a3c9aa7020c2cc8a7cf146ef7ba7bcf774 | [
"MIT"
] | null | null | null | 14,069 | 14,069 | 0.694648 | [
[
[
"**This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/hello-python).**\n\n---\n",
"_____no_output_____"
],
[
"# Exercises",
"_____no_output_____"
],
[
"Welcome to your first set of Python coding problems! \n\nIf this is your first time using Kaggle Notebooks, welcome! \n\nNotebooks are composed of blocks (called \"cells\") of text and code. Each of these is editable, though you'll mainly be editing the code cells to answer some questions.\n\nTo get started, try running the code cell below (by pressing the ► button, or clicking on the cell and pressing ctrl+enter on your keyboard).",
"_____no_output_____"
]
],
[
[
"print(\"You've successfully run some Python code\")\nprint(\"Congratulations!\")\nprint(\"this is sai yugandhar\")\n",
"_____no_output_____"
]
],
[
[
"Try adding another line of code in the cell above and re-running it. \n\nNow let's get a little fancier: Add a new code cell by clicking on an existing code cell, hitting the escape key, and then hitting the `a` or `b` key. The `a` key will add a cell above the current cell, and `b` adds a cell below.\n\nGreat! Now you know how to use Notebooks.\n\nEach hands-on exercise starts by setting up our feedback and code checking mechanism. Run the code cell below to do that. Then you'll be ready to move on to question 0.",
"_____no_output_____"
]
],
[
[
"from learntools.core import binder; binder.bind(globals())\nfrom learntools.python.ex1 import *\nprint(\"Setup complete! You're ready to start question 0.\")",
"_____no_output_____"
]
],
[
[
"## 0.\n\n*This is a silly question intended as an introduction to the format we use for hands-on exercises throughout all Kaggle courses.*\n\n**What is your favorite color? **\n\nTo complete this question, create a variable called `color` in the cell below with an appropriate value. The function call `q0.check()` (which we've already provided in the cell below) will check your answer.",
"_____no_output_____"
]
],
[
[
"# create a variable called color with an appropriate value on the line below\n# (Remember, strings in Python must be enclosed in 'single' or \"double\" quotes\ncolour=(\"blue\")\n\n# Check your answer\nq0.check()",
"_____no_output_____"
]
],
[
[
"Didn't get the right answer? How do you not even know your own favorite color?!\n\nDelete the `#` in the line below to make one of the lines run. You can choose between getting a hint or the full answer by choosing which line to remove the `#` from. \n\nRemoving the `#` is called uncommenting, because it changes that line from a \"comment\" which Python doesn't run to code, which Python does run.",
"_____no_output_____"
]
],
[
[
"q0.hint()\nq0.solution()",
"_____no_output_____"
]
],
[
[
"The upcoming questions work the same way. The only thing that will change are the question numbers. For the next question, you'll call `q1.check()`, `q1.hint()`, `q1.solution()`, for question 2, you'll call `q2.check()`, and so on.",
"_____no_output_____"
],
[
"<hr/>\n\n## 1.\n\nComplete the code below. In case it's helpful, here is the table of available arithmetic operations:\n\n\n\n| Operator | Name | Description |\n|--------------|----------------|--------------------------------------------------------|\n| ``a + b`` | Addition | Sum of ``a`` and ``b`` |\n| ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |\n| ``a * b`` | Multiplication | Product of ``a`` and ``b`` |\n| ``a / b`` | True division | Quotient of ``a`` and ``b`` |\n| ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |\n| ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |\n| ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |\n| ``-a`` | Negation | The negative of ``a`` |\n\n<span style=\"display:none\"></span>\n",
"_____no_output_____"
]
],
[
[
"pi = 3.14159 # approximate\ndiameter = 3\n\n# Create a variable called 'radius' equal to half the diameter\nradius=1/2*diameter\n\n# Create a variable called 'area', using the formula for the area of a circle: pi times the radius squared\narea=pi*radius**2\n\n# Check your answer\nq1.check()",
"_____no_output_____"
],
[
"# Uncomment and run the lines below if you need help.\nq1.hint()\nq1.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n## 2.\n\nAdd code to the following cell to swap variables `a` and `b` (so that `a` refers to the object previously referred to by `b` and vice versa).",
"_____no_output_____"
]
],
[
[
"########### Setup code - don't touch this part ######################\n# If you're curious, these are examples of lists. We'll talk about \n# them in depth a few lessons from now. For now, just know that they're\n# yet another type of Python object, like int or float.\na = [1, 2, 3]\nb = [3, 2, 1]\nq2.store_original_ids()\n######################################################################\n\n# Your code goes here. Swap the values to which a and b refer.\n# If you get stuck, you can always uncomment one or both of the lines in\n# the next cell for a hint, or to peek at the solution.\ntemp=a\na=b\nb=temp\n\n######################################################################\n\n# Check your answer\nq2.check()",
"_____no_output_____"
],
[
"q2.hint()",
"_____no_output_____"
],
[
"q2.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n## 3.\n\na) Add parentheses to the following expression so that it evaluates to 1.",
"_____no_output_____"
]
],
[
[
"((5 - 3) // 2)",
"_____no_output_____"
],
[
"q3.a.hint()",
"_____no_output_____"
],
[
"# Check your answer (Run this code cell to receive credit!)\nq3.a.solution()",
"_____no_output_____"
]
],
[
[
"<small>Questions, like this one, marked a spicy pepper are a bit harder.</small>\n\nb) <span title=\"A bit spicy\" style=\"color: darkgreen \">🌶️</span> Add parentheses to the following expression so that it evaluates to 0\n",
"_____no_output_____"
]
],
[
[
"(8 - 3) * (2 - (1 + 1))",
"_____no_output_____"
],
[
"#q3.b.hint()",
"_____no_output_____"
],
[
"# Check your answer (Run this code cell to receive credit!)\nq3.b.solution()",
"_____no_output_____"
]
],
[
[
"<hr/>\n## 4. \nAlice, Bob and Carol have agreed to pool their Halloween candy and split it evenly among themselves.\nFor the sake of their friendship, any candies left over will be smashed. For example, if they collectively\nbring home 91 candies, they'll take 30 each and smash 1.\n\nWrite an arithmetic expression below to calculate how many candies they must smash for a given haul.",
"_____no_output_____"
]
],
[
[
"# Variables representing the number of candies collected by alice, bob, and carol\nalice_candies = 42\nbob_candies = 28\ncarol_candies = 21\n\n# Your code goes here! Replace the right-hand side of this assignment with an expression\n# involving alice_candies, bob_candies, and carol_candies\nto_smash = 1\nsum = alice_candies+bob_candies+carol_candies\nprint(sum)",
"_____no_output_____"
],
[
"print(sum-to_smash)\n# Check your answer\nq4.check()",
"_____no_output_____"
],
[
"q4.hint()\nq4.solution()",
"_____no_output_____"
]
],
[
[
"# Keep Going\n\nNext up, you'll **[learn to write new functions and understand functions others write](https://www.kaggle.com/colinmorris/functions-and-getting-help)**. This will make you at least 10 times more productive as a Python programmer. ",
"_____no_output_____"
],
[
"---\n\n\n\n\n*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161283) to chat with other Learners.*",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e747c13022df863a3ee44f22aed70f66bca33a6f | 4,036 | ipynb | Jupyter Notebook | LMDZ.ipynb | morianemo/ecflow-notehbook2 | b0e917aeed7cf2ef6fb9ad3c38e7f8863d1e7cd3 | [
"Apache-2.0"
] | null | null | null | LMDZ.ipynb | morianemo/ecflow-notehbook2 | b0e917aeed7cf2ef6fb9ad3c38e7f8863d1e7cd3 | [
"Apache-2.0"
] | null | null | null | LMDZ.ipynb | morianemo/ecflow-notehbook2 | b0e917aeed7cf2ef6fb9ad3c38e7f8863d1e7cd3 | [
"Apache-2.0"
] | null | null | null | 33.081967 | 136 | 0.526511 | [
[
[
"# LMDZ GCM\n[LMDZ](http://lmdz.lmd.jussieu.fr/le-projet-lmdz/formation/2017)\n[mars?](http://www-mars.lmd.jussieu.fr/)\n## Download, compile",
"_____no_output_____"
]
],
[
[
"%%bash\nrun=svn && which $run || sudo apt-get install subversion || true\nwhich $run || echo \"Please install svn...\"\nINST=\"svn co http://svn.lmd.jussieu.fr/LMDZ/BOL/script_install && cd script_install && chmod +x install*.sh && ./install*.sh\"\necho $INST",
"/usr/bin/svn\n/usr/bin/svn\nsvn co http://svn.lmd.jussieu.fr/LMDZ/BOL/script_install && cd script_install && chmod +x install*.sh && ./install*.sh\n"
],
[
"######################\nfrom __future__ import print_function\nimport os, pwd, sys; \nlib = \"/usr/local/apps/ecflow/current/lib/python2.7/site-packages/ecflow\"\nlib = \"/usr/local/apps/ecflow/current/lib/python3.5/site-packages/ecflow\"\nlib = \"/usr/local/lib/python3.5/site-packages/ecflow\"\nsys.path.append(lib)\nimport ecf; from ecf import (Client, Defs, Suite, Family, Task, Defstatus, Edit, Trigger)\ntry: x = Edit(test=\"value\") # Edit is present in recent ecf.py module\nexcept:\n class Edit(Variables): pass\nhome = os.getenv(\"HOME\") + \"/ecflow_server\"\nuser = os.getenv(\"USER\")\n\n# SUITE\nnode = Suite(\"lmdz\").add(\n Defstatus(\"suspended\"),\n Edit(ECF_HOME=home, ECF_INCLUDE=home + \"/include\", ECF_FILES=home + \"/files\",\n ECF_EXTN=\".ecg\", # current convention for generated task template extension\n ECF_JOB_CMD=\"%ECF_JOB% > %ECF_JOBOUT% 2>&1\", # localhost run\n ECF_URL_CMD=\"firefox %URL%\",\n URL=\"http://lmdz.lmd.jussieu.fr/\", ),\n Family(\"make\").add(\n Family(\"get\").add(Task(\"cmd\").add(\n Edit(CMD=\"svn co http://svn.lmd.jussieu.fr/LMDZ/BOL/script_install\",\n ARGS=\"\")), ),\n Family(\"compile\").add(\n Trigger([\"get\"]),\n Task(\"cmd\").add(\n Edit(CMD=\"cd script_install && chmod +x install*sh && ./install_lmdz.sh\")), ), ),\n Family(\"main\").add(Task(\"cmd\").add(Edit(CMD=\"echo\", ARGS=\"YOUR PART\"))))\n\n# TASK TEMPLATE\nwith open(home + \"/files/cmd.ecg\", 'w') as task_template: print(\"\"\"#!/bin/bash\n%include <head.h>\n %CMD:echo% %ARGS:%\n%include <tail.h>\"\"\", file=task_template)\n\n# DEFS\ndefs = Defs()\ndefs.add_suite(node)\npath = '/' + node.name()\n\n# CLIENT\nclient = Client(\"localhost@%s\" % os.getenv(\"ECF_PORT\", 2500)) # PYTHON CLIENT\nclient.replace(path, defs) # load/replace the top node (suite)\nclient.begin_suite(node.name()) # BEGIN suite: UNKNOWN -> QUEUED\nclient.resume(path) # RESUME suite: SUSPENDED -> create job and submit",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
e747c78d1d99123e7764c939250ba9d3a77d48e0 | 489,718 | ipynb | Jupyter Notebook | Chapter 03/multiple_optimizers.ipynb | bpbpublications/Mastering-TensorFlow-2.x | fc169692e6f38f3d6b78f956f47bcc7c884a9647 | [
"MIT"
] | 1 | 2022-02-15T07:36:18.000Z | 2022-02-15T07:36:18.000Z | Chapter 03/multiple_optimizers.ipynb | bpbpublications/Mastering-TensorFlow-2.x | fc169692e6f38f3d6b78f956f47bcc7c884a9647 | [
"MIT"
] | null | null | null | Chapter 03/multiple_optimizers.ipynb | bpbpublications/Mastering-TensorFlow-2.x | fc169692e6f38f3d6b78f956f47bcc7c884a9647 | [
"MIT"
] | null | null | null | 378.160618 | 56,936 | 0.919823 | [
[
[
"import tensorflow as tf\nimport numpy as np",
"_____no_output_____"
],
[
"(X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\nX_train = X_train/255.\nX_test = X_test/255.\n\nX_train.shape, X_test.shape, y_train.shape, y_test.shape",
"_____no_output_____"
],
[
"# Define the labels of the dataset\nCLASSES=[\"T-shirt/top\",\"Trouser\",\"Pullover\",\"Dress\",\"Coat\",\n \"Sandal\",\"Shirt\",\"Sneaker\",\"Bag\",\"Ankle boot\"]",
"_____no_output_____"
],
[
"# Change the pixel values to float32 and reshape input data\nX_train = X_train.astype(\"float32\").reshape(-1, 28, 28, 1)\nX_test = X_test.astype(\"float32\").reshape(-1, 28, 28, 1)",
"_____no_output_____"
],
[
"from tensorflow.keras import datasets, layers, models\n\nfrom tensorflow.keras.layers import *",
"_____no_output_____"
],
[
"def get_model():\n model = models.Sequential()\n model.add(Conv2D(16, (5, 5), activation=\"relu\",\n input_shape=(28, 28,1)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(32, (5, 5), activation=\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(128, activation=\"relu\"))\n model.add(Dense(len(CLASSES), activation=\"softmax\"))\n return model\nmodel_adam = get_model()",
"_____no_output_____"
],
[
"model_adam.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_adam = model_adam.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 15s 244us/sample - loss: 0.5433 - accuracy: 0.8014 - val_loss: 0.4045 - val_accuracy: 0.8538\nEpoch 2/20\n60000/60000 [==============================] - 15s 253us/sample - loss: 0.3698 - accuracy: 0.8643 - val_loss: 0.3388 - val_accuracy: 0.8759\nEpoch 3/20\n60000/60000 [==============================] - 15s 251us/sample - loss: 0.3240 - accuracy: 0.8800 - val_loss: 0.3121 - val_accuracy: 0.8838\nEpoch 4/20\n60000/60000 [==============================] - 15s 251us/sample - loss: 0.2980 - accuracy: 0.8892 - val_loss: 0.2985 - val_accuracy: 0.8894\nEpoch 5/20\n60000/60000 [==============================] - 16s 258us/sample - loss: 0.2726 - accuracy: 0.8995 - val_loss: 0.2812 - val_accuracy: 0.8968\nEpoch 6/20\n60000/60000 [==============================] - 16s 268us/sample - loss: 0.2557 - accuracy: 0.9038 - val_loss: 0.2927 - val_accuracy: 0.8925\nEpoch 7/20\n60000/60000 [==============================] - 16s 271us/sample - loss: 0.2422 - accuracy: 0.9096 - val_loss: 0.2710 - val_accuracy: 0.9021\nEpoch 8/20\n60000/60000 [==============================] - 17s 289us/sample - loss: 0.2306 - accuracy: 0.9124 - val_loss: 0.2582 - val_accuracy: 0.9070\nEpoch 9/20\n60000/60000 [==============================] - 18s 301us/sample - loss: 0.2202 - accuracy: 0.9166 - val_loss: 0.2703 - val_accuracy: 0.9024\nEpoch 10/20\n60000/60000 [==============================] - 19s 309us/sample - loss: 0.2107 - accuracy: 0.9200 - val_loss: 0.2642 - val_accuracy: 0.9085\nEpoch 11/20\n60000/60000 [==============================] - 19s 322us/sample - loss: 0.2042 - accuracy: 0.9234 - val_loss: 0.2587 - val_accuracy: 0.9075\nEpoch 12/20\n60000/60000 [==============================] - 20s 339us/sample - loss: 0.1966 - accuracy: 0.9256 - val_loss: 0.2495 - val_accuracy: 0.9088\nEpoch 13/20\n60000/60000 [==============================] - 22s 359us/sample - loss: 0.1906 - accuracy: 0.9273 - val_loss: 0.2659 - val_accuracy: 0.9036\nEpoch 14/20\n60000/60000 [==============================] - 22s 370us/sample - loss: 0.1848 - accuracy: 0.9295 - val_loss: 0.2622 - val_accuracy: 0.9090\nEpoch 15/20\n60000/60000 [==============================] - 22s 361us/sample - loss: 0.1788 - accuracy: 0.9325 - val_loss: 0.2556 - val_accuracy: 0.9099\nEpoch 16/20\n60000/60000 [==============================] - 23s 385us/sample - loss: 0.1731 - accuracy: 0.9330 - val_loss: 0.2636 - val_accuracy: 0.9080\nEpoch 17/20\n60000/60000 [==============================] - 21s 351us/sample - loss: 0.1679 - accuracy: 0.9366 - val_loss: 0.2712 - val_accuracy: 0.9049\nEpoch 18/20\n60000/60000 [==============================] - 21s 356us/sample - loss: 0.1653 - accuracy: 0.9370 - val_loss: 0.2622 - val_accuracy: 0.9095\nEpoch 19/20\n60000/60000 [==============================] - 22s 369us/sample - loss: 0.1614 - accuracy: 0.9388 - val_loss: 0.2609 - val_accuracy: 0.9095\nEpoch 20/20\n60000/60000 [==============================] - 22s 359us/sample - loss: 0.1559 - accuracy: 0.9404 - val_loss: 0.2769 - val_accuracy: 0.9096\n"
],
[
"adam_loss = history_adam.history['loss']\nval_adam_loss = history_adam.history['val_loss']\nadam_loss",
"_____no_output_____"
],
[
"model_sgd = get_model()\nmodel_sgd.compile(optimizer='sgd',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_sgd = model_sgd.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 20s 332us/sample - loss: 0.8707 - accuracy: 0.6796 - val_loss: 0.6169 - val_accuracy: 0.7731\nEpoch 2/20\n60000/60000 [==============================] - 20s 328us/sample - loss: 0.5633 - accuracy: 0.7933 - val_loss: 0.5063 - val_accuracy: 0.8178\nEpoch 3/20\n60000/60000 [==============================] - 18s 296us/sample - loss: 0.4961 - accuracy: 0.8208 - val_loss: 0.4576 - val_accuracy: 0.8397\nEpoch 4/20\n60000/60000 [==============================] - 18s 292us/sample - loss: 0.4567 - accuracy: 0.8339 - val_loss: 0.4326 - val_accuracy: 0.8449\nEpoch 5/20\n60000/60000 [==============================] - 17s 283us/sample - loss: 0.4283 - accuracy: 0.8448 - val_loss: 0.4081 - val_accuracy: 0.8561\nEpoch 6/20\n60000/60000 [==============================] - 17s 276us/sample - loss: 0.4098 - accuracy: 0.8531 - val_loss: 0.3890 - val_accuracy: 0.8623\nEpoch 7/20\n60000/60000 [==============================] - 17s 279us/sample - loss: 0.3920 - accuracy: 0.8577 - val_loss: 0.3822 - val_accuracy: 0.8631\nEpoch 8/20\n60000/60000 [==============================] - 17s 286us/sample - loss: 0.3786 - accuracy: 0.8641 - val_loss: 0.3644 - val_accuracy: 0.8697\nEpoch 9/20\n60000/60000 [==============================] - 18s 307us/sample - loss: 0.3650 - accuracy: 0.8672 - val_loss: 0.3601 - val_accuracy: 0.8702\nEpoch 10/20\n60000/60000 [==============================] - 20s 329us/sample - loss: 0.3568 - accuracy: 0.8706 - val_loss: 0.3390 - val_accuracy: 0.8777\nEpoch 11/20\n60000/60000 [==============================] - 19s 322us/sample - loss: 0.3457 - accuracy: 0.8744 - val_loss: 0.3394 - val_accuracy: 0.8802\nEpoch 12/20\n60000/60000 [==============================] - 19s 319us/sample - loss: 0.3378 - accuracy: 0.8779 - val_loss: 0.3330 - val_accuracy: 0.8817\nEpoch 13/20\n60000/60000 [==============================] - 20s 326us/sample - loss: 0.3320 - accuracy: 0.8798 - val_loss: 0.3318 - val_accuracy: 0.8784\nEpoch 14/20\n60000/60000 [==============================] - 20s 332us/sample - loss: 0.3251 - accuracy: 0.8815 - val_loss: 0.3235 - val_accuracy: 0.8822\nEpoch 15/20\n60000/60000 [==============================] - 21s 346us/sample - loss: 0.3187 - accuracy: 0.8839 - val_loss: 0.3276 - val_accuracy: 0.8804\nEpoch 16/20\n60000/60000 [==============================] - 19s 323us/sample - loss: 0.3128 - accuracy: 0.8859 - val_loss: 0.3118 - val_accuracy: 0.8880\nEpoch 17/20\n60000/60000 [==============================] - 20s 341us/sample - loss: 0.3083 - accuracy: 0.8882 - val_loss: 0.3153 - val_accuracy: 0.8859\nEpoch 18/20\n60000/60000 [==============================] - 21s 342us/sample - loss: 0.3023 - accuracy: 0.8899 - val_loss: 0.3050 - val_accuracy: 0.8889\nEpoch 19/20\n60000/60000 [==============================] - 20s 336us/sample - loss: 0.2992 - accuracy: 0.8896 - val_loss: 0.3041 - val_accuracy: 0.8916\nEpoch 20/20\n60000/60000 [==============================] - 19s 312us/sample - loss: 0.2916 - accuracy: 0.8939 - val_loss: 0.3008 - val_accuracy: 0.8912\n"
],
[
"sgd_loss = history_sgd.history['loss']\nval_sgd_loss = history_sgd.history['val_loss']\nsgd_loss",
"_____no_output_____"
],
[
"%matplotlib inline\ndef plot_losses(loss_1,loss_2, val_loss_1, val_loss_2,label1, label2):\n from numpy import linspace\n import matplotlib.pyplot as plt\n\n # Calling the axes.prop_cycle returns an itertoools.cycle\n\n color_cycle = plt.rcParams['axes.prop_cycle']()\n\n x = linspace(1, 20, 20)\n f1 = x*(1-x) ; lab1 = label1 + ' loss'\n f2 = 0.25-f1 ; lab2 = label2 + ' momentum loss' \n f3 = x*x*(1-x) ; lab3 = label1 + ' val loss'\n f4 = 0.25-f3 ; lab4 = label2 + ' val. loss'\n\n # let's plot our curves (note the use of color cycle, otherwise the curves colors in\n # the two subplots will be repeated and a single legend becomes difficult to read)\n fig, (a13, a24) = plt.subplots(2)\n\n a13.plot(x, loss_1, label=lab1, **next(color_cycle))\n a13.plot(x, loss_2, label=lab2, **next(color_cycle))\n a13.set_title(label1)\n a24.plot(x, val_loss_1, label=lab3, **next(color_cycle))\n a24.plot(x, val_loss_2, label=lab4, **next(color_cycle))\n a24.set_title(label2)\n # so far so good, now the trick\n\n lines_labels = [ax.get_legend_handles_labels() for ax in fig.axes]\n lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]\n\n #fig.legend(lines, labels)\n fig.legend(\n lines,\n labels,\n bbox_to_anchor=(1.35, 0.875),\n bbox_transform=plt.gcf().transFigure,\n )\n fig.suptitle(label1 + ' vs ' + label2 + ': Fashion MNIST',fontsize=15, y=1.03)\n fig. tight_layout(pad=0.15)\n fig.set_size_inches(7,6, forward=True)\n plt.show()\nplot_losses(sgd_loss,adam_loss, val_sgd_loss, val_adam_loss,'SGD', 'Adam')",
"_____no_output_____"
],
[
"model_adadelta = get_model()\nmodel_adadelta.compile(optimizer='adadelta',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_adadelta = model_adadelta.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 13s 222us/sample - loss: 2.3052 - accuracy: 0.1419 - val_loss: 2.2653 - val_accuracy: 0.2812\nEpoch 2/20\n60000/60000 [==============================] - 14s 228us/sample - loss: 2.2448 - accuracy: 0.2411 - val_loss: 2.2047 - val_accuracy: 0.3799\nEpoch 3/20\n60000/60000 [==============================] - 12s 200us/sample - loss: 2.1845 - accuracy: 0.3169 - val_loss: 2.1289 - val_accuracy: 0.4808\nEpoch 4/20\n60000/60000 [==============================] - 13s 218us/sample - loss: 2.1046 - accuracy: 0.3907 - val_loss: 2.0281 - val_accuracy: 0.5044\nEpoch 5/20\n60000/60000 [==============================] - 17s 286us/sample - loss: 1.9989 - accuracy: 0.4429 - val_loss: 1.8999 - val_accuracy: 0.5103\nEpoch 6/20\n60000/60000 [==============================] - 22s 372us/sample - loss: 1.8693 - accuracy: 0.4695 - val_loss: 1.7511 - val_accuracy: 0.5171\nEpoch 7/20\n60000/60000 [==============================] - 22s 374us/sample - loss: 1.7247 - accuracy: 0.4888 - val_loss: 1.5953 - val_accuracy: 0.5470\nEpoch 8/20\n60000/60000 [==============================] - 18s 306us/sample - loss: 1.5856 - accuracy: 0.5094 - val_loss: 1.4535 - val_accuracy: 0.5938\nEpoch 9/20\n60000/60000 [==============================] - 16s 265us/sample - loss: 1.4654 - accuracy: 0.5265 - val_loss: 1.3367 - val_accuracy: 0.6229\nEpoch 10/20\n60000/60000 [==============================] - 17s 287us/sample - loss: 1.3644 - accuracy: 0.5458 - val_loss: 1.2431 - val_accuracy: 0.6389\nEpoch 11/20\n60000/60000 [==============================] - 17s 290us/sample - loss: 1.2874 - accuracy: 0.5631 - val_loss: 1.1694 - val_accuracy: 0.6478\nEpoch 12/20\n60000/60000 [==============================] - 17s 286us/sample - loss: 1.2265 - accuracy: 0.5761 - val_loss: 1.1104 - val_accuracy: 0.6576\nEpoch 13/20\n60000/60000 [==============================] - 16s 270us/sample - loss: 1.1763 - accuracy: 0.5898 - val_loss: 1.0631 - val_accuracy: 0.6622\nEpoch 14/20\n60000/60000 [==============================] - 14s 241us/sample - loss: 1.1349 - accuracy: 0.6009 - val_loss: 1.0237 - val_accuracy: 0.6734\nEpoch 15/20\n60000/60000 [==============================] - 16s 269us/sample - loss: 1.0981 - accuracy: 0.6107 - val_loss: 0.9915 - val_accuracy: 0.6815\nEpoch 16/20\n60000/60000 [==============================] - 19s 314us/sample - loss: 1.0685 - accuracy: 0.6225 - val_loss: 0.9638 - val_accuracy: 0.6881\nEpoch 17/20\n60000/60000 [==============================] - 17s 281us/sample - loss: 1.0437 - accuracy: 0.6270 - val_loss: 0.9401 - val_accuracy: 0.6895\nEpoch 18/20\n60000/60000 [==============================] - 16s 262us/sample - loss: 1.0159 - accuracy: 0.6369 - val_loss: 0.9195 - val_accuracy: 0.6950\nEpoch 19/20\n60000/60000 [==============================] - 18s 294us/sample - loss: 0.9960 - accuracy: 0.6432 - val_loss: 0.9003 - val_accuracy: 0.7037\nEpoch 20/20\n60000/60000 [==============================] - 17s 283us/sample - loss: 0.9820 - accuracy: 0.6485 - val_loss: 0.8839 - val_accuracy: 0.7089\n"
],
[
"model_adagrad = get_model()\nmodel_adagrad.compile(optimizer='adagrad',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_adagrad = model_adagrad.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 13s 224us/sample - loss: 0.8688 - accuracy: 0.6852 - val_loss: 0.6949 - val_accuracy: 0.7424\nEpoch 2/20\n60000/60000 [==============================] - 13s 212us/sample - loss: 0.6956 - accuracy: 0.7411 - val_loss: 0.6441 - val_accuracy: 0.7601\nEpoch 3/20\n60000/60000 [==============================] - 13s 222us/sample - loss: 0.6528 - accuracy: 0.7568 - val_loss: 0.6166 - val_accuracy: 0.7742\nEpoch 4/20\n60000/60000 [==============================] - 13s 215us/sample - loss: 0.6270 - accuracy: 0.7662 - val_loss: 0.5965 - val_accuracy: 0.7809\nEpoch 5/20\n60000/60000 [==============================] - 13s 217us/sample - loss: 0.6076 - accuracy: 0.7749 - val_loss: 0.5818 - val_accuracy: 0.7901\nEpoch 6/20\n60000/60000 [==============================] - 13s 216us/sample - loss: 0.5961 - accuracy: 0.7805 - val_loss: 0.5702 - val_accuracy: 0.7926\nEpoch 7/20\n60000/60000 [==============================] - 12s 202us/sample - loss: 0.5792 - accuracy: 0.7864 - val_loss: 0.5608 - val_accuracy: 0.7926\nEpoch 8/20\n60000/60000 [==============================] - 11s 190us/sample - loss: 0.5711 - accuracy: 0.7886 - val_loss: 0.5518 - val_accuracy: 0.7985\nEpoch 9/20\n60000/60000 [==============================] - 14s 226us/sample - loss: 0.5635 - accuracy: 0.7919 - val_loss: 0.5447 - val_accuracy: 0.8040\nEpoch 10/20\n60000/60000 [==============================] - 15s 253us/sample - loss: 0.5559 - accuracy: 0.7956 - val_loss: 0.5368 - val_accuracy: 0.8056\nEpoch 11/20\n60000/60000 [==============================] - 15s 256us/sample - loss: 0.5479 - accuracy: 0.7992 - val_loss: 0.5320 - val_accuracy: 0.8081\nEpoch 12/20\n60000/60000 [==============================] - 15s 246us/sample - loss: 0.5429 - accuracy: 0.8003 - val_loss: 0.5254 - val_accuracy: 0.8103\nEpoch 13/20\n60000/60000 [==============================] - 14s 232us/sample - loss: 0.5351 - accuracy: 0.8039 - val_loss: 0.5200 - val_accuracy: 0.8143\nEpoch 14/20\n60000/60000 [==============================] - 13s 218us/sample - loss: 0.5285 - accuracy: 0.8077 - val_loss: 0.5157 - val_accuracy: 0.8160\nEpoch 15/20\n60000/60000 [==============================] - 13s 212us/sample - loss: 0.5246 - accuracy: 0.8084 - val_loss: 0.5120 - val_accuracy: 0.8173\nEpoch 16/20\n60000/60000 [==============================] - 12s 206us/sample - loss: 0.5206 - accuracy: 0.8105 - val_loss: 0.5064 - val_accuracy: 0.8195\nEpoch 17/20\n60000/60000 [==============================] - 12s 202us/sample - loss: 0.5161 - accuracy: 0.8128 - val_loss: 0.5025 - val_accuracy: 0.8217\nEpoch 18/20\n60000/60000 [==============================] - 13s 209us/sample - loss: 0.5123 - accuracy: 0.8147 - val_loss: 0.4989 - val_accuracy: 0.8225\nEpoch 19/20\n60000/60000 [==============================] - 15s 242us/sample - loss: 0.5080 - accuracy: 0.8161 - val_loss: 0.4949 - val_accuracy: 0.8250\nEpoch 20/20\n60000/60000 [==============================] - 15s 258us/sample - loss: 0.5046 - accuracy: 0.8170 - val_loss: 0.4922 - val_accuracy: 0.8256\n"
],
[
"model_adamax = get_model()\nmodel_adamax.compile(optimizer='adamax',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_adamax = model_adamax.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 14s 240us/sample - loss: 0.6259 - accuracy: 0.7736 - val_loss: 0.4787 - val_accuracy: 0.8308\nEpoch 2/20\n60000/60000 [==============================] - 13s 221us/sample - loss: 0.4381 - accuracy: 0.8421 - val_loss: 0.4060 - val_accuracy: 0.8518\nEpoch 3/20\n60000/60000 [==============================] - 12s 206us/sample - loss: 0.3890 - accuracy: 0.8586 - val_loss: 0.3803 - val_accuracy: 0.8634\nEpoch 4/20\n60000/60000 [==============================] - 12s 206us/sample - loss: 0.3605 - accuracy: 0.8695 - val_loss: 0.3456 - val_accuracy: 0.8764\nEpoch 5/20\n60000/60000 [==============================] - 12s 205us/sample - loss: 0.3365 - accuracy: 0.8774 - val_loss: 0.3284 - val_accuracy: 0.8810\nEpoch 6/20\n60000/60000 [==============================] - 12s 200us/sample - loss: 0.3186 - accuracy: 0.8848 - val_loss: 0.3207 - val_accuracy: 0.8830\nEpoch 7/20\n60000/60000 [==============================] - 12s 207us/sample - loss: 0.3048 - accuracy: 0.8889 - val_loss: 0.3127 - val_accuracy: 0.8878\nEpoch 8/20\n60000/60000 [==============================] - 12s 195us/sample - loss: 0.2909 - accuracy: 0.8948 - val_loss: 0.2966 - val_accuracy: 0.8921\nEpoch 9/20\n60000/60000 [==============================] - 12s 200us/sample - loss: 0.2804 - accuracy: 0.8990 - val_loss: 0.2973 - val_accuracy: 0.8903\nEpoch 10/20\n60000/60000 [==============================] - 12s 205us/sample - loss: 0.2711 - accuracy: 0.9009 - val_loss: 0.2827 - val_accuracy: 0.8966\nEpoch 11/20\n60000/60000 [==============================] - 12s 199us/sample - loss: 0.2634 - accuracy: 0.9038 - val_loss: 0.2838 - val_accuracy: 0.8956\nEpoch 12/20\n60000/60000 [==============================] - 14s 225us/sample - loss: 0.2565 - accuracy: 0.9062 - val_loss: 0.2703 - val_accuracy: 0.9009\nEpoch 13/20\n60000/60000 [==============================] - 15s 247us/sample - loss: 0.2487 - accuracy: 0.9096 - val_loss: 0.2623 - val_accuracy: 0.9045\nEpoch 14/20\n60000/60000 [==============================] - 14s 226us/sample - loss: 0.2430 - accuracy: 0.9101 - val_loss: 0.2595 - val_accuracy: 0.9058\nEpoch 15/20\n60000/60000 [==============================] - 13s 220us/sample - loss: 0.2385 - accuracy: 0.9128 - val_loss: 0.2549 - val_accuracy: 0.9068\nEpoch 16/20\n60000/60000 [==============================] - 14s 229us/sample - loss: 0.2309 - accuracy: 0.9148 - val_loss: 0.2639 - val_accuracy: 0.9031\nEpoch 17/20\n60000/60000 [==============================] - 13s 222us/sample - loss: 0.2294 - accuracy: 0.9147 - val_loss: 0.2550 - val_accuracy: 0.9096\nEpoch 18/20\n60000/60000 [==============================] - 13s 220us/sample - loss: 0.2218 - accuracy: 0.9190 - val_loss: 0.2486 - val_accuracy: 0.9100\nEpoch 19/20\n60000/60000 [==============================] - 13s 224us/sample - loss: 0.2154 - accuracy: 0.9210 - val_loss: 0.2523 - val_accuracy: 0.9096\nEpoch 20/20\n60000/60000 [==============================] - 14s 226us/sample - loss: 0.2141 - accuracy: 0.9218 - val_loss: 0.2458 - val_accuracy: 0.9106\n"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.plot(range(1, 21), sgd_loss, label='loss')\nplt.plot(range(1, 21), val_sgd_loss, label='Validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.title('Loss : Fashion MNIST - SGD')\nplt.legend(loc='right')\nplt.show()",
"_____no_output_____"
],
[
"model_sgd_m = get_model()\noptimizer_sgd_m = tf.keras.optimizers.SGD(momentum=0.9)\nmodel_sgd_m.compile(optimizer=optimizer_sgd_m ,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_sgd_m = model_sgd_m.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 16s 270us/sample - loss: 0.5695 - accuracy: 0.7921 - val_loss: 0.4011 - val_accuracy: 0.8551\nEpoch 2/20\n60000/60000 [==============================] - 15s 252us/sample - loss: 0.3767 - accuracy: 0.8620 - val_loss: 0.3549 - val_accuracy: 0.8710\nEpoch 3/20\n60000/60000 [==============================] - 15s 243us/sample - loss: 0.3352 - accuracy: 0.8781 - val_loss: 0.3177 - val_accuracy: 0.8835\nEpoch 4/20\n60000/60000 [==============================] - 14s 237us/sample - loss: 0.3093 - accuracy: 0.8854 - val_loss: 0.3076 - val_accuracy: 0.8886\nEpoch 5/20\n60000/60000 [==============================] - 15s 255us/sample - loss: 0.2942 - accuracy: 0.8909 - val_loss: 0.3022 - val_accuracy: 0.8899\nEpoch 6/20\n60000/60000 [==============================] - 16s 272us/sample - loss: 0.2764 - accuracy: 0.8964 - val_loss: 0.2986 - val_accuracy: 0.8898\nEpoch 7/20\n60000/60000 [==============================] - 20s 328us/sample - loss: 0.2658 - accuracy: 0.9002 - val_loss: 0.2849 - val_accuracy: 0.8980\nEpoch 8/20\n60000/60000 [==============================] - 17s 284us/sample - loss: 0.2567 - accuracy: 0.9035 - val_loss: 0.2681 - val_accuracy: 0.9049\nEpoch 9/20\n60000/60000 [==============================] - 16s 264us/sample - loss: 0.2478 - accuracy: 0.9067 - val_loss: 0.2717 - val_accuracy: 0.9010\nEpoch 10/20\n60000/60000 [==============================] - 19s 325us/sample - loss: 0.2388 - accuracy: 0.9092 - val_loss: 0.2743 - val_accuracy: 0.8983\nEpoch 11/20\n60000/60000 [==============================] - 17s 282us/sample - loss: 0.2308 - accuracy: 0.9127 - val_loss: 0.2639 - val_accuracy: 0.9048\nEpoch 12/20\n60000/60000 [==============================] - 17s 285us/sample - loss: 0.2276 - accuracy: 0.9131 - val_loss: 0.2745 - val_accuracy: 0.9004\nEpoch 13/20\n60000/60000 [==============================] - 18s 292us/sample - loss: 0.2200 - accuracy: 0.9167 - val_loss: 0.2726 - val_accuracy: 0.9032\nEpoch 14/20\n60000/60000 [==============================] - 18s 296us/sample - loss: 0.2129 - accuracy: 0.9190 - val_loss: 0.2639 - val_accuracy: 0.9057\nEpoch 15/20\n60000/60000 [==============================] - 17s 275us/sample - loss: 0.2060 - accuracy: 0.9211 - val_loss: 0.2667 - val_accuracy: 0.9041\nEpoch 16/20\n60000/60000 [==============================] - 16s 269us/sample - loss: 0.2053 - accuracy: 0.9227 - val_loss: 0.2724 - val_accuracy: 0.9063\nEpoch 17/20\n60000/60000 [==============================] - 17s 285us/sample - loss: 0.1991 - accuracy: 0.9245 - val_loss: 0.2696 - val_accuracy: 0.9062\nEpoch 18/20\n60000/60000 [==============================] - 22s 367us/sample - loss: 0.1912 - accuracy: 0.9271 - val_loss: 0.2875 - val_accuracy: 0.9018\nEpoch 19/20\n60000/60000 [==============================] - 20s 336us/sample - loss: 0.1895 - accuracy: 0.9274 - val_loss: 0.2791 - val_accuracy: 0.9040\nEpoch 20/20\n60000/60000 [==============================] - 17s 289us/sample - loss: 0.1851 - accuracy: 0.9278 - val_loss: 0.2822 - val_accuracy: 0.9021\n"
],
[
"model_sgd_m_n = get_model()\noptimizer_sgd_m_nestrov= tf.keras.optimizers.SGD(momentum=0.9,nesterov=True)\nmodel_sgd_m_n.compile(optimizer=optimizer_sgd_m_nestrov ,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nEPOCHS = 20\n\nhistory_sgd_m_nestrov = model_sgd_m_n.fit(X_train, y_train, epochs=EPOCHS, \n validation_data=(X_test, y_test))",
"Train on 60000 samples, validate on 10000 samples\nEpoch 1/20\n60000/60000 [==============================] - 15s 257us/sample - loss: 0.5590 - accuracy: 0.7921 - val_loss: 0.3870 - val_accuracy: 0.8599\nEpoch 2/20\n60000/60000 [==============================] - 16s 260us/sample - loss: 0.3689 - accuracy: 0.8652 - val_loss: 0.3350 - val_accuracy: 0.8792\nEpoch 3/20\n60000/60000 [==============================] - 14s 237us/sample - loss: 0.3295 - accuracy: 0.8788 - val_loss: 0.3145 - val_accuracy: 0.8848\nEpoch 4/20\n60000/60000 [==============================] - 14s 239us/sample - loss: 0.3017 - accuracy: 0.8884 - val_loss: 0.3038 - val_accuracy: 0.8869\nEpoch 5/20\n60000/60000 [==============================] - 14s 241us/sample - loss: 0.2848 - accuracy: 0.8953 - val_loss: 0.2911 - val_accuracy: 0.8932\nEpoch 6/20\n60000/60000 [==============================] - 17s 286us/sample - loss: 0.2727 - accuracy: 0.8982 - val_loss: 0.2891 - val_accuracy: 0.8943\nEpoch 7/20\n60000/60000 [==============================] - 21s 345us/sample - loss: 0.2609 - accuracy: 0.9016 - val_loss: 0.2783 - val_accuracy: 0.8965\nEpoch 8/20\n60000/60000 [==============================] - 20s 326us/sample - loss: 0.2512 - accuracy: 0.9060 - val_loss: 0.2759 - val_accuracy: 0.8991\nEpoch 9/20\n60000/60000 [==============================] - 17s 282us/sample - loss: 0.2421 - accuracy: 0.9082 - val_loss: 0.2917 - val_accuracy: 0.8932\nEpoch 10/20\n60000/60000 [==============================] - 17s 280us/sample - loss: 0.2364 - accuracy: 0.9103 - val_loss: 0.2577 - val_accuracy: 0.9042\nEpoch 11/20\n60000/60000 [==============================] - 14s 240us/sample - loss: 0.2271 - accuracy: 0.9147 - val_loss: 0.2669 - val_accuracy: 0.9025\nEpoch 12/20\n60000/60000 [==============================] - 13s 224us/sample - loss: 0.2213 - accuracy: 0.9148 - val_loss: 0.2633 - val_accuracy: 0.9058\nEpoch 13/20\n60000/60000 [==============================] - 13s 217us/sample - loss: 0.2141 - accuracy: 0.9187 - val_loss: 0.2593 - val_accuracy: 0.9057\nEpoch 14/20\n60000/60000 [==============================] - 13s 213us/sample - loss: 0.2127 - accuracy: 0.9191 - val_loss: 0.2780 - val_accuracy: 0.8997\nEpoch 15/20\n60000/60000 [==============================] - 14s 233us/sample - loss: 0.2048 - accuracy: 0.9220 - val_loss: 0.2638 - val_accuracy: 0.9031\nEpoch 16/20\n60000/60000 [==============================] - 14s 226us/sample - loss: 0.1985 - accuracy: 0.9239 - val_loss: 0.2624 - val_accuracy: 0.9067\nEpoch 17/20\n60000/60000 [==============================] - 12s 204us/sample - loss: 0.1965 - accuracy: 0.9260 - val_loss: 0.2629 - val_accuracy: 0.9078\nEpoch 18/20\n60000/60000 [==============================] - 14s 230us/sample - loss: 0.1930 - accuracy: 0.9262 - val_loss: 0.2744 - val_accuracy: 0.9031\nEpoch 19/20\n60000/60000 [==============================] - 13s 216us/sample - loss: 0.1891 - accuracy: 0.9273 - val_loss: 0.2773 - val_accuracy: 0.9009\nEpoch 20/20\n60000/60000 [==============================] - 13s 218us/sample - loss: 0.1848 - accuracy: 0.9293 - val_loss: 0.2611 - val_accuracy: 0.9089\n"
],
[
"sgd_m_n_loss = history_sgd_m_nestrov.history['loss']\nval_sgd_m_n_loss = history_sgd_m_nestrov.history['val_loss']\nsgd_m_loss = history_sgd_m.history['loss']\nval_sgd_m_loss = history_sgd_m.history['val_loss']\n#sgd_m_loss",
"_____no_output_____"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.plot(range(1, 21), sgd_m_loss, label='SGD Momentum loss')\nplt.plot(range(1, 21), sgd_loss, label='SGD loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.title('Loss : Fashion MNIST')\nplt.legend(loc='right')\nplt.show()",
"_____no_output_____"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.plot(range(1, 21), sgd_m_loss, label='SGD Momentum loss')\n#plt.plot(range(1, 21), sgd_loss, label='SGD loss')\nplt.plot(range(1, 21), sgd_m_n_loss, label='SGD Momentum Nestrov loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.title('Loss : Fashion MNIST')\nplt.legend(loc='right')\nplt.show()",
"_____no_output_____"
],
[
"sgd_m_loss",
"_____no_output_____"
],
[
"plot_losses(sgd_loss,sgd_m_loss, val_sgd_loss, val_sgd_m_loss,'SGD', 'SGD momentum ')",
"_____no_output_____"
],
[
"from numpy import linspace\nimport matplotlib.pyplot as plt\n\n# Calling the axes.prop_cycle returns an itertoools.cycle\n\ncolor_cycle = plt.rcParams['axes.prop_cycle']()\n\nx = linspace(1, 20, 20)\nf1 = x*(1-x) ; lab1 = 'sgd momentus loss'\nf2 = 0.25-f1 ; lab2 = 'sgd momentum nestrov loss' \nf3 = x*x*(1-x) ; lab3 = 'sgd momentum val. loss'\nf4 = 0.25-f3 ; lab4 = 'sgd momentum nestrov val. loss'\n\n# let's plot our curves (note the use of color cycle, otherwise the curves colors in\n# the two subplots will be repeated and a single legend becomes difficult to read)\nfig, (a13, a24) = plt.subplots(2)\n\na13.plot(x, sgd_m_loss, label=lab1, **next(color_cycle))\na13.plot(x, sgd_m_n_loss, label=lab2, **next(color_cycle))\na13.set_title('Loss')\na24.plot(x, val_sgd_m_loss, label=lab3, **next(color_cycle))\na24.plot(x, val_sgd_m_n_loss, label=lab4, **next(color_cycle))\na24.set_title('Validation Loss')\n# so far so good, now the trick\n\nlines_labels = [ax.get_legend_handles_labels() for ax in fig.axes]\nlines, labels = [sum(lol, []) for lol in zip(*lines_labels)]\n\n#fig.legend(lines, labels)\nfig.legend(\n lines,\n labels,\n bbox_to_anchor=(1.45, 0.875),\n bbox_transform=plt.gcf().transFigure,\n)\nfig.suptitle('SGD Momentum vs SGD Momentum Nestrov: Fashion MNIST',fontsize=15, y=1.03)\nfig. tight_layout(pad=0.15)\nfig.set_size_inches(7,6, forward=True)\nplt.show()",
"_____no_output_____"
],
[
"adam_loss = history_adam.history['loss']\nval_adam_loss = history_adam.history['val_loss']",
"_____no_output_____"
],
[
"plot_losses(adam_loss,adagrad_loss, val_adam_loss, val_adagrad_loss,'Adam', 'Adagrad')",
"_____no_output_____"
],
[
"adamax_loss = history_adamax.history['loss']\nval_adamax_loss = history_adamax.history['val_loss']\nadamax_loss",
"_____no_output_____"
],
[
"adadelta_loss = history_adadelta.history['loss']\nval_adadelta_loss = history_adadelta.history['val_loss']\nadadelta_loss",
"_____no_output_____"
],
[
"adagrad_loss = history_adagrad.history['loss']\nval_adagrad_loss = history_adagrad.history['val_loss']\nadagrad_loss",
"_____no_output_____"
],
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\n\nplt.plot(range(1, 6), adam_loss, label='adam loss')\nplt.plot(range(1, 6), adadelta_loss,label='adadelta loss')\nplt.plot(range(1, 6), adamax_loss,label='adamax loss')\nplt.plot(range(1, 6), adagrad_loss,label='adagrad loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend(loc='right')\nplt.show()",
"_____no_output_____"
],
[
"plot_losses(adam_loss,adadelta_loss, val_adam_loss, val_adadelta_loss,'Adam', 'Adadelta')",
"_____no_output_____"
],
[
"plot_losses(adam_loss,adamax_loss, val_adam_loss, val_adamax_loss,'Adam', 'Adamax')",
"_____no_output_____"
],
[
"plot_losses(adam_loss,adagrad_loss, val_adam_loss, val_adagrad_loss,'Adam', 'Adagrad')",
"_____no_output_____"
],
[
"val_adamax_loss",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nfig,a = plt.subplots(2,2)\nimport numpy as np\nx = range(1, 21)\nfig.tight_layout(pad=3.0)\n\nplt.subplot(121)\nplt.plot(x, adam_loss, label='adam loss')\nplt.plot(x, adadelta_loss,label='adadelta loss')\nplt.plot(x, adamax_loss,label='adamax loss')\nplt.plot(x, adagrad_loss,label='adagrad loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend(loc='right')\n\nplt.subplot(122)\nplt.xlabel('Epochs')\nplt.ylabel('ValidationLoss')\nplt.plot(x, val_adam_loss, label='adam loss')\nplt.plot(x, val_adadelta_loss,label='adadelta loss')\nplt.plot(x, val_adamax_loss,label='adamax loss')\nplt.plot(x, val_adagrad_loss,label='adagrad loss')\nplt.legend()\n#plt.rcParams[\"figure.figsize\"] = (2,1)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e747dab789bd304928d20800fbfde3fbbef3e76e | 157,431 | ipynb | Jupyter Notebook | 2018-2019/assignment 3 (CNN)/ConvNet_CIFAR10_PyTorch.ipynb | Tudor67/Neural-Networks-Assignments | 7376e9d3b0059df2f2b21d56787c47d3c1ba6746 | [
"MIT"
] | 1 | 2019-04-07T03:50:57.000Z | 2019-04-07T03:50:57.000Z | 2018-2019/assignment 3 (CNN)/ConvNet_CIFAR10_PyTorch.ipynb | Tudor67/Neural-Networks-Assignments | 7376e9d3b0059df2f2b21d56787c47d3c1ba6746 | [
"MIT"
] | 5 | 2018-10-16T22:46:33.000Z | 2019-02-04T20:11:41.000Z | 2018-2019/assignment 3 (CNN)/ConvNet_CIFAR10_PyTorch.ipynb | Tudor67/Neural-Networks-Assignments | 7376e9d3b0059df2f2b21d56787c47d3c1ba6746 | [
"MIT"
] | 1 | 2019-04-07T03:50:42.000Z | 2019-04-07T03:50:42.000Z | 205.792157 | 55,852 | 0.901887 | [
[
[
"# ConvNet for image classification (CIFAR-10)",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F \nimport torch.optim as optim\nimport torchvision.datasets as dset\nimport torchvision.transforms as T\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import sampler\n\nfrom pprint import pprint\n\n%matplotlib inline",
"_____no_output_____"
],
[
"import sys\n\nprint('sys version: ', sys.version)\nprint('torch version: ', torch.version.__version__)",
"sys version: 3.6.7 |Anaconda, Inc.| (default, Oct 23 2018, 19:16:44) \n[GCC 7.3.0]\ntorch version: 0.4.1\n"
],
[
"INIT_METHOD = 'glorot_uniform'\nEPOCHS = 15\nPRINT_EVERY = 100\n\nNUM_TRAIN = 49000\nBATCH_SIZE = 64\nCIFAR10_PATH = './cifar10_dataset'\nDOWNLOAD = True",
"_____no_output_____"
],
[
"USE_GPU = True\n\ndtype = torch.float32\n\nif USE_GPU and torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n\nprint('using device:', device)",
"using device: cuda\n"
]
],
[
[
"## Load CIFAR-10",
"_____no_output_____"
]
],
[
[
"# Set up a transform to preprocess the data by subtracting the mean RGB value \n# and dividing by the standard deviation of each RGB value;\ntransform = T.Compose([\n T.ToTensor(),\n T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n# Set up a Dataset object for each split (train / val / test); Datasets load\n# training examples one at a time, so we wrap each Dataset in a DataLoader which\n# iterates through the Dataset and forms minibatches. \n# The CIFAR-10 training set is divided into train and val sets by passing a Sampler object\n# to the DataLoader telling how it should sample from the underlying Dataset.\ncifar10_train = dset.CIFAR10(CIFAR10_PATH, train=True, download=DOWNLOAD,\n transform=transform)\nloader_train = DataLoader(cifar10_train, batch_size=BATCH_SIZE, \n sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))\n\ncifar10_val = dset.CIFAR10(CIFAR10_PATH, train=True, download=DOWNLOAD,\n transform=transform)\nloader_val = DataLoader(cifar10_val, batch_size=BATCH_SIZE, \n sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))\n\ncifar10_test = dset.CIFAR10(CIFAR10_PATH, train=False, download=DOWNLOAD, \n transform=transform)\nloader_test = DataLoader(cifar10_test, batch_size=BATCH_SIZE)\n\nX_train = cifar10_train.train_data[:NUM_TRAIN]\ny_train = cifar10_train.train_labels[:NUM_TRAIN]\nX_val = cifar10_train.train_data[NUM_TRAIN:50000]\ny_val = cifar10_train.train_labels[NUM_TRAIN:50000]\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] \n\nprint(f'X_train: {X_train.shape}, [{X_train.min()}, {X_train.max()}]')\nprint(f'X_val: {X_val.shape}, [{X_val.min()}, {X_val.max()}]')\nprint(f'classes: {classes}')",
"Files already downloaded and verified\nFiles already downloaded and verified\nFiles already downloaded and verified\nX_train: (49000, 32, 32, 3), [0, 255]\nX_val: (1000, 32, 32, 3), [0, 255]\nclasses: ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n"
]
],
[
[
"## Look at some images",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(8, 4))\nbatch = (X_train[0:10])\n\nfor i in range(10):\n plt.subplot(2, 5, i + 1)\n plt.imshow(batch[i].astype('int32'))\n plt.axis('off')\n plt.title(classes[y_train[i]])\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"## ConvNet",
"_____no_output_____"
]
],
[
[
"in_channel = 3\nchannel_1 = 64\nchannel_2 = 16\nnum_classes = 10\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.shape[0], -1)\n \ndef weights_init(m):\n if type(m) in [nn.Conv2d, nn.Linear]:\n nn.init.zeros_(m.bias.data)\n if INIT_METHOD == 'he_normal':\n nn.init.kaiming_normal_(m.weight.data)\n elif INIT_METHOD == 'glorot_normal':\n nn.init.xavier_normal_(m.weight.data)\n elif INIT_METHOD == 'he_uniform':\n nn.init.kaiming_uniform_(m.weight.data)\n elif INIT_METHOD == 'glorot_uniform':\n nn.init.xavier_uniform_(m.weight.data)\n\nmodel = nn.Sequential(\n nn.Conv2d(in_channel, channel_1, (5, 5)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(channel_1, channel_2, (5, 5)),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2),\n Flatten(),\n nn.Linear(channel_2 * 5 * 5, 200),\n nn.ReLU(),\n nn.Linear(200, num_classes)\n)\n\nmodel.apply(weights_init)\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3)",
"_____no_output_____"
],
[
"def check_accuracy(loader, model, dataset=None, print_acc=True):\n if loader.dataset.train and print_acc:\n if dataset is None:\n print('Checking accuracy on validation set')\n else:\n print('Checking accuracy on {} set'.format(dataset))\n elif print_acc:\n print('Checking accuracy on test set') \n num_correct = 0\n num_samples = 0\n model.eval() # set model to evaluation mode\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device, dtype=dtype) # move to device (GPU)\n y = y.to(device=device, dtype=torch.long)\n scores = model(x)\n _, preds = scores.max(1)\n num_correct += (preds == y).sum()\n num_samples += preds.size(0)\n acc = float(num_correct) / num_samples\n if print_acc:\n print('Got %d / %d correct (%.2f%%)\\n' % (num_correct, num_samples, 100 * acc))\n return 100 * acc\n\n\ndef train(model, optimizer, epochs=5, print_loss=True):\n val_acc = []\n \n model = model.to(device=device) # move the model parameters to CPU/GPU\n \n for epoch in range(epochs):\n print(f'epoch: {epoch}')\n if (epoch % 5 == 0) or (epoch == epochs - 1):\n weights[epoch] = []\n for i in range(len(model[0].weight)):\n weights[epoch].append(model[0].weight[i].clone())\n \n for t, (x, y) in enumerate(loader_train):\n model.train() # put model to training mode\n x = x.to(device=device, dtype=dtype)\n y = y.to(device=device, dtype=torch.long)\n\n scores = model(x)\n loss = F.cross_entropy(scores, y)\n\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n if t % PRINT_EVERY == 0:\n if print_loss:\n print('Iteration %d, loss = %.4f' % (t, loss.item()))\n \n val_acc.append(check_accuracy(loader_val, model, print_acc=False))\n \n return val_acc",
"_____no_output_____"
]
],
[
[
"## Training",
"_____no_output_____"
]
],
[
[
"weights = {}\nval_acc = train(model, optimizer, epochs=EPOCHS)",
"epoch: 0\nIteration 0, loss = 2.3633\nIteration 100, loss = 1.8332\nIteration 200, loss = 1.4785\nIteration 300, loss = 1.2993\nIteration 400, loss = 1.1881\nIteration 500, loss = 1.6042\nIteration 600, loss = 1.1951\nIteration 700, loss = 1.0206\nepoch: 1\nIteration 0, loss = 1.3337\nIteration 100, loss = 1.4151\nIteration 200, loss = 1.1598\nIteration 300, loss = 1.2845\nIteration 400, loss = 1.5291\nIteration 500, loss = 1.2996\nIteration 600, loss = 1.0076\nIteration 700, loss = 0.8752\nepoch: 2\nIteration 0, loss = 1.0358\nIteration 100, loss = 1.0046\nIteration 200, loss = 1.2985\nIteration 300, loss = 0.8730\nIteration 400, loss = 0.8719\nIteration 500, loss = 1.1180\nIteration 600, loss = 0.9607\nIteration 700, loss = 0.9151\nepoch: 3\nIteration 0, loss = 0.9216\nIteration 100, loss = 0.9258\nIteration 200, loss = 0.9414\nIteration 300, loss = 0.8593\nIteration 400, loss = 0.9898\nIteration 500, loss = 0.9656\nIteration 600, loss = 1.0565\nIteration 700, loss = 0.9040\nepoch: 4\nIteration 0, loss = 0.7897\nIteration 100, loss = 0.9136\nIteration 200, loss = 0.7058\nIteration 300, loss = 0.9487\nIteration 400, loss = 0.7267\nIteration 500, loss = 0.7873\nIteration 600, loss = 0.9767\nIteration 700, loss = 1.0663\nepoch: 5\nIteration 0, loss = 0.8297\nIteration 100, loss = 0.6663\nIteration 200, loss = 0.7780\nIteration 300, loss = 0.9021\nIteration 400, loss = 0.9554\nIteration 500, loss = 0.7058\nIteration 600, loss = 0.7828\nIteration 700, loss = 0.8565\nepoch: 6\nIteration 0, loss = 0.4974\nIteration 100, loss = 0.7211\nIteration 200, loss = 0.6605\nIteration 300, loss = 0.7669\nIteration 400, loss = 0.7968\nIteration 500, loss = 0.8645\nIteration 600, loss = 0.7240\nIteration 700, loss = 0.7558\nepoch: 7\nIteration 0, loss = 0.6371\nIteration 100, loss = 0.7322\nIteration 200, loss = 0.7015\nIteration 300, loss = 0.9116\nIteration 400, loss = 0.5171\nIteration 500, loss = 0.7992\nIteration 600, loss = 0.6726\nIteration 700, loss = 0.7831\nepoch: 8\nIteration 0, loss = 0.6410\nIteration 100, loss = 0.6810\nIteration 200, loss = 0.6612\nIteration 300, loss = 0.9395\nIteration 400, loss = 0.6839\nIteration 500, loss = 0.6748\nIteration 600, loss = 0.5819\nIteration 700, loss = 0.9866\nepoch: 9\nIteration 0, loss = 0.5463\nIteration 100, loss = 0.7197\nIteration 200, loss = 0.6587\nIteration 300, loss = 0.8262\nIteration 400, loss = 0.9073\nIteration 500, loss = 0.8096\nIteration 600, loss = 0.4648\nIteration 700, loss = 0.6061\nepoch: 10\nIteration 0, loss = 0.6257\nIteration 100, loss = 0.4243\nIteration 200, loss = 0.5165\nIteration 300, loss = 0.7424\nIteration 400, loss = 0.8395\nIteration 500, loss = 0.8232\nIteration 600, loss = 0.6080\nIteration 700, loss = 0.7729\nepoch: 11\nIteration 0, loss = 0.5576\nIteration 100, loss = 0.4973\nIteration 200, loss = 0.4891\nIteration 300, loss = 0.7981\nIteration 400, loss = 0.7118\nIteration 500, loss = 0.5588\nIteration 600, loss = 0.7777\nIteration 700, loss = 0.7822\nepoch: 12\nIteration 0, loss = 0.3723\nIteration 100, loss = 0.4775\nIteration 200, loss = 0.5016\nIteration 300, loss = 0.6277\nIteration 400, loss = 0.6096\nIteration 500, loss = 0.5268\nIteration 600, loss = 0.7011\nIteration 700, loss = 0.5325\nepoch: 13\nIteration 0, loss = 0.4878\nIteration 100, loss = 0.4992\nIteration 200, loss = 0.6628\nIteration 300, loss = 0.7183\nIteration 400, loss = 0.4775\nIteration 500, loss = 0.6681\nIteration 600, loss = 0.6459\nIteration 700, loss = 0.5431\nepoch: 14\nIteration 0, loss = 0.4747\nIteration 100, loss = 
0.3933\nIteration 200, loss = 0.6696\nIteration 300, loss = 0.6167\nIteration 400, loss = 0.5575\nIteration 500, loss = 0.4926\nIteration 600, loss = 0.4448\nIteration 700, loss = 0.4488\n"
],
[
"plt.figure()\nplt.plot(np.arange(len(val_acc)) + 1, val_acc)\nplt.xlabel('epoch')\nplt.ylabel('accuracy, %')\nplt.title('Validation accuracy')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Results",
"_____no_output_____"
]
],
[
[
"check_accuracy(loader_train, model, dataset='train')\nval_acc = check_accuracy(loader_val, model)\ntest_acc = check_accuracy(loader_test, model)",
"Checking accuracy on train set\nGot 40262 / 49000 correct (82.17%)\n\nChecking accuracy on validation set\nGot 691 / 1000 correct (69.10%)\n\nChecking accuracy on test set\nGot 6867 / 10000 correct (68.67%)\n\n"
]
],
[
[
"## Searching the weights of the model :)",
"_____no_output_____"
]
],
[
[
"print(model)",
"Sequential(\n (0): Conv2d(3, 64, kernel_size=(5, 5), stride=(1, 1))\n (1): ReLU()\n (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (3): Conv2d(64, 16, kernel_size=(5, 5), stride=(1, 1))\n (4): ReLU()\n (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (6): Flatten()\n (7): Linear(in_features=400, out_features=200, bias=True)\n (8): ReLU()\n (9): Linear(in_features=200, out_features=10, bias=True)\n)\n"
],
[
"print(model[0])\nprint(model[0].weight.shape)",
"Conv2d(3, 64, kernel_size=(5, 5), stride=(1, 1))\ntorch.Size([64, 3, 5, 5])\n"
]
],
[
[
"## Final conv1 weights/filters",
"_____no_output_____"
]
],
[
[
"print(f'PyTorch')\nprint(f'init_method: {INIT_METHOD}')\nprint(f'val_acc: {val_acc:.2f}%')\n\ndef normalize_img(x):\n x_min = x.min()\n x_max = x.max()\n x_norm = (x - x_min) / (x_max - x_min)\n return x_norm\n\nplt.figure(figsize=(22, 7))\nfor i in range(len(model[0].weight)):\n conv1_filter = model[0].weight[i].transpose(0, 1).transpose(1, 2).cpu().detach().numpy()\n plt.subplot(4, len(model[0].weight) // 4, i + 1)\n plt.imshow(normalize_img(conv1_filter))\n plt.axis('off')\n plt.title(f'w{i+1}')",
"PyTorch\ninit_method: glorot_uniform\nval_acc: 69.10%\n"
]
],
[
[
"## Conv1 weights/filter during training",
"_____no_output_____"
]
],
[
[
"imgs_per_line = 16\nk = 0\n\nfor epoch in weights.keys():\n print(f'epoch {epoch}')\n plt.figure(figsize=(18, 12))\n for i in range(imgs_per_line):\n k += 1\n plt.subplot(len(weights), imgs_per_line, k)\n w = weights[epoch][i].transpose(0, 1).transpose(1, 2).cpu().detach().numpy()\n\n plt.imshow(normalize_img(w))\n plt.axis('off')\n plt.title(f'w{i+1}')\n plt.show()",
"epoch 0\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e747e944b264dce8beddebe91df021ca2039e22c | 677,769 | ipynb | Jupyter Notebook | 01 Graph Theory/Python/notebook/1 - aberkane_tree_networkx.ipynb | Sultanow/collatz | d8a5137af508be19da371fff787c114f1b5185c3 | [
"CC-BY-4.0"
] | 2 | 2021-04-01T15:12:10.000Z | 2021-04-01T15:54:55.000Z | 01 Graph Theory/Python/notebook/1 - aberkane_tree_networkx.ipynb | Sultanow/collatz | d8a5137af508be19da371fff787c114f1b5185c3 | [
"CC-BY-4.0"
] | null | null | null | 01 Graph Theory/Python/notebook/1 - aberkane_tree_networkx.ipynb | Sultanow/collatz | d8a5137af508be19da371fff787c114f1b5185c3 | [
"CC-BY-4.0"
] | 1 | 2021-05-06T20:44:07.000Z | 2021-05-06T20:44:07.000Z | 731.931965 | 379,149 | 0.929306 | [
[
[
"import networkx as nx\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom networkx.drawing.nx_agraph import graphviz_layout\r\nfrom abc import ABC, abstractmethod\r\nimport numpy as np\r\n",
"_____no_output_____"
],
[
"class Tree:\r\n \r\n def __init__(self, root):\r\n self.root = root\r\n self.labels = []\r\n root.setTree(self)\r\n self.generate(root)\r\n\r\n self.pos = None\r\n self.edges = None\r\n self.colors = None\r\n self.weights = None\r\n self.node_colors = []\r\n\r\n @abstractmethod\r\n def generate(self, node):\r\n pass\r\n \r\n def addLabel(self, label: int):\r\n self.labels.append(label)\r\n\r\nclass Node:\r\n TEAL = \"#e6f2f2\"\r\n GOLD = \"#fcc200\"\r\n PURPLE = \"#f9e6ec\"\r\n RED = \"#FF0000\"\r\n\r\n colors = [TEAL, GOLD, PURPLE, RED]\r\n \r\n columns = []\r\n\r\n def __init__(self, label):\r\n self.tree = None\r\n self.predecessor = None\r\n self.successors = [] \r\n self.label = label\r\n #self.col = 0\r\n self.row = 0\r\n\r\n def addSuccessor(self, successor):\r\n successor.row = self.row + 1\r\n if self.tree is not None and not successor.label in self.tree.labels:\r\n successor.setTree(self.tree)\r\n successor.predecessor = self\r\n self.successors.append(successor)\r\n \r\n def setTree(self, tree: Tree):\r\n self.tree = tree\r\n self.tree.addLabel(self.label)\r\n self.tree.genColCluster()\r\n\r\n def getType(label):\r\n if (label + 1) % 3 == 0:\r\n if ((label-1)/4)%2 == 0:\r\n return 4\r\n return 1\r\n elif label % 3 == 0:\r\n return 2\r\n elif (label - 1) % 3 == 0:\r\n return 3\r\n return -1",
"_____no_output_____"
],
[
"def V(num):\r\n return 4*num+1\r\n\r\ndef S(num):\r\n return 2*num+1\r\n\r\ndef fG(num):\r\n return 2*num-1\r\n\r\ndef getType(num):\r\n if (num+1) %3 == 0:\r\n return \"A\"\r\n elif (num%3) == 0:\r\n return \"B\"\r\n elif (num-1)%3 == 0:\r\n return \"C\"\r\n\r\ndef ruleTwo(num):\r\n result = (num-1)/2\r\n r = 0\r\n k = 0\r\n\r\n while result % 2 != 0:\r\n result = (result-1)/2\r\n k+=1\r\n\r\n r = result/2\r\n\r\n if ((k) % 2 == 0 and r % 2 == 0) or ((k) % 2 != 0 and r % 2 != 0):\r\n return (True)\r\n else:\r\n return (False)\r\n\r\ndef isIncluded(arr, num):\r\n for i in arr:\r\n if i == num:\r\n return True\r\n \r\n return False\r\n",
"_____no_output_____"
],
[
"#Aberkane Tree\r\nclass AberkaneTree(Tree):\r\n def __init__(self, root: Node, height: int):\r\n self.height = height\r\n self.columns = []\r\n self.predecessor_con = []\r\n self.successor_con = []\r\n self.connection_id = 0\r\n self.movement_con = []\r\n super().__init__(root) \r\n\r\n def generate(self, node: Node):\r\n mid_in_range = True\r\n left_in_range = True\r\n right_in_range = True\r\n\r\n label = node.label\r\n h = self.height - 1\r\n b_leaf = 1\r\n for i in range(1,h+1):\r\n b_leaf = int(b_leaf * 2 + 1) \r\n\r\n if (node.label * 4 + 1) > (b_leaf):\r\n mid_in_range = False\r\n else:\r\n mid_in_range = True\r\n\r\n if (node.label * 2 + 1) > (b_leaf):\r\n right_in_range = False\r\n else:\r\n right_in_range = True\r\n\r\n if (node.label * 2 - 1) > (b_leaf):\r\n left_in_range = False\r\n else:\r\n left_in_range = True\r\n\r\n if node.label == 1: \r\n right_child = Node(node.label * 2 + 1) \r\n node.addSuccessor(right_child) \r\n self.generate(right_child) \r\n self.predecessor_con.append(1)\r\n self.successor_con.append(3) \r\n self.predecessor_con.append(1)\r\n self.successor_con.append(5) \r\n self.connection_id = self.connection_id + 2\r\n else: \r\n if mid_in_range == True:\r\n middle_child = Node(node.label * 4 + 1) \r\n if right_in_range == True:\r\n right_child = Node(node.label * 2 + 1)\r\n if left_in_range == True:\r\n left_child = Node(node.label * 2 - 1)\r\n\r\n if right_in_range == True:\r\n node.addSuccessor(right_child)\r\n if mid_in_range == True:\r\n node.addSuccessor(middle_child)\r\n if left_in_range == True:\r\n node.addSuccessor(left_child)\r\n\r\n if right_in_range == True:\r\n self.generate(right_child)\r\n self.predecessor_con.append(node.label)\r\n self.successor_con.append(right_child.label)\r\n self.connection_id = self.connection_id + 1\r\n if mid_in_range == True:\r\n self.generate(middle_child)\r\n self.predecessor_con.append(node.label)\r\n self.successor_con.append(middle_child.label)\r\n self.connection_id = self.connection_id + 1\r\n if left_in_range == True:\r\n self.generate(left_child)\r\n self.predecessor_con.append(node.label)\r\n self.successor_con.append(left_child.label)\r\n self.connection_id = self.connection_id + 1\r\n \r\n def genColCluster(self): \r\n aberkane_height = self.height-1\r\n col_arr = []\r\n big_leaf = int(1)\r\n used_nodes = []\r\n col_position = 0\r\n is_right = False\r\n is_left = False\r\n right_node = 0\r\n left_node = 0\r\n movement_con = []\r\n\r\n for i in range(1,aberkane_height+1):\r\n big_leaf = int(big_leaf * 2 + 1) \r\n\r\n for j in range(1,big_leaf+1, 2):\r\n is_part = False\r\n col_build = []\r\n\r\n for x in used_nodes:\r\n if j == x:\r\n is_part = True\r\n \r\n if is_part == False:\r\n used_nodes.append(int(j))\r\n col_build.append(int(j))\r\n\r\n right_node = int((j-1)/2)\r\n left_node = int((j+1)/2)\r\n \r\n is_right = False\r\n is_left = False\r\n\r\n for a in used_nodes: \r\n if j == 1:\r\n is_right = True\r\n is_left = True\r\n elif a == int(right_node):\r\n is_right = True\r\n elif a == int(left_node):\r\n is_left = True\r\n \r\n test_node = j*4+1\r\n while(test_node < big_leaf):\r\n test_node_is_part = False\r\n for y in used_nodes:\r\n if test_node == y:\r\n test_node_is_part = True\r\n test_node = big_leaf+1\r\n if test_node_is_part == False:\r\n col_build.append(int(test_node))\r\n used_nodes.append(int(test_node))\r\n test_node = test_node * 4 + 1\r\n\r\n col_position\r\n len_col_i = 0\r\n len_columns = len(col_arr)\r\n\r\n for i in range(0,len_columns):\r\n len_col_i = len(col_arr[i])\r\n for j 
in range(0, len_col_i):\r\n if col_arr[i][j] == left_node or col_arr[i][j] == right_node:\r\n col_position = i \r\n\r\n if is_left == True and is_right == True:\r\n col_arr.append(col_build)\r\n movement_con.append(left_node)\r\n elif is_left == True:\r\n if col_position == 0:\r\n col_arr.insert(col_position, col_build)\r\n else:\r\n col_arr.insert(col_position, col_build)\r\n \r\n movement_con.append(col_build[0])\r\n\r\n elif is_right == True:\r\n col_arr.insert(col_position+1, col_build)\r\n \r\n self.movement_con = movement_con\r\n self.columns = col_arr\r\n return\r\n \r\n\r\n def genGraphData(self, G): \r\n \r\n big_leaf = 2**(self.height)-1\r\n\r\n for i in range(3,2**(self.height),2):\r\n \r\n if(i*4+1 <= big_leaf):\r\n G.add_edge((i),(i*4+1), color='white',weight=1)\r\n if(i*2+1 <= big_leaf):\r\n G.add_edge((i),(i*2+1), color='white',weight=1) \r\n if(i*2-1 <= big_leaf):\r\n G.add_edge((i),(i*2-1), color='white',weight=1) \r\n\r\n G.add_edge((1),5, color='yellow',weight=5) #black before\r\n G.add_edge((1),3, color='yellow',weight=5) #black before\r\n \r\n used = []\r\n proven = []\r\n\r\n rTwoCon = []\r\n rTwoCon.append(1)\r\n rTwoCon.append(3)\r\n\r\n proven.append(3)\r\n proven.append(5)\r\n \r\n while len(proven) > 0:\r\n proven.sort()\r\n #print(proven)\r\n for i in proven:\r\n\r\n #i = proven[0]\r\n if isIncluded(used, i) == False:\r\n used.append(i)\r\n # RULE 1 : if x is orange V(x) becomes orange \r\n if V(i) <= big_leaf + 1 and isIncluded(used, V(i)) == False:\r\n proven.append(V(i))\r\n \r\n if V(i) == 17:\r\n G.add_edge(1, 17, color='yellow',weight=5)\r\n else:\r\n G.add_edge(i,V(i),color='yellow',weight=5) #black before\r\n\r\n\r\n # RULE 2 : if x is orange S(x) becomes orange (under the precise conditions of rule 2)\r\n r2 = ruleTwo(i)\r\n if r2: \r\n if S(i) <= big_leaf + 1 and isIncluded(used, S(i)) == False:\r\n proven.append(S(i))\r\n if S(i) == 17:\r\n G.add_edge(1, 17, color='yellow',weight=5) \r\n else:\r\n #G.add_edge(i,S(i),color='black',weight=5)\r\n rTwoCon.append(i)\r\n rTwoCon.append(S(i)) \r\n\r\n if Node.getType(i) == 2:\r\n proven.remove(i)\r\n break \r\n \r\n\r\n # # RULE 3 : if x is orange and of type C then (4x-1)/3 becomes orange\r\n if Node.getType(i) == 3:\r\n \r\n value3 = int((4*i-1)/3)\r\n\r\n if value3 <= big_leaf + 1 and isIncluded(used, value3) == False:\r\n proven.append(value3)\r\n \r\n if value3 == 17:\r\n G.add_edge(1, 17, color='yellow',weight=5)\r\n \r\n elif int((value3-1)/2) %2 != 0:\r\n G.add_edge( int((value3-1)/2), value3, color='yellow',weight=5)\r\n\r\n elif int((value3+1)/2) %2 != 0:\r\n G.add_edge( int((value3+1)/2), value3, color='yellow',weight=5)\r\n\r\n elif int((value3-1)/4) %2 != 0:\r\n G.add_edge( int((value3-1)/4), value3, color='yellow',weight=5)\r\n \r\n proven.remove(i)\r\n break \r\n\r\n # # RULE 4 : if x is orange and of type A then G(x)/3 becomes orange \r\n if Node.getType(i) == 1 or Node.getType(i) == 4: \r\n value4 = int(fG(i)/3) \r\n \r\n #print(value4)\r\n if isIncluded(used, value4) == False: \r\n proven.append(value4) \r\n if value4 == 17:\r\n G.add_edge(1, 17, color='yellow',weight=5)\r\n elif value4 == 19:\r\n G.add_edge(9, 19, color='yellow',weight=5) #black before\r\n #print(i)\r\n #print(value4)\r\n elif ((value4-1)/2) % 2 != 0:\r\n G.add_edge((fG(i)/3), ((value4-1)/2), color='yellow',weight=5)\r\n\r\n elif ((value4-1)/4) % 2 != 0:\r\n G.add_edge(value4, ((value4-1)/2), color='yellow',weight=5)\r\n\r\n elif ((value4+1)/2) % 2 != 0:\r\n G.add_edge(value4, ((value4-1)/2) ,color='yellow',weight=5)\r\n 
proven.remove(i)\r\n break \r\n #print(i)\r\n proven.remove(i)\r\n else:\r\n proven.remove(i)\r\n \r\n print(rTwoCon)\r\n\r\n for con in range(0,len(rTwoCon)-1,2):\r\n G.add_edge(rTwoCon[con], rTwoCon[con+1],color='yellow',weight=5) #black before\r\n\r\n for node in G:\r\n color_type = Node.getType(node)\r\n self.node_colors.append(Node.colors[color_type-1])\r\n self.edges = G.edges()\r\n self.colors = [G[u][v]['color'] for u,v in self.edges]\r\n self.weights = [G[u][v]['weight'] for u,v in self.edges]\r\n\r\n def generatePos(self, label_value):\r\n col_counter = 0\r\n len_col_i = 0\r\n len_columns = len(self.columns)\r\n col = 0.0\r\n row = 0.0\r\n for i in range(0,len_columns):\r\n len_col_i = len(self.columns[i])\r\n for j in range(0, len_col_i): \r\n if self.columns[i][j] == int(label_value):\r\n col = float(i+1)\r\n break\r\n if label_value == 1:\r\n row = -2*float(1) * 0.2\r\n elif label_value == 3:\r\n row = -1.5*float(2) * 0.2 \r\n else:\r\n for i in range(2,int(label_value)):\r\n if label_value > (2**(i-1)) and label_value < (2**i):\r\n row = -1*float(i+1) * 0.2\r\n break\r\n return (col, row)\r\n\r\n def generatePosArr(self):\r\n pos1 = {}\r\n len_col_i = 0\r\n len_columns = len(self.columns)\r\n for i in range(0,len_columns):\r\n len_col_i = len(self.columns[i])\r\n for j in range(0, len_col_i):\r\n pos1[self.columns[i][j]] = self.generatePos(self.columns[i][j])\r\n self.pos = pos1\r\n return\r\n",
"_____no_output_____"
],
[
"root = Node(1)\r\naberkane_tree = AberkaneTree(root, 5) \r\npos = aberkane_tree.generatePosArr()\r\nG = nx.Graph()\r\naberkane_tree.genGraphData(G)\r\nw = 5\r\nh = 5\r\nfor i in range(1,(aberkane_tree.height)):\r\n w = w * 1.4\r\n h = h + 1\r\nplt.figure(figsize=(w, h))\r\nplt.gca().invert_yaxis()\r\nnx.draw(G, edgelist = aberkane_tree.edges, edge_color=aberkane_tree.colors, width=aberkane_tree.weights, node_color=aberkane_tree.node_colors,node_shape = \"s\", node_size=2000, pos=aberkane_tree.pos, with_labels=True, linewidths=1)\r\nplt.savefig('C:/Temp/32.pdf')",
"[1, 3, 11, 23, 7, 15, 9, 19]\n"
],
[
"root = Node(1)\r\naberkane_tree = AberkaneTree(root, 6) \r\npos = aberkane_tree.generatePosArr()\r\nG = nx.Graph()\r\naberkane_tree.genGraphData(G)\r\nw = 5\r\nh = 2\r\nfor i in range(1,(aberkane_tree.height)):\r\n w = w * 1.4\r\n h = h + 1\r\nplt.figure(figsize=(w, h))\r\nplt.gca().invert_yaxis()\r\nnx.draw(G, edgelist = aberkane_tree.edges, edge_color=aberkane_tree.colors, width=aberkane_tree.weights, node_color=aberkane_tree.node_colors,node_shape = \"s\", node_size=2000, pos=aberkane_tree.pos, with_labels=True, linewidths=1)\r\nplt.savefig('C:/Temp/64.pdf')",
"[1, 3, 17, 35, 11, 23, 7, 15, 9, 19, 25, 51]\n"
],
[
"root = Node(1)\r\naberkane_tree = AberkaneTree(root, 7) \r\npos = aberkane_tree.generatePosArr()\r\nG = nx.Graph()\r\naberkane_tree.genGraphData(G)\r\nw = 5\r\nh = 2\r\nfor i in range(1,(aberkane_tree.height)):\r\n w = w * 1.4\r\n h = h + 1\r\nplt.figure(figsize=(w, h))\r\nplt.gca().invert_yaxis()\r\nnx.draw(G, edgelist = aberkane_tree.edges, edge_color=aberkane_tree.colors, width=aberkane_tree.weights, node_color=aberkane_tree.node_colors,node_shape = \"s\", node_size=2000, pos=aberkane_tree.pos, with_labels=True, linewidths=1)\r\nplt.savefig('C:/Temp/128.pdf')",
"[1, 3, 17, 35, 11, 23, 7, 15, 9, 19, 25, 51, 33, 67, 49, 99, 43, 87, 57, 115, 59, 119, 39, 79]\n"
],
[
"root = Node(1)\r\naberkane_tree = AberkaneTree(root, 8) \r\npos = aberkane_tree.generatePosArr()\r\nG = nx.Graph()\r\naberkane_tree.genGraphData(G)\r\nw = 5\r\nh = 2\r\nfor i in range(1,(aberkane_tree.height)):\r\n w = w * 1.5\r\n h = h + 1\r\nplt.figure(figsize=(w, h))\r\nplt.gca().invert_yaxis()\r\nnx.draw(G, edgelist = aberkane_tree.edges, edge_color=aberkane_tree.colors, width=aberkane_tree.weights, node_color=aberkane_tree.node_colors,node_shape = \"s\", node_size=2000, pos=aberkane_tree.pos, with_labels=True, linewidths=1)\r\nplt.savefig('C:/Temp/256.pdf')",
"[1, 3, 17, 35, 11, 23, 7, 15, 9, 19, 25, 51, 33, 67, 49, 99, 65, 131, 43, 87, 57, 115, 81, 163, 89, 179, 59, 119, 39, 79, 105, 211, 113, 227, 75, 151, 123, 247]\n"
],
[
"root = Node(1)\r\naberkane_tree = AberkaneTree(root, 10)\r\npos = aberkane_tree.generatePosArr()\r\nG = nx.Graph()\r\naberkane_tree.genGraphData(G)\r\nw = 5\r\nh = 2\r\nfor i in range(1,(aberkane_tree.height)):\r\n w = w * 1.6\r\n h = h + 1\r\nplt.figure(figsize=(w, h))\r\nplt.gca().invert_yaxis()\r\nnx.draw(G, edgelist = aberkane_tree.edges, edge_color=aberkane_tree.colors, width=aberkane_tree.weights, font_size = 8, node_color=aberkane_tree.node_colors,node_shape = \"s\", node_size=250, pos=aberkane_tree.pos, with_labels=True, linewidths=1)",
"[1, 3, 17, 35, 11, 23, 7, 15, 9, 19, 25, 51, 33, 67, 49, 99, 65, 131, 43, 87, 57, 115, 81, 163, 89, 179, 59, 119, 39, 79, 105, 211, 113, 227, 75, 151, 153, 307, 177, 355, 201, 403, 209, 419, 139, 279, 185, 371, 123, 247, 217, 435, 241, 483, 273, 547, 281, 563, 187, 375, 249, 499, 289, 579, 305, 611, 203, 407, 135, 271, 321, 643, 329, 659, 219, 439, 361, 723, 369, 739, 385, 771, 401, 803, 267, 535, 409, 819, 433, 867, 465, 931, 473, 947, 315, 631, 481, 963, 497, 995, 331, 663, 441, 883, 363, 727, 427, 855, 379, 759, 505, 1011, 443, 887, 295, 591, 393, 787, 475, 951, 507, 1015]\n"
],
[
"root = Node(1)\r\naberkane_tree = AberkaneTree(root, 12)\r\npos = aberkane_tree.generatePosArr()\r\nG = nx.Graph()\r\naberkane_tree.genGraphData(G)\r\nw = 800\r\nh = 20\r\n#for i in range(1,(aberkane_tree.height)):\r\n# w = w * 1.6\r\n# h = h + 1\r\nplt.figure(figsize=(w, h))\r\nplt.gca().invert_yaxis()\r\nnx.draw(G, edgelist = aberkane_tree.edges, edge_color=aberkane_tree.colors, width=aberkane_tree.weights, font_size = 8, node_color=aberkane_tree.node_colors,node_shape = \"s\", node_size=250, pos=aberkane_tree.pos, with_labels=True, linewidths=1)",
"_____no_output_____"
],
[
"\r\n# n=5\r\n\r\n# big_leaf = (2**n)-1\r\n\r\n# used = []\r\n# proven = []\r\n\r\n# proven.append(3)\r\n# proven.append(5)\r\n\r\n# arrInfo = []\r\n# rowStartLabel = []\r\n\r\n# for i in range(0,n):\r\n# if i == 0:\r\n# arrInfo.append(1)\r\n# rowStartLabel.append(1)\r\n# elif i == 1:\r\n# arrInfo.append(1)\r\n# rowStartLabel.append(3)\r\n# else:\r\n# arrInfo.append(2**(i-1))\r\n# rowStartLabel.append(2**i + 1)\r\n\r\n# print(rowStartLabel)\r\n# print(arrInfo)\r\n\r\n# counter = 0\r\n\r\n# while len(proven) > 0:\r\n# proven.sort()\r\n# #print(proven)\r\n# for i in proven:\r\n \r\n# if isIncluded(used, i) == False:\r\n# used.append(i)\r\n# # RULE 1 : if x is orange V(x) becomes orange \r\n# if V(i) <= big_leaf + 1:\r\n# proven.append(V(i))\r\n \r\n# value1 = V(i)\r\n# counter = 0\r\n \r\n# for i in rowStartLabel:\r\n# if value1 < rowStartLabel[counter]:\r\n# break\r\n# else:\r\n# counter = counter + 1\r\n \r\n# # while value1 < rowStartLabel[counter-1]:\r\n# # counter += 1\r\n# # if counter == len(rowStartLabel)-1:\r\n# # break\r\n \r\n# arrInfo[counter-1] -= 1\r\n\r\n# if arrInfo[counter-1] == 0:\r\n# print(\"{0}: {1}\".format(counter, len(proven)))\r\n\r\n\r\n# # RULE 2 : if x is orange S(x) becomes orange (under the precise conditions of rule 2)\r\n# r2 = ruleTwo(i)\r\n# if r2: \r\n# if S(i) <= big_leaf + 1:\r\n# proven.append(S(i))\r\n\r\n# value2 = S(i)\r\n# counter = 0\r\n# while value2 < rowStartLabel[counter]:\r\n# counter += 1\r\n# if counter == len(rowStartLabel)-1:\r\n# break\r\n \r\n# arrInfo[counter-1] -= 1\r\n\r\n# if arrInfo[counter-1] == 0:\r\n# print(\"{0}: {1}\".format(counter, len(proven)))\r\n\r\n# # # RULE 3 : if x is orange and of type C then (4x-1)/3 becomes orange\r\n# if Node.getType(i) == 3:\r\n \r\n# value3 = int((4*i-1)/3)\r\n# counter = 0\r\n# if value3 <= big_leaf + 1:\r\n# proven.append(value3)\r\n \r\n# while value3 < rowStartLabel[counter]:\r\n# counter += 1\r\n# if counter == len(rowStartLabel)-1:\r\n# break\r\n \r\n \r\n# arrInfo[counter-1] -= 1\r\n\r\n# if arrInfo[counter-1] == 0:\r\n# print(\"{0}: {1}\".format(counter, len(proven)))\r\n\r\n\r\n# # # RULE 4 : if x is orange and of type A then G(x)/3 becomes orange \r\n# if Node.getType(i) == 1 or Node.getType(i) == 4: \r\n# value4 = int(fG(i)/3) \r\n# proven.append(value4)\r\n# #print(value4)\r\n# counter = 0\r\n# while value4 < rowStartLabel[counter]:\r\n# counter += 1\r\n# if counter == len(rowStartLabel)-1:\r\n# break\r\n \r\n# arrInfo[counter-1] -= 1\r\n\r\n# if arrInfo[counter-1] == 0:\r\n# print(\"{0}: {1}\".format(counter, len(proven)))\r\n\r\n# break \r\n# #print(i)\r\n# proven.remove(i)\r\n# #proven.sort()\r\n# break\r\n# else:\r\n# proven.remove(i)",
"_____no_output_____"
],
[
"def ruleTwo(num):\r\n result = (num-1)/2\r\n r = 0\r\n k = 0\r\n\r\n while result % 2 != 0:\r\n result = (result-1)/2\r\n k+=1\r\n\r\n r = result/2\r\n\r\n if ((k) % 2 == 0 and r % 2 == 0) or ((k) % 2 != 0 and r % 2 != 0):\r\n return (True)\r\n else:\r\n return (False)\r\n\r\nprint(ruleTwo(19))",
"False\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e747ff81233f3c4c59ce837e28b1bef1e0f9d4bd | 3,219 | ipynb | Jupyter Notebook | argumentos_parametros.ipynb | Antonio985/SeminarioDeProgramacion | a1bdd7fa6fa202a0a7a2e9a0f72def24d0f1c465 | [
"Unlicense"
] | null | null | null | argumentos_parametros.ipynb | Antonio985/SeminarioDeProgramacion | a1bdd7fa6fa202a0a7a2e9a0f72def24d0f1c465 | [
"Unlicense"
] | null | null | null | argumentos_parametros.ipynb | Antonio985/SeminarioDeProgramacion | a1bdd7fa6fa202a0a7a2e9a0f72def24d0f1c465 | [
"Unlicense"
] | null | null | null | 24.203008 | 474 | 0.511339 | [
[
[
"#Crear una funcion que realice una resta\ndef resta(a,b):\n return a-b\n\n#Invocar la funcion\nresta(400,40)",
"_____no_output_____"
],
[
"#Crear una funcio=n, haciendo una referencia directa\ndef resta(a=400,b=40):\n return a-b\n#Llamamos a la funcion\nresta()",
"_____no_output_____"
],
[
"def resta(a,b):\n return a-b\n\n#Invocamos la funcion\nresta()",
"_____no_output_____"
],
[
"#Crear una funcion asignando valores por defecto\ndef resta(a=None,b=None):\n if a==None or b==None:\n print(\"Error, debemos enviar dos numeros a la funcion\")\n return\n else:\n return a-b\n\n#Invocamos la funcion\nresta()",
"Error, debemos enviar dos numeros a la funcion\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e748093273b1ea7f99c582d327f9fb07a5b07ef8 | 74,997 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Household_Class_Imbalanced-checkpoint.ipynb | georgetown-analytics/First-Home-Recommender | 2f9773e19d5800f3c655e3973b47002dcd29eb2f | [
"MIT"
] | 1 | 2019-09-14T13:37:12.000Z | 2019-09-14T13:37:12.000Z | .ipynb_checkpoints/Household_Class_Imbalanced-checkpoint.ipynb | georgetown-analytics/First-Home-Recommender | 2f9773e19d5800f3c655e3973b47002dcd29eb2f | [
"MIT"
] | 7 | 2019-11-18T16:01:58.000Z | 2019-12-13T12:09:39.000Z | .ipynb_checkpoints/Household_Class_Imbalanced-checkpoint.ipynb | georgetown-analytics/First-Home-Recommender | 2f9773e19d5800f3c655e3973b47002dcd29eb2f | [
"MIT"
] | 3 | 2019-11-05T04:23:20.000Z | 2020-09-19T14:57:30.000Z | 120.768116 | 17,652 | 0.847314 | [
[
[
"import psycopg2\nimport pandas as pd\nimport numpy as np\nimport os\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"ticks\", color_codes=True)\nimport matplotlib\nimport warnings\nimport yellowbrick as yb\n%matplotlib inline\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)",
"C:\\Users\\Michael\\Anaconda3\\lib\\site-packages\\sklearn\\utils\\deprecation.py:144: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.\n warnings.warn(message, FutureWarning)\n"
],
[
"conn = psycopg2.connect( \n host = 'project.cgxhdwn5zb5t.us-east-1.rds.amazonaws.com',\n port = 5432, \n user = 'postgres',\n password = 'Admin123',\n database = 'postgres')\ncursor = conn.cursor()",
"_____no_output_____"
],
[
"DEC2FLOAT = psycopg2.extensions.new_type(\n psycopg2.extensions.DECIMAL.values,\n 'DEC2FLOAT',\n lambda value, curs: float(value) if value is not None else None)\npsycopg2.extensions.register_type(DEC2FLOAT)",
"_____no_output_____"
],
[
"cursor.execute('Select * from \"AHS_Household_Class\"')\nrows = cursor.fetchall()\ncol_names = []\nfor elt in cursor.description:\n col_names.append(elt[0])\n\ndf = pd.DataFrame(data=rows, columns=col_names )",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
]
],
[
[
"## Balanced Binning",
"_____no_output_____"
]
],
[
[
"from yellowbrick.datasets import load_concrete\nfrom yellowbrick.target import BalancedBinningReference\n\n\n# Instantiate the visualizer\nvisualizer = BalancedBinningReference(bins=[0,7,8,9,10])\n\ny = df['RATINGHS']\nvisualizer.fit(y) # Fit the data to the visualizer\nvisualizer.show() # Finalize and render the figure",
"_____no_output_____"
],
[
"\n\nLABEL_MAP = {\n 1: \"Un-Satisfied\",\n 2: \"Un-Satisfied\",\n 3: \"Un-Satisfied\",\n 4: \"Un-Satisfied\",\n 5: \"Un-Satisfied\",\n 6: \"Un-Satisfied\",\n 7: \"Satisfied\",\n 8: \"Satisfied\",\n 9: \"Highly Satisfied\",\n 10: \"Extreme Satisfied\"\n}\n\n# Convert class labels into text\ndf_conv = df['RATINGHS'].map(LABEL_MAP)\n",
"_____no_output_____"
],
[
"#df_sub['RATINGHS'] = df_sub['RATINGHS'].astype(str)\ndf_conv",
"_____no_output_____"
]
],
[
[
"### Class Imbalanced",
"_____no_output_____"
]
],
[
[
"X = df\ny = df_conv",
"_____no_output_____"
],
[
"from yellowbrick.target import ClassBalance\nX = df\ny = df_conv\n# Instantiate the visualizer\nvisualizer = ClassBalance(\n labels=[\"Un-Satisfied\", \"Satisfied\", \"Highly Satisfied\",\"Extreme Satisfied\"], size=(1080, 720)\n)\n\nvisualizer.fit(y)\nvisualizer.show()",
"_____no_output_____"
],
[
"from imblearn.over_sampling import SMOTE\nsm = SMOTE(random_state = 33)",
"_____no_output_____"
],
[
"X_sm, y_sm = sm.fit_sample(X, y.ravel())",
"/Users/sabashaikh/anaconda2/envs/py36/lib/python3.6/site-packages/imblearn/base.py:306: UserWarning: The target type should be binary.\n warnings.warn('The target type should be binary.')\n"
],
[
"# observe that data has been balanced\npd.Series(y_sm).value_counts().plot.bar()",
"_____no_output_____"
],
[
"X_sm, y_sm = sm.fit_sample(X_sm, y_sm.ravel())\npd.Series(y_sm).value_counts().plot.bar()",
"/Users/sabashaikh/anaconda2/envs/py36/lib/python3.6/site-packages/imblearn/base.py:306: UserWarning: The target type should be binary.\n warnings.warn('The target type should be binary.')\n"
],
[
"X_sm, y_sm = sm.fit_sample(X_sm, y_sm.ravel())\npd.Series(y_sm).value_counts().plot.bar()",
"/Users/sabashaikh/anaconda2/envs/py36/lib/python3.6/site-packages/imblearn/base.py:306: UserWarning: The target type should be binary.\n warnings.warn('The target type should be binary.')\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7482140379f9d5c202f99060dcd75e96e1dded6 | 125,087 | ipynb | Jupyter Notebook | docs/lab5/lab5.ipynb | mezgoodle/ad_labs | 75a7d91ab3c6e4abbfe6cace534e0624194df115 | [
"MIT"
] | 1 | 2021-09-08T19:59:16.000Z | 2021-09-08T19:59:16.000Z | docs/lab5/lab5.ipynb | mezgoodle/ad_labs | 75a7d91ab3c6e4abbfe6cace534e0624194df115 | [
"MIT"
] | 18 | 2021-10-01T03:03:08.000Z | 2022-01-13T07:28:00.000Z | docs/lab5/lab5.ipynb | mezgoodle/ad_labs | 75a7d91ab3c6e4abbfe6cace534e0624194df115 | [
"MIT"
] | 1 | 2021-10-02T11:34:27.000Z | 2021-10-02T11:34:27.000Z | 108.960801 | 20,546 | 0.788523 | [
[
[
"<a href=\"https://colab.research.google.com/github/mezgoodle/ad_labs/blob/master/docs/lab5/lab5.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Вступ",
"_____no_output_____"
],
[
"Створити програму, яка виконує наступні завдання:\n\n1. Створити не менше двох об’єктів TimeSeries, у яких індекси створені за\nдопомогою date_range(). Виділити підмасиви у цих об’єктів. Провести\nоб’єднання об’єктів TimeSeries за допомогою merge_asof().\n\n2. Виконати завдання відповідно до варіанту.",
"_____no_output_____"
],
[
"Варіант 7.\n*Файл Microsoft_Stock.csv.*\n\n1. Побудувати графік зміни ціни на час закриття біржі: а) загальний; б)\nза 2019 рік; в) за вересень 2018 року; г) за листопад 2015 – січень\n2018; д) за останні два тижня січня 2021-го року.\n\n2. Знайти середні значення найбільшої ціни за день а) за 2016 рік; б) за\nкожний місяць; в) за кожний тиждень першого кварталу 2019 року.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
]
],
[
[
"# Дані",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('https://raw.githubusercontent.com/mezgoodle/ad_labs/master/data/Microsoft_Stock.csv', index_col='Date', parse_dates=True)\ndf",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 1511 entries, 2015-04-01 16:00:00 to 2021-03-31 16:00:00\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Open 1511 non-null float64\n 1 High 1511 non-null float64\n 2 Low 1511 non-null float64\n 3 Close 1511 non-null float64\n 4 Volume 1511 non-null int64 \ndtypes: float64(4), int64(1)\nmemory usage: 70.8 KB\n"
],
[
"df.describe().T",
"_____no_output_____"
]
],
[
[
"# Перше завдання",
"_____no_output_____"
]
],
[
[
"date_indexes = pd.date_range('2015-04-01 16:00:00', '2021', freq='3D')\ndate_indexes",
"_____no_output_____"
],
[
"open_series = pd.Series(df['Open'], index=date_indexes).fillna(method='ffill')\nopen_series",
"_____no_output_____"
],
[
"date_indexes = pd.date_range('2015-04-01 16:00:00', '2019-11', freq='3B')\ndate_indexes",
"_____no_output_____"
],
[
"close_series = pd.Series(df['Close'], index=date_indexes).fillna(method='ffill')\nclose_series",
"_____no_output_____"
],
[
"open_series_indexed = open_series['2016-05':'2017-03']\nopen_series_indexed",
"_____no_output_____"
],
[
"close_series_indexed = close_series['2017-05']\nclose_series_indexed",
"_____no_output_____"
],
[
"merged_series = pd.merge_asof(open_series, close_series, left_index=True, right_index=True)\nmerged_series",
"_____no_output_____"
]
],
[
[
"# Друге завдання\n",
"_____no_output_____"
]
],
[
[
"close_series = pd.Series(df['Close'])",
"_____no_output_____"
],
[
"close_series.plot()",
"_____no_output_____"
],
[
"close_series['2019'].plot()",
"_____no_output_____"
],
[
"close_series['2018-09'].plot()",
"_____no_output_____"
],
[
"close_series['2015-11': '2018-01'].plot()",
"_____no_output_____"
],
[
"close_series['2021-01'].last('2W').plot()",
"_____no_output_____"
]
],
[
[
"# Третє завдання",
"_____no_output_____"
]
],
[
[
"high_series = pd.Series(df['High'])\nhigh_series",
"_____no_output_____"
],
[
"high_series['2016'].mean()",
"_____no_output_____"
],
[
"high_series.to_period('M').groupby(level=0).mean()",
"_____no_output_____"
],
[
"high_series['2019'].first('3M').to_period('W').groupby(level=0).mean()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e748647e36bc4965e0331a54e644efe9a2b8d683 | 46,183 | ipynb | Jupyter Notebook | HorsevsHumanwithLessSize.ipynb | sunneysood/Tensorflow | 0931151b690daf9ffb85e188f88297a996b86713 | [
"Apache-2.0"
] | null | null | null | HorsevsHumanwithLessSize.ipynb | sunneysood/Tensorflow | 0931151b690daf9ffb85e188f88297a996b86713 | [
"Apache-2.0"
] | null | null | null | HorsevsHumanwithLessSize.ipynb | sunneysood/Tensorflow | 0931151b690daf9ffb85e188f88297a996b86713 | [
"Apache-2.0"
] | null | null | null | 57.370186 | 7,349 | 0.603859 | [
[
[
"<a href=\"https://colab.research.google.com/github/sunneysood/Tensorflow/blob/master/HorsevsHumanwithLessSize.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!wget --no-check-certificate \\\n https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \\\n -O /tmp/horse-or-human.zip",
"--2019-11-15 13:38:55-- https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip\nResolving storage.googleapis.com (storage.googleapis.com)... 74.125.195.128, 2607:f8b0:400e:c08::80\nConnecting to storage.googleapis.com (storage.googleapis.com)|74.125.195.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 149574867 (143M) [application/zip]\nSaving to: ‘/tmp/horse-or-human.zip’\n\n/tmp/horse-or-human 100%[===================>] 142.65M 72.0MB/s in 2.0s \n\n2019-11-15 13:38:58 (72.0 MB/s) - ‘/tmp/horse-or-human.zip’ saved [149574867/149574867]\n\n"
],
[
"!wget --no-check-certificate \\\n https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip \\\n -O /tmp/validation-horse-or-human.zip",
"--2019-11-15 13:38:59-- https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip\nResolving storage.googleapis.com (storage.googleapis.com)... 74.125.142.128, 2607:f8b0:400e:c09::80\nConnecting to storage.googleapis.com (storage.googleapis.com)|74.125.142.128|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 11480187 (11M) [application/zip]\nSaving to: ‘/tmp/validation-horse-or-human.zip’\n\n\r /tmp/vali 0%[ ] 0 --.-KB/s \r /tmp/valid 36%[======> ] 4.01M 14.4MB/s \r/tmp/validation-hor 100%[===================>] 10.95M 37.1MB/s in 0.3s \n\n2019-11-15 13:39:00 (37.1 MB/s) - ‘/tmp/validation-horse-or-human.zip’ saved [11480187/11480187]\n\n"
]
],
[
[
"The following python code will use the OS library to use Operating System libraries, giving you access to the file system, and the zipfile library allowing you to unzip the data. ",
"_____no_output_____"
]
],
[
[
"import os\nimport zipfile\n\nlocal_zip = '/tmp/horse-or-human.zip'\nzip_ref = zipfile.ZipFile(local_zip, 'r')\nzip_ref.extractall('/tmp/horse-or-human')\nlocal_zip = '/tmp/validation-horse-or-human.zip'\nzip_ref = zipfile.ZipFile(local_zip, 'r')\nzip_ref.extractall('/tmp/validation-horse-or-human')\nzip_ref.close()",
"_____no_output_____"
]
],
[
[
"The contents of the .zip are extracted to the base directory `/tmp/horse-or-human`, which in turn each contain `horses` and `humans` subdirectories.\n\nIn short: The training set is the data that is used to tell the neural network model that 'this is what a horse looks like', 'this is what a human looks like' etc. \n\nOne thing to pay attention to in this sample: We do not explicitly label the images as horses or humans. If you remember with the handwriting example earlier, we had labelled 'this is a 1', 'this is a 7' etc. Later you'll see something called an ImageGenerator being used -- and this is coded to read images from subdirectories, and automatically label them from the name of that subdirectory. So, for example, you will have a 'training' directory containing a 'horses' directory and a 'humans' one. ImageGenerator will label the images appropriately for you, reducing a coding step. \n\nLet's define each of these directories:",
"_____no_output_____"
]
],
[
[
"# Directory with our training horse pictures\ntrain_horse_dir = os.path.join('/tmp/horse-or-human/horses')\n\n# Directory with our training human pictures\ntrain_human_dir = os.path.join('/tmp/horse-or-human/humans')\n\n# Directory with our training horse pictures\nvalidation_horse_dir = os.path.join('/tmp/validation-horse-or-human/validation-horses')\n\n# Directory with our training human pictures\nvalidation_human_dir = os.path.join('/tmp/validation-horse-or-human/validation-humans')",
"_____no_output_____"
]
],
[
[
"## Building a Small Model from Scratch\n\nBut before we continue, let's start defining the model:\n\nStep 1 will be to import tensorflow.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf",
"_____no_output_____"
]
],
[
[
"We then add convolutional layers as in the previous example, and flatten the final result to feed into the densely connected layers.",
"_____no_output_____"
],
[
"Finally we add the densely connected layers. \n\nNote that because we are facing a two-class classification problem, i.e. a *binary classification problem*, we will end our network with a [*sigmoid* activation](https://wikipedia.org/wiki/Sigmoid_function), so that the output of our network will be a single scalar between 0 and 1, encoding the probability that the current image is class 1 (as opposed to class 0).",
"_____no_output_____"
]
],
[
[
"model = tf.keras.models.Sequential([\n # Note the input shape is the desired size of the image 150x150 with 3 bytes color\n # This is the first convolution\n tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The second convolution\n tf.keras.layers.Conv2D(32, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2),\n # The third convolution\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2),\n # The fourth convolution\n #tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n #tf.keras.layers.MaxPooling2D(2,2),\n # The fifth convolution\n #tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n #tf.keras.layers.MaxPooling2D(2,2),\n # Flatten the results to feed into a DNN\n tf.keras.layers.Flatten(),\n # 512 neuron hidden layer\n tf.keras.layers.Dense(512, activation='relu'),\n # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')\n tf.keras.layers.Dense(1, activation='sigmoid')\n])",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\n"
]
],
[
[
"The model.summary() method call prints a summary of the NN ",
"_____no_output_____"
]
],
[
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 148, 148, 16) 448 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 74, 74, 16) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 72, 72, 32) 4640 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 36, 36, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 34, 34, 64) 18496 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 17, 17, 64) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 18496) 0 \n_________________________________________________________________\ndense (Dense) (None, 512) 9470464 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 513 \n=================================================================\nTotal params: 9,494,561\nTrainable params: 9,494,561\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"The \"output shape\" column shows how the size of your feature map evolves in each successive layer. The convolution layers reduce the size of the feature maps by a bit due to padding, and each pooling layer halves the dimensions.",
"_____no_output_____"
],
[
"Next, we'll configure the specifications for model training. We will train our model with the `binary_crossentropy` loss, because it's a binary classification problem and our final activation is a sigmoid. (For a refresher on loss metrics, see the [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/descending-into-ml/video-lecture).) We will use the `rmsprop` optimizer with a learning rate of `0.001`. During training, we will want to monitor classification accuracy.\n\n**NOTE**: In this case, using the [RMSprop optimization algorithm](https://wikipedia.org/wiki/Stochastic_gradient_descent#RMSProp) is preferable to [stochastic gradient descent](https://developers.google.com/machine-learning/glossary/#SGD) (SGD), because RMSprop automates learning-rate tuning for us. (Other optimizers, such as [Adam](https://wikipedia.org/wiki/Stochastic_gradient_descent#Adam) and [Adagrad](https://developers.google.com/machine-learning/glossary/#AdaGrad), also automatically adapt the learning rate during training, and would work equally well here.)",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.optimizers import RMSprop\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=RMSprop(lr=0.001),\n metrics=['acc'])",
"WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\n"
]
],
[
[
"### Data Preprocessing\n\nLet's set up data generators that will read pictures in our source folders, convert them to `float32` tensors, and feed them (with their labels) to our network. We'll have one generator for the training images and one for the validation images. Our generators will yield batches of images of size 300x300 and their labels (binary).\n\nAs you may already know, data that goes into neural networks should usually be normalized in some way to make it more amenable to processing by the network. (It is uncommon to feed raw pixels into a convnet.) In our case, we will preprocess our images by normalizing the pixel values to be in the `[0, 1]` range (originally all values are in the `[0, 255]` range).\n\nIn Keras this can be done via the `keras.preprocessing.image.ImageDataGenerator` class using the `rescale` parameter. This `ImageDataGenerator` class allows you to instantiate generators of augmented image batches (and their labels) via `.flow(data, labels)` or `.flow_from_directory(directory)`. These generators can then be used with the Keras model methods that accept data generators as inputs: `fit_generator`, `evaluate_generator`, and `predict_generator`.",
"_____no_output_____"
]
],
[
[
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# All images will be rescaled by 1./255\ntrain_datagen = ImageDataGenerator(rescale=1/255)\nvalidation_datagen = ImageDataGenerator(rescale=1/255)\n\n# Flow training images in batches of 128 using train_datagen generator\ntrain_generator = train_datagen.flow_from_directory(\n '/tmp/horse-or-human/', # This is the source directory for training images\n target_size=(150, 150), # All images will be resized to 150x150\n batch_size=128,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary')\n\n# Flow training images in batches of 128 using train_datagen generator\nvalidation_generator = validation_datagen.flow_from_directory(\n '/tmp/validation-horse-or-human/', # This is the source directory for training images\n target_size=(150, 150), # All images will be resized to 150x150\n batch_size=32,\n # Since we use binary_crossentropy loss, we need binary labels\n class_mode='binary')",
"Found 1027 images belonging to 2 classes.\nFound 256 images belonging to 2 classes.\n"
]
],
[
[
"### Training\nLet's train for 15 epochs -- this may take a few minutes to run.\n\nDo note the values per epoch.\n\nThe Loss and Accuracy are a great indication of progress of training. It's making a guess as to the classification of the training data, and then measuring it against the known label, calculating the result. Accuracy is the portion of correct guesses. ",
"_____no_output_____"
]
],
[
[
"history = model.fit_generator(\n train_generator,\n steps_per_epoch=8, \n epochs=15,\n verbose=1,\n validation_data = validation_generator,\n validation_steps=8)",
"Epoch 1/15\n6/8 [=====================>........] - ETA: 1s - loss: 2.9195 - acc: 0.4743Epoch 1/15\n8/8 [==============================] - 1s 90ms/step - loss: 0.6141 - acc: 0.6953\n8/8 [==============================] - 6s 745ms/step - loss: 2.3494 - acc: 0.4950 - val_loss: 0.6141 - val_acc: 0.6953\nEpoch 2/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.6739 - acc: 0.6540Epoch 1/15\n8/8 [==============================] - 1s 137ms/step - loss: 0.4916 - acc: 0.7266\n8/8 [==============================] - 5s 648ms/step - loss: 0.6391 - acc: 0.6552 - val_loss: 0.4916 - val_acc: 0.7266\nEpoch 3/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.6303 - acc: 0.7860Epoch 1/15\n8/8 [==============================] - 1s 138ms/step - loss: 0.5458 - acc: 0.7734\n8/8 [==============================] - 5s 646ms/step - loss: 0.5871 - acc: 0.7964 - val_loss: 0.5458 - val_acc: 0.7734\nEpoch 4/15\n6/8 [=====================>........] - ETA: 1s - loss: 0.3205 - acc: 0.8646Epoch 1/15\n8/8 [==============================] - 1s 134ms/step - loss: 2.2309 - acc: 0.6328\n8/8 [==============================] - 5s 650ms/step - loss: 0.2920 - acc: 0.8643 - val_loss: 2.2309 - val_acc: 0.6328\nEpoch 5/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.2999 - acc: 0.8768Epoch 1/15\n8/8 [==============================] - 1s 135ms/step - loss: 0.9976 - acc: 0.8086\n8/8 [==============================] - 5s 637ms/step - loss: 0.2780 - acc: 0.8910 - val_loss: 0.9976 - val_acc: 0.8086\nEpoch 6/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.1289 - acc: 0.9542Epoch 1/15\n8/8 [==============================] - 1s 135ms/step - loss: 1.2645 - acc: 0.7852\n8/8 [==============================] - 6s 717ms/step - loss: 0.1243 - acc: 0.9561 - val_loss: 1.2645 - val_acc: 0.7852\nEpoch 7/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.3576 - acc: 0.8703Epoch 1/15\n8/8 [==============================] - 1s 138ms/step - loss: 0.2837 - acc: 0.8945\n8/8 [==============================] - 5s 640ms/step - loss: 0.3891 - acc: 0.8521 - val_loss: 0.2837 - val_acc: 0.8945\nEpoch 8/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.1386 - acc: 0.9339Epoch 1/15\n8/8 [==============================] - 1s 135ms/step - loss: 1.2851 - acc: 0.8164\n8/8 [==============================] - 4s 558ms/step - loss: 0.1262 - acc: 0.9341 - val_loss: 1.2851 - val_acc: 0.8164\nEpoch 9/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.0669 - acc: 0.9743Epoch 1/15\n8/8 [==============================] - 1s 129ms/step - loss: 1.4269 - acc: 0.7617\n8/8 [==============================] - 6s 726ms/step - loss: 0.0693 - acc: 0.9727 - val_loss: 1.4269 - val_acc: 0.7617\nEpoch 10/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.0765 - acc: 0.9676Epoch 1/15\n8/8 [==============================] - 1s 86ms/step - loss: 1.6075 - acc: 0.7930\n8/8 [==============================] - 5s 675ms/step - loss: 0.0700 - acc: 0.9717 - val_loss: 1.6075 - val_acc: 0.7930\nEpoch 11/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.0186 - acc: 0.9892Epoch 1/15\n8/8 [==============================] - 1s 135ms/step - loss: 1.7637 - acc: 0.8203\n8/8 [==============================] - 5s 655ms/step - loss: 0.0184 - acc: 0.9910 - val_loss: 1.7637 - val_acc: 0.8203\nEpoch 12/15\n7/8 [=========================>....] 
- ETA: 0s - loss: 0.0165 - acc: 0.9909Epoch 1/15\n8/8 [==============================] - 1s 133ms/step - loss: 1.0449 - acc: 0.5703\n8/8 [==============================] - 5s 642ms/step - loss: 0.1458 - acc: 0.9577 - val_loss: 1.0449 - val_acc: 0.5703\nEpoch 13/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.1858 - acc: 0.9157Epoch 1/15\n8/8 [==============================] - 1s 137ms/step - loss: 0.6911 - acc: 0.8945\n8/8 [==============================] - 5s 640ms/step - loss: 0.1726 - acc: 0.9221 - val_loss: 0.6911 - val_acc: 0.8945\nEpoch 14/15\n7/8 [=========================>....] - ETA: 0s - loss: 0.2957 - acc: 0.9241Epoch 1/15\n8/8 [==============================] - 1s 132ms/step - loss: 1.1536 - acc: 0.8555\n8/8 [==============================] - 6s 727ms/step - loss: 0.2615 - acc: 0.9326 - val_loss: 1.1536 - val_acc: 0.8555\nEpoch 15/15\n6/8 [=====================>........] - ETA: 0s - loss: 0.0256 - acc: 0.9938Epoch 1/15\n8/8 [==============================] - 1s 136ms/step - loss: 1.0416 - acc: 0.8750\n8/8 [==============================] - 5s 573ms/step - loss: 0.0223 - acc: 0.9948 - val_loss: 1.0416 - val_acc: 0.8750\n"
]
],
[
[
"###Running the Model\n\nLet's now take a look at actually running a prediction using the model. This code will allow you to choose 1 or more files from your file system, it will then upload them, and run them through the model, giving an indication of whether the object is a horse or a human.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom google.colab import files\nfrom keras.preprocessing import image\n\nuploaded = files.upload()\n\nfor fn in uploaded.keys():\n \n # predicting images\n path = '/content/' + fn\n img = image.load_img(path, target_size=(150, 150))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n images = np.vstack([x])\n classes = model.predict(images, batch_size=10)\n print(classes[0])\n if classes[0]>0.5:\n print(fn + \" is a human\")\n else:\n print(fn + \" is a horse\")\n ",
"_____no_output_____"
]
],
[
[
"### Visualizing Intermediate Representations\n\nTo get a feel for what kind of features our convnet has learned, one fun thing to do is to visualize how an input gets transformed as it goes through the convnet.\n\nLet's pick a random image from the training set, and then generate a figure where each row is the output of a layer, and each image in the row is a specific filter in that output feature map. Rerun this cell to generate intermediate representations for a variety of training images.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport random\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\n\n# Let's define a new Model that will take an image as input, and will output\n# intermediate representations for all layers in the previous model after\n# the first.\nsuccessive_outputs = [layer.output for layer in model.layers[1:]]\n#visualization_model = Model(img_input, successive_outputs)\nvisualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)\n# Let's prepare a random input image from the training set.\nhorse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names]\nhuman_img_files = [os.path.join(train_human_dir, f) for f in train_human_names]\nimg_path = random.choice(horse_img_files + human_img_files)\n\nimg = load_img(img_path, target_size=(300, 300)) # this is a PIL image\nx = img_to_array(img) # Numpy array with shape (150, 150, 3)\nx = x.reshape((1,) + x.shape) # Numpy array with shape (1, 150, 150, 3)\n\n# Rescale by 1/255\nx /= 255\n\n# Let's run our image through our network, thus obtaining all\n# intermediate representations for this image.\nsuccessive_feature_maps = visualization_model.predict(x)\n\n# These are the names of the layers, so can have them as part of our plot\nlayer_names = [layer.name for layer in model.layers]\n\n# Now let's display our representations\nfor layer_name, feature_map in zip(layer_names, successive_feature_maps):\n if len(feature_map.shape) == 4:\n # Just do this for the conv / maxpool layers, not the fully-connected layers\n n_features = feature_map.shape[-1] # number of features in feature map\n # The feature map has shape (1, size, size, n_features)\n size = feature_map.shape[1]\n # We will tile our images in this matrix\n display_grid = np.zeros((size, size * n_features))\n for i in range(n_features):\n # Postprocess the feature to make it visually palatable\n x = feature_map[0, :, :, i]\n x -= x.mean()\n x /= x.std()\n x *= 64\n x += 128\n x = np.clip(x, 0, 255).astype('uint8')\n # We'll tile each filter into this big horizontal grid\n display_grid[:, i * size : (i + 1) * size] = x\n # Display the grid\n scale = 20. / n_features\n plt.figure(figsize=(scale * n_features, scale))\n plt.title(layer_name)\n plt.grid(False)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')",
"_____no_output_____"
]
],
[
[
"As you can see we go from the raw pixels of the images to increasingly abstract and compact representations. The representations downstream start highlighting what the network pays attention to, and they show fewer and fewer features being \"activated\"; most are set to zero. This is called \"sparsity.\" Representation sparsity is a key feature of deep learning.\n\n\nThese representations carry increasingly less information about the original pixels of the image, but increasingly refined information about the class of the image. You can think of a convnet (or a deep network in general) as an information distillation pipeline.",
"_____no_output_____"
],
[
"## Clean Up\n\nBefore running the next exercise, run the following cell to terminate the kernel and free memory resources:",
"_____no_output_____"
]
],
[
[
"import os, signal\nos.kill(os.getpid(), signal.SIGKILL)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7486bf91aea0e5eaecbd07294b6143801667378 | 66,833 | ipynb | Jupyter Notebook | PY0101EN_1_1_Types.ipynb | NikkiRufiansya/ML-Courses | 9087b994cbd8222d440775bebefde48ddb7ada70 | [
"MIT"
] | null | null | null | PY0101EN_1_1_Types.ipynb | NikkiRufiansya/ML-Courses | 9087b994cbd8222d440775bebefde48ddb7ada70 | [
"MIT"
] | null | null | null | PY0101EN_1_1_Types.ipynb | NikkiRufiansya/ML-Courses | 9087b994cbd8222d440775bebefde48ddb7ada70 | [
"MIT"
] | null | null | null | 22.203654 | 716 | 0.516392 | [
[
[
"<a href=\"https://colab.research.google.com/github/vrry/learning-phyton/blob/learning-types/PY0101EN_1_1_Types.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"<img src = \"https://i.imgur.com/AKXoLze.jpg\" align = \"center\">",
"_____no_output_____"
],
[
"#<h1>Python - Writing Your First Python Code!</h1>",
"_____no_output_____"
],
[
"<p><strong>Welcome!</strong> This notebook will teach you the basics of the Python programming language. Although the information presented here is quite basic, it is an important foundation that will help you read and write Python code. By the end of this notebook, you'll know the basics of Python, including how to write basic commands, understand some basic types, and how to perform simple operations on them.</p> ",
"_____no_output_____"
],
[
"#<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li>\n <a href=\"#hello\">Say \"Hello\" to the world in Python</a>\n <ul>\n <li><a href=\"version\">What version of Python are we using?</a></li>\n <li><a href=\"comments\">Writing comments in Python</a></li>\n <li><a href=\"errors\">Errors in Python</a></li>\n <li><a href=\"python_error\">Does Python know about your error before it runs your code?</a></li>\n <li><a href=\"exercise\">Exercise: Your First Program</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#types_objects\">Types of objects in Python</a>\n <ul>\n <li><a href=\"int\">Integers</a></li>\n <li><a href=\"float\">Floats</a></li>\n <li><a href=\"convert\">Converting from one object type to a different object type</a></li>\n <li><a href=\"bool\">Boolean data type</a></li>\n <li><a href=\"exer_type\">Exercise: Types</a></li>\n </ul>\n </li>\n <li>\n <a href=\"#expressions\">Expressions and Variables</a>\n <ul>\n <li><a href=\"exp\">Expressions</a></li>\n <li><a href=\"exer_exp\">Exercise: Expressions</a></li>\n <li><a href=\"var\">Variables</a></li>\n <li><a href=\"exer_exp_var\">Exercise: Expression and Variables in Python</a></li>\n </ul>\n </li>\n </ul>\n <p>\n Estimated time needed: <strong>25 min</strong>\n </p>\n</div>\n\n<hr>",
"_____no_output_____"
],
[
"#<h2 id=\"hello\">Say \"Hello\" to the world in Python</h2>",
"_____no_output_____"
],
[
"When learning a new programming language, it is customary to start with an \"hello world\" example. As simple as it is, this one line of code will ensure that we know how to print a string in output and how to execute code within cells in a notebook.",
"_____no_output_____"
],
[
"<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n[Tip]: To execute the Python code in the code cell below, click on the cell to select it and press <kbd>Shift</kbd> + <kbd>Enter</kbd>.\n</div>\n<hr/>",
"_____no_output_____"
]
],
[
[
"# Try your first Python output\n\nprint('Hello, Python!')",
"Hello, Python!\n"
],
[
"#ini tugas pertama\nprint ('Nikky Rufiansya')\n",
"Nikky Rufiansya\n"
]
],
[
[
"After executing the cell above, you should see that Python prints <code>Hello, Python!</code>. Congratulations on running your first Python code!",
"_____no_output_____"
],
[
"<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n [Tip:] <code>print()</code> is a function. You passed the string <code>'Hello, Python!'</code> as an argument to instruct Python on what to print.\n</div>\n<hr/>",
"_____no_output_____"
],
[
"## <h3 id=\"version\">What version of Python are we using?</h3>",
"_____no_output_____"
],
[
"<p>\n There are two popular versions of the Python programming language in use today: Python 2 and Python 3. The Python community has decided to move on from Python 2 to Python 3, and many popular libraries have announced that they will no longer support Python 2.\n</p>\n<p>\n Since Python 3 is the future, in this course we will be using it exclusively. How do we know that our notebook is executed by a Python 3 runtime? We can look in the top-right hand corner of this notebook and see \"Python 3\".\n</p>\n<p>\n We can also ask directly Python and obtain a detailed answer. Try executing the following code:\n</p>",
"_____no_output_____"
]
],
[
[
"# Check the Python Version\n\nimport sys\nprint(sys.version)",
"3.5.2 (default, Oct 8 2019, 13:06:37) \n[GCC 5.4.0 20160609]\n"
]
],
[
[
"<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n [Tip:] <code>sys</code> is a built-in module that contains many system-specific parameters and functions, including the Python version in use. Before using it, we must explictly <code>import</code> it.\n</div>\n<hr/>",
"_____no_output_____"
],
[
"## <h3 id=\"comments\">Writing comments in Python</h3>",
"_____no_output_____"
],
[
"<p>\n In addition to writing code, note that it's always a good idea to add comments to your code. It will help others understand what you were trying to accomplish (the reason why you wrote a given snippet of code). Not only does this help <strong>other people</strong> understand your code, it can also serve as a reminder <strong>to you</strong> when you come back to it weeks or months later.</p>\n\n<p>\n To write comments in Python, use the number symbol <code>#</code> before writing your comment. When you run your code, Python will ignore everything past the <code>#</code> on a given line.\n</p>",
"_____no_output_____"
]
],
[
[
"# Practice on writing comments\n\nprint('Hello, Python!') # This line prints a string\n\n# print('Hi')\nprint('\\nHia')",
"Hello, Python!\n\nHia\n"
]
],
[
[
"<p>\n After executing the cell above, you should notice that <code>This line prints a string</code> did not appear in the output, because it was a comment (and thus ignored by Python).\n</p>\n<p>\n The second line was also not executed because <code>print('Hi')</code> was preceded by the number sign (<code>#</code>) as well! Since this isn't an explanatory comment from the programmer, but an actual line of code, we might say that the programmer <em>commented out</em> that second line of code.\n</p>",
"_____no_output_____"
],
[
"## <h3 id=\"errors\">Errors in Python</h3>",
"_____no_output_____"
],
[
"<p>Everyone makes mistakes. For many types of mistakes, Python will tell you that you have made a mistake by giving you an error message. It is important to read error messages carefully to really understand where you made a mistake and how you may go about correcting it.</p>\n<p>For example, if you spell <code>print</code> as <code>frint</code>, Python will display an error message. Give it a try:</p>",
"_____no_output_____"
]
],
[
[
"# Print string as error message\n\nprint(\"Hello, Python!\")",
"Hello, Python!\n"
]
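,
[
"# A small added sketch: the lesson's deliberate typo ('frint') was edited out of the cell above,\n# so this cell reproduces the NameError it describes, caught with try/except so the notebook\n# still runs top to bottom.\ntry:\n    frint(\"Hello, Python!\")\nexcept NameError as err:\n    print(\"NameError:\", err)",
"_____no_output_____"
]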
],
[
[
"<p>The error message tells you: \n<ol>\n <li>where the error occurred (more useful in large notebook cells or scripts), and</li> \n <li>what kind of error it was (NameError)</li> \n</ol>\n<p>Here, Python attempted to run the function <code>frint</code>, but could not determine what <code>frint</code> is since it's not a built-in function and it has not been previously defined by us either.</p>",
"_____no_output_____"
],
[
"<p>\n You'll notice that if we make a different type of mistake, by forgetting to close the string, we'll obtain a different error (i.e., a <code>SyntaxError</code>). Try it below:\n</p>",
"_____no_output_____"
]
],
[
[
"# Try to see a built-in error message\n\nprint(\"Hello, Python!\")",
"Hello, Python!\n"
]
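,
[
"# A small added sketch: the unclosed-string example was fixed in the cell above, so this cell\n# compiles a broken snippet at run time to show the SyntaxError the lesson mentions, without\n# stopping the notebook.\ntry:\n    compile('print(\"Hello, Python!)', '<string>', 'exec')\nexcept SyntaxError as err:\n    print(\"SyntaxError:\", err)",
"_____no_output_____"
]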
],
[
[
"## <h3 id=\"python_error\">Does Python know about your error before it runs your code?</h3>",
"_____no_output_____"
],
[
"Python is what is called an <em>interpreted language</em>. Compiled languages examine your entire program at compile time, and are able to warn you about a whole class of errors prior to execution. In contrast, Python interprets your script line by line as it executes it. Python will stop executing the entire program when it encounters an error (unless the error is expected and handled by the programmer, a more advanced subject that we'll cover later on in this course).",
"_____no_output_____"
],
[
"Try to run the code in the cell below and see what happens:",
"_____no_output_____"
]
],
[
[
"# Print string and error to see the running order\n\nprint(\"This will be printed\")\nprint(\"This will cause an error\")\nprint(\"This will NOT be printed\")",
"This will be printed\nThis will cause an error\nThis will NOT be printed\n"
]
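,
[
"# A small added sketch of the 'expected and handled' case mentioned above: when an error is\n# caught with try/except, Python continues with the next statement instead of stopping.\nprint(\"Before the error\")\ntry:\n    frint(\"This raises a NameError\")\nexcept NameError:\n    print(\"The error was handled\")\nprint(\"After the error - this line still runs\")",
"_____no_output_____"
]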
],
[
[
"## <h3 id=\"exercise\">Exercise: Your First Program</h3>",
"_____no_output_____"
],
[
"<p>Generations of programmers have started their coding careers by simply printing \"Hello, world!\". You will be following in their footsteps.</p>\n<p>In the code cell below, use the <code>print()</code> function to print out the phrase: <code>Hello, world!</code></p>",
"_____no_output_____"
]
],
[
[
"# Write your code below and press Shift+Enter to execute \nprint('Hello, World!')",
"Hello, World!\n"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\nprint(\"Hello, world!\")\n\n-->",
"_____no_output_____"
],
[
"<p>Now, let's enhance your code with a comment. In the code cell below, print out the phrase: <code>Hello, world!</code> and comment it with the phrase <code>Print the traditional hello world</code> all in one line of code.</p>",
"_____no_output_____"
]
],
[
[
"# Write your code below and press Shift+Enter to execute \nprint('Hello, World!')",
"Hello, World!\n"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\nprint(\"Hello, world!\") # Print the traditional hello world\n\n-->\n",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"# <h2 id=\"types_objects\">Types of objects in Python</h2>",
"_____no_output_____"
],
[
"<p>Python is an object-oriented language. There are many different types of objects in Python. Let's start with the most common object types: <i>strings</i>, <i>integers</i> and <i>floats</i>. Anytime you write words (text) in Python, you're using <i>character strings</i> (strings for short). The most common numbers, on the other hand, are <i>integers</i> (e.g. -1, 0, 100) and <i>floats</i>, which represent real numbers (e.g. 3.14, -42.0).</p>",
"_____no_output_____"
],
[
"<a align=\"center\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/TypesObjects.png\" width=\"600\">\n</a>",
"_____no_output_____"
],
[
"<p>The following code cells contain some examples.</p>",
"_____no_output_____"
]
],
[
[
"# Integer\n21",
"_____no_output_____"
],
[
"# Float\n\n3.14",
"_____no_output_____"
],
[
"# String\n\n\"Hello, Python 101!\"",
"_____no_output_____"
]
],
[
[
"<p>You can get Python to tell you the type of an expression by using the built-in <code>type()</code> function. You'll notice that Python refers to integers as <code>int</code>, floats as <code>float</code>, and character strings as <code>str</code>.</p>",
"_____no_output_____"
]
],
[
[
"# Type of 12\n\ntype(12)",
"_____no_output_____"
],
[
"# Type of 2.14\n\ntype(2.14)",
"_____no_output_____"
],
[
"# Type of \"Hello, Python 101!\"\n\ntype(\"Hello, Python 101!\")",
"_____no_output_____"
]
],
[
[
"<p>In the code cell below, use the <code>type()</code> function to check the object type of <code>12.0</code>.",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\ntype(12.0)",
"_____no_output_____"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n\ntype(12.0)\n\n-->",
"_____no_output_____"
],
[
"## <h3 id=\"int\">Integers</h3>",
"_____no_output_____"
],
[
"<p>Here are some examples of integers. Integers can be negative or positive numbers:</p>",
"_____no_output_____"
],
[
"<a align=\"center\">\n <img src=\"https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/TypesInt.png\" width=\"600\">\n</a>",
"_____no_output_____"
],
[
"<p>We can verify this is the case by using, you guessed it, the <code>type()</code> function:",
"_____no_output_____"
]
],
[
[
"# Print the type of -1\n\ntype(-1)",
"_____no_output_____"
],
[
"# Print the type of 4\n\ntype(4)",
"_____no_output_____"
],
[
"# Print the type of 0\n\ntype(0)",
"_____no_output_____"
]
],
[
[
"## <h3 id=\"float\">Floats</h3> ",
"_____no_output_____"
],
[
"<p>Floats represent real numbers; they are a superset of integer numbers but also include \"numbers with decimals\". There are some limitations when it comes to machines representing real numbers, but floating point numbers are a good representation in most cases. You can learn more about the specifics of floats for your runtime environment by checking the value of <code>sys.float_info</code>. This will also tell you the largest and smallest numbers that can be represented with them.</p>\n\n<p>Once again, we can test some examples with the <code>type()</code> function:",
"_____no_output_____"
]
],
[
[
"# Print the type of 1.0\n\ntype(1.0) # Notice that 1 is an int, and 1.0 is a float",
"_____no_output_____"
],
[
"# Print the type of 0.5\n\ntype(0.5)",
"_____no_output_____"
],
[
"# Print the type of 0.56\n\ntype(0.56)",
"_____no_output_____"
],
[
"# System settings about float type\n\nsys.float_info",
"_____no_output_____"
]
],
[
[
"## <h3 id=\"convert\">Converting from one object type to a different object type</h3>",
"_____no_output_____"
],
[
"<p>You can change the type of the object in Python; this is called typecasting. For example, you can convert an <i>integer</i> into a <i>float</i> (e.g. 2 to 2.0).</p>\n<p>Let's try it:</p>",
"_____no_output_____"
]
],
[
[
"# Verify that this is an integer\n\ntype(2)",
"_____no_output_____"
]
],
[
[
"### <h4>Converting integers to floats</h4>\n<p>Let's cast integer 2 to float:</p>",
"_____no_output_____"
]
],
[
[
"# Convert 2 to a float\n\nfloat(2)",
"_____no_output_____"
],
[
"# Convert integer 2 to a float and check its type\n\ntype(float(2))",
"_____no_output_____"
]
],
[
[
"<p>When we convert an integer into a float, we don't really change the value (i.e., the significand) of the number. However, if we cast a float into an integer, we could potentially lose some information. For example, if we cast the float 1.1 to integer we will get 1 and lose the decimal information (i.e., 0.1):</p>",
"_____no_output_____"
]
],
[
[
"# Casting 1.1 to integer will result in loss of information\n\nint(1.1)",
"_____no_output_____"
]
],
[
[
"### <h4>Converting from strings to integers or floats</h4>",
"_____no_output_____"
],
[
"<p>Sometimes, we can have a string that contains a number within it. If this is the case, we can cast that string that represents a number into an integer using <code>int()</code>:</p>",
"_____no_output_____"
]
],
[
[
"# Convert a string into an integer\n\nint('1')",
"_____no_output_____"
]
],
[
[
"<p>But if you try to do so with a string that is not a perfect match for a number, you'll get an error. Try the following:</p>",
"_____no_output_____"
]
],
[
[
"# Convert a string into an integer with error\n\n# int('1 or 2 people')\n\nint('1')",
"_____no_output_____"
]
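,
[
"# A small added sketch: the failing conversion is commented out above, so this cell shows the\n# ValueError safely, without stopping the notebook.\ntry:\n    int('1 or 2 people')\nexcept ValueError as err:\n    print(\"ValueError:\", err)",
"_____no_output_____"
]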
],
[
[
"<p>You can also convert strings containing floating point numbers into <i>float</i> objects:</p>",
"_____no_output_____"
]
],
[
[
"# Convert the string \"1.2\" into a float\n\nfloat('1.2')",
"_____no_output_____"
]
],
[
[
"<hr/>\n<div class=\"alert alert-success alertsuccess\" style=\"margin-top: 20px\">\n [Tip:] Note that strings can be represented with single quotes (<code>'1.2'</code>) or double quotes (<code>\"1.2\"</code>), but you can't mix both (e.g., <code>\"1.2'</code>).\n</div>\n<hr/>",
"_____no_output_____"
],
[
"### <h4>Converting numbers to strings</h4>",
"_____no_output_____"
],
[
"<p>If we can convert strings to numbers, it is only natural to assume that we can convert numbers to strings, right?</p>",
"_____no_output_____"
]
],
[
[
"# Convert an integer to a string\n\nstr(1)",
"_____no_output_____"
]
],
[
[
"<p>And there is no reason why we shouldn't be able to make floats into strings as well:</p> ",
"_____no_output_____"
]
],
[
[
"# Convert a float to a string\n\nstr(1.2)",
"_____no_output_____"
]
],
[
[
"## <h3 id=\"bool\">Boolean data type</h3>",
"_____no_output_____"
],
[
"<p><i>Boolean</i> is another important type in Python. An object of type <i>Boolean</i> can take on one of two values: <code>True</code> or <code>False</code>:</p>",
"_____no_output_____"
]
],
[
[
"# Value true\n\nTrue",
"_____no_output_____"
]
],
[
[
"<p>Notice that the value <code>True</code> has an uppercase \"T\". The same is true for <code>False</code> (i.e. you must use the uppercase \"F\").</p>",
"_____no_output_____"
]
],
[
[
"# Value false\n\nFalse",
"_____no_output_____"
]
],
[
[
"<p>When you ask Python to display the type of a boolean object it will show <code>bool</code> which stands for <i>boolean</i>:</p> ",
"_____no_output_____"
]
],
[
[
"# Type of True\n\ntype(True)",
"_____no_output_____"
],
[
"# Type of False\n\ntype(False)",
"_____no_output_____"
]
],
[
[
"<p>We can cast boolean objects to other data types. If we cast a boolean with a value of <code>True</code> to an integer or float we will get a one. If we cast a boolean with a value of <code>False</code> to an integer or float we will get a zero. Similarly, if we cast a 1 to a Boolean, you get a <code>True</code>. And if we cast a 0 to a Boolean we will get a <code>False</code>. Let's give it a try:</p> ",
"_____no_output_____"
]
],
[
[
"# Convert True to int\n\nint(True)",
"_____no_output_____"
],
[
"# Convert 1 to boolean\n\nbool(1)",
"_____no_output_____"
],
[
"# Convert 0 to boolean\n\nbool(0)",
"_____no_output_____"
],
[
"# Convert True to float\n\nfloat(False)",
"_____no_output_____"
]
],
[
[
"## <h3 id=\"exer_type\">Exercise: Types</h3>",
"_____no_output_____"
],
[
"<p>What is the data type of the result of: <code>6 / 2</code>?</p>",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\nprint(6/2)",
"3.0\n"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\ntype(6/2) # float\n-->",
"_____no_output_____"
],
[
"<p>What is the type of the result of: <code>6 // 2</code>? (Note the double slash <code>//</code>.)</p>",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\nprint(6//2)",
"3\n"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\ntype(6//2) # int, as the double slashes stand for integer division \n-->",
"_____no_output_____"
],
[
"<hr>",
"_____no_output_____"
],
[
"# <h2 id=\"expressions\">Expression and Variables</h2>",
"_____no_output_____"
],
[
"<h3 id=\"exp\">Expressions</h3>",
"_____no_output_____"
],
[
"<p>Expressions in Python can include operations among compatible types (e.g., integers and floats). For example, basic arithmetic operations like adding multiple numbers:</p>",
"_____no_output_____"
]
],
[
[
"# Addition operation expression\n\n43 + 60 + 16 + 41",
"_____no_output_____"
]
],
[
[
"<p>We can perform subtraction operations using the minus operator. In this case the result is a negative number:</p>",
"_____no_output_____"
]
],
[
[
"# Subtraction operation expression\n\n50 - 60",
"_____no_output_____"
]
],
[
[
"<p>We can do multiplication using an asterisk:</p>",
"_____no_output_____"
]
],
[
[
"# Multiplication operation expression\n\n5 * 5",
"_____no_output_____"
]
],
[
[
"<p>We can also perform division with the forward slash:",
"_____no_output_____"
]
],
[
[
"# Division operation expression\n\n25 / 5",
"_____no_output_____"
],
[
"# Division operation expression\n\n25 / 6",
"_____no_output_____"
]
],
[
[
"<p>As seen in the quiz above, we can use the double slash for integer division (also called floor division), where the result is rounded down to an integer:",
"_____no_output_____"
]
],
[
[
"# Integer division operation expression\n\n25 // 5",
"_____no_output_____"
],
[
"# Integer division operation expression\n\n25 // 6",
"_____no_output_____"
]
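,
[
"# Added illustration: floor division rounds down, which matters for negative numbers\n# (-25 / 6 is about -4.17, and rounding down gives -5, not -4)\n\n-25 // 6",
"_____no_output_____"
]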
],
[
[
"## <h3 id=\"exer_exp\">Exercise: Expression</h3>",
"_____no_output_____"
],
[
"<p>Let's write an expression that calculates how many hours there are in 160 minutes:",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\nz = 160//60\nprint(z, 'Hours')",
"2 Hours\n"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n160/60 \n# Or \n160//60\n-->",
"_____no_output_____"
],
[
"<p>Python follows well accepted mathematical conventions when evaluating mathematical expressions. In the following example, Python adds 30 to the result of the multiplication (i.e., 120).",
"_____no_output_____"
]
],
[
[
"# Mathematical expression\n\n30 + 2 * 60",
"_____no_output_____"
]
],
[
[
"<p>And just like mathematics, expressions enclosed in parentheses have priority. So the following multiplies 32 by 60.",
"_____no_output_____"
]
],
[
[
"# Mathematical expression\n\n(30 + 2) * 60",
"_____no_output_____"
]
],
[
[
"## <h3 id=\"var\">Variables</h3>",
"_____no_output_____"
],
[
"<p>Just like with most programming languages, we can store values in <i>variables</i>, so we can use them later on. For example:</p>",
"_____no_output_____"
]
],
[
[
"# Store value into variable\n\nx = 43 + 60 + 16 + 41",
"_____no_output_____"
]
],
[
[
"<p>To see the value of <code>x</code> in a Notebook, we can simply place it on the last line of a cell:</p>",
"_____no_output_____"
]
],
[
[
"# Print out the value in variable\n\nprint(x)",
"160\n"
]
],
[
[
"<p>We can also perform operations on <code>x</code> and save the result to a new variable:</p>",
"_____no_output_____"
]
],
[
[
"# Use another variable to store the result of the operation between variable and value\n\ny = x / 60\ny",
"_____no_output_____"
]
],
[
[
"<p>If we save a value to an existing variable, the new value will overwrite the previous value:</p>",
"_____no_output_____"
]
],
[
[
"# Overwrite variable with new value\n\nx = x / 60\nx",
"_____no_output_____"
]
],
[
[
"<p>It's a good practice to use meaningful variable names, so you and others can read the code and understand it more easily:</p>",
"_____no_output_____"
]
],
[
[
"# Name the variables meaningfully\n\ntotal_min = 43 + 42 + 57 # Total length of albums in minutes\ntotal_min",
"_____no_output_____"
],
[
"# Name the variables meaningfully\n\ntotal_hours = total_min / 60 # Total length of albums in hours \ntotal_hours",
"_____no_output_____"
]
],
[
[
"<p>In the cells above we added the length of three albums in minutes and stored it in <code>total_min</code>. We then divided it by 60 to calculate total length <code>total_hours</code> in hours. You can also do it all at once in a single expression, as long as you use parenthesis to add the albums length before you divide, as shown below.</p>",
"_____no_output_____"
]
],
[
[
"# Complicate expression\n\ntotal_hours = (43 + 42 + 57) / 60 # Total hours in a single expression\ntotal_hours",
"_____no_output_____"
]
],
[
[
"<p>If you'd rather have total hours as an integer, you can of course replace the floating point division with integer division (i.e., <code>//</code>).</p>",
"_____no_output_____"
],
[
"## <h3 id=\"exer_exp_var\">Exercise: Expression and Variables in Python</h3>",
"_____no_output_____"
],
[
"<p>What is the value of <code>x</code> where <code>x = 3 + 2 * 2</code></p>",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\n\nx = 3 + 2 * 2\nx",
"_____no_output_____"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n7\n-->\n",
"_____no_output_____"
],
[
"<p>What is the value of <code>y</code> where <code>y = (3 + 2) * 2</code>?</p>",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\ny = (3 + 2) * 2\n\ny",
"_____no_output_____"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n10\n-->",
"_____no_output_____"
],
[
"<p>What is the value of <code>z</code> where <code>z = x + y</code>?</p>",
"_____no_output_____"
]
],
[
[
"# Write your code below. Don't forget to press Shift+Enter to execute the cell\nz = x + y\nz",
"_____no_output_____"
]
],
[
[
"Double-click __here__ for the solution.\n\n<!-- Your answer is below:\n17\n-->",
"_____no_output_____"
],
[
"#<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>",
"_____no_output_____"
],
[
"<p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href=\"https://cognitiveclass.ai/mit-license/\">MIT License</a>.</p>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e74873654f1f0f192d576d5da9770df58f6305f6 | 16,089 | ipynb | Jupyter Notebook | set_general_params.py.ipynb | maikia/ulfp-paper | 37c6cf31c37a68508170103eeae9ba8d0b8a8674 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | set_general_params.py.ipynb | maikia/ulfp-paper | 37c6cf31c37a68508170103eeae9ba8d0b8a8674 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | set_general_params.py.ipynb | maikia/ulfp-paper | 37c6cf31c37a68508170103eeae9ba8d0b8a8674 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | 52.237013 | 382 | 0.491081 | [
[
[
"Prepare general parameters.\n\nParameters are saved in a JSON file. A JSON file has the advantage of being plain text, so it can easily be opened, visualised and changed without Python.\n\n",
"_____no_output_____"
]
],
[
[
"import datetime\nimport json\nimport numpy as np\nimport os\n\nimport lib.helper as hl ",
"_____no_output_____"
],
[
"region_type = 'hippocamp'\ncurrent_dir = os.getcwd()\ndir_morpho_vertical = os.path.join(current_dir,\n 'data/vertical')# where your vertical morphologies are saved\nresults_dir = os.path.join(current_dir,\n 'data/results/')\nfile_name = 'all.json'\nneuron_type = 'exc'",
"_____no_output_____"
],
[
"\"\"\" generates plane json file with the parameters set to default;\nfile_name will be preceeded by current date and time\nand new folder will be\ncreated of the same name as .json file \"\"\" \n# check if dir exists and create it if it does not\nhl.create_folder(results_dir)\n\n# check current date/time and create new file and folder name\n\ni = datetime.datetime.now()\nnew_file_name = (\"%s_%s_%s_%s_%s_%s\" % (i.year, i.month, i.day, i.hour, i.minute, file_name))\nnew_folder_name = results_dir + new_file_name[:-5] + '/'\n\n# create new dir with the name of .json file in this dir\nhl.create_folder(new_folder_name)\n",
"_____no_output_____"
],
[
"# example of two sets of parameters\ndef select_region_type_params(region_type, neuron_type,\n new_folder_name, new_file_name, dir_morpho_vertical, \n celltype = ''):\n '''\n :param region_type: 'neocortex' or 'hippocamp'\n :param neuron_type: 'exc' or 'inh'\n :return:\n '''\n grid_size = 100 # The larger the grid the finer the results but also the longer the analysis\n\n # HIPPOCAMPUS EXCITATORY\n def hippocamp_exc():\n # you can read the data from the file\n cell_name = 'Ascoli_NMO_187.swc'\n # save the parameters into the created file\n parameters = {'save_data_dir': new_folder_name,\n 'save_data_name': new_file_name[:-5] + '.npz',\n 'text_file_name': 'parameters.txt',\n 'dir_morpho_vertical': dir_morpho_vertical, # where are all the cell morphologies stored\n 'range_inh_syn_on_one':[0, 0], # min and max number of synapses placed on single postsynaptic neuron\n 'range_exc_syn_on_one': [1, 2],\n # min and max number of synapses placed on single postsynaptic neuron\n 'cell_no': 10, # number of cells\n 'repeat_morpho': True,\n # if True the cell will be each time randomly selected from the directory, otherwise cell_no must be <= to the number of the cells in the directory\n 'cell_layer_ranges': [[-500, -40], [-40, 40], [40, 120], [120, 420], [420, 1000]],\n 'region_layer_ranges': [[-500, -40], [-40, 40], [40, 120], [120, 420], [420, 1000]],\n #[[-500, -35],[-35, 30],[30, 100],[100, 455],[455, 1000]], # [[-500, -30],[-30, 30],[30,35],[35, 600],[600, 1000]],#[[-500, -20], [-20, 30], [30, 100], [100, 400], [400, 1000]],\n # layers within cells will be used for placing synapses. 0 is assumed to be at the soma\n 'inh_synapse_prob': 0.0, # 0.225, # 1 probability of a creating a inh synapse\n 'inh_synapse_prob_layer': np.zeros(26).tolist(), #[0, 0, 0, 0, 0, 0, 0, 0],\n # [0,8,3,0,0], #,#, #[0, 3, 1, 0, 0], #, # max_no_of_inh_synapes within each of the cell_layers on single cell\n 'exc_synapse_prob': 0.42, # probability of a creating a exc synapse\n 'exc_synapse_prob_layer': [0.5,0,0,0.5,0], # restults for Ascoli_ca3b.CNG.swc\n #[0.4, 0,0,0.3,0.3],\n #[0.0, 0.0, 0.0, 0.005, 0.005, 0.01, 0.02, 0.24, 0.25, 0.0, 0.055, 0.055, 0.055, 0.055, 0.055, 0.055, 0.055, 0.055, 0.03, 0.0, 0.0, 0.0, 0.0], #[3.4, 3.4, 3.4, 3.4, 3.4, 3.4, 3.4, 3.4, 0.64, 0.0, 0.03, 2.1, 2.1, 2.5, 6.9, 6.9, 6.9, 3.5, 1.72, 0.8, 0.8, 0.4, 0.4], #0.23, 0.04, 0, 0.002, 0.16, 0.47, 0.1, 0], # Megias, # [0.53, 0, 0, 0.47, 0], # Guzman\n #'exc_synapse_prob_layer': [0.30, 0, 0, 0.70, 0],\n # [7,0,0,3,0], #[0,0,0,0,0], #[7,0,0,3,0]# max_no_of_inh_synapes within each of the cell_layers on single cell\n 'grid_size': grid_size,\n 'space_x_range': (-500, 500), # microm\n 'space_x_prob_distribution':[0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0],# restults for Ascoli_ca3b.CNG.swc\n #[0.15, 0.30, 0.22, 0.15, 0.09, 0.05, 0.03, 0.01], # distribution of the somas in x space in each x_hist from the 0 x-coord\n 'x_hist': 50, # microm - placing soma with the prob 'space_x_prob_distribution' for each bin of size x_hist from the 0 x-coord\n 'space_y_range': (-500, 800),\n # microm; width of the pyramidal cell layer where the soma will be placed\n 'space_z_range': (-200, 200), # microm\n 'space_soma_range': (-20, 20), # place soma solely in the pyr layer\n 'record_x_range': (-500, 500), # (-100, 100),\n 'record_y_range': (-500, 800),\n 'min_cell_distance': 10, # cell size\n\n 'timestep': 0.025, # ms/ dt for simulation\n 'sim_len': 15, # ms\n 'prop_velocity_inh': 0.5, # m/s propagation velocity in the axon of inh neuron in this area\n 'prop_velocity_exc': 
0.45, # Meeks and Mennerick (2007) m/s propagation velocity in the axon of exc neuron in this area\n 'stim_delay': 1.,\n # ms stimulation delay of the synapses from the beginning of the simulation + the propagation delay calculated from the distance of the synapse from (0,0,0) and prop_velocity_inh or prop_velocity_exc\n\n 'syn_rev_pot_exc': 0.0, # mV\n 'syn_rev_pot_inh': -72.0, # mV\n 'tau1_exc': 0.26, # Debanne (1995), EPSC, CA3-CA3 peak to peak (Kowalski 2016 (20-80%) Guzman 2016 #1.65, # ms\n 'tau2_exc': 6.71, #8.9, #\n 'tau1_inh': 0, # 1.2, # ms\n 'tau2_inh': 0, # 14.4, # ms\n 'con_weight_exc': 0.00054, # 0.54e-3, #.Guzman 2016: 0.00054 uS\n # microS; also called g_max (try also 0.25e-3) # max excitatory synapse conductance\n 'con_weight_inh': 0, # .49e-3,\n # microS; also called g_max (try also 0.6e-3) # max inhibitory synapse conductance\n\n 'external_resistivity': 3.5, # Ohm.m (external)\n 'cell_area': 'hippocamp', # morphologies of which area should be used\n 'memb_pot': -57 # mV Kowalski et al 2016\n }\n # if read from file, overwrite the values with file values\n\n return parameters\n\n # HIPPOCAMPUS INHIBITORY\n def hippocamp_inh():\n # save the parameters into the created file\n parameters = {'save_data_dir': new_folder_name,\n 'save_data_name': new_file_name[:-5] + '.npz',\n 'text_file_name': 'parameters.txt',\n 'dir_morpho_vertical': dir_morpho_vertical, # where are all the cell morphologies stored\n 'range_inh_syn_on_one': [1, 6], #(Gulyas, 1993 - could be something better -> it's guinue pig )\n # min and max number of synapses placed on single postsynaptic neuron\n 'range_exc_syn_on_one': [0, 0],\n 'cell_no': 10, #number of cells\n 'repeat_morpho': True,\n # if True the cell will be each time randomly selected from the directory, otherwise cell_no must be <= to the number of the cells in the directory\n 'cell_layer_ranges': [[-500, -40], [-40, 40], [40, 120], [120, 420], [420, 1000]],\n 'region_layer_ranges': [[-500, -40], [-40, 40], [40, 120], [120, 420], [420, 1000]],\n # layers within cells will be used for placing synapses. 
0 is assumed to be at the soma\n 'inh_synapse_prob': 0.5, # 0.225, # 1 probability of a creating a inh synapse\n 'inh_synapse_prob_layer': [0, 0.8, 0.2, 0, 0],\n # [0,8,3,0,0], #,#, #[0, 3, 1, 0, 0], #, # max_no_of_inh_synapes within each of the cell_layers on single cell\n 'exc_synapse_prob': 0.0, # probability of a creating a exc synapse\n 'exc_synapse_prob_layer': [0, 0, 0, 0, 0],\n # [7,0,0,3,0], #[0,0,0,0,0], #[7,0,0,3,0]# max_no_of_inh_synapes within each of the cell_layers on single cell\n 'grid_size': grid_size,\n 'space_x_range': (-500, 500), # microm\n 'space_y_range': (-500, 800),\n 'space_x_prob_distribution': [0.005, 0.01, 0.02, 0.04, 0.05, 0.07, 0.15, 0.2, 0.22, 0.25,\n 0.25, 0.22, 0.2, 0.15, 0.07, 0.05, 0.04, 0.02, 0.01, 0.005], # trying to get more or less gaussian distribution - no references\n # distribution of the somas in x space in each x_hist from the 0 x-coord\n 'x_hist': 50, #microm\n # microm - placing soma with the prob 'space_x_prob_distribution' for each bin of size x_hist from the 0 x-coord\n # microm; width of the pyramidal cell layer where the soma will be placed\n 'space_z_range': (-200, 200), # microm\n 'space_soma_range': (-30, 40), # place soma solely in the pyr layer\n 'record_x_range': (-500, 500), # (-100, 100),\n 'record_y_range': (-500, 800),\n 'min_cell_distance': 10, # cell size\n\n 'timestep': 0.025, # ms/ dt for simulation\n 'sim_len': 15, # ms\n 'prop_velocity_inh': 0.5, # m/s propagation velocity in the axon of inh neuron in this area\n 'prop_velocity_exc': 0.38, # m/s propagation velocity in the axon of exc neuron in this area\n 'stim_delay': 1.,\n # ms stimulation delay of the synapses from the beginning of the simulation + the propagation delay calculated from the distance of the synapse from (0,0,0) and prop_velocity_inh or prop_velocity_exc\n\n 'syn_rev_pot_exc': 0.0, # mV\n 'syn_rev_pot_inh': -75.0, # mV\n 'tau1_exc': 2.2, # Guzman 2016 #1.65, # ms\n 'tau2_exc': 9.5, # Guzman 2016 #7.35, # ms\n 'tau1_inh': 0.45, # Bartos et al 2002: 0.45 # Miles et al. 1996: 2.8; Bazelot 2010: 2\n 'tau2_inh': 1.2, # Bartos et al 2002: 1.2 # ms Miles 1996: 27 at half amplitude\n 'con_weight_exc': .24e-3, # .31e-3,\n # microS; also called g_max (try also 0.25e-3) # max excitatory synapse conductance\n 'con_weight_inh': 0.005, # Bartos et al 2002: 5 nS #.49e-3,\n # microS; also called g_max (try also 0.6e-3) # max inhibitory synapse conductance\n\n 'external_resistivity': 3.5, # Ohm.m (external)\n 'cell_area': 'hippocamp', # morphologies of which area should be used\n\n 'memb_pot': -70 # mV initial membrane potential\n }\n return parameters\n\n\n if region_type == 'hippocamp':\n if neuron_type == 'exc':\n return hippocamp_exc()\n elif neuron_type == 'inh':\n return hippocamp_inh()\n\n\n",
"_____no_output_____"
],
[
"parameters = select_region_type_params(region_type,neuron_type,\n new_folder_name=new_folder_name,new_file_name=new_file_name,\n dir_morpho_vertical=dir_morpho_vertical,\n celltype='pyr')",
"_____no_output_____"
],
[
"# save params\nwith open(results_dir + new_file_name, 'w') as fid:\n    json.dump(parameters, fid, indent=True)\n\nprint(\"Created new DEFAULT .json file: %s%s\" % (results_dir, new_file_name))",
"Created new DEFAULT .json file: /home/maria/maja/code/lfp-paper/data/results/2018_12_5_14_1_all.json\n"
]
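,
[
"# Quick added check (a sketch, assuming the file written above exists): because the parameters\n# are plain-text JSON, they can be read straight back with json.load\nwith open(results_dir + new_file_name) as fid:\n    loaded = json.load(fid)\nprint(\"loaded %d parameters, e.g. cell_no = %s\" % (len(loaded), loaded['cell_no']))",
"_____no_output_____"
]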
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7487e72f9868a9cc97eb7e2b4f58ee9b387fe7c | 316,687 | ipynb | Jupyter Notebook | overview_val10/overview_all_three_svm-sigmoid.ipynb | vevurka/mt-lob | 70989bcb61f4cfa7884437e1cff2db2454b3ceff | [
"MIT"
] | 2 | 2019-04-17T02:19:22.000Z | 2019-05-23T12:14:59.000Z | overview_val10/overview_all_three_svm-sigmoid.ipynb | vevurka/mt-lob | 70989bcb61f4cfa7884437e1cff2db2454b3ceff | [
"MIT"
] | 10 | 2020-01-28T22:32:13.000Z | 2021-09-08T00:41:37.000Z | overview_val10/overview_all_three_svm-sigmoid.ipynb | vevurka/mt-lob | 70989bcb61f4cfa7884437e1cff2db2454b3ceff | [
"MIT"
] | 6 | 2018-12-05T22:17:05.000Z | 2020-09-03T03:00:50.000Z | 227.9964 | 90,148 | 0.883604 | [
[
[
"%load_ext autoreload\n\n%autoreload 2\n%matplotlib inline\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nfrom sklearn.svm import SVC\nfrom sklearn import metrics\n\nfrom ast import literal_eval\n\nfrom mlxtend.plotting import plot_decision_regions\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\n\n\nimport warnings\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom lob_data_utils import lob, db_result, model\nfrom lob_data_utils.svm_calculation import lob_svm\n\n\nsns.set_style('whitegrid')\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"data_length = 15000\nstocks = ['9064', '9061', '9265']\nshould_save_fig = False # TODO: change plot names if changing that value",
"_____no_output_____"
],
[
"d_stocks = {}\nd_cv_stocks = {}\nd_test_stocks = {}\nfor s in stocks:\n d, d_test = lob.load_prepared_data(\n s, data_dir='../queue_imbalance/data/prepared', cv=False, length=data_length)\n d.index = pd.to_datetime(d['Unnamed: 0'].values)\n d_test.index = pd.to_datetime(d_test['Unnamed: 0'].values)\n d['prev_queue_imbalance'] = [None] + d['queue_imbalance'].iloc[0:len(d)-1].values.tolist()\n d.dropna(inplace=True)\n d_test['prev_queue_imbalance'] = [None] + d_test['queue_imbalance'].iloc[0:len(d_test)-1].values.tolist()\n d_test.dropna(inplace=True)\n d_stocks[s] = d\n d_test_stocks[s] = d_test",
"_____no_output_____"
],
[
"d_stocks['9061'].head()",
"_____no_output_____"
]
],
[
[
"## SVM with queue imbalance",
"_____no_output_____"
]
],
[
[
"df_res = {}\nfor s in stocks:\n df_res_temp = pd.read_csv('res_{}_prev_queue_imbalance.csv'.format(s))\n df_res_temp = df_res_temp[df_res_temp['features'] == 'queue_imbalance']\n df_res_temp = df_res_temp[df_res_temp['method'] == 'svm_sigmoid']\n df_res_temp['stock'] = [s for i in range(len(df_res_temp))]\n df_res[s] = df_res_temp\n columns_to_mean = ['f1', 'kappa', 'matthews', \n 'precision', 'recall', 'roc_auc', 'train_f1', 'train_kappa',\n 'train_matthews', 'train_precision', 'train_recall', 'train_roc_auc']\n for c in columns_to_mean:\n cc = []\n for i, row in df_res[s].iterrows():\n cc.append(np.array(literal_eval(row[c])).mean())\n df_res[s][c] = cc",
"_____no_output_____"
],
[
"df_res['9061'].sort_values(by='matthews', ascending=False).head()",
"_____no_output_____"
],
[
"df_best_svm = pd.DataFrame()\nfor s in stocks:\n idx_max = df_res[s]['matthews'].idxmax()\n df_best_svm = df_best_svm.append(df_res[s].loc[idx_max])\ndf_best_svm",
"_____no_output_____"
],
[
"print(df_best_svm[['stock', 'method', 'c', 'gamma', 'coef0']].to_latex())",
"\\begin{tabular}{lllrrr}\n\\toprule\n{} & stock & method & c & gamma & coef0 \\\\\n\\midrule\n69 & 9064 & svm\\_sigmoid & 1.0 & 0.10 & 1.0 \\\\\n29 & 9061 & svm\\_sigmoid & 10.0 & 0.01 & 1.0 \\\\\n69 & 9265 & svm\\_sigmoid & 1.0 & 0.10 & 1.0 \\\\\n\\bottomrule\n\\end{tabular}\n\n"
],
[
"def fit_best_svm_classifier(df_best_svm, df, stock=None):\n gamma = df_best_svm[df_best_svm['stock'] == stock]['gamma'].values[0]\n coef0 = df_best_svm[df_best_svm['stock'] == stock]['coef0'].values[0]\n c = df_best_svm[df_best_svm['stock'] == stock]['c'].values[0]\n kernel = df_best_svm[df_best_svm['stock'] == stock]['method'].values[0].split('_')[1]\n\n X = df['queue_imbalance'].values.reshape(-1, 1)\n y = df['mid_price_indicator']\n \n clf = SVC(gamma=gamma, C=c, coef0=coef0, kernel=kernel, random_state=34234)\n clf.fit(X, y)\n return clf\n ",
"_____no_output_____"
],
[
"from sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import TimeSeriesSplit\n\nf, ax = plt.subplots(1, 3, figsize=(15,4))\ni = 0\nfor stock in stocks:\n log_clf = fit_best_svm_classifier(df_best_svm, d_stocks[stock], stock=stock)\n lob.plot_learning_curve(\n log_clf, d_stocks[stock]['queue_imbalance'].values.reshape(-1, 1),\n d_stocks[stock]['mid_price_indicator'], \n cv=TimeSeriesSplit(n_splits=10, max_train_size=9000), ax=ax[i], scoring='f1')\n ax[i].set_title('Learning Curve for {}'.format(stock))\n i += 1\nif should_save_fig:\n print('Saving')\n plt.savefig('svm_learning_curves.png')",
"_____no_output_____"
],
[
"f, (ax1, ax3) = plt.subplots(1, 2, figsize=(21,6))\nfor stock in stocks:\n log_clf = fit_best_svm_classifier(df_best_svm, d_stocks[stock], stock=stock)\n pred_train = log_clf.predict(d_stocks[stock]['queue_imbalance'].values.reshape(-1, 1))\n pred_test = log_clf.predict(d_test_stocks[stock]['queue_imbalance'].values.reshape(-1, 1))\n d_stocks[stock]['pred_log'] = pred_train\n d_test_stocks[stock]['pred_log'] = pred_test\n lob.plot_f1(d_stocks[stock], log_clf, stock=stock, label='', ax=ax1,\n title='Precision vs Recall for 3 stocks on training data')\n lob.plot_f1(d_test_stocks[stock], log_clf, stock=stock, label='', ax=ax3,\n title='Precision vs Recall for 3 stocks on testing data')\n if should_save_fig:\n print('Saving')\n plt.savefig('svm_results.png')",
"_____no_output_____"
],
[
"def get_scores_dict_for_data(functions_to_run, dfs, log_clf, stock):\n scores = {'stock': stock}\n for func_name, func in functions_to_run.items():\n for df_name, df in dfs.items():\n pred = log_clf.predict(df['queue_imbalance'].values.reshape(-1, 1))\n scores['{}_{}'.format(df_name, func_name)] = func(df['mid_price_indicator'], pred)\n return scores\n \nfunctions_to_run = {'precision': metrics.precision_score, 'roc_auc': metrics.roc_auc_score,\n 'f1_score': metrics.f1_score, 'recall': metrics.recall_score, \n 'matthews': metrics.matthews_corrcoef, 'kappa': metrics.cohen_kappa_score}\nscores = []\nfor stock in stocks:\n log_clf = fit_best_svm_classifier(df_best_svm, d_stocks[stock], stock=stock)\n dfs = {'train': d_stocks[stock], 'test': d_test_stocks[stock], }\n res_train = model.validate_model(fit_best_svm_classifier(df_best_svm, d_stocks[stock], stock=stock), \n d_stocks[stock][['queue_imbalance']], d_stocks[stock]['mid_price_indicator'])\n res = get_scores_dict_for_data(functions_to_run, dfs, log_clf, stock)\n res = {**res, **res_train}\n scores.append(res)\ndf_scores = pd.DataFrame(scores, index=stocks)",
"_____no_output_____"
],
[
"f, ax = plt.subplots(1, 3, figsize=(27,6))\nfor i in range(len(stocks)):\n s = stocks[i]\n d_stocks[s]['Predicition of Mid Price Indicator'] = d_stocks[s]['pred_log']\n d_stocks[s]['Mid Price Indicator'] = d_stocks[s]['mid_price_indicator']\n d_stocks[s][['Predicition of Mid Price Indicator', 'Mid Price Indicator']].plot(\n kind='kde', ax=ax[i])\n ax[i].set_title('Density of Mid Price Indicator and its prediction {} on training data'.format(s))\n ax[i].legend(loc='lower right')\nif should_save_fig:\n print('Saving')\n plt.savefig('density_of_mid_price_and_prediction_training_data_svm.png')",
"_____no_output_____"
],
[
"for s in stocks:\n d_stocks[s]['queue_imbalance'].plot(kind='kde')\n d_stocks[s]['mid_price_indicator'].plot(kind='kde')",
"_____no_output_____"
],
[
"def convert_scores(df, column):\n scores = []\n for i, row in df.iterrows():\n scores.append(np.mean(row[column]))\n return scores\nscores_columns = ['f1', 'kappa', 'matthews', 'precision', 'recall', 'roc_auc', 'train_f1', 'train_kappa',\n 'train_matthews', 'train_precision', 'train_recall', 'train_roc_auc']\n\nfor col in scores_columns:\n df_scores[col] = convert_scores(df_scores, col)\ndf_scores\n",
"_____no_output_____"
],
[
"print('sigmoid kernel')\ndf_scores[['matthews', 'test_matthews']]",
"sigmoid kernel\n"
],
[
"print('sigmoid kernel')\ndf_scores[['roc_auc', 'test_roc_auc']]",
"sigmoid kernel\n"
],
[
"print(df_scores[['matthews', 'test_matthews', 'roc_auc', 'test_roc_auc']].to_latex())",
"\\begin{tabular}{lrrrr}\n\\toprule\n{} & matthews & test\\_matthews & roc\\_auc & test\\_roc\\_auc \\\\\n\\midrule\n9064 & 0.082146 & 0.088884 & 0.540714 & 0.544286 \\\\\n9061 & 0.093797 & 0.070265 & 0.546624 & 0.535131 \\\\\n9265 & 0.105674 & 0.117860 & 0.552374 & 0.558574 \\\\\n\\bottomrule\n\\end{tabular}\n\n"
],
[
"print(df_scores[['f1', 'test_f1_score', 'precision', 'test_precision', 'recall', 'test_recall']].to_latex())",
"\\begin{tabular}{lrrrrrr}\n\\toprule\n{} & f1 & test\\_f1\\_score & precision & test\\_precision & recall & test\\_recall \\\\\n\\midrule\n9064 & 0.532262 & 0.527442 & 0.554853 & 0.557257 & 0.536294 & 0.500655 \\\\\n9061 & 0.540923 & 0.530323 & 0.543128 & 0.531757 & 0.544688 & 0.528898 \\\\\n9265 & 0.564119 & 0.533145 & 0.560195 & 0.566292 & 0.572780 & 0.503664 \\\\\n\\bottomrule\n\\end{tabular}\n\n"
],
[
"df_best_svm",
"_____no_output_____"
],
[
"f, ax = plt.subplots(3, 1, figsize=(35, 15), sharex=True)\ni = 0\n\nfor i in range(len(stocks)):\n s = stocks[i]\n df = d_stocks[s]\n X = d_stocks[s][['queue_imbalance']].values\n y = d_stocks[s]['mid_price_indicator'].values.astype(np.integer)\n\n clf = fit_best_svm_classifier(df_best_svm, d_stocks[s], stock=s)\n # plot_decision_regions(X[0:1500], y[0:1500], clf=clf,ax=ax[i], colors=','.join(['orange', 'blue']))\n ax[i].set_xlabel('Queue Imbalance')\n ax[i].set_title('SVM Decision Regions for {} on training data'.format(s))\n ax[i].set_xlim(-1.01, 1.01)\n\nif should_save_fig:\n print('Saving')\n plt.savefig('svm_decision_region.png')",
"_____no_output_____"
]
],
[
[
"## Compare Queue Imbalance vs Prev Queue Imbalance",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7488a54889603cb0c724034b3555b411bb16ad7 | 2,471 | ipynb | Jupyter Notebook | Qonto/Qonto_Get_positions.ipynb | gagan3012/awesome-notebooks | 2c009a96676ee722fe99ff7d4f284592894ea287 | [
"BSD-3-Clause"
] | 1 | 2021-07-06T21:07:48.000Z | 2021-07-06T21:07:48.000Z | Qonto/Qonto_Get_positions.ipynb | girilv/awesome-notebooks | 0a0be50b5184f220f4a4ff5dea85107af85036c5 | [
"BSD-3-Clause"
] | null | null | null | Qonto/Qonto_Get_positions.ipynb | girilv/awesome-notebooks | 0a0be50b5184f220f4a4ff5dea85107af85036c5 | [
"BSD-3-Clause"
] | null | null | null | 32.090909 | 800 | 0.677863 | [
[
[
"<img width=\"10%\" alt=\"Naas\" src=\"https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160\"/>",
"_____no_output_____"
],
[
"# Qonto - Get positions\n<a href=\"https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Qonto/Qonto_Get_positions.ipynb\" target=\"_parent\">\n<img src=\"https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg==\"/>\n</a>",
"_____no_output_____"
]
],
[
[
"from naas_drivers import qonto\n\n# Enter your credentials\nuser_id = 'YOUR_USER_ID'\napi_key = 'YOUR_API_KEY'\n\n# Get bank positions\ndf_positions = qonto.connect(user_id, api_key).positions.get()\ndf_positions",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7488bed5f147361b0c71927a5ed7bc2a7ab72f6 | 567 | ipynb | Jupyter Notebook | Untitled3.ipynb | ljackson707/python-exercises | ae07485e0747457f9424202509c7248ae40e6617 | [
"MIT"
] | 1 | 2021-01-11T18:32:41.000Z | 2021-01-11T18:32:41.000Z | Untitled3.ipynb | ljackson707/python-exercises | ae07485e0747457f9424202509c7248ae40e6617 | [
"MIT"
] | null | null | null | Untitled3.ipynb | ljackson707/python-exercises | ae07485e0747457f9424202509c7248ae40e6617 | [
"MIT"
] | null | null | null | 16.2 | 34 | 0.516755 | [
[
[
"f",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e748b5394f616680741c0be1b8ebe364edb6af9b | 336,695 | ipynb | Jupyter Notebook | Cls4-Supervised Learning - 1/Supervised1 - Case Study 2 - Solution.ipynb | tuhinssam/MLResources | 5410ce33bc6f3a951b0f94c32bf82748a8c5bd6c | [
"MIT"
] | 1 | 2020-01-31T06:18:30.000Z | 2020-01-31T06:18:30.000Z | Cls4-Supervised Learning - 1/Supervised1 - Case Study 2 - Solution.ipynb | tuhinssam/MLResources | 5410ce33bc6f3a951b0f94c32bf82748a8c5bd6c | [
"MIT"
] | null | null | null | Cls4-Supervised Learning - 1/Supervised1 - Case Study 2 - Solution.ipynb | tuhinssam/MLResources | 5410ce33bc6f3a951b0f94c32bf82748a8c5bd6c | [
"MIT"
] | null | null | null | 81.484753 | 209,092 | 0.725859 | [
[
[
"### 1. Let’s attempt to predict the survival of a horse based on various observed medical conditions. Load the data from ‘horses.csv’ and observe whether it contains missing values.\n[Hint: Pandas dataframe has a method isnull]\n### 2. This dataset contains many categorical features, replace them with label encoding.\n[Hint: Refer to get_dummies methods in pandas dataframe or Label encoder in scikit-learn]\n### 3. Replace the missing values by the most frequent value in each column.\n[Hint: Refer to Imputer class in Scikit learn preprocessing module]\n### 4. Fit a decision tree classifier and observe the accuracy.\n### 5. Fit a random forest classifier and observe the accuracy.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns",
"_____no_output_____"
],
[
"dfhorses = pd.read_csv('horse.csv')",
"_____no_output_____"
],
[
"dfhorses.shape",
"_____no_output_____"
],
[
"dfhorses.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 299 entries, 0 to 298\nData columns (total 28 columns):\nsurgery 299 non-null object\nage 299 non-null object\nhospital_number 299 non-null int64\nrectal_temp 239 non-null float64\npulse 275 non-null float64\nrespiratory_rate 241 non-null float64\ntemp_of_extremities 243 non-null object\nperipheral_pulse 230 non-null object\nmucous_membrane 252 non-null object\ncapillary_refill_time 267 non-null object\npain 244 non-null object\nperistalsis 255 non-null object\nabdominal_distention 243 non-null object\nnasogastric_tube 195 non-null object\nnasogastric_reflux 193 non-null object\nnasogastric_reflux_ph 53 non-null float64\nrectal_exam_feces 197 non-null object\nabdomen 181 non-null object\npacked_cell_volume 270 non-null float64\ntotal_protein 266 non-null float64\nabdomo_appearance 134 non-null object\nabdomo_protein 101 non-null float64\noutcome 299 non-null object\nsurgical_lesion 299 non-null object\nlesion_1 299 non-null int64\nlesion_2 299 non-null int64\nlesion_3 299 non-null int64\ncp_data 299 non-null object\ndtypes: float64(7), int64(4), object(17)\nmemory usage: 65.5+ KB\n"
],
[
"dfhorses.sample(10)",
"_____no_output_____"
],
[
"dfhorses.isna().sum()",
"_____no_output_____"
],
[
"dfhorses.outcome.unique()",
"_____no_output_____"
],
[
"dfhorses['rectal_temp'].fillna(dfhorses['rectal_temp'].mean(), inplace= True)",
"_____no_output_____"
],
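[
"# Alternative sketch for step 3 (illustration only, not the flow used below): scikit-learn's\n# SimpleImputer (the modern replacement for the Imputer class mentioned in the hint) can fill\n# every column's missing values with the most frequent value in one go; the column-by-column\n# fillna/LabelEncoder approach continues in the next cells.\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(strategy='most_frequent')\nimputed = pd.DataFrame(imputer.fit_transform(dfhorses), columns=dfhorses.columns)\nimputed.isna().sum().sum()",
"_____no_output_____"
],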
[
"dfhorses.count()",
"_____no_output_____"
],
[
"dfhorses.pulse.fillna(dfhorses.pulse.mean(), inplace = True)",
"_____no_output_____"
],
[
"dfhorses.count()",
"_____no_output_____"
],
[
"dfhorses.temp_of_extremities.unique()",
"_____no_output_____"
],
[
"dfhorses.respiratory_rate.fillna(dfhorses.respiratory_rate.mean(), inplace = True)",
"_____no_output_____"
],
[
"dfhorses.pain.unique()",
"_____no_output_____"
],
[
"dfhorses.sample(10)",
"_____no_output_____"
],
[
"dfhorses.pain.fillna('mild_pain',inplace=True)",
"_____no_output_____"
],
[
"from sklearn.preprocessing import LabelEncoder",
"_____no_output_____"
],
[
"le = LabelEncoder()\ndfhorses['pain']=le.fit_transform(dfhorses['pain'].astype('str'))\n",
"_____no_output_____"
],
[
"dfhorses.pain.unique()",
"_____no_output_____"
],
[
"dfhorses.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 299 entries, 0 to 298\nData columns (total 28 columns):\nsurgery 299 non-null object\nage 299 non-null object\nhospital_number 299 non-null int64\nrectal_temp 299 non-null float64\npulse 299 non-null float64\nrespiratory_rate 299 non-null float64\ntemp_of_extremities 243 non-null object\nperipheral_pulse 230 non-null object\nmucous_membrane 252 non-null object\ncapillary_refill_time 267 non-null object\npain 299 non-null int32\nperistalsis 255 non-null object\nabdominal_distention 243 non-null object\nnasogastric_tube 195 non-null object\nnasogastric_reflux 193 non-null object\nnasogastric_reflux_ph 53 non-null float64\nrectal_exam_feces 197 non-null object\nabdomen 181 non-null object\npacked_cell_volume 270 non-null float64\ntotal_protein 266 non-null float64\nabdomo_appearance 134 non-null object\nabdomo_protein 101 non-null float64\noutcome 299 non-null object\nsurgical_lesion 299 non-null object\nlesion_1 299 non-null int64\nlesion_2 299 non-null int64\nlesion_3 299 non-null int64\ncp_data 299 non-null object\ndtypes: float64(7), int32(1), int64(4), object(16)\nmemory usage: 64.3+ KB\n"
],
[
"dfhorses['age']=le.fit_transform(dfhorses['age'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses['surgery']=le.fit_transform(dfhorses['surgery'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses.head()",
"_____no_output_____"
],
[
"dfhorses.drop('hospital_number',axis=1,inplace=True)",
"_____no_output_____"
],
[
"dfhorses['peristalsis']=le.fit_transform(dfhorses['peristalsis'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses['peristalsis'].unique()",
"_____no_output_____"
],
[
"dfhorses['temp_of_extremities'].unique()",
"_____no_output_____"
],
[
"dfhorses['temp_of_extremities']=le.fit_transform(dfhorses['temp_of_extremities'].astype(str))",
"_____no_output_____"
],
[
"dfhorses['temp_of_extremities'].unique()",
"_____no_output_____"
],
[
"dfhorses['peripheral_pulse'].fillna('normal',inplace=True)",
"_____no_output_____"
],
[
"dfhorses['peripheral_pulse']=le.fit_transform(dfhorses['peripheral_pulse'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses['mucous_membrane'].unique()",
"_____no_output_____"
],
[
"dfhorses['mucous_membrane']=le.fit_transform(dfhorses['mucous_membrane'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses['capillary_refill_time'].unique()",
"_____no_output_____"
],
[
"dfhorses['capillary_refill_time']=le.fit_transform(dfhorses['capillary_refill_time'].astype('str'))",
"_____no_output_____"
],
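[
"# Illustrative sketch (added, not part of the original notebook): the repeated\n# LabelEncoder calls in the surrounding cells could be wrapped in a small helper.\n# 'encode_columns' is a hypothetical name introduced here for illustration only.\ndef encode_columns(df, columns, encoder=None):\n    from sklearn.preprocessing import LabelEncoder\n    encoder = encoder or LabelEncoder()\n    for col in columns:\n        # same pattern as above: cast to str, then fit_transform the column\n        df[col] = encoder.fit_transform(df[col].astype('str'))\n    return df\n\n# example usage on columns that were encoded individually above (kept commented\n# so the already-encoded columns are not transformed twice):\n# encode_columns(dfhorses, ['peristalsis', 'mucous_membrane'])",
"_____no_output_____"
],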
[
"dfhorses['packed_cell_volume'].isna().count()",
"_____no_output_____"
],
[
"dfhorses['total_protein'].isna().count()",
"_____no_output_____"
],
[
"dfhorses['abdomo_protein'].fillna(dfhorses['abdomo_protein'].mean(),inplace = True)",
"_____no_output_____"
],
[
"dfhorses['abdomo_protein'] = le.fit_transform(dfhorses['abdomo_protein'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses.sample(5)",
"_____no_output_____"
],
[
"dfhorses.drop('abdomo_appearance',axis = 1, inplace=True)",
"_____no_output_____"
],
[
"dfhorses.outcome.isna().count()",
"_____no_output_____"
],
[
"#labelEncoding\ndfhorses['outcome'] =le.fit_transform(dfhorses['outcome'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses.outcome.unique()",
"_____no_output_____"
],
[
"dfhorses.drop(['lesion_1','lesion_2','lesion_3'],axis=1,inplace=True)",
"_____no_output_____"
],
[
"dfhorses.isna().sum()",
"_____no_output_____"
],
[
"dfhorses.drop('nasogastric_reflux_ph',axis=1,inplace=True)",
"_____no_output_____"
],
[
"dfhorses.drop(['nasogastric_tube','nasogastric_reflux','rectal_exam_feces','abdomen'],axis=1,inplace=True)",
"_____no_output_____"
],
[
"dfhorses.isna().sum()",
"_____no_output_____"
],
[
"dfhorses[['abdominal_distention','packed_cell_volume','total_protein']].sample(10)",
"_____no_output_____"
],
[
"dfhorses.drop('abdominal_distention',axis=1,inplace=True)",
"_____no_output_____"
],
[
"dfhorses['packed_cell_volume'].fillna(dfhorses['packed_cell_volume'].mean(),inplace=True)",
"_____no_output_____"
],
[
"dfhorses['packed_cell_volume'].unique()",
"_____no_output_____"
],
[
"dfhorses['total_protein'].fillna(dfhorses['total_protein'].mean(),inplace=True)",
"_____no_output_____"
],
[
"dfhorses.isna().sum()",
"_____no_output_____"
],
[
"dfhorses.corr()",
"_____no_output_____"
],
[
"dfhorses.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 299 entries, 0 to 298\nData columns (total 17 columns):\nsurgery 299 non-null int32\nage 299 non-null int32\nrectal_temp 299 non-null float64\npulse 299 non-null float64\nrespiratory_rate 299 non-null float64\ntemp_of_extremities 299 non-null int32\nperipheral_pulse 299 non-null int32\nmucous_membrane 299 non-null int32\ncapillary_refill_time 299 non-null int32\npain 299 non-null int32\nperistalsis 299 non-null int32\npacked_cell_volume 299 non-null float64\ntotal_protein 299 non-null float64\nabdomo_protein 299 non-null int32\noutcome 299 non-null int32\nsurgical_lesion 299 non-null object\ncp_data 299 non-null object\ndtypes: float64(5), int32(10), object(2)\nmemory usage: 28.1+ KB\n"
],
[
"dfhorses.surgical_lesion.unique()",
"_____no_output_____"
],
[
"dfhorses['surgical_lesion']=le.fit_transform(dfhorses['surgical_lesion'].astype('str'))",
"_____no_output_____"
],
[
"dfhorses['cp_data'].unique()",
"_____no_output_____"
],
[
"dfhorses['cp_data']= le.fit_transform(dfhorses['cp_data'].astype('str'))",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,10))\nsns.heatmap(dfhorses.corr(),annot=True, cmap='YlGnBu')",
"_____no_output_____"
],
[
"from sklearn.tree import DecisionTreeClassifier",
"_____no_output_____"
],
[
"#X = dfhorses[['pulse','temp_of_extremities','total_protein','packed_cell_volume','surgical_lesion']]",
"_____no_output_____"
],
[
"X= dfhorses.drop('outcome',axis = 1)",
"_____no_output_____"
],
[
"X.sample(5)",
"_____no_output_____"
],
[
"y = dfhorses.outcome",
"_____no_output_____"
],
[
"y.unique()",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25)",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"X_test.shape",
"_____no_output_____"
],
[
"y_test.shape",
"_____no_output_____"
],
[
"y_train.shape",
"_____no_output_____"
],
[
"dtc = DecisionTreeClassifier()",
"_____no_output_____"
],
[
"dtc.fit(X_train,y_train)",
"_____no_output_____"
],
[
"pred_y = dtc.predict(X_test)",
"_____no_output_____"
],
[
"newdf = pd.DataFrame({\"Test\":y_test,\"Prediction\":pred_y})\nnewdf",
"_____no_output_____"
],
[
"from sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"accuracy_score(y_test,pred_y)",
"_____no_output_____"
],
[
"from sklearn.ensemble import RandomForestClassifier",
"_____no_output_____"
],
[
"rfc = RandomForestClassifier(n_estimators=98)",
"_____no_output_____"
],
[
"rfc.fit(X_train,y_train)",
"_____no_output_____"
],
[
"rfcprediction = rfc.predict(X_test)",
"_____no_output_____"
],
[
"accuracy_score(y_test,rfcprediction)",
"_____no_output_____"
],
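[
"# Illustrative sketch (added for clarity, not in the original notebook): accuracy\n# alone can hide per-class behaviour, so a confusion matrix and classification\n# report give a fuller picture of the random forest predictions computed above.\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nprint(confusion_matrix(y_test, rfcprediction))\nprint(classification_report(y_test, rfcprediction))",
"_____no_output_____"
],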
[
"newdf1 = pd.DataFrame({\"Test\":y_test,\"Prediction\":rfcprediction})\nnewdf1",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e748bb57a7d5e470d7b735e0c0cfe760e0b31552 | 2,890 | ipynb | Jupyter Notebook | docs/contents/same_value.ipynb | dprada/evidence | d8400fe1a3c662be01f6f9f658fc5b92b894556d | [
"MIT"
] | null | null | null | docs/contents/same_value.ipynb | dprada/evidence | d8400fe1a3c662be01f6f9f658fc5b92b894556d | [
"MIT"
] | null | null | null | docs/contents/same_value.ipynb | dprada/evidence | d8400fe1a3c662be01f6f9f658fc5b92b894556d | [
"MIT"
] | 1 | 2021-11-06T16:03:46.000Z | 2021-11-06T16:03:46.000Z | 19.794521 | 64 | 0.513841 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"# Same value",
"_____no_output_____"
]
],
[
[
"import evidence as evi",
"_____no_output_____"
],
[
"datum1 = evi.Evidence(3.12)\ndatum1.add_reference({'database':'DOI', 'id':'XXX'})\n\ndatum2 = evi.Evidence(3.12)\ndatum2.add_reference({'database':'PubMed', 'id':'YYY'})\n\ndatum3 = evi.Evidence(3.12)\ndatum3.add_reference({'database':'PubMed', 'id':'ZZZ'})\n\ndatum4 = evi.Evidence(6.58)\ndatum4.add_reference({'database':'PubMed', 'id':'ZZZ'})",
"_____no_output_____"
],
[
"datum1.value == datum2.value",
"_____no_output_____"
],
[
"evi.same_value([datum1, datum2, datum3])",
"_____no_output_____"
],
[
"evi.same_value([datum1, datum2, datum4])",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e748d7b67064ac9799122b91a468785fc233cfb5 | 15,292 | ipynb | Jupyter Notebook | challenge2/modelling_sparknlp.ipynb | fahminlb33/ieee-gsc-2021 | c4fe43405ecd88f0bbc3f8adbba8f08746334f69 | [
"MIT"
] | null | null | null | challenge2/modelling_sparknlp.ipynb | fahminlb33/ieee-gsc-2021 | c4fe43405ecd88f0bbc3f8adbba8f08746334f69 | [
"MIT"
] | null | null | null | challenge2/modelling_sparknlp.ipynb | fahminlb33/ieee-gsc-2021 | c4fe43405ecd88f0bbc3f8adbba8f08746334f69 | [
"MIT"
] | null | null | null | 15,292 | 15,292 | 0.70769 | [
[
[
"!wget http://setup.johnsnowlabs.com/kaggle.sh -O - | bash",
"_____no_output_____"
],
[
"import csv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport sparknlp\nfrom sparknlp.annotator import *\nfrom sparknlp.common import *\nfrom sparknlp.base import *\n\nfrom pyspark.ml import Pipeline\n\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score",
"_____no_output_____"
],
[
"RANDOM_SEED = 42\nCLASSIFIER_THRESHOLD = 0.4\n\nnp.random.seed(RANDOM_SEED)",
"_____no_output_____"
],
[
"class_map = {\n 0: \"optimistic\",\n 1: \"thankful\",\n 2: \"empathetic\",\n 3: \"pessimistic\",\n 4: \"anxious\",\n 5: \"sad\",\n 6: \"annoyed\",\n 7: \"denial\",\n 8: \"surprise\",\n 9: \"official_report\",\n 10: \"joking\"\n}\n\ninv_class_map = {\n \"optimistic\": 0,\n \"thankful\": 1,\n \"empathetic\": 2,\n \"pessimistic\": 3,\n \"anxious\": 4,\n \"sad\": 5,\n \"annoyed\": 6,\n \"denial\": 7,\n \"surprise\": 8,\n \"official_report\": 9,\n \"joking\": 10\n}",
"_____no_output_____"
],
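[
"# Small illustrative check (added, not part of the original notebook): the raw\n# label column stores space-separated class ids such as '0 3 5'; class_map and\n# inv_class_map convert between ids and names, exactly as the parsing loop\n# further below does. The example string here is hypothetical.\nexample_labels = '0 3 5'\nnames = [class_map[int(i)] for i in example_labels.split(' ')]\nprint(names)\nprint([inv_class_map[n] for n in names])",
"_____no_output_____"
],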
[
"spark = sparknlp.start(gpu=False)\nprint(\"Spark NLP version: \", sparknlp.version())",
"_____no_output_____"
],
[
"records = []\nwith open('/kaggle/input/sentiment-analysis-of-covid-19-related-tweets/training.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n current_classes = []\n classes = list(map(int, row[2].split(\" \")))\n for i in range(0, 11):\n if i in classes:\n current_classes.append(class_map[i])\n\n records.append((row[0], row[1], current_classes))\n line_count += 1\n\n print(f'Processed {line_count} lines.')\n\ndf = pd.DataFrame.from_records(records, columns=[\"id\", \"text\", \"labels\"])\n\n#trainDataset, testDataset = spark.read.parquet(\"training_sparknlp.parquet\").randomSplit([0.9, 0.1], seed=RANDOM_SEED)\ntrainDataset, testDataset = spark.createDataFrame(df).randomSplit([0.9, 0.1], seed=RANDOM_SEED)\n\n#df.to_parquet(\"training_sparknlp.parquet\", compression=\"gzip\")\ndf.info()",
"_____no_output_____"
],
[
"trainDataset.show(2)",
"_____no_output_____"
],
[
"print(\"Train dataset: \", trainDataset.cache().count())\nprint(\"Test dataset: \", testDataset.cache().count())",
"_____no_output_____"
],
[
"%%time\ndocument = DocumentAssembler()\\\n .setInputCol(\"text\")\\\n .setOutputCol(\"document\")",
"_____no_output_____"
],
[
"%%time\nsentence_detector = SentenceDetectorDLModel\\\n .pretrained(\"sentence_detector_dl\", \"en\") \\\n .setInputCols([\"document\"]) \\\n .setOutputCol(\"sentences\")",
"_____no_output_____"
],
[
"%%time\ntokenizer = Tokenizer() \\\n .setInputCols([\"sentences\"]) \\\n .setOutputCol(\"token\")",
"_____no_output_____"
],
[
"%%time\n#sentence_embeddings = BertSentenceEmbeddings.pretrained(\"sent_covidbert_large_uncased\", \"en\") \\\n# .setInputCols(\"document\") \\\n# .setOutputCol(\"sentence_embeddings\")\nword_embeddings = BertEmbeddings.pretrained(\"covidbert_large_uncased\", \"en\") \\\n .setInputCols(\"sentences\", \"token\") \\\n .setOutputCol(\"embeddings\")",
"_____no_output_____"
],
[
"%%time\nsentence_embeddings = SentenceEmbeddings() \\\n .setInputCols([\"sentences\", \"embeddings\"]) \\\n .setOutputCol(\"sentence_embeddings\") \\\n .setPoolingStrategy(\"AVERAGE\")",
"_____no_output_____"
],
[
"%%time\nmultiClassifier = MultiClassifierDLApproach()\\\n .setInputCols(\"sentence_embeddings\")\\\n .setOutputCol(\"category\")\\\n .setLabelColumn(\"labels\")\\\n .setBatchSize(128)\\\n .setMaxEpochs(10)\\\n .setLr(1e-3)\\\n .setThreshold(CLASSIFIER_THRESHOLD)\\\n .setShufflePerEpoch(False)\\\n .setEnableOutputLogs(True)\\\n .setValidationSplit(0.1)",
"_____no_output_____"
],
[
"pipeline = Pipeline(\n stages = [\n document,\n sentence_detector,\n tokenizer,\n word_embeddings,\n sentence_embeddings,\n multiClassifier\n ])",
"_____no_output_____"
],
[
"%%time\npipelineModel = pipeline.fit(trainDataset)",
"_____no_output_____"
],
[
"!ls -l ~/annotator_logs/",
"_____no_output_____"
],
[
"!cat ~/annotator_logs/MultiClassifierDLApproach_da4ee550bf50.log",
"_____no_output_____"
],
[
"pipelineModel.stages[-1].write().overwrite().save('tmp_multi_classifierDL_model')",
"_____no_output_____"
],
[
"!zip -r model2.zip /kaggle/working/tmp_multi_classifierDL_model",
"_____no_output_____"
],
[
"multiClassifier = MultiClassifierDLModel.load(\"../input/ieee-gsc-challenge-2-covid19-bert-model/kaggle/working/tmp_multi_classifierDL_model\") \\\n .setInputCols([\"sentence_embeddings\"])\\\n .setOutputCol(\"category\")\\\n .setThreshold(0.4)\n\npipeline = Pipeline(\n stages = [\n document,\n sentence_embeddings,\n multiClassifier\n ])",
"_____no_output_____"
],
[
"preds = pipeline.fit(testDataset).transform(testDataset)\npreds_df = preds.select('labels', 'category.result').toPandas()",
"_____no_output_____"
],
[
"mlb = MultiLabelBinarizer()\n\ny_true = mlb.fit_transform(preds_df['labels'])\ny_pred = mlb.transform(preds_df['result'])",
"_____no_output_____"
],
[
"print(\"Classification report: \\n\", (classification_report(y_true, y_pred)))\nprint(\"F1 micro averaging:\",(f1_score(y_true, y_pred, average='micro')))\nprint(\"F1 macro averaging:\",(f1_score(y_true, y_pred, average='macro')))\nprint(\"ROC: \",(roc_auc_score(y_true, y_pred, average=\"micro\")))",
"_____no_output_____"
],
[
"submission_df = pd.read_csv(\"/kaggle/input/sentiment-analysis-of-covid-19-related-tweets/validation.csv\")\nsubmission_df.columns = [\"id\", \"text\"]\nsubmissionDataset = spark.createDataFrame(submission_df)\n\nsubmission_df.head(10)",
"_____no_output_____"
],
[
"preds = pipeline.fit(submissionDataset).transform(submissionDataset)\npreds_df = preds.select('id', 'category.result').toPandas()\npreds_df.to_parquet(\"sparknlp-bert-covid.parquet\", compression=\"gzip\")",
"_____no_output_____"
],
[
"preds_df.head(10)",
"_____no_output_____"
],
[
"preds_df[\"Labels\"] = preds_df[\"result\"].apply(lambda x: \" \".join(sorted([str(inv_class_map[idx]) for idx in x])))\npreds_df.columns = [\"ID\", \"result\", \"Labels\"]\npreds_df[[\"ID\", \"Labels\"]].to_csv(\"challenge2-sparkml-bert_covid19_large_uncased-3.csv\", index=None)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e748d8349f31d62c4ced352e40002d31923e0506 | 55,895 | ipynb | Jupyter Notebook | tests/accumulation.ipynb | CyberCRI/herocoli-metrics-redwire | abd2e73f3ec6a5088be3302074d5b4b2a1d65362 | [
"CC0-1.0"
] | 1 | 2017-01-18T13:46:31.000Z | 2017-01-18T13:46:31.000Z | tests/accumulation.ipynb | CyberCRI/herocoli-metrics-redwire | abd2e73f3ec6a5088be3302074d5b4b2a1d65362 | [
"CC0-1.0"
] | null | null | null | tests/accumulation.ipynb | CyberCRI/herocoli-metrics-redwire | abd2e73f3ec6a5088be3302074d5b4b2a1d65362 | [
"CC0-1.0"
] | null | null | null | 42.312642 | 93 | 0.438769 | [
[
[
"%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt \nimport seaborn as sns",
"_____no_output_____"
],
[
"df100 = pd.read_csv(\"data/1.0.csv\")\ndf131 = pd.read_csv(\"data/1.31.csv\")\ndf132 = pd.read_csv(\"data/1.32.csv\")",
"_____no_output_____"
],
[
"print 'df100.head()='\ndf100.head()",
"df100.head()=\n"
],
[
"print 'df131.head()='\ndf131.head()",
"df131.head()=\n"
],
[
"print 'df132.head()='\ndf132.head()",
"df132.head()=\n"
]
],
[
[
"# Accumulation Test",
"_____no_output_____"
]
],
[
[
"relevantColumns = ['id', 'serverTime', 'playerId', 'type', 'section']\n\ndf = df100.loc[:,relevantColumns]\npart131 = df131.loc[:,relevantColumns]\npart132 = df132.loc[:,relevantColumns]\n\ndf = pd.concat([df, part131, part132])\n\ndf\n",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e748f6a42a492cfc45a506db3bcee060f01f787a | 19,443 | ipynb | Jupyter Notebook | week5.ipynb | ali820/leanringPython | a3b000108ec4135c1795a812e6409c1a960aae4c | [
"MIT"
] | null | null | null | week5.ipynb | ali820/leanringPython | a3b000108ec4135c1795a812e6409c1a960aae4c | [
"MIT"
] | null | null | null | week5.ipynb | ali820/leanringPython | a3b000108ec4135c1795a812e6409c1a960aae4c | [
"MIT"
] | null | null | null | 24.273408 | 165 | 0.408682 | [
[
[
"## 第五周 函数和代码\n### 5.1 函数的定义与使用\n### 5.2 实例7:七段数码管绘制\n### 5.3 代码复用与函数递归\n### 5.4 模块4:PyInstaller库的使用\n### 5.5 科赫雪花小包裹\n\n",
"_____no_output_____"
],
[
"### 5.1 函数的定义与使用\n- 函数的理解与定义\n - 函数是一段代码的表示,是具有一定功能的、可重复使用的语句组,是一种功能的抽象\n - 两个作用:\n - 降低编程难度\n - 代码复用\n - ```\n def<函数名>(<参数(0个或者多个)>):\n <函数体>\n return <返回值>\n ```\n - 函数定义时,所指定的参数是一种占位符\n - 函数定义后,如果不经过调用,不会被执行\n - 函数定义时,参数是输入、函数体是处理、结果是输出(IPO)\n \n",
"_____no_output_____"
]
],
[
[
"#计算n!\ndef fact(n):\n s=1\n for i in range(1,n+1):\n s*=i\n return s\n\nn = input()\nb=fact(int(n))\nprint(b)",
"3628800\n"
]
],
[
[
"- 函数的使用及调用过程\n - 函数只有被调用,才会被执行\n\n- 函数的参数传递\n - 无论函数有没有参数,都要保留括号\n - 可选参数:函数定义时可以为某些参数指定默认值,构成可选参数\n - ```\n def<函数名>(<非可选参数(必选参数)>,<可选参数>):\n <函数体>\n return <返回值>\n ```\n - 可选参数一定要在必选参数后面\n - 可变参数的传递:函数定义时可以设计可变数量参数,既不确定参数总数量\n - ```\n def<函数名>(<参数>,*b):\n <函数体>\n return <返回值>\n ```\n - 函数调用时,参数可以按照位置或名称方式传递\n",
"_____no_output_____"
]
],
[
[
"#可选参数:计算n!//m\ndef fact(n,m=1): #m是可选参数\n s=1\n for i in range(1,n+1):\n s*=i\n return s//m\nb=fact(10,5) #位置传递\nc=fact(m=5,n=10) #名称传递\nprint(b,c)\n",
"725760 725760\n"
],
[
"#可变参数:计算n!//m\ndef fact(n,*b): #*b为可变参数\n s=1\n for i in range(1,n+1):\n s*=i\n for item in b:\n s*=item\n return s\nprint(fact(3))\nprint(fact(3,4,6))\nprint(fact(3,10,9,10))",
"6\n144\n5400\n"
]
],
[
[
"- 函数的返回值\n - 函数可以返回0或者多个结果,也可以不返回\n",
"_____no_output_____"
]
],
[
[
"#可选参数:计算n!//m\ndef fact(n,m=1): #m是可选参数\n s=1\n for i in range(1,n+1):\n s*=i\n return s,m,s//m #返回三个值\n\nb=fact(10,5) #位置传递\nc=fact(m=5,n=10) #名称传递\n\nx,y,z = fact(10,5) #返回的三个值分别赋给xyz\n\nprint(b,c)\nprint(x,y,z)",
"(3628800, 5, 725760) (3628800, 5, 725760)\n3628800 5 725760\n"
]
],
[
[
"- 局部变量和全局变量\n - 局部变量是函数内部使用的变量\n\n\n",
"_____no_output_____"
],
[
"1. 局部变量和全局变量是不同变量\n- 局部变量是函数内部的占位符,可以与全局变量重名但是不相同\n- 函数运算结束后,局部变量被释放\n- 可以使用`global`保留字在函数内部使用全局变量",
"_____no_output_____"
]
],
[
[
"n,s=10,100 #此处n,s是全局变量\ndef fact(n): #fact函数中的n,s是局部变量\n s=1\n for i in range(1,n+1):\n s*=i\n return s\nprint(fact(n),s) #n,s是全局变量",
"3628800 100\n"
],
[
"n,s=10,100 #此处n,s是全局变量\ndef fact(n): #fact函数中的n,s是局部变量\n global s #声明是全局变量\n for i in range(1,n+1):\n s*=i\n return s\nprint(fact(n),s) #n,s是全局变量",
"362880000 362880000\n"
]
],
[
[
"2. 局部变量为组合数据类型且未创建,等同于全局变量",
"_____no_output_____"
]
],
[
[
"ls=['F','f'] #创建全局变量s\ndef func(a):\n ls.append(a) #此处ls是列表类型,未真实创建则等同于全局变量\n return\nfunc('C') #全局变量ls被修改\nprint(ls)",
"['F', 'f', 'C']\n"
],
[
"ls=['F','f'] #创建全局变量s\ndef func(a):\n ls = [] #此处ls是列表类型,真实创建ls是局部变量,函数运行完成就被释放\n ls.append(a) \n return\nfunc('C') #局部变量ls被修改\nprint(ls)",
"['F', 'f']\n"
]
],
[
[
"- lambda函数\n - lambda函数返回函数名作为结果\n 1. lambda函数是一种匿名函数,即没有名字的函数\n 2. 使用lambda保留字定义,函数名是返回结果\n 3. lambda函数用于定义简单的、能够在一行内表示的函数\n ```\n <函数名>=lambda<参数>:<表达式>\n ```\n - 谨慎使用lambda函数\n - lambda函数主要用作一些特定函数或方法的参数\n - lambda函数有固定的使用方式",
"_____no_output_____"
]
],
[
[
"#\nf = lambda x,y:x+y\nprint(f(10,15))",
"25\n"
]
],
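[
[
"# Illustrative example (added): the point above about lambda being used as an\n# argument to other functions, shown with sorted() and a key function.\npairs = [(3, 'three'), (1, 'one'), (2, 'two')]\nprint(sorted(pairs, key=lambda p: p[0]))",
"_____no_output_____"
]
],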
[
[
"### 5.2 实例7:七段数码管绘制\n- 交通灯的显示等\n 1. 绘制三个数字的数码管\n - 七段数码管由7个基本线条组成\n - 七段数码管可以有固定顺序\n - 不同数字显示不同的线条\n 2. 获得一串数字,绘制对应数码管\n 3. 获得当前时间,绘制对应数码管\n",
"_____no_output_____"
]
],
[
[
"#SevenDigitsDrawV1.py\nimport turtle as t\n\ndef drawLine(draw): #绘制单段数码管\n t.pendown() if draw else t.penup()\n t.fd(40)\n t.right(90)\n\ndef drawDigit(digit): #根据数字绘制七段数码管\n drawLine(True) if digit in [2,3,4,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,1,3,4,5,6,7,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,3,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,6,8] else drawLine(False)\n t.left(90)\n drawLine(True) if digit in [0,4,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,3,5,6,7,8,9] else drawLine(False)\n drawLine(True) if digit in [0,1,2,3,4,7,8,9] else drawLine(False)\n t.left(180) #为后续数字确定位置\n t.penup() #为后续数字确定位置\n t.fd(20) #为后续数字确定位置\n\ndef drawDate(date): \n for i in date:\n drawDigit(eval(i)) #通过eval()函数将数字变为整数\n\ndef main():\n t.setup(800,350,200,200)\n t.penup()\n t.fd(-300)\n t.pensize(5)\n drawDate('20200317')\n t.hideturtle()\n t.done\nmain()",
"_____no_output_____"
],
[
"#SevenDigitsDrawV2.py\nimport turtle as t\nimport time as ti\n\ndef drawGap(): #绘制数码管间隔\n t.penup()\n t.fd(5)\n\ndef drawLine(draw):\n drawGap() #绘制单段数码管\n t.pendown() if draw else t.penup()\n t.fd(40)\n drawGap()\n t.right(90)\n\ndef drawDigit(digit): #根据数字绘制七段数码管\n drawLine(True) if digit in [2,3,4,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,1,3,4,5,6,7,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,3,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,6,8] else drawLine(False)\n t.left(90)\n drawLine(True) if digit in [0,4,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,3,5,6,7,8,9] else drawLine(False)\n drawLine(True) if digit in [0,1,2,3,4,7,8,9] else drawLine(False)\n t.left(180) #为后续数字确定位置\n t.penup() #为后续数字确定位置\n t.fd(20) #为后续数字确定位置\n\n\n#遇到-是年,遇到=是月,遇到+是日\ndef drawDate(date): \n t.pencolor('red') #date为日期,格式为‰Y-‰m=%d+\n for i in date:\n if i == '-':\n t.write('年',font = ('Arial',18,'normal')) \n t.pencolor('green')\n t.fd(40)\n elif i == '=':\n t.write('月',font = ('Arial',18,'normal'))\n t.pencolor('blue')\n elif i == '+':\n t.write('日',font = ('Arial',18,'normal'))\n else:\n drawDigit(eval(i))\n\ndef main():\n t.setup(800,350,200,200)\n t.penup()\n t.fd(-300)\n t.pensize(5)\n drawDate(ti.strftime('%Y-%m=%d+',ti.gmtime()))\n t.hideturtle()\n t.done\nmain()",
"_____no_output_____"
]
],
[
[
"### 5.3 代码复用与函数递归\n- 把代码当作资源进行抽象:\n - 代码资源化:程序代码是一种用来表达计算的“资源”\n - 代码抽象化:使用函数等方法对代码赋予更高级别的定义\n - 代码复用:同一份代码在需要时可以被重复使用\n- 函数和对象是代码复用的两种主要形式\n - 函数:将代码命名,在代码层面建立了初步抽象\n - 对象:通过属性和方法`<a>.<b>`和`<a>.<b>()`,在函数之上再次组织进行抽象\n- 模块化设计:分而治之\n - 通过函数或对象封装将程序划分为模块及模块间的表达\n - 具体包括:主程序、子程序和子程序间的关系\n - 是一中分而治之、分层抽象、体系化设计的思想\n\n- 紧耦合:两个部分之间交流很多,无法独立存在\n- 松耦合:两个部分之间交流很少,可以独立存在,由清晰独立的接口\n- 在模块内部尽可能紧耦合、模块之间尽可能松耦合\n\n- 函数递归:函数中调用函数自身的方式,类似数学归纳法\n - 链条:计算过程存在递归链条\n - 基例:存在一个人或者多个不需要再次递归的基例\n - 递归本身是一个函数,需要通过函数定义方式描述\n - 函数内部,采用分支语句对输入参数进行判断\n - 基例和链条,分别编写对应代码",
"_____no_output_____"
]
],
[
[
"#n!计算\ndef fact(n):\n if n==0:\n return 1\n else:\n return n*fact(n-1)\nn=10\nprint(fact(n))\n",
"3628800\n"
],
[
"#字符串反转-递归\n\n# s=input()\n# s[::-1]\n\ndef rvs(s):\n if s == '':\n return s\n else:\n return rvs(s[1:])+s[0]\n\ns = 'sdf'\nrvs(s)\n",
"_____no_output_____"
],
[
"#斐波那契数列\ndef f(n):\n if n ==1 or n == 2:\n return 1\n else:\n return f(n-1)+f(n-2)\n\nn=10\nf(n)",
"_____no_output_____"
],
[
"#汉诺塔\nfrom time import perf_counter \nst = perf_counter()\ncount = 0\ndef hanoi(n,src,dst,mid):\n#n是圆盘数,src是源柱子,dst是目标柱子,mid是中间柱子\n global count \n if n == 1:\n print('{}:{}->{}'.format(1,src,dst))\n count +=1\n else:\n hanoi(n-1,src,mid,dst)\n print('{}:{}->{}'.format(n,src,dst))\n count+=1\n hanoi(n-1,mid,dst,src)\n\nn = 3\nhanoi(n,'a','c','b')\nprint(count,perf_counter()-st)\n\n",
"1:a->c\n2:a->b\n1:c->b\n3:a->c\n1:b->a\n2:b->c\n1:a->c\n7 0.00025690000256872736\n"
]
],
[
[
"### 5.4 模块4:PyInstaller库的使用\n- PyInstaller可以将.py源代码文件封装成为可执行文件\n - pyinstaller -h:查看帮助\n - pyinstaller --clean:清理打包过程的临时文件\n - -D,--online:默认值,生成dist文件夹\n - -F,--onefile:在dist文件夹中只生成独立打包文件\n - -i<图标文件名>指定打包程序使用的图标(icon)文件\n",
"_____no_output_____"
],
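[
"# Illustrative usage (added; nothing is imported or executed here). Typical\n# command lines based on the options listed above, with SevenDigitsDrawV2.py and\n# icon.ico used only as placeholder file names:\n#   pyinstaller -F SevenDigitsDrawV2.py            # single-file executable in dist/\n#   pyinstaller -F -i icon.ico SevenDigitsDrawV2.py  # same, with a custom icon\n#   pyinstaller --clean SevenDigitsDrawV2.py       # clean temporary build files",
"_____no_output_____"
],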
[
"### 5.5 科赫雪花小包裹\n- 科赫曲线:一种迭代的自相似曲线",
"_____no_output_____"
]
],
[
[
"#KochDrawV1\nimport turtle as t\ndef koch(size,n):\n if n == 0:\n t.fd(size)\n else:\n for angle in [0,60,-120,60]:\n t.left(angle)\n koch(size/3,n-1)\n\ndef main():\n t.setup(800,400)\n t.penup()\n t.goto(-300,-50)\n t.pendown()\n t.pensize(2)\n koch(600,3)\n t.hideturtle()\n t.done\nmain()",
"_____no_output_____"
],
[
"#KochDrawV2\nimport turtle as t\ndef koch(size,n):\n if n == 0:\n t.fd(size)\n else:\n for angle in [0,60,-120,60]:\n t.left(angle)\n koch(size/3,n-1)\n\ndef main():\n t.setup(600,600)\n t.penup()\n t.goto(-200,100)\n t.pendown()\n t.pensize(2)\n level=int(input())\n koch(400,level)\n t.right(120)\n koch(400,level)\n t.right(120)\n koch(400,level)\n t.hideturtle()\n t.done\nmain()",
"_____no_output_____"
]
],
[
[
"\n### 随机密码生成\n描述\n补充编程模板中代码,完成如下功能:\n以整数17为随机数种子,获取用户输入整数N为长度,产生3个长度为N位的密码,密码的每位是一个数字。每个密码单独一行输出。\n\n产生密码采用random.randint()函数。",
"_____no_output_____"
]
],
[
[
"import random\n\ndef genpwd(length):\n return random.randint(pow(10,length-1),pow(10,length))\n\n\nlength = eval(input())\nrandom.seed(17)\nfor i in range(3):\n print(genpwd(length))\n",
"634\n524\n926\n"
]
],
[
[
"### 连续质数计算\n描述\n补充编程模板中代码,完成如下功能:\n\n获得用户输入数字N,计算并输出从N开始的5个质数,单行输出,质数间用逗号,分割。\n\n注意:需要考虑用户输入的数字N可能是浮点数,应对输入取整数;最后一个输出后不用逗号。",
"_____no_output_____"
]
],
[
[
"def prime(m):\n count = 1\n while(count<=5):\n isprime = 1\n for i in range(2,m):\n if m%i == 0:\n isprime = 0\n if isprime == 1:\n if count == 5:\n print(m,end='')\n else:\n print(m,end=',')\n count+=1\n m+=1\n\nn = eval(input())\nprime(int(n))",
"13,17,19,23,29"
],
[
"#参考代码\ndef prime(m):\n for i in range(2,m):\n if m % i == 0:\n return False\n return True\n\nn = eval(input())\nn_ = int(n)\nn_ = n_+1 if n_ < n else n_\ncount = 5\n\nwhile count > 0:\n if prime(n_):\n if count > 1:\n print(n_, end=\",\")\n else:\n print(n_, end=\"\")\n count -= 1 \n n_ += 1",
"13,17,19,23,29"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e748f92b6cc1cd9ac703ae88e0c96003f7734cf0 | 59,545 | ipynb | Jupyter Notebook | polygon/example.ipynb | sunmengnan/city_brain | 478f0b974f4491b4201956f37b83ce6860712bc8 | [
"MIT"
] | null | null | null | polygon/example.ipynb | sunmengnan/city_brain | 478f0b974f4491b4201956f37b83ce6860712bc8 | [
"MIT"
] | null | null | null | polygon/example.ipynb | sunmengnan/city_brain | 478f0b974f4491b4201956f37b83ce6860712bc8 | [
"MIT"
] | null | null | null | 228.141762 | 27,996 | 0.911613 | [
[
[
"from simple_polygon import Simple_polygon\nimport osmnx as ox, matplotlib.pyplot as plt, numpy as np\nox.config(use_cache=True, log_console=True)\n%matplotlib inline",
"_____no_output_____"
],
[
"# get a street network and plot it with all edge intersections\npoint_A = (30.19689,120.17435)\npoint_B = (30.19728,120.17313)\nG = ox.graph_from_bbox(north=point_B[0], south=point_A[0] , east=point_A[1], west=point_B[1],\n network_type='drive')\nG_proj = ox.project_graph(G)\nfig, ax = ox.plot_graph(G_proj, fig_height=10, node_color='orange', node_size=30, \n node_zorder=2, node_edgecolor='k')",
"_____no_output_____"
],
[
"nodes,edges = ox.save_load.graph_to_gdfs \\\n (G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True)\nnodes",
"_____no_output_____"
],
[
"nodes_shapely = nodes['geometry'].tolist()\nnodes_shapely",
"_____no_output_____"
],
[
"nodes_rm_point = [np.array((node_shapely.xy[0][0],node_shapely.xy[1][0])) for node_shapely in nodes_shapely ]\nnodes_rm_point",
"_____no_output_____"
],
[
"spg = Simple_polygon(nodes_rm_point)\na = spg.main(plot=True)\n",
"[array([120.1738282, 30.1970861]), array([120.1738926, 30.1969733]), array([120.1739526, 30.1971302]), array([120.1740141, 30.1970353])]\n"
],
[
"a",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e748fe342712bb0f37e57399a12a0459278bacc0 | 53,699 | ipynb | Jupyter Notebook | module2-sql-for-analysis/titanicData.ipynb | TimTree/DS-Unit-3-Sprint-2-SQL-and-Databases | 106f53c30fd96ea9a5edf72939743d088b9d8ec2 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/titanicData.ipynb | TimTree/DS-Unit-3-Sprint-2-SQL-and-Databases | 106f53c30fd96ea9a5edf72939743d088b9d8ec2 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/titanicData.ipynb | TimTree/DS-Unit-3-Sprint-2-SQL-and-Databases | 106f53c30fd96ea9a5edf72939743d088b9d8ec2 | [
"MIT"
] | null | null | null | 115.730603 | 20,592 | 0.590663 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_csv('module2-sql-for-analysis/titanic.csv')\ndf.Name = df.Name.replace(\"'\", '', regex=True)",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.shape",
"_____no_output_____"
],
[
"import psycopg2",
"_____no_output_____"
],
[
"pg_conn = psycopg2.connect(database=dbname, user=user, password=password, host=host)",
"_____no_output_____"
],
[
"pg_conn",
"_____no_output_____"
],
[
"import sqlite3",
"_____no_output_____"
],
[
"sl_conn = sqlite3.connect('module2-sql-for-analysis/titanic.sqlite3')",
"_____no_output_____"
],
[
"df.to_sql('titanic_set2',con=sl_conn,if_exists='replace')",
"/Users/timothy/miniconda3/lib/python3.7/site-packages/pandas/core/generic.py:2712: UserWarning: The spaces in these column names will not be changed. In pandas versions < 0.14, spaces were converted to underscores.\n method=method,\n"
],
[
"sl_curs = sl_conn.cursor()",
"_____no_output_____"
],
[
"sl_curs.execute('SELECT COUNT(*) FROM titanic_set2;').fetchall()",
"_____no_output_____"
],
[
"sl_curs.execute('PRAGMA table_info(titanic_set2);').fetchall()",
"_____no_output_____"
],
[
"create_titanic_table = \"\"\"\n CREATE TABLE titanic_set2 (\n index SERIAL PRIMARY KEY,\n Survived INT,\n Pclass INT,\n Name TEXT,\n Sex sex,\n Age REAL,\n SiblingsSpousesAboard INT,\n ParentsChildrenAboard INT,\n Fare REAL\n );\n\"\"\"",
"_____no_output_____"
],
[
"pg_curs = pg_conn.cursor()",
"_____no_output_____"
],
[
"pg_curs.execute('SELECT * FROM test_table;')",
"_____no_output_____"
],
[
"pg_curs.fetchall()",
"_____no_output_____"
],
[
"#pg_curs.execute(\"CREATE TYPE sex AS ENUM ('male', 'female');\")",
"_____no_output_____"
],
[
"#pg_curs.execute(\"CREATE TYPE sex AS ENUM ('male', 'female');\")",
"_____no_output_____"
],
[
"pg_curs.execute(create_titanic_table)",
"_____no_output_____"
],
[
"show_tables = \"\"\"\nSELECT *\nFROM pg_catalog.pg_tables\nWHERE schemaname != 'pg_catalog'\nAND schemaname != 'information_schema';\n\"\"\"",
"_____no_output_____"
],
[
"pg_curs.execute(show_tables)",
"_____no_output_____"
],
[
"pg_curs.fetchall()",
"_____no_output_____"
],
[
"allData = sl_curs.execute('SELECT * from titanic_set2;').fetchall()",
"_____no_output_____"
],
[
"len(allData)",
"_____no_output_____"
],
[
"allData[0]",
"_____no_output_____"
],
[
"allData[-1]",
"_____no_output_____"
],
[
"example_insert = \"\"\"\nINSERT INTO titanic_set2\n(Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\nVALUES \"\"\" + str(allData[1][1:]) + ';'",
"_____no_output_____"
],
[
"print(example_insert)",
"\nINSERT INTO titanic_set2\n(Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\nVALUES (1, 1, 'Mrs. John Bradley (Florence Briggs Thayer) Cumings', 'female', 38.0, 1, 0, 71.2833);\n"
],
[
"for theData in allData:\n insert_data=\"\"\"\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES \"\"\" + str(theData[1:]) + ';'\n print(insert_data)\n pg_curs.execute(insert_data)",
"UES (0, 1, 'Mr. Benjamin Guggenheim', 'male', 46.0, 0, 0, 79.2);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Andrew Keane', 'male', 20.0, 0, 0, 7.75);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Alfred Gaskell', 'male', 16.0, 0, 0, 26.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Stella Anna Sage', 'female', 21.0, 8, 2, 69.55);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 1, 'Mr. William Fisher Hoyt', 'male', 43.0, 0, 0, 30.6958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Ristiu Dantcheff', 'male', 25.0, 0, 0, 7.8958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Richard Otter', 'male', 39.0, 0, 0, 13.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Dr. Alice (Farnham) Leader', 'female', 49.0, 0, 0, 25.9292);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mrs. Mara Osman', 'female', 31.0, 0, 0, 8.6833);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Yousseff Ibrahim Shawah', 'male', 30.0, 0, 0, 7.2292);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mrs. Jean Baptiste (Rosalie Paula Govaert) Van Impe', 'female', 30.0, 1, 1, 24.15);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Martin Ponesell', 'male', 34.0, 0, 0, 13.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Mrs. Harvey (Charlotte Annie Tate) Collyer', 'female', 31.0, 1, 1, 26.25);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Master. William Thornton II Carter', 'male', 11.0, 1, 2, 120.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Master. Assad Alexander Thomas', 'male', 0.42, 0, 1, 8.5167);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mr. Oskar Arvid Hedman', 'male', 27.0, 0, 0, 6.975);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Karl Johan Johansson', 'male', 31.0, 0, 0, 7.775);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 1, 'Mr. Thomas Jr Andrews', 'male', 39.0, 0, 0, 0.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. 
Ellen Natalia Pettersson', 'female', 18.0, 0, 0, 7.775);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. August Meyer', 'male', 39.0, 0, 0, 13.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. Norman Campbell (Bertha Griggs) Chambers', 'female', 33.0, 1, 0, 53.1);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. William Alexander', 'male', 26.0, 0, 0, 7.8875);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. James Lester', 'male', 39.0, 0, 0, 24.15);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Richard James Slemen', 'male', 35.0, 0, 0, 10.5);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Ebba Iris Alfrida Andersson', 'female', 6.0, 4, 2, 31.275);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Ernest Portage Tomlin', 'male', 30.5, 0, 0, 8.05);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 1, 'Mr. Richard Fry', 'male', 39.0, 0, 0, 0.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Wendla Maria Heininen', 'female', 23.0, 0, 0, 7.925);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Albert Mallet', 'male', 31.0, 1, 1, 37.0042);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. John Fredrik Alexander Holm', 'male', 43.0, 0, 0, 6.45);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Master. Karl Thorsten Skoog', 'male', 10.0, 3, 2, 27.9);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. Charles Melville (Clara Jennings Gregg) Hays', 'female', 52.0, 1, 1, 93.5);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mr. Nikola Lulic', 'male', 27.0, 0, 0, 8.6625);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 1, 'Jonkheer. John George Reuchlin', 'male', 38.0, 0, 0, 0.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mrs. (Beila) Moor', 'female', 27.0, 0, 1, 12.475);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Master. Urho Abraham Panula', 'male', 2.0, 4, 1, 39.6875);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. 
John Flynn', 'male', 36.0, 0, 0, 6.95);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Len Lam', 'male', 23.0, 0, 0, 56.4958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Master. Andre Mallet', 'male', 1.0, 0, 2, 37.0042);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mr. Thomas Joseph McCormack', 'male', 19.0, 0, 0, 7.75);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. George Nelson (Martha Evelyn) Stone', 'female', 62.0, 0, 0, 80.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mrs. Antoni (Selini Alexander) Yasbeck', 'female', 15.0, 1, 0, 14.4542);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Master. George Sibley Richards', 'male', 0.83, 1, 1, 18.75);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Amin Saad', 'male', 30.0, 0, 0, 7.2292);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Albert Augustsson', 'male', 23.0, 0, 0, 7.8542);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Owen George Allum', 'male', 18.0, 0, 0, 8.3);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Miss. Sara Rebecca Compton', 'female', 39.0, 1, 1, 83.1583);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Jakob Pasic', 'male', 21.0, 0, 0, 8.6625);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Maurice Sirota', 'male', 20.0, 0, 0, 8.05);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mr. Chang Chip', 'male', 32.0, 0, 0, 56.4958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mr. Pierre Marechal', 'male', 29.0, 0, 0, 29.7);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Ilmari Rudolf Alhomaki', 'male', 20.0, 0, 0, 7.925);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Thomas Charles Mudd', 'male', 16.0, 0, 0, 10.5);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Miss. Augusta Serepeca', 'female', 30.0, 0, 0, 31.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. 
Peter L Lemberopolous', 'male', 34.5, 0, 0, 6.4375);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Jeso Culumovic', 'male', 17.0, 0, 0, 8.6625);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Anthony Abbing', 'male', 42.0, 0, 0, 7.55);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Douglas Bullen Sage', 'male', 18.0, 8, 2, 69.55);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Marin Markoff', 'male', 35.0, 0, 0, 7.8958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Rev. John Harper', 'male', 28.0, 0, 1, 33.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. Samuel L (Edwiga Grabowska) Goldenberg', 'female', 40.0, 1, 0, 89.1042);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Master. Sigvard Harald Elias Andersson', 'male', 4.0, 4, 2, 31.275);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Johan Svensson', 'male', 74.0, 0, 0, 7.775);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Nourelain Boulos', 'female', 9.0, 1, 1, 15.2458);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Miss. Mary Conover Lines', 'female', 16.0, 0, 1, 39.4);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mrs. Ernest Courtenay (Lilian Hughes) Carter', 'female', 44.0, 1, 0, 26.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mrs. Sam (Leah Rosen) Aks', 'female', 18.0, 0, 1, 9.35);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. George Dennick (Mary Hitchcock) Wick', 'female', 45.0, 1, 1, 164.8667);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mr. Peter Denis Daly', 'male', 51.0, 0, 0, 26.55);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Mrs. Solomon (Latifa Qurban) Baclini', 'female', 24.0, 0, 3, 19.2583);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Raihed Razi', 'male', 30.0, 0, 0, 7.2292);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Claus Peter Hansen', 'male', 41.0, 2, 0, 14.1083);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. 
Frederick Edward Giles', 'male', 21.0, 1, 0, 11.5);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. Frederick Joel (Margaret Welles Barron) Swift', 'female', 48.0, 0, 0, 25.9292);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Dorothy Edith Sage', 'female', 14.0, 8, 2, 69.55);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. John William Gill', 'male', 24.0, 0, 0, 13.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Mrs. (Karolina) Bystrom', 'female', 42.0, 0, 0, 13.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Miss. Asuncion Duran y More', 'female', 27.0, 1, 0, 13.8583);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 1, 'Mr. Washington Augustus II Roebling', 'male', 31.0, 0, 0, 50.4958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Philemon van Melkebeke', 'male', 23.0, 0, 0, 9.5);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Master. Harold Theodor Johnson', 'male', 4.0, 1, 1, 11.1333);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Cerin Balkic', 'male', 26.0, 0, 0, 7.8958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. Richard Leonard (Sallie Monypeny) Beckwith', 'female', 47.0, 1, 1, 52.5542);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 1, 'Mr. Frans Olof Carlsson', 'male', 33.0, 0, 0, 5.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Victor Vander Cruyssen', 'male', 47.0, 0, 0, 9.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Mrs. Samuel (Hannah Wizosky) Abelson', 'female', 28.0, 1, 0, 24.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 3, 'Miss. Adele Kiamie Najib', 'female', 15.0, 0, 0, 7.225);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Alfred Ossian Gustafsson', 'male', 20.0, 0, 0, 9.8458);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Nedelio Petroff', 'male', 19.0, 0, 0, 7.8958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Kristo Laleff', 'male', 23.0, 0, 0, 7.8958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mrs. 
Thomas Jr (Lily Alexenia Wilson) Potter', 'female', 56.0, 0, 1, 83.1583);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 2, 'Mrs. William (Imanita Parrish Hall) Shelley', 'female', 25.0, 0, 1, 26.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Johann Markun', 'male', 33.0, 0, 0, 7.8958);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Gerda Ulrika Dahlberg', 'female', 22.0, 0, 0, 10.5167);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Mr. Frederick James Banfield', 'male', 28.0, 0, 0, 10.5);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Henry Jr Sutehall', 'male', 25.0, 0, 0, 7.05);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mrs. William (Margaret Norton) Rice', 'female', 39.0, 0, 5, 29.125);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 2, 'Rev. Juozas Montvila', 'male', 27.0, 0, 0, 13.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Miss. Margaret Edith Graham', 'female', 19.0, 0, 0, 30.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Miss. Catherine Helen Johnston', 'female', 7.0, 1, 2, 23.45);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (1, 1, 'Mr. Karl Howell Behr', 'male', 26.0, 0, 0, 30.0);\n\n INSERT INTO titanic_set2\n (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n VALUES (0, 3, 'Mr. Patrick Dooley', 'male', 32.0, 0, 0, 7.75);\n"
],
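[
"# Illustrative alternative (added for clarity; kept commented so the already\n# populated table is not touched again): building SQL by string concatenation as\n# above is fragile, and psycopg2 supports %s parameter placeholders, so the same\n# bulk insert could be sketched with executemany instead.\ninsert_sql = \"\"\"\n    INSERT INTO titanic_set2\n    (Survived, Pclass, Name, Sex, Age, SiblingsSpousesAboard, ParentsChildrenAboard, Fare)\n    VALUES (%s, %s, %s, %s, %s, %s, %s, %s);\n\"\"\"\n# pg_curs.executemany(insert_sql, [row[1:] for row in allData])",
"_____no_output_____"
],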
[
"pg_curs.execute('SELECT * FROM titanic_set2;')\npg_curs.fetchall()",
"_____no_output_____"
],
[
"pg_curs.close()\npg_conn.commit()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7490bf1ee31492dcc43a5e8ead2b1945bef2fa6 | 14,626 | ipynb | Jupyter Notebook | week1_intro/.ipynb_checkpoints/crossentropy_method-checkpoint.ipynb | syuntoku14/PracticalRL_coursera_pytorch | aabdb087b0387f2a20866ca5bfe3b57b57a0419e | [
"MIT"
] | null | null | null | week1_intro/.ipynb_checkpoints/crossentropy_method-checkpoint.ipynb | syuntoku14/PracticalRL_coursera_pytorch | aabdb087b0387f2a20866ca5bfe3b57b57a0419e | [
"MIT"
] | null | null | null | week1_intro/.ipynb_checkpoints/crossentropy_method-checkpoint.ipynb | syuntoku14/PracticalRL_coursera_pytorch | aabdb087b0387f2a20866ca5bfe3b57b57a0419e | [
"MIT"
] | null | null | null | 31.521552 | 319 | 0.544988 | [
[
[
"# Crossentropy method\n\nThis notebook will teach you to solve reinforcement learning problems with crossentropy method.",
"_____no_output_____"
]
],
[
[
"import gym\nimport numpy as np, pandas as pd\n\nenv = gym.make(\"Taxi-v2\")\nenv.reset()\nenv.render()",
"+---------+\n|\u001b[35mR\u001b[0m: | : :G|\n| : : : : |\n| : : : : |\n| | : | :\u001b[43m \u001b[0m|\n|Y| : |\u001b[34;1mB\u001b[0m: |\n+---------+\n\n"
],
[
"n_states = env.observation_space.n\nn_actions = env.action_space.n\n\nprint(\"n_states=%i, n_actions=%i\"%(n_states, n_actions))",
"n_states=500, n_actions=6\n"
]
],
[
[
"# Create stochastic policy\n\nThis time our policy should be a probability distribution.\n\n```policy[s,a] = P(take action a | in state s)```\n\nSince we still use integer state and action representations, you can use a 2-dimensional array to represent the policy.\n\nPlease initialize policy __uniformly__, that is, probabililities of all actions should be equal.\n",
"_____no_output_____"
]
],
[
[
"policy = <your code here! Create an array to store action probabilities>",
"_____no_output_____"
],
[
"assert type(policy) in (np.ndarray,np.matrix)\nassert np.allclose(policy,1./n_actions)\nassert np.allclose(np.sum(policy,axis=1), 1)",
"_____no_output_____"
]
],
[
[
"# Play the game\n\nJust like before, but we also record all states and actions we took.",
"_____no_output_____"
]
],
[
[
"def generate_session(policy,t_max=10**4):\n \"\"\"\n Play game until end or for t_max ticks.\n :param policy: an array of shape [n_states,n_actions] with action probabilities\n :returns: list of states, list of actions and sum of rewards\n \"\"\"\n states,actions = [],[]\n total_reward = 0.\n \n s = env.reset()\n \n for t in range(t_max):\n \n a = <sample action from policy (hint: use np.random.choice)>\n \n new_s, r, done, info = env.step(a)\n \n #Record state, action and add up reward to states,actions and total_reward accordingly. \n states.append(s)\n actions.append(a)\n total_reward += r\n \n s = new_s\n if done:\n break\n return states, actions, total_reward\n ",
"_____no_output_____"
],
[
"s,a,r = generate_session(policy)\nassert type(s) == type(a) == list\nassert len(s) == len(a)\nassert type(r) in [float,np.float]",
"_____no_output_____"
],
[
"#let's see the initial reward distribution\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nsample_rewards = [generate_session(policy,t_max=1000)[-1] for _ in range(200)]\n\nplt.hist(sample_rewards,bins=20);\nplt.vlines([np.percentile(sample_rewards, 50)], [0], [100], label=\"50'th percentile\", color='green')\nplt.vlines([np.percentile(sample_rewards, 90)], [0], [100], label=\"90'th percentile\", color='red')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"### Crossentropy method steps (2pts)",
"_____no_output_____"
]
],
[
[
"def select_elites(states_batch,actions_batch,rewards_batch,percentile=50):\n \"\"\"\n Select states and actions from games that have rewards >= percentile\n :param states_batch: list of lists of states, states_batch[session_i][t]\n :param actions_batch: list of lists of actions, actions_batch[session_i][t]\n :param rewards_batch: list of rewards, rewards_batch[session_i][t]\n \n :returns: elite_states,elite_actions, both 1D lists of states and respective actions from elite sessions\n \n Please return elite states and actions in their original order \n [i.e. sorted by session number and timestep within session]\n \n If you're confused, see examples below. Please don't assume that states are integers (they'll get different later).\n \"\"\"\n \n reward_threshold = <Compute minimum reward for elite sessions. Hint: use np.percentile>\n \n elite_states = <your code here>\n elite_actions = <your code here>\n \n return elite_states,elite_actions\n ",
"_____no_output_____"
],
[
"states_batch = [\n [1,2,3], #game1\n [4,2,0,2], #game2\n [3,1] #game3\n]\n\nactions_batch = [\n [0,2,4], #game1\n [3,2,0,1], #game2\n [3,3] #game3\n]\nrewards_batch = [\n 3, #game1\n 4, #game2\n 5, #game3\n]\n\ntest_result_0 = select_elites(states_batch, actions_batch, rewards_batch, percentile=0)\ntest_result_40 = select_elites(states_batch, actions_batch, rewards_batch, percentile=30)\ntest_result_90 = select_elites(states_batch, actions_batch, rewards_batch, percentile=90)\ntest_result_100 = select_elites(states_batch, actions_batch, rewards_batch, percentile=100)\n\nassert np.all(test_result_0[0] == [1, 2, 3, 4, 2, 0, 2, 3, 1]) \\\n and np.all(test_result_0[1] == [0, 2, 4, 3, 2, 0, 1, 3, 3]),\\\n \"For percentile 0 you should return all states and actions in chronological order\"\nassert np.all(test_result_40[0] == [4, 2, 0, 2, 3, 1]) and \\\n np.all(test_result_40[1] ==[3, 2, 0, 1, 3, 3]),\\\n \"For percentile 30 you should only select states/actions from two first\"\nassert np.all(test_result_90[0] == [3,1]) and \\\n np.all(test_result_90[1] == [3,3]),\\\n \"For percentile 90 you should only select states/actions from one game\"\nassert np.all(test_result_100[0] == [3,1]) and\\\n np.all(test_result_100[1] == [3,3]),\\\n \"Please make sure you use >=, not >. Also double-check how you compute percentile.\"\nprint(\"Ok!\")",
"_____no_output_____"
],
[
"def update_policy(elite_states,elite_actions):\n \"\"\"\n Given old policy and a list of elite states/actions from select_elites,\n return new updated policy where each action probability is proportional to\n \n policy[s_i,a_i] ~ #[occurences of si and ai in elite states/actions]\n \n Don't forget to normalize policy to get valid probabilities and handle 0/0 case.\n In case you never visited a state, set probabilities for all actions to 1./n_actions\n \n :param elite_states: 1D list of states from elite sessions\n :param elite_actions: 1D list of actions from elite sessions\n \n \"\"\"\n \n new_policy = np.zeros([n_states,n_actions])\n \n <Your code here: update probabilities for actions given elite states & actions>\n #Don't forget to set 1/n_actions for all actions in unvisited states.\n \n \n return new_policy",
"_____no_output_____"
],
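[
"# Hedged sketch (not part of the original assignment): one possible update_policy implementation matching\n# the docstring above: count (state, action) occurrences among the elites, normalise each row, and fall\n# back to a uniform distribution for unvisited states. Assumes n_states and n_actions from earlier cells.\nimport numpy as np\n\ndef update_policy_example(elite_states, elite_actions):\n    new_policy = np.zeros([n_states, n_actions])\n    for s, a in zip(elite_states, elite_actions):\n        new_policy[s, a] += 1\n    for s in range(n_states):\n        row_sum = new_policy[s].sum()\n        new_policy[s] = 1.0 / n_actions if row_sum == 0 else new_policy[s] / row_sum\n    return new_policy",
"_____no_output_____"
],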
[
"\nelite_states, elite_actions = ([1, 2, 3, 4, 2, 0, 2, 3, 1], [0, 2, 4, 3, 2, 0, 1, 3, 3])\n\n\nnew_policy = update_policy(elite_states,elite_actions)\n\nassert np.isfinite(new_policy).all(), \"Your new policy contains NaNs or +-inf. Make sure you don't divide by zero.\"\nassert np.all(new_policy>=0), \"Your new policy can't have negative action probabilities\"\nassert np.allclose(new_policy.sum(axis=-1),1), \"Your new policy should be a valid probability distribution over actions\"\nreference_answer = np.array([\n [ 1. , 0. , 0. , 0. , 0. ],\n [ 0.5 , 0. , 0. , 0.5 , 0. ],\n [ 0. , 0.33333333, 0.66666667, 0. , 0. ],\n [ 0. , 0. , 0. , 0.5 , 0.5 ]])\nassert np.allclose(new_policy[:4,:5],reference_answer)\nprint(\"Ok!\")",
"_____no_output_____"
]
],
[
[
"# Training loop\nGenerate sessions, select N best and fit to those.",
"_____no_output_____"
]
],
[
[
"from IPython.display import clear_output\n\ndef show_progress(batch_rewards, log, percentile, reward_range=[-990,+10]):\n \"\"\"\n A convenience function that displays training progress. \n No cool math here, just charts.\n \"\"\"\n \n mean_reward, threshold = np.mean(batch_rewards), np.percentile(batch_rewards, percentile)\n log.append([mean_reward,threshold])\n\n clear_output(True)\n print(\"mean reward = %.3f, threshold=%.3f\"%(mean_reward, threshold))\n plt.figure(figsize=[8,4])\n plt.subplot(1,2,1)\n plt.plot(list(zip(*log))[0], label='Mean rewards')\n plt.plot(list(zip(*log))[1], label='Reward thresholds')\n plt.legend()\n plt.grid()\n \n plt.subplot(1,2,2)\n plt.hist(batch_rewards,range=reward_range);\n plt.vlines([np.percentile(batch_rewards, percentile)], [0], [100], label=\"percentile\", color='red')\n plt.legend()\n plt.grid()\n\n plt.show()\n",
"_____no_output_____"
],
[
"#reset policy just in case\npolicy = np.ones([n_states, n_actions]) / n_actions ",
"_____no_output_____"
],
[
"n_sessions = 250 #sample this many sessions\npercentile = 50 #take this percent of session with highest rewards\nlearning_rate = 0.5 #add this thing to all counts for stability\n\nlog = []\n\nfor i in range(100):\n \n %time sessions = [<generate a list of n_sessions new sessions>]\n \n batch_states,batch_actions,batch_rewards = zip(*sessions)\n\n elite_states, elite_actions = <select elite states/actions>\n \n new_policy = <compute new policy>\n \n policy = learning_rate * new_policy + (1-learning_rate) * policy\n \n #display results on chart\n show_progress(batch_rewards, log, percentile)",
"_____no_output_____"
]
],
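[
[
"# Hedged sketch (not part of the original assignment): the training loop above with its three placeholders\n# filled in one possible way. It assumes generate_session, select_elites, update_policy and show_progress\n# from the earlier cells are implemented; the %time magic is dropped to keep the cell plain Python.\nn_sessions = 250     # sample this many sessions per iteration\npercentile = 50      # keep sessions above this reward percentile\nlearning_rate = 0.5  # smoothing coefficient for the policy update\n\nlog = []\n\nfor i in range(100):\n    sessions = [generate_session(policy) for _ in range(n_sessions)]\n    batch_states, batch_actions, batch_rewards = zip(*sessions)\n    elite_states, elite_actions = select_elites(batch_states, batch_actions, batch_rewards, percentile)\n    new_policy = update_policy(elite_states, elite_actions)\n    policy = learning_rate * new_policy + (1 - learning_rate) * policy\n    show_progress(batch_rewards, log, percentile)",
"_____no_output_____"
]
],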
[
[
"### Reflecting on results\n\nYou may have noticed that the taxi problem quickly converges from <-1000 to a near-optimal score and then descends back into -50/-100. This is in part because the environment has some innate randomness. Namely, the starting points of passenger/driver change from episode to episode.\n\nIn case CEM failed to learn how to win from one distinct starting point, it will siply discard it because no sessions from that starting point will make it into the \"elites\".\n\nTo mitigate that problem, you can either reduce the threshold for elite sessions (duct tape way) or change the way you evaluate strategy (theoretically correct way). You can first sample an action for every possible state and then evaluate this choice of actions by running _several_ games and averaging rewards.",
"_____no_output_____"
],
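[
"# Hedged sketch (not part of the original assignment): the \"theoretically correct\" evaluation described\n# above. Fix one sampled action per state, then score that deterministic strategy by averaging the total\n# reward over several games. Assumes `env`, `policy`, `n_states` and `n_actions` from the earlier cells.\nimport numpy as np\n\ndef evaluate_deterministic_strategy(policy, n_games=10, t_max=10**4):\n    chosen_actions = [np.random.choice(n_actions, p=policy[s]) for s in range(n_states)]\n    total_reward = 0.0\n    for _ in range(n_games):\n        s = env.reset()\n        for _ in range(t_max):\n            a = chosen_actions[s]\n            s, r, done, _ = env.step(a)\n            total_reward += r\n            if done:\n                break\n    return total_reward / n_games",
"_____no_output_____"
],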
[
"### Submit to coursera",
"_____no_output_____"
]
],
[
[
"from submit import submit_taxi\nsubmit_taxi(generate_session, policy, <EMAIL>, <TOKEN>)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e74910a512233b4df85aa49ee6c1e1eebe99ba76 | 133,017 | ipynb | Jupyter Notebook | espnet_asr/espnet_lightning/espnet.ipynb | dertilo/speech-recognition | 32dfd0a05480ecb3a4ea3eb9e28628da976e7065 | [
"MIT"
] | 1 | 2020-06-30T20:48:04.000Z | 2020-06-30T20:48:04.000Z | espnet_asr/espnet_lightning/espnet.ipynb | dertilo/speech-recognition | 32dfd0a05480ecb3a4ea3eb9e28628da976e7065 | [
"MIT"
] | null | null | null | espnet_asr/espnet_lightning/espnet.ipynb | dertilo/speech-recognition | 32dfd0a05480ecb3a4ea3eb9e28628da976e7065 | [
"MIT"
] | null | null | null | 72.567921 | 710 | 0.551478 | [
[
[
"from google.colab import drive\ndrive.mount('/content/drive')\n!ln -s /content/drive/My\\ Drive /mydrive\n!ls /mydrive\n!nvidia-smi",
"Mounted at /content/drive\n'Colab Notebooks' Documents\t speech-recognition\n data\t\t envs\t tilosite.gsite\n debug\t\t language-modeling wandb.key\nWed Oct 14 08:07:54 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 455.23.05 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 54C P8 11W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n"
],
[
"%%time\n%%bash\napt-get update && apt-get install -qq bc tree sox libsndfile1\nrm -rf espnet\ngit clone -b tilos_minimal_librispeech https://github.com/dertilo/espnet.git\ncd espnet && pip install -e .\npip install util@git+https://[email protected]/dertilo/util.git#egg=util\n",
"Get:1 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease [15.9 kB]\nHit:2 http://archive.ubuntu.com/ubuntu bionic InRelease\nGet:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\nGet:4 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease [3,626 B]\nHit:5 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\nGet:6 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\nGet:7 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\nIgn:8 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 InRelease\nIgn:9 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 InRelease\nGet:10 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release [697 B]\nHit:11 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 Release\nGet:12 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Release.gpg [836 B]\nGet:13 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main Sources [1,681 kB]\nGet:14 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ Packages [39.1 kB]\nGet:15 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic/main amd64 Packages [860 kB]\nGet:16 http://archive.ubuntu.com/ubuntu bionic-updates/main amd64 Packages [2,150 kB]\nGet:17 http://archive.ubuntu.com/ubuntu bionic-updates/multiverse amd64 Packages [45.6 kB]\nGet:18 http://archive.ubuntu.com/ubuntu bionic-updates/universe amd64 Packages [2,112 kB]\nGet:19 http://archive.ubuntu.com/ubuntu bionic-updates/restricted amd64 Packages [231 kB]\nIgn:21 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Packages\nGet:21 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 Packages [334 kB]\nGet:22 http://security.ubuntu.com/ubuntu bionic-security/multiverse amd64 Packages [15.0 kB]\nGet:23 http://security.ubuntu.com/ubuntu bionic-security/main amd64 Packages [1,733 kB]\nGet:24 http://security.ubuntu.com/ubuntu bionic-security/restricted amd64 Packages [205 kB]\nGet:25 http://security.ubuntu.com/ubuntu bionic-security/universe amd64 Packages [1,348 kB]\nFetched 11.0 MB in 3s (4,143 kB/s)\nReading package lists...\nSelecting previously unselected package libopencore-amrnb0:amd64.\r\n(Reading database ... \r(Reading database ... 5%\r(Reading database ... 10%\r(Reading database ... 15%\r(Reading database ... 20%\r(Reading database ... 25%\r(Reading database ... 30%\r(Reading database ... 35%\r(Reading database ... 40%\r(Reading database ... 45%\r(Reading database ... 50%\r(Reading database ... 55%\r(Reading database ... 60%\r(Reading database ... 65%\r(Reading database ... 70%\r(Reading database ... 75%\r(Reading database ... 80%\r(Reading database ... 85%\r(Reading database ... 90%\r(Reading database ... 95%\r(Reading database ... 100%\r(Reading database ... 
144617 files and directories currently installed.)\r\nPreparing to unpack .../0-libopencore-amrnb0_0.1.3-2.1_amd64.deb ...\r\nUnpacking libopencore-amrnb0:amd64 (0.1.3-2.1) ...\r\nSelecting previously unselected package libopencore-amrwb0:amd64.\r\nPreparing to unpack .../1-libopencore-amrwb0_0.1.3-2.1_amd64.deb ...\r\nUnpacking libopencore-amrwb0:amd64 (0.1.3-2.1) ...\r\nSelecting previously unselected package libmagic-mgc.\r\nPreparing to unpack .../2-libmagic-mgc_1%3a5.32-2ubuntu0.4_amd64.deb ...\r\nUnpacking libmagic-mgc (1:5.32-2ubuntu0.4) ...\r\nSelecting previously unselected package libmagic1:amd64.\r\nPreparing to unpack .../3-libmagic1_1%3a5.32-2ubuntu0.4_amd64.deb ...\r\nUnpacking libmagic1:amd64 (1:5.32-2ubuntu0.4) ...\r\nSelecting previously unselected package bc.\r\nPreparing to unpack .../4-bc_1.07.1-2_amd64.deb ...\r\nUnpacking bc (1.07.1-2) ...\r\nSelecting previously unselected package libsox3:amd64.\r\nPreparing to unpack .../5-libsox3_14.4.2-3ubuntu0.18.04.1_amd64.deb ...\r\nUnpacking libsox3:amd64 (14.4.2-3ubuntu0.18.04.1) ...\r\nSelecting previously unselected package libsox-fmt-alsa:amd64.\r\nPreparing to unpack .../6-libsox-fmt-alsa_14.4.2-3ubuntu0.18.04.1_amd64.deb ...\r\nUnpacking libsox-fmt-alsa:amd64 (14.4.2-3ubuntu0.18.04.1) ...\r\nSelecting previously unselected package libsox-fmt-base:amd64.\r\nPreparing to unpack .../7-libsox-fmt-base_14.4.2-3ubuntu0.18.04.1_amd64.deb ...\r\nUnpacking libsox-fmt-base:amd64 (14.4.2-3ubuntu0.18.04.1) ...\r\nSelecting previously unselected package sox.\r\nPreparing to unpack .../8-sox_14.4.2-3ubuntu0.18.04.1_amd64.deb ...\r\nUnpacking sox (14.4.2-3ubuntu0.18.04.1) ...\r\nSelecting previously unselected package tree.\r\nPreparing to unpack .../9-tree_1.7.0-5_amd64.deb ...\r\nUnpacking tree (1.7.0-5) ...\r\nSetting up tree (1.7.0-5) ...\r\nSetting up libmagic-mgc (1:5.32-2ubuntu0.4) ...\r\nSetting up libmagic1:amd64 (1:5.32-2ubuntu0.4) ...\r\nSetting up libopencore-amrnb0:amd64 (0.1.3-2.1) ...\r\nSetting up bc (1.07.1-2) ...\r\nSetting up libopencore-amrwb0:amd64 (0.1.3-2.1) ...\r\nSetting up libsox3:amd64 (14.4.2-3ubuntu0.18.04.1) ...\r\nSetting up libsox-fmt-base:amd64 (14.4.2-3ubuntu0.18.04.1) ...\r\nSetting up libsox-fmt-alsa:amd64 (14.4.2-3ubuntu0.18.04.1) ...\r\nSetting up sox (14.4.2-3ubuntu0.18.04.1) ...\r\nProcessing triggers for libc-bin (2.27-3ubuntu1.2) ...\r\n/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\r\n\r\nProcessing triggers for man-db (2.8.3-2ubuntu0.1) ...\r\nProcessing triggers for mime-support (3.60ubuntu1) ...\r\nObtaining file:///content/espnet\nRequirement already satisfied: setuptools>=38.5.1 in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (50.3.0)\nCollecting configargparse>=1.2.1\n Downloading https://files.pythonhosted.org/packages/bb/79/3045743bb26ca2e44a1d317c37395462bfed82dbbd38e69a3280b63696ce/ConfigArgParse-1.2.3.tar.gz (42kB)\nRequirement already satisfied: typeguard>=2.7.0 in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (2.7.1)\nRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (0.7)\nCollecting humanfriendly\n Downloading https://files.pythonhosted.org/packages/8e/2d/2f1b0a780b8c948c06c74c8c80e68ac354da52397ba432a1c5ac1923c3af/humanfriendly-8.2-py2.py3-none-any.whl (86kB)\nRequirement already satisfied: scipy>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (1.4.1)\nCollecting matplotlib==3.1.0\n Downloading 
https://files.pythonhosted.org/packages/da/83/d989ee20c78117c737ab40e0318ea221f1aed4e3f5a40b4f93541b369b93/matplotlib-3.1.0-cp36-cp36m-manylinux1_x86_64.whl (13.1MB)\nRequirement already satisfied: pillow>=6.1.0 in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (7.0.0)\nCollecting editdistance==0.5.2\n Downloading https://files.pythonhosted.org/packages/dc/34/381c67595831fc9daa30c9247cfb8116941fc7380b571a390bbff6da011e/editdistance-0.5.2-cp36-cp36m-manylinux1_x86_64.whl (173kB)\nRequirement already satisfied: gdown in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (3.6.4)\nCollecting espnet_model_zoo\n Downloading https://files.pythonhosted.org/packages/05/45/7bfd124ba1b0d2859a350d71217cb38ada21bf84e1c96ce1c12a1e5d2501/espnet_model_zoo-0.0.0a16-py3-none-any.whl\nCollecting ctc-segmentation>=1.0.6\n Downloading https://files.pythonhosted.org/packages/7f/43/a9bce952d45bfd85615eecbba5ab69fb2a9106b6b512cd0d3af624eced1b/ctc_segmentation-1.1.0.tar.gz (57kB)\nRequirement already satisfied: tensorboard>=1.14 in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (2.3.0)\nCollecting tensorboardX>=1.8\n Downloading https://files.pythonhosted.org/packages/af/0c/4f41bcd45db376e6fe5c619c01100e9b7531c55791b7244815bac6eac32c/tensorboardX-2.1-py2.py3-none-any.whl (308kB)\nCollecting librosa>=0.8.0\n Downloading https://files.pythonhosted.org/packages/26/4d/c22d8ca74ca2c13cd4ac430fa353954886104321877b65fa871939e78591/librosa-0.8.0.tar.gz (183kB)\nRequirement already satisfied: resampy in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (0.2.2)\nCollecting pysptk>=0.1.17\n Downloading https://files.pythonhosted.org/packages/7c/25/4ea0932fbf0f1db42934b85011c1c825bcf57055ecde7e511f05e9fb9197/pysptk-0.1.18.tar.gz (419kB)\nCollecting sentencepiece<0.1.90,>=0.1.82\n Downloading https://files.pythonhosted.org/packages/98/2c/8df20f3ac6c22ac224fff307ebc102818206c53fc454ecd37d8ac2060df5/sentencepiece-0.1.86-cp36-cp36m-manylinux1_x86_64.whl (1.0MB)\nCollecting nltk>=3.4.5\n Downloading https://files.pythonhosted.org/packages/92/75/ce35194d8e3022203cca0d2f896dbb88689f9b3fce8e9f9cff942913519d/nltk-3.5.zip (1.4MB)\nCollecting morfessor\n Downloading https://files.pythonhosted.org/packages/39/e6/7afea30be2ee4d29ce9de0fa53acbb033163615f849515c0b1956ad074ee/Morfessor-2.0.6-py3-none-any.whl\nCollecting PyYAML>=5.1.2\n Downloading https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz (269kB)\nCollecting soundfile>=0.10.2\n Downloading https://files.pythonhosted.org/packages/eb/f2/3cbbbf3b96fb9fa91582c438b574cff3f45b29c772f94c400e2c99ef5db9/SoundFile-0.10.3.post1-py2.py3-none-any.whl\nCollecting h5py==2.9.0\n Downloading https://files.pythonhosted.org/packages/30/99/d7d4fbf2d02bb30fb76179911a250074b55b852d34e98dd452a9f394ac06/h5py-2.9.0-cp36-cp36m-manylinux1_x86_64.whl (2.8MB)\nCollecting kaldiio>=2.15.0\n Downloading https://files.pythonhosted.org/packages/a0/f6/a72323a04aa8b6727c319a45d360a4e90dcab2ee6affdd711976610fa50b/kaldiio-2.17.0.tar.gz\nRequirement already satisfied: inflect>=1.0.0 in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (2.1.0)\nCollecting unidecode>=1.0.22\n Downloading https://files.pythonhosted.org/packages/d0/42/d9edfed04228bacea2d824904cae367ee9efd05e6cce7ceaaedd0b0ad964/Unidecode-1.1.1-py2.py3-none-any.whl (238kB)\nCollecting pyworld>=0.2.10\n Downloading 
https://files.pythonhosted.org/packages/5a/91/1b3ebd3840a76e50b3695a9d8515a44303a90c74ae13e474647d984d1e12/pyworld-0.2.11.post0.tar.gz (222kB)\nCollecting nnmnkwii\n Downloading https://files.pythonhosted.org/packages/5b/94/3e6264a708e308e597d7563fd39c7aaec2476c21ed41b093a3cffcc8aa4e/nnmnkwii-0.0.21.tar.gz (1.7MB)\nCollecting espnet_tts_frontend\n Downloading https://files.pythonhosted.org/packages/e0/ab/c1e882c7dcc9241d9e422cb3b4f6554a93d5557d560e8ce918c183a80bd8/espnet_tts_frontend-0.0.3-py3-none-any.whl\nCollecting museval>=0.2.1\n Downloading https://files.pythonhosted.org/packages/54/1a/448486d3619d0e091e2b7160cc5920ff4456e1f1de2b49650fe52e50107e/museval-0.3.1-py2.py3-none-any.whl\nCollecting pystoi>=0.2.2\n Downloading https://files.pythonhosted.org/packages/4c/d8/58a5deefb57e3ca78d6421d2ec536880a29ac0e497b2d1baded6f2153beb/pystoi-0.3.3.tar.gz\nCollecting nara_wpe>=0.0.5\n Downloading https://files.pythonhosted.org/packages/4b/d4/11dddfd5f41017df8eda83cbcafab14ba8bf32d23e7697bf9d2bd343d979/nara_wpe-0.0.7-py3-none-any.whl\nCollecting torch_complex\n Downloading https://files.pythonhosted.org/packages/36/30/cc85a1674c70ef8f3f6c6725bf9dc0e88b727afd0c1ec36cb7f33e4c7e5d/torch_complex-0.2.0-py3-none-any.whl\nCollecting pytorch_wpe\n Downloading https://files.pythonhosted.org/packages/a7/6b/8797da1c34d34afad62d7b4adce0409f416636ec8411d5133854dd31eb09/pytorch_wpe-0.0.0-py3-none-any.whl\nCollecting mir-eval>=0.6\n Downloading https://files.pythonhosted.org/packages/0a/fe/be4f7a59ed71938e21e89f23afe93eea0d39eb3e77f83754a12028cf1a68/mir_eval-0.6.tar.gz (87kB)\nRequirement already satisfied: fastdtw in /usr/local/lib/python3.6/dist-packages (from espnet==0.9.2) (0.3.4)\nCollecting torch_optimizer\n Downloading https://files.pythonhosted.org/packages/c9/48/f670cf4b47c315861d0547f0c2be579cd801304c86e55008492f1acebd01/torch_optimizer-0.0.1a15-py3-none-any.whl (41kB)\nCollecting torchaudio==0.6.0\n Downloading https://files.pythonhosted.org/packages/96/34/c651430dea231e382ddf2eb5773239bf4885d9528f640a4ef39b12894cb8/torchaudio-0.6.0-cp36-cp36m-manylinux1_x86_64.whl (6.7MB)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from scipy>=1.4.1->espnet==0.9.2) (1.18.5)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.1.0->espnet==0.9.2) (2.4.7)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.1.0->espnet==0.9.2) (2.8.1)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.1.0->espnet==0.9.2) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib==3.1.0->espnet==0.9.2) (1.2.0)\nRequirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from gdown->espnet==0.9.2) (2.23.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gdown->espnet==0.9.2) (1.15.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from gdown->espnet==0.9.2) (4.41.1)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from espnet_model_zoo->espnet==0.9.2) (1.1.2)\nRequirement already satisfied: Cython in /usr/local/lib/python3.6/dist-packages (from ctc-segmentation>=1.0.6->espnet==0.9.2) (0.29.21)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from 
tensorboard>=1.14->espnet==0.9.2) (0.4.1)\nRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (1.17.2)\nRequirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (3.12.4)\nRequirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (0.10.0)\nRequirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (1.32.0)\nRequirement already satisfied: wheel>=0.26; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (0.35.1)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (1.7.0)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (1.0.1)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard>=1.14->espnet==0.9.2) (3.2.2)\nRequirement already satisfied: audioread>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from librosa>=0.8.0->espnet==0.9.2) (2.1.8)\nRequirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /usr/local/lib/python3.6/dist-packages (from librosa>=0.8.0->espnet==0.9.2) (0.22.2.post1)\nRequirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.6/dist-packages (from librosa>=0.8.0->espnet==0.9.2) (0.16.0)\nRequirement already satisfied: decorator>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from librosa>=0.8.0->espnet==0.9.2) (4.4.2)\nRequirement already satisfied: numba>=0.43.0 in /usr/local/lib/python3.6/dist-packages (from librosa>=0.8.0->espnet==0.9.2) (0.48.0)\nCollecting pooch>=1.0\n Downloading https://files.pythonhosted.org/packages/ce/11/d7a1dc8173a4085759710e69aae6e070d0d432db84013c7c343e4e522b76/pooch-1.2.0-py3-none-any.whl (47kB)\nRequirement already satisfied: click in /usr/local/lib/python3.6/dist-packages (from nltk>=3.4.5->espnet==0.9.2) (7.1.2)\nRequirement already satisfied: regex in /usr/local/lib/python3.6/dist-packages (from nltk>=3.4.5->espnet==0.9.2) (2019.12.20)\nRequirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.6/dist-packages (from soundfile>=0.10.2->espnet==0.9.2) (1.14.3)\nRequirement already satisfied: sklearn in /usr/local/lib/python3.6/dist-packages (from nnmnkwii->espnet==0.9.2) (0.0)\nCollecting bandmat>=0.7\n Downloading https://files.pythonhosted.org/packages/94/69/c94a1f690ce9dace7de87f9c95ce297e11e8d0c1314f0280027be2194d0b/bandmat-0.7.tar.gz (364kB)\nCollecting jaconv\n Downloading https://files.pythonhosted.org/packages/b0/9e/cf1353fb3e81a177bb52ca59a0ebee425f084b7298039a7965c5414d2d62/jaconv-0.2.4.tar.gz\nCollecting pypinyin\n Downloading https://files.pythonhosted.org/packages/38/c2/2a834bf95f3dfab1e7dbafe4bb88140ac40d0ae35bb737994abfbb54cde9/pypinyin-0.39.1-py2.py3-none-any.whl (780kB)\nCollecting g2p-en\n Downloading https://files.pythonhosted.org/packages/d7/d9/b77dc634a7a0c0c97716ba97dd0a28cbfa6267c96f359c4f27ed71cbd284/g2p_en-2.1.0-py3-none-any.whl (3.1MB)\nCollecting musdb>=0.3.0\n Downloading https://files.pythonhosted.org/packages/57/bd/98ba16482f610bcfa7fcc212175dc0bbf11976e0bc69319b4204b6dc3aec/musdb-0.3.1-py2.py3-none-any.whl\nCollecting simplejson\n Downloading 
https://files.pythonhosted.org/packages/73/96/1e6b19045375890068d7342cbe280dd64ae73fd90b9735b5efb8d1e044a1/simplejson-3.17.2-cp36-cp36m-manylinux2010_x86_64.whl (127kB)\nRequirement already satisfied: jsonschema in /usr/local/lib/python3.6/dist-packages (from museval>=0.2.1->espnet==0.9.2) (2.6.0)\nRequirement already satisfied: bottleneck in /usr/local/lib/python3.6/dist-packages (from nara_wpe>=0.0.5->espnet==0.9.2) (1.3.2)\nRequirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from mir-eval>=0.6->espnet==0.9.2) (0.16.0)\nRequirement already satisfied: torch>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from torch_optimizer->espnet==0.9.2) (1.6.0+cu101)\nCollecting pytorch-ranger>=0.1.1\n Downloading https://files.pythonhosted.org/packages/0d/70/12256257d861bbc3e176130d25be1de085ce7a9e60594064888a950f2154/pytorch_ranger-0.1.1-py3-none-any.whl\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->gdown->espnet==0.9.2) (2.10)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->gdown->espnet==0.9.2) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->gdown->espnet==0.9.2) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->gdown->espnet==0.9.2) (3.0.4)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->espnet_model_zoo->espnet==0.9.2) (2018.9)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=1.14->espnet==0.9.2) (1.3.0)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard>=1.14->espnet==0.9.2) (0.2.8)\nRequirement already satisfied: rsa<5,>=3.1.4; python_version >= \"3\" in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard>=1.14->espnet==0.9.2) (4.6)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard>=1.14->espnet==0.9.2) (4.1.1)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard>=1.14->espnet==0.9.2) (2.0.0)\nRequirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /usr/local/lib/python3.6/dist-packages (from numba>=0.43.0->librosa>=0.8.0->espnet==0.9.2) (0.31.0)\nCollecting appdirs\n Downloading https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl\nRequirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from pooch>=1.0->librosa>=0.8.0->espnet==0.9.2) (20.4)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi>=1.0->soundfile>=0.10.2->espnet==0.9.2) (2.20)\nCollecting distance>=0.1.3\n Downloading https://files.pythonhosted.org/packages/5c/1a/883e47df323437aefa0d0a92ccfb38895d9416bd0b56262c2e46a47767b8/Distance-0.1.3.tar.gz (180kB)\nCollecting pyaml\n Downloading https://files.pythonhosted.org/packages/15/c4/1310a054d33abc318426a956e7d6df0df76a6ddfa9c66f6310274fb75d42/pyaml-20.4.0-py2.py3-none-any.whl\nCollecting stempeg>=0.1.7\n Downloading 
https://files.pythonhosted.org/packages/29/ab/6e7362cbff21c25e99cfc3ef116057a7f9ebe6f429a44038eef82de3479d/stempeg-0.1.8-py3-none-any.whl (509kB)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=1.14->espnet==0.9.2) (3.1.0)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard>=1.14->espnet==0.9.2) (0.4.8)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < \"3.8\"->markdown>=2.6.8->tensorboard>=1.14->espnet==0.9.2) (3.2.0)\nBuilding wheels for collected packages: configargparse, ctc-segmentation, librosa, pysptk, nltk, PyYAML, kaldiio, pyworld, nnmnkwii, pystoi, mir-eval, bandmat, jaconv, distance\n Building wheel for configargparse (setup.py): started\n Building wheel for configargparse (setup.py): finished with status 'done'\n Created wheel for configargparse: filename=ConfigArgParse-1.2.3-cp36-none-any.whl size=19329 sha256=0ee77fa764035c13bbfe768c5396f42b4a7157df3493cdf72d8ffe7a2143e46a\n Stored in directory: /root/.cache/pip/wheels/bd/d6/53/034032da9498bda2385cd50a51a289e88090b5da2d592b1fdf\n Building wheel for ctc-segmentation (setup.py): started\n Building wheel for ctc-segmentation (setup.py): finished with status 'done'\n Created wheel for ctc-segmentation: filename=ctc_segmentation-1.1.0-cp36-cp36m-linux_x86_64.whl size=92123 sha256=4f209487f26ff869d8b3597181be5f8adb5181fb6ee97440a839cb6ec7a718d0\n Stored in directory: /root/.cache/pip/wheels/ea/76/4e/b88ad3a93cd80c0fa62ec267f2f4ebac5873ddea15212c0986\n Building wheel for librosa (setup.py): started\n Building wheel for librosa (setup.py): finished with status 'done'\n Created wheel for librosa: filename=librosa-0.8.0-cp36-none-any.whl size=201376 sha256=28d982f99b080c21ba35fca40f38aeb4eb74a69f30f526867873ed06f11e104b\n Stored in directory: /root/.cache/pip/wheels/ee/10/1e/382bb4369e189938d5c02e06d10c651817da8d485bfd1647c9\n Building wheel for pysptk (setup.py): started\n Building wheel for pysptk (setup.py): finished with status 'done'\n Created wheel for pysptk: filename=pysptk-0.1.18-cp36-cp36m-linux_x86_64.whl size=950211 sha256=add17b20d0e2e51a3097c13cfea9fe4f43f7570c469e186e66f03d18e024594a\n Stored in directory: /root/.cache/pip/wheels/c7/96/d2/a163240019c59504402fab713af259026af81a99dea943404a\n Building wheel for nltk (setup.py): started\n Building wheel for nltk (setup.py): finished with status 'done'\n Created wheel for nltk: filename=nltk-3.5-cp36-none-any.whl size=1434676 sha256=1d836711fac55427631be1876a86dbf82bc45d7dee5af2c468456504469e4a8c\n Stored in directory: /root/.cache/pip/wheels/ae/8c/3f/b1fe0ba04555b08b57ab52ab7f86023639a526d8bc8d384306\n Building wheel for PyYAML (setup.py): started\n Building wheel for PyYAML (setup.py): finished with status 'done'\n Created wheel for PyYAML: filename=PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl size=44619 sha256=4cf65441acfeb0ab94327ad68cbdd4911c8ba7dcbf53e7409abbcd96d43cf391\n Stored in directory: /root/.cache/pip/wheels/a7/c1/ea/cf5bd31012e735dc1dfea3131a2d5eae7978b251083d6247bd\n Building wheel for kaldiio (setup.py): started\n Building wheel for kaldiio (setup.py): finished with status 'done'\n Created wheel for kaldiio: filename=kaldiio-2.17.0-cp36-none-any.whl size=24457 sha256=1160046db5d73c46e8f3e94ada423277ba61176278233a5a663f83e88186b7ec\n Stored in directory: 
/root/.cache/pip/wheels/5d/ed/f5/0ff9e53aab498215f756c4482b9233a97218f7cdaf043e5ef0\n Building wheel for pyworld (setup.py): started\n Building wheel for pyworld (setup.py): finished with status 'done'\n Created wheel for pyworld: filename=pyworld-0.2.11.post0-cp36-cp36m-linux_x86_64.whl size=608636 sha256=37e442ed5027d848c7e605690aeab3938e5a253e3e117eff31aaf334247d765b\n Stored in directory: /root/.cache/pip/wheels/dd/af/e5/28059a621233a9204e9322986b2afddb90976ad5b1c05d76d0\n Building wheel for nnmnkwii (setup.py): started\n Building wheel for nnmnkwii (setup.py): finished with status 'done'\n Created wheel for nnmnkwii: filename=nnmnkwii-0.0.21-cp36-cp36m-linux_x86_64.whl size=1881870 sha256=6672b004879a6b5a142567ed2691d6a067cab0f8b8adaba48a82c912ccda6de9\n Stored in directory: /root/.cache/pip/wheels/be/44/02/9f421ab97da563141b42a76b58937f17efb07d548e108d9746\n Building wheel for pystoi (setup.py): started\n Building wheel for pystoi (setup.py): finished with status 'done'\n Created wheel for pystoi: filename=pystoi-0.3.3-py2.py3-none-any.whl size=7782 sha256=2f877708956aa4092457bd8326454a206c7b3e6fe8051423540f9231b65e3ab8\n Stored in directory: /root/.cache/pip/wheels/6b/6c/b2/d49af4b7ee4ce275cf2511484b043e09b9cb7ae90c1accb17a\n Building wheel for mir-eval (setup.py): started\n Building wheel for mir-eval (setup.py): finished with status 'done'\n Created wheel for mir-eval: filename=mir_eval-0.6-cp36-none-any.whl size=96516 sha256=2ef0c79f93bf728a7a503f4d4135280fb472c669eb4e1f6e8f72e3a63bfa3c08\n Stored in directory: /root/.cache/pip/wheels/49/ce/30/730fa72addf275e49d90683b01b3613048b4be3bf7ff8eb6ec\n Building wheel for bandmat (setup.py): started\n Building wheel for bandmat (setup.py): finished with status 'done'\n Created wheel for bandmat: filename=bandmat-0.7-cp36-cp36m-linux_x86_64.whl size=897998 sha256=07c25ffd1c3284b7bce21eaf9f77d1334cbad316afeadb4a5ecaea2fa0b4bb09\n Stored in directory: /root/.cache/pip/wheels/54/1e/28/429fffda48c9c9d2be90a5bea465554ac9f8f40bb370dae42d\n Building wheel for jaconv (setup.py): started\n Building wheel for jaconv (setup.py): finished with status 'done'\n Created wheel for jaconv: filename=jaconv-0.2.4-cp36-none-any.whl size=12284 sha256=2a81d11be07733efe72f62f615e8c40a37fbaa91c5e67271ecffa5e7f2f15e25\n Stored in directory: /root/.cache/pip/wheels/e1/46/f7/85a7f89bd3263423c8530dfed16083f9a142cc0fc78c81ff32\n Building wheel for distance (setup.py): started\n Building wheel for distance (setup.py): finished with status 'done'\n Created wheel for distance: filename=Distance-0.1.3-cp36-none-any.whl size=16262 sha256=afb894eb8aeb44f058cb81796d8d615d89edfd8c57ab32daba7bb9f506f3cd7e\n Stored in directory: /root/.cache/pip/wheels/d5/aa/e1/dbba9e7b6d397d645d0f12db1c66dbae9c5442b39b001db18e\nSuccessfully built configargparse ctc-segmentation librosa pysptk nltk PyYAML kaldiio pyworld nnmnkwii pystoi mir-eval bandmat jaconv distance\nInstalling collected packages: configargparse, humanfriendly, matplotlib, editdistance, espnet-model-zoo, ctc-segmentation, tensorboardX, soundfile, appdirs, pooch, librosa, pysptk, sentencepiece, nltk, morfessor, PyYAML, h5py, kaldiio, unidecode, pyworld, bandmat, nnmnkwii, jaconv, pypinyin, distance, g2p-en, espnet-tts-frontend, pyaml, stempeg, musdb, simplejson, museval, pystoi, nara-wpe, torch-complex, pytorch-wpe, mir-eval, pytorch-ranger, torch-optimizer, torchaudio, espnet\n Found existing installation: matplotlib 3.2.2\n Uninstalling matplotlib-3.2.2:\n Successfully uninstalled matplotlib-3.2.2\n Found existing 
installation: editdistance 0.5.3\n Uninstalling editdistance-0.5.3:\n Successfully uninstalled editdistance-0.5.3\n Found existing installation: librosa 0.6.3\n Uninstalling librosa-0.6.3:\n Successfully uninstalled librosa-0.6.3\n Found existing installation: nltk 3.2.5\n Uninstalling nltk-3.2.5:\n Successfully uninstalled nltk-3.2.5\n Found existing installation: PyYAML 3.13\n Uninstalling PyYAML-3.13:\n Successfully uninstalled PyYAML-3.13\n Found existing installation: h5py 2.10.0\n Uninstalling h5py-2.10.0:\n Successfully uninstalled h5py-2.10.0\n Running setup.py develop for espnet\nSuccessfully installed PyYAML-5.3.1 appdirs-1.4.4 bandmat-0.7 configargparse-1.2.3 ctc-segmentation-1.1.0 distance-0.1.3 editdistance-0.5.2 espnet espnet-model-zoo-0.0.0a16 espnet-tts-frontend-0.0.3 g2p-en-2.1.0 h5py-2.9.0 humanfriendly-8.2 jaconv-0.2.4 kaldiio-2.17.0 librosa-0.8.0 matplotlib-3.1.0 mir-eval-0.6 morfessor-2.0.6 musdb-0.3.1 museval-0.3.1 nara-wpe-0.0.7 nltk-3.5 nnmnkwii-0.0.21 pooch-1.2.0 pyaml-20.4.0 pypinyin-0.39.1 pysptk-0.1.18 pystoi-0.3.3 pytorch-ranger-0.1.1 pytorch-wpe-0.0.0 pyworld-0.2.11.post0 sentencepiece-0.1.86 simplejson-3.17.2 soundfile-0.10.3.post1 stempeg-0.1.8 tensorboardX-2.1 torch-complex-0.2.0 torch-optimizer-0.0.1a15 torchaudio-0.6.0 unidecode-1.1.1\nCollecting util@ git+https://[email protected]/dertilo/util.git#egg=util\n Cloning https://****@github.com/dertilo/util.git to /tmp/pip-install-mpx7gbf6/util\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from util@ git+https://[email protected]/dertilo/util.git#egg=util) (1.18.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from util@ git+https://[email protected]/dertilo/util.git#egg=util) (1.4.1)\nBuilding wheels for collected packages: util\n Building wheel for util (setup.py): started\n Building wheel for util (setup.py): finished with status 'done'\n Created wheel for util: filename=util-0.1-cp36-none-any.whl size=10975 sha256=06b96e3b7253440d2550bd4634bc87c57b3b4cf33cd82230624f92a74e58a677\n Stored in directory: /tmp/pip-ephem-wheel-cache-bpcf8ilf/wheels/95/b5/bf/4b1614c05bcd41e9aa6453ed81261e7d7f3d20e168935b0cfd\nSuccessfully built util\nInstalling collected packages: util\nSuccessfully installed util-0.1\n"
],
[
"%%time\n%%bash\nls -alth /mydrive/data\nTARFILE=dev-clean_preprocessed.tar.gz\ncp /mydrive/data/$TARFILE ./\ntar xfz $TARFILE -C ./\nls -alth\ndu -sh ./dev-clean_preprocessed/\n\n",
"total 1.9G\n-rw------- 1 root root 971M Sep 29 14:09 asr_train_asr_transformer_e18_raw_bpe_sp_valid.acc.best.zip\n-rw------- 1 root root 55M Sep 28 16:07 dev-clean_preprocessed.tar.gz\n-rw------- 1 root root 636M Jun 7 08:35 decisions_185_StGB_no_content.jsonl.gz\n-rw------- 1 root root 279M Jan 9 2020 BverfG_juris_content.jsonl.gz\ntotal 56M\ndrwxr-xr-x 1 root root 4.0K Oct 14 08:10 .\n-rw------- 1 root root 55M Oct 14 08:10 dev-clean_preprocessed.tar.gz\ndrwxr-xr-x 18 root root 4.0K Oct 14 08:08 espnet\ndrwxr-xr-x 1 root root 4.0K Oct 14 08:07 ..\ndrwx------ 5 root root 4.0K Oct 14 08:07 drive\ndrwxr-xr-x 1 root root 4.0K Oct 5 16:31 .config\ndrwxr-xr-x 1 root root 4.0K Oct 5 16:31 sample_data\ndrwxrwxr-x 2 1000 1000 184K Sep 28 16:03 dev-clean_preprocessed\n62M\t./dev-clean_preprocessed/\nCPU times: user 4.51 ms, sys: 1.89 ms, total: 6.4 ms\nWall time: 2.03 s\n"
],
[
"%%bash\npip install wandb\n",
"Collecting wandb\n Downloading https://files.pythonhosted.org/packages/80/14/9a2c792e48e01e55913b9495ce0e8a16297e2bc1cc99e86a848d205c91e7/wandb-0.10.5-py2.py3-none-any.whl (1.7MB)\nRequirement already satisfied: promise<3,>=2.0 in /usr/local/lib/python3.6/dist-packages (from wandb) (2.3)\nCollecting configparser>=3.8.1\n Downloading https://files.pythonhosted.org/packages/08/b2/ef713e0e67f6e7ec7d59aea3ee78d05b39c15930057e724cc6d362a8c3bb/configparser-5.0.1-py3-none-any.whl\nRequirement already satisfied: requests<3,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from wandb) (2.23.0)\nCollecting docker-pycreds>=0.4.0\n Downloading https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from wandb) (2.8.1)\nCollecting watchdog>=0.8.3\n Downloading https://files.pythonhosted.org/packages/0e/06/121302598a4fc01aca942d937f4a2c33430b7181137b35758913a8db10ad/watchdog-0.10.3.tar.gz (94kB)\nRequirement already satisfied: protobuf>=3.12.0 in /usr/local/lib/python3.6/dist-packages (from wandb) (3.12.4)\nCollecting sentry-sdk>=0.4.0\n Downloading https://files.pythonhosted.org/packages/1f/08/5eb320799e3085ccc66ec0fc3360421302803f3b784f74959564dbc6cdc9/sentry_sdk-0.19.0-py2.py3-none-any.whl (120kB)\nRequirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.6/dist-packages (from wandb) (5.4.8)\nRequirement already satisfied: PyYAML in /usr/local/lib/python3.6/dist-packages (from wandb) (5.3.1)\nCollecting subprocess32>=3.5.3\n Downloading https://files.pythonhosted.org/packages/32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz (97kB)\nCollecting GitPython>=1.0.0\n Downloading https://files.pythonhosted.org/packages/c0/d7/b2b0672e0331567157adf9281f41ee731c412ee518ca5e6552c27fa73c91/GitPython-3.1.9-py3-none-any.whl (159kB)\nRequirement already satisfied: six>=1.13.0 in /usr/local/lib/python3.6/dist-packages (from wandb) (1.15.0)\nCollecting shortuuid>=0.5.0\n Downloading https://files.pythonhosted.org/packages/25/a6/2ecc1daa6a304e7f1b216f0896b26156b78e7c38e1211e9b798b4716c53d/shortuuid-1.0.1-py3-none-any.whl\nRequirement already satisfied: Click>=7.0 in /usr/local/lib/python3.6/dist-packages (from wandb) (7.1.2)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.0.0->wandb) (2020.6.20)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.0.0->wandb) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.0.0->wandb) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.0.0->wandb) (2.10)\nCollecting pathtools>=0.1.1\n Downloading https://files.pythonhosted.org/packages/e7/7f/470d6fcdf23f9f3518f6b0b76be9df16dcc8630ad409947f8be2eb0ed13a/pathtools-0.1.2.tar.gz\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.12.0->wandb) (50.3.0)\nCollecting gitdb<5,>=4.0.1\n Downloading https://files.pythonhosted.org/packages/48/11/d1800bca0a3bae820b84b7d813ad1eff15a48a64caea9c823fc8c1b119e8/gitdb-4.0.5-py3-none-any.whl (63kB)\nCollecting smmap<4,>=3.0.1\n Downloading 
https://files.pythonhosted.org/packages/b0/9a/4d409a6234eb940e6a78dfdfc66156e7522262f5f2fecca07dc55915952d/smmap-3.0.4-py2.py3-none-any.whl\nBuilding wheels for collected packages: watchdog, subprocess32, pathtools\n Building wheel for watchdog (setup.py): started\n Building wheel for watchdog (setup.py): finished with status 'done'\n Created wheel for watchdog: filename=watchdog-0.10.3-cp36-none-any.whl size=73873 sha256=645ffb0f7ea1e065a806e33ed3b4a6213d00f9bf14a315340e8ca8f18f107056\n Stored in directory: /root/.cache/pip/wheels/a8/1d/38/2c19bb311f67cc7b4d07a2ec5ea36ab1a0a0ea50db994a5bc7\n Building wheel for subprocess32 (setup.py): started\n Building wheel for subprocess32 (setup.py): finished with status 'done'\n Created wheel for subprocess32: filename=subprocess32-3.5.4-cp36-none-any.whl size=6489 sha256=ef483908a98eb0508b479ded06e967691a77311973a07b392fbda3a87bd6d5bf\n Stored in directory: /root/.cache/pip/wheels/68/39/1a/5e402bdfdf004af1786c8b853fd92f8c4a04f22aad179654d1\n Building wheel for pathtools (setup.py): started\n Building wheel for pathtools (setup.py): finished with status 'done'\n Created wheel for pathtools: filename=pathtools-0.1.2-cp36-none-any.whl size=8785 sha256=1dcc1e5237056fe5598afa74e50e67ca69010948630a06a68bebe880fecbe21b\n Stored in directory: /root/.cache/pip/wheels/0b/04/79/c3b0c3a0266a3cb4376da31e5bfe8bba0c489246968a68e843\nSuccessfully built watchdog subprocess32 pathtools\nInstalling collected packages: configparser, docker-pycreds, pathtools, watchdog, sentry-sdk, subprocess32, smmap, gitdb, GitPython, shortuuid, wandb\nSuccessfully installed GitPython-3.1.9 configparser-5.0.1 docker-pycreds-0.4.0 gitdb-4.0.5 pathtools-0.1.2 sentry-sdk-0.19.0 shortuuid-1.0.1 smmap-3.0.4 subprocess32-3.5.4 wandb-0.10.5 watchdog-0.10.3\n"
],
[
"%%time\n%%bash \n# pip install espnet_model_zoo\n#model_name=\"Shinji Watanabe/librispeech_asr_train_asr_transformer_e18_raw_bpe_sp_valid.acc.best\"\n#espnet_model_zoo_download --unpack true --cachedir \"/content/pretrained\" \"$model_name\"\n#ls -alth /content/pretrained/653d10049fdc264f694f57b49849343e/\n#zipfile=pretrained/653d10049fdc264f694f57b49849343e/asr_train_asr_transformer_e18_raw_bpe_sp_valid.acc.best.zip\n# cp -r $zipfile /mydrive/data/\nls -alth /mydrive/data\nZIPFILE=asr_train_asr_transformer_e18_raw_bpe_sp_valid.acc.best.zip\ncp /mydrive/data/$ZIPFILE ./\nunzip $ZIPFILE -d pretrained_espnet",
"total 1.9G\n-rw------- 1 root root 971M Sep 29 14:09 asr_train_asr_transformer_e18_raw_bpe_sp_valid.acc.best.zip\n-rw------- 1 root root 55M Sep 28 16:07 dev-clean_preprocessed.tar.gz\n-rw------- 1 root root 636M Jun 7 08:35 decisions_185_StGB_no_content.jsonl.gz\n-rw------- 1 root root 279M Jan 9 2020 BverfG_juris_content.jsonl.gz\nArchive: asr_train_asr_transformer_e18_raw_bpe_sp_valid.acc.best.zip\n extracting: pretrained_espnet/meta.yaml \n extracting: pretrained_espnet/exp/asr_train_asr_transformer_e18_raw_bpe_sp/config.yaml \n extracting: pretrained_espnet/exp/lm_train_lm_adam_bpe/config.yaml \n extracting: pretrained_espnet/exp/asr_train_asr_transformer_e18_raw_bpe_sp/54epoch.pth \n extracting: pretrained_espnet/exp/lm_train_lm_adam_bpe/17epoch.pth \n extracting: pretrained_espnet/exp/asr_train_asr_transformer_e18_raw_bpe_sp/RESULTS.md \n extracting: pretrained_espnet/exp/asr_stats_raw_sp/train/feats_stats.npz \n extracting: pretrained_espnet/data/token_list/bpe_unigram5000/bpe.model \nCPU times: user 5.43 ms, sys: 5.38 ms, total: 10.8 ms\nWall time: 47.4 s\n"
],
[
"%%time\n%%bash\nexport WANDB_API_KEY=$(head -n 1 /mydrive/wandb.key | cut -d '=' -f 2 )\nexport WANDB_NAME=\"debug\"\nexport WANDB_NOTES=\"just trying to track GPU usage\"\nexport WANDB_PROJECT=espnet-asr\npython -m wandb init --project $WANDB_PROJECT\n\nrm -rf /tmp/espnet_output # start from zero, use if train-data changes somehow\n#rm -rf /tmp/espnet_output/train_logs\npython /mydrive/speech-recognition/espnet_main.py \\\n --train_path \"dev-clean_preprocessed\" \\\n --eval_path \"dev-clean_preprocessed\" \\\n --pretrained_base \"pretrained_espnet\" \\\n --num_gpus 1 \\\n --batch_bins 3200000\n # --eval_limit 40 \\",
"Namespace(batch_bins=3200000, config_yml=None, eval_limit=None, eval_path='dev-clean_preprocessed', is_distributed=False, num_encoder_blocks=1, num_gpus=1, num_workers=1, output_path='/tmp/espnet_output', pretrained_base='pretrained_espnet', train_limit=1, train_path='dev-clean_preprocessed', vocab_size=500)\n\n"
],
[
"%%bash\nls -alth /tmp/espnet_output",
"total 24K\ndrwxrwxrwt 1 root root 4.0K Oct 14 08:23 ..\ndrwxr-xr-x 5 root root 4.0K Oct 14 08:23 train_logs\ndrwxr-xr-x 5 root root 4.0K Oct 14 08:21 .\ndrwxr-xr-x 4 root root 4.0K Oct 14 08:20 stats\ndrwxr-xr-x 4 root root 4.0K Oct 14 08:20 manifests\n-rw-r--r-- 1 root root 1.5K Oct 14 08:20 config.yml\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e749230d91717b6b9cdb5ca0f07a0862c1e7d028 | 64,326 | ipynb | Jupyter Notebook | experiments/old/experiment_07_omalizumab_full.ipynb | pietrobarbiero/logic_explained_networks | 238f2a220ae8fc4f31ab0cf12649603aba0285d5 | [
"Apache-2.0"
] | 18 | 2021-05-24T07:47:57.000Z | 2022-01-05T14:48:39.000Z | experiments/old/experiment_07_omalizumab_full.ipynb | pietrobarbiero/logic_explained_networks | 238f2a220ae8fc4f31ab0cf12649603aba0285d5 | [
"Apache-2.0"
] | 1 | 2021-08-25T16:33:10.000Z | 2021-08-25T16:33:10.000Z | experiments/old/experiment_07_omalizumab_full.ipynb | pietrobarbiero/deep-logic | 238f2a220ae8fc4f31ab0cf12649603aba0285d5 | [
"Apache-2.0"
] | 2 | 2021-05-26T08:15:14.000Z | 2021-08-23T18:58:16.000Z | 51.750603 | 2,502 | 0.548487 | [
[
[
"import sys\nsys.path.append('..')\nimport os\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import simplify_logic\nimport time\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier, plot_tree\nfrom sklearn.tree import _tree, export_text\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport torch.nn.functional as F\nfrom sklearn.preprocessing import OneHotEncoder, MinMaxScaler, LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom lens.utils.base import validate_network, set_seed, tree_to_formula\nfrom lens.utils.relunn import get_reduced_model, prune_features\nfrom lens.utils.sigmoidnn import prune_equal_fanin\nfrom lens import logic\n\nresults_dir = 'results/omalizumab_full'\nif not os.path.isdir(results_dir):\n os.makedirs(results_dir)\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nn_rep = 10\ntot_epochs = 6001\nprune_epochs = 3001",
"_____no_output_____"
],
[
"gene_expression_matrix = pd.read_csv('data/omalizumab/w_1/data_0.csv', index_col=None, header=None)\nlabels = pd.read_csv('data/omalizumab/w_1/tempLabels_W-1.csv', index_col=None, header=None)\ngenes = pd.read_csv('data/omalizumab/w_1/features_0.csv', index_col=None, header=None)\ngene_expression_matrix",
"_____no_output_____"
],
[
"encoder = LabelEncoder()\nlabels_encoded = encoder.fit_transform(labels.values)\nlabels_encoded_noncontrols = labels_encoded[labels_encoded!=0] - 1\n\ndata_controls = gene_expression_matrix[labels_encoded==0]\ndata = gene_expression_matrix[labels_encoded!=0]\n\ngene_signature = data_controls.mean(axis=0)\ndata_scaled = data - gene_signature\n\nscaler = MinMaxScaler((0, 1))\nscaler.fit(data_scaled)\ndata_normalized = scaler.transform(data_scaled)\n\nx = torch.FloatTensor(data_normalized)\ny = torch.LongTensor(labels_encoded_noncontrols)\nprint(x.shape)\nprint(y.shape)",
"c:\\users\\pietr\\anaconda3\\envs\\deep-logic\\lib\\site-packages\\sklearn\\utils\\validation.py:63: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n return f(*args, **kwargs)\n"
],
[
"concepts = list(genes.values.squeeze())\nconcepts[:10]",
"_____no_output_____"
],
[
"n_splits = 10\nskf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=0)",
"_____no_output_____"
],
[
"def train_nn(x_train, y_train, need_pruning, seed, device, relu=False):\n set_seed(seed)\n x_train = x_train.to(device)\n y_train = y_train.to(device)\n layers = [\n torch.nn.Linear(x_train.size(1), 50),\n torch.nn.ReLU() if relu else torch.nn.LeakyReLU(),\n torch.nn.Linear(50, 20),\n torch.nn.ReLU() if relu else torch.nn.LeakyReLU(),\n torch.nn.Linear(20, 5),\n torch.nn.ReLU() if relu else torch.nn.LeakyReLU(),\n torch.nn.Linear(5, 2),\n torch.nn.Softmax(dim=1),\n ]\n model = torch.nn.Sequential(*layers).to(device)\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=0.0001)\n loss_form = torch.nn.CrossEntropyLoss()\n model.train()\n for epoch in range(tot_epochs):\n # forward pass\n optimizer.zero_grad()\n y_pred = model(x_train)\n # Compute Loss\n loss = loss_form(y_pred, y_train)\n\n for module in model.children():\n if isinstance(module, torch.nn.Linear):\n loss += 0.0001 * torch.norm(module.weight, 1)\n loss += 0.0001 * torch.norm(module.bias, 1)\n break\n\n # backward pass\n loss.backward()\n optimizer.step()\n\n if epoch > prune_epochs and need_pruning and epoch % 1000 == 1:\n prune_features(model, n_classes=1, device=device)\n need_pruning = True\n \n # compute accuracy\n if epoch % 500 == 0:\n y_pred_d = torch.argmax(y_pred, dim=1)\n accuracy = y_pred_d.eq(y_train).sum().item() / y_train.size(0)\n print(f'\\t Epoch {epoch}: train accuracy: {accuracy:.4f}')\n \n return model",
"_____no_output_____"
]
],
[
[
"# General pruning",
"_____no_output_____"
]
],
[
[
"need_pruning = True\nmethod = 'pruning'\nmethods = []\nsplits = []\nexplanations = []\nexplanations_inv = []\nmodel_accuracies = []\nexplanation_accuracies = []\nexplanation_accuracies_inv = []\nelapsed_times = []\nelapsed_times_inv = []\n\nfor split, (train_index, test_index) in enumerate(skf.split(x.cpu().detach().numpy(), y.cpu().detach().numpy())):\n print(f'Split [{split+1}/{n_splits}]')\n x_train, x_test = torch.FloatTensor(x[train_index]), torch.FloatTensor(x[test_index])\n y_train, y_test = torch.LongTensor(y[train_index]), torch.LongTensor(y[test_index])\n \n# if split not in [5]: continue\n \n model = train_nn(x_train, y_train, need_pruning, split, device)\n \n y_preds = model(x_test.to(device)).cpu().detach().numpy()\n model_accuracy = accuracy_score(y_test.cpu().detach().numpy(), y_preds.argmax(axis=1))\n print(f'\\t Model\\'s accuracy: {model_accuracy:.4f}')\n \n # positive class\n target_class = 1\n start = time.time()\n global_explanation, _, counter = logic.relunn.combine_local_explanations(model, \n x_train.to(device), y_train.to(device), \n target_class=target_class,\n topk_explanations=2,\n method=method, device=device)\n elapsed_time = time.time() - start\n if global_explanation:\n explanation_accuracy, _ = logic.base.test_explanation(global_explanation, target_class, x_test, y_test)\n explanation = logic.base.replace_names(global_explanation, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation}\" - Accuracy: {explanation_accuracy:.4f}')\n print(f'\\t Elapsed time {elapsed_time}')\n \n # negative class\n target_class = 0\n start = time.time()\n global_explanation_inv, _, counter_inv = logic.relunn.combine_local_explanations(model, \n x_train.to(device), y_train.to(device), \n target_class=target_class,\n topk_explanations=2,\n method=method, device=device)\n elapsed_time_inv = time.time() - start\n if global_explanation_inv:\n explanation_accuracy_inv, _ = logic.base.test_explanation(global_explanation_inv, target_class, x_test, y_test)\n explanation_inv = logic.base.replace_names(global_explanation_inv, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation_inv}\" - Accuracy: {explanation_accuracy_inv:.4f}')\n print(f'\\t Elapsed time {elapsed_time_inv}')\n \n methods.append(method)\n splits.append(split)\n explanations.append(explanation)\n explanations_inv.append(explanation_inv)\n model_accuracies.append(model_accuracy)\n explanation_accuracies.append(explanation_accuracy)\n explanation_accuracies_inv.append(explanation_accuracy_inv)\n elapsed_times.append(elapsed_time)\n elapsed_times_inv.append(elapsed_time_inv)",
"Split [1/10]\n\t Epoch 0: train accuracy: 0.2500\n\t Epoch 500: train accuracy: 1.0000\n\t Epoch 1000: train accuracy: 1.0000\n\t Epoch 1500: train accuracy: 1.0000\n\t Epoch 2000: train accuracy: 1.0000\n\t Epoch 2500: train accuracy: 1.0000\n\t Epoch 3000: train accuracy: 1.0000\n\t Epoch 3500: train accuracy: 1.0000\n\t Epoch 4000: train accuracy: 1.0000\n\t Epoch 4500: train accuracy: 1.0000\n\t Epoch 5000: train accuracy: 1.0000\n\t Epoch 5500: train accuracy: 1.0000\n\t Epoch 6000: train accuracy: 1.0000\n\t Model's accuracy: 0.7500\n\t Class 1 - Global explanation: \"(ILMN_1777168 & ILMN_2127722 & ILMN_2293374 & ILMN_3207605 & ILMN_3228700 & ILMN_3237256 & ILMN_3243714 & ILMN_3248786 & ~ILMN_1708983 & ~ILMN_1775520 & ~ILMN_1893728 & ~ILMN_2102422 & ~ILMN_2104929 & ~ILMN_3186853 & ~ILMN_3282496 & ~ILMN_3310514) | (ILMN_1777168 & ILMN_2102422 & ILMN_3243714 & ILMN_3248786 & ILMN_3282496 & ~ILMN_1708983 & ~ILMN_1775520 & ~ILMN_1893728 & ~ILMN_2104929 & ~ILMN_2127722 & ~ILMN_2293374 & ~ILMN_3186853 & ~ILMN_3207605 & ~ILMN_3228700 & ~ILMN_3237256 & ~ILMN_3310514)\" - Accuracy: 0.2500\n\t Elapsed time 54.590567111968994\n\t Class 0 - Global explanation: \"(ILMN_1775520 & ILMN_2293374 & ILMN_3248786 & ILMN_3310514 & ~ILMN_1708983 & ~ILMN_1777168 & ~ILMN_1893728 & ~ILMN_2102422 & ~ILMN_2104929 & ~ILMN_2127722 & ~ILMN_3186853 & ~ILMN_3207605 & ~ILMN_3228700 & ~ILMN_3237256 & ~ILMN_3243714 & ~ILMN_3282496) | (ILMN_1893728 & ILMN_3248786 & ILMN_3310514 & ~ILMN_1708983 & ~ILMN_1775520 & ~ILMN_1777168 & ~ILMN_2102422 & ~ILMN_2104929 & ~ILMN_2127722 & ~ILMN_2293374 & ~ILMN_3186853 & ~ILMN_3207605 & ~ILMN_3228700 & ~ILMN_3237256 & ~ILMN_3243714 & ~ILMN_3282496)\" - Accuracy: 0.7500\n\t Elapsed time 52.65466928482056\nSplit [2/10]\n\t Epoch 0: train accuracy: 0.2500\n\t Epoch 500: train accuracy: 0.2500\n\t Epoch 1000: train accuracy: 0.7500\n\t Epoch 1500: train accuracy: 0.7500\n\t Epoch 2000: train accuracy: 0.7500\n\t Epoch 2500: train accuracy: 0.9722\n\t Epoch 3000: train accuracy: 0.9722\n\t Epoch 3500: train accuracy: 1.0000\n\t Epoch 4000: train accuracy: 1.0000\n\t Epoch 4500: train accuracy: 0.9722\n\t Epoch 5000: train accuracy: 1.0000\n\t Epoch 5500: train accuracy: 1.0000\n\t Epoch 6000: train accuracy: 1.0000\n\t Model's accuracy: 0.7500\n"
],
[
"results_pruning = pd.DataFrame({\n 'method': methods,\n 'split': splits,\n 'explanation': explanations,\n 'explanation_inv': explanations_inv,\n 'model_accuracy': model_accuracies,\n 'explanation_accuracy': explanation_accuracies,\n 'explanation_accuracy_inv': explanation_accuracies_inv,\n 'elapsed_time': elapsed_times,\n 'elapsed_time_inv': elapsed_times_inv,\n})\nresults_pruning.to_csv(os.path.join(results_dir, 'results_pruning.csv'))\nresults_pruning",
"_____no_output_____"
]
],
[
[
"# LIME",
"_____no_output_____"
]
],
[
[
"need_pruning = False\nmethod = 'lime'\nmethods = []\nsplits = []\nexplanations = []\nexplanations_inv = []\nmodel_accuracies = []\nexplanation_accuracies = []\nexplanation_accuracies_inv = []\nelapsed_times = []\nelapsed_times_inv = []\nfor seed in range(n_rep):\n print(f'Seed [{seed+1}/{n_rep}]')\n \n model = train_nn(x_train, y_train, need_pruning, seed, device)\n \n y_preds = model(x_test.to(device)).cpu().detach().numpy()\n model_accuracy = accuracy_score(y_test.cpu().detach().numpy(), y_preds.argmax(axis=1))\n print(f'\\t Model\\'s accuracy: {model_accuracy:.4f}')\n \n # positive class\n target_class = 1\n start = time.time()\n global_explanation, _, _ = logic.relunn.combine_local_explanations(model, \n x_train.to(device), y_train.to(device),\n topk_explanations=2,\n target_class=target_class,\n method=method, device=device)\n elapsed_time = time.time() - start\n if global_explanation:\n explanation_accuracy, _ = logic.base.test_explanation(global_explanation, target_class, x_test, y_test)\n explanation = logic.base.replace_names(global_explanation, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation}\" - Accuracy: {explanation_accuracy:.4f}')\n print(f'\\t Elapsed time {elapsed_time}')\n \n # negative class\n target_class = 0\n start = time.time()\n global_explanation_inv, _, _ = logic.relunn.combine_local_explanations(model, \n x_train.to(device), y_train.to(device), \n topk_explanations=2,\n target_class=target_class,\n method=method, device=device)\n elapsed_time_inv = time.time() - start\n if global_explanation_inv:\n explanation_accuracy_inv, _ = logic.base.test_explanation(global_explanation_inv, target_class, x_test, y_test)\n explanation_inv = logic.base.replace_names(global_explanation_inv, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation_inv}\" - Accuracy: {explanation_accuracy_inv:.4f}')\n print(f'\\t Elapsed time {elapsed_time_inv}')\n \n methods.append(method)\n splits.append(seed)\n explanations.append(explanation)\n explanations_inv.append(explanation_inv)\n model_accuracies.append(model_accuracy)\n explanation_accuracies.append(explanation_accuracy)\n explanation_accuracies_inv.append(explanation_accuracy_inv)\n elapsed_times.append(elapsed_time)\n elapsed_times_inv.append(elapsed_time_inv)",
"_____no_output_____"
],
[
"results_lime = pd.DataFrame({\n 'method': methods,\n 'split': splits,\n 'explanation': explanations,\n 'explanation_inv': explanations_inv,\n 'model_accuracy': model_accuracies,\n 'explanation_accuracy': explanation_accuracies,\n 'explanation_accuracy_inv': explanation_accuracies_inv,\n 'elapsed_time': elapsed_times,\n 'elapsed_time_inv': elapsed_times_inv,\n})\nresults_lime.to_csv(os.path.join(results_dir, 'results_lime.csv'))\nresults_lime",
"_____no_output_____"
]
],
[
[
"# Weights",
"_____no_output_____"
]
],
[
[
"need_pruning = False\nmethod = 'weights'\nmethods = []\nsplits = []\nexplanations = []\nexplanations_inv = []\nmodel_accuracies = []\nexplanation_accuracies = []\nexplanation_accuracies_inv = []\nelapsed_times = []\nelapsed_times_inv = []\n\nfor split, (train_index, test_index) in enumerate(skf.split(x.cpu().detach().numpy(), y.cpu().detach().numpy())):\n print(f'Split [{split+1}/{n_splits}]')\n x_train, x_test = torch.FloatTensor(x[train_index]), torch.FloatTensor(x[test_index])\n y_train, y_test = torch.LongTensor(y[train_index]), torch.LongTensor(y[test_index])\n \n model = train_nn(x_train, y_train, need_pruning, split, device, relu=True)\n \n y_preds = model(x_test.to(device)).cpu().detach().numpy()\n model_accuracy = accuracy_score(y_test.cpu().detach().numpy(), y_preds.argmax(axis=1))\n print(f'\\t Model\\'s accuracy: {model_accuracy:.4f}')\n \n # positive class\n target_class = 1\n start = time.time()\n global_explanation, _, _ = logic.relunn.combine_local_explanations(model, \n x_train.to(device), y_train.to(device),\n topk_explanations=2, \n target_class=target_class,\n method=method, device=device)\n elapsed_time = time.time() - start\n if global_explanation:\n explanation_accuracy, _ = logic.base.test_explanation(global_explanation, target_class, x_test, y_test)\n explanation = logic.base.replace_names(global_explanation, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation}\" - Accuracy: {explanation_accuracy:.4f}')\n print(f'\\t Elapsed time {elapsed_time}')\n \n # negative class\n target_class = 0\n start = time.time()\n global_explanation_inv, _, _ = logic.relunn.combine_local_explanations(model, \n x_train.to(device), y_train.to(device), \n topk_explanations=2, \n target_class=target_class,\n method=method, device=device)\n elapsed_time_inv = time.time() - start\n if global_explanation_inv:\n explanation_accuracy_inv, _ = logic.base.test_explanation(global_explanation_inv, target_class, x_test, y_test)\n explanation_inv = logic.base.replace_names(global_explanation_inv, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation_inv}\" - Accuracy: {explanation_accuracy_inv:.4f}')\n print(f'\\t Elapsed time {elapsed_time_inv}')\n \n methods.append(method)\n splits.append(split)\n explanations.append(explanation)\n explanations_inv.append(explanation_inv)\n model_accuracies.append(model_accuracy)\n explanation_accuracies.append(explanation_accuracy)\n explanation_accuracies_inv.append(explanation_accuracy_inv)\n elapsed_times.append(elapsed_time)\n elapsed_times_inv.append(elapsed_time_inv)",
"_____no_output_____"
],
[
"results_weights = pd.DataFrame({\n 'method': methods,\n 'split': splits,\n 'explanation': explanations,\n 'explanation_inv': explanations_inv,\n 'model_accuracy': model_accuracies,\n 'explanation_accuracy': explanation_accuracies,\n 'explanation_accuracy_inv': explanation_accuracies_inv,\n 'elapsed_time': elapsed_times,\n 'elapsed_time_inv': elapsed_times_inv,\n})\nresults_weights.to_csv(os.path.join(results_dir, 'results_weights.csv'))\nresults_weights",
"_____no_output_____"
]
],
[
[
"# Psi network",
"_____no_output_____"
]
],
[
[
"def train_psi_nn(x_train, y_train, need_pruning, seed, device):\n set_seed(seed)\n x_train = x_train.to(device)\n y_train = y_train.to(device).to(torch.float)\n layers = [\n torch.nn.Linear(x_train.size(1), 10),\n torch.nn.Sigmoid(),\n torch.nn.Linear(10, 4),\n torch.nn.Sigmoid(),\n torch.nn.Linear(4, 1),\n torch.nn.Sigmoid(),\n ]\n model = torch.nn.Sequential(*layers).to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n loss_form = torch.nn.BCELoss()\n model.train()\n for epoch in range(tot_epochs):\n # forward pass\n optimizer.zero_grad()\n y_pred = model(x_train).squeeze()\n # Compute Loss\n loss = loss_form(y_pred, y_train)\n\n for module in model.children():\n if isinstance(module, torch.nn.Linear):\n loss += 0.0001 * torch.norm(module.weight, 1)\n\n # backward pass\n loss.backward()\n optimizer.step()\n\n if epoch > 1500 and need_pruning:\n model = prune_equal_fanin(model, 2, validate=True, device=device)\n need_pruning = False\n \n # compute accuracy\n if epoch % 500 == 0:\n y_pred_d = y_pred > 0.5\n accuracy = y_pred_d.eq(y_train).sum().item() / y_train.size(0)\n print(f'\\t Epoch {epoch}: train accuracy: {accuracy:.4f}')\n \n return model",
"_____no_output_____"
],
[
"need_pruning = True\nmethod = 'psi'\nmethods = []\nsplits = []\nexplanations = []\nexplanations_inv = []\nmodel_accuracies = []\nexplanation_accuracies = []\nexplanation_accuracies_inv = []\nelapsed_times = []\nelapsed_times_inv = []\nfor seed in range(n_rep):\n print(f'Seed [{seed+1}/{n_rep}]')\n \n # positive class\n target_class = 1\n model = train_psi_nn(x_train, y_train, need_pruning, seed, device)\n \n y_preds = model(x_test.to(device)).cpu().detach().numpy()\n model_accuracy = accuracy_score(y_test.cpu().detach().numpy(), y_preds > 0.5)\n print(f'\\t Model\\'s accuracy: {model_accuracy:.4f}')\n \n start = time.time()\n global_explanation = logic.generate_fol_explanations(model, device)[0]\n elapsed_time = time.time() - start\n explanation_accuracy, _ = logic.base.test_explanation(global_explanation, target_class, x_test, y_test)\n explanation = logic.base.replace_names(global_explanation, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation}\" - Accuracy: {explanation_accuracy:.4f}')\n print(f'\\t Elapsed time {elapsed_time}')\n \n # negative class\n target_class = 0\n model = train_psi_nn(x_train, y_train.eq(target_class), need_pruning, seed, device)\n \n y_preds = model(x_test.to(device)).cpu().detach().numpy()\n model_accuracy = accuracy_score(y_test.eq(target_class).cpu().detach().numpy(), y_preds > 0.5)\n print(f'\\t Model\\'s accuracy: {model_accuracy:.4f}')\n \n start = time.time()\n global_explanation_inv = logic.generate_fol_explanations(model, device)[0]\n elapsed_time_inv = time.time() - start\n explanation_accuracy_inv, _ = logic.base.test_explanation(global_explanation_inv, \n target_class, x_test, y_test)\n explanation_inv = logic.base.replace_names(global_explanation_inv, concepts)\n print(f'\\t Class {target_class} - Global explanation: \"{explanation_inv}\" - Accuracy: {explanation_accuracy_inv:.4f}')\n print(f'\\t Elapsed time {elapsed_time_inv}')\n \n methods.append(method)\n splits.append(seed)\n explanations.append(explanation)\n explanations_inv.append(explanation_inv)\n model_accuracies.append(model_accuracy)\n explanation_accuracies.append(explanation_accuracy)\n explanation_accuracies_inv.append(explanation_accuracy_inv)\n elapsed_times.append(elapsed_time)\n elapsed_times_inv.append(elapsed_time_inv)",
"_____no_output_____"
],
[
"results_psi = pd.DataFrame({\n 'method': methods,\n 'split': splits,\n 'explanation': explanations,\n 'explanation_inv': explanations_inv,\n 'model_accuracy': model_accuracies,\n 'explanation_accuracy': explanation_accuracies,\n 'explanation_accuracy_inv': explanation_accuracies_inv,\n 'elapsed_time': elapsed_times,\n 'elapsed_time_inv': elapsed_times_inv,\n})\nresults_psi.to_csv(os.path.join(results_dir, 'results_psi.csv'))\nresults_psi",
"_____no_output_____"
]
],
[
[
"# Decision tree",
"_____no_output_____"
]
],
[
[
"need_pruning = False\nmethod = 'decision_tree'\nmethods = []\nsplits = []\nexplanations = []\nexplanations_inv = []\nmodel_accuracies = []\nexplanation_accuracies = []\nexplanation_accuracies_inv = []\nelapsed_times = []\nelapsed_times_inv = []\n\nfor split, (train_index, test_index) in enumerate(skf.split(x.cpu().detach().numpy(), y.cpu().detach().numpy())):\n print(f'Split [{split+1}/{n_splits}]')\n x_train, x_test = x[train_index], x[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n classifier = DecisionTreeClassifier(random_state=split)\n classifier.fit(x_train.cpu().detach().numpy(), y_train.cpu().detach().numpy())\n y_preds = classifier.predict(x_test.cpu().detach().numpy())\n model_accuracy = accuracy_score(y_test.cpu().detach().numpy(), y_preds)\n print(f'\\t Model\\'s accuracy: {model_accuracy:.4f}')\n \n target_class = 1\n start = time.time()\n explanation = tree_to_formula(classifier, concepts, target_class)\n elapsed_time = time.time() - start\n print(f'\\t Class {target_class} - Global explanation: {explanation}')\n print(f'\\t Elapsed time {elapsed_time}')\n \n target_class = 0\n start = time.time()\n explanation_inv = tree_to_formula(classifier, concepts, target_class)\n elapsed_time_inv = time.time() - start\n print(f'\\t Class {target_class} - Global explanation: {explanation_inv}')\n print(f'\\t Elapsed time {elapsed_time_inv}')\n \n methods.append(method)\n splits.append(split)\n explanations.append(explanation)\n explanations_inv.append(explanation_inv)\n model_accuracies.append(model_accuracy)\n explanation_accuracies.append(model_accuracy)\n explanation_accuracies_inv.append(model_accuracy)\n elapsed_times.append(0)\n elapsed_times_inv.append(0)",
"_____no_output_____"
],
[
"results_tree = pd.DataFrame({\n 'method': methods,\n 'split': splits,\n 'explanation': explanations,\n 'explanation_inv': explanations_inv,\n 'model_accuracy': model_accuracies,\n 'explanation_accuracy': explanation_accuracies,\n 'explanation_accuracy_inv': explanation_accuracies_inv,\n 'elapsed_time': elapsed_times,\n 'elapsed_time_inv': elapsed_times_inv,\n})\nresults_tree.to_csv(os.path.join(results_dir, 'results_tree.csv'))\nresults_tree",
"_____no_output_____"
]
],
[
[
"# Summary",
"_____no_output_____"
]
],
[
[
"cols = ['model_accuracy', 'explanation_accuracy', 'explanation_accuracy_inv', 'elapsed_time', 'elapsed_time_inv']\nmean_cols = [f'{c}_mean' for c in cols]\nsem_cols = [f'{c}_sem' for c in cols]\n\n# pruning\ndf_mean = results_pruning[cols].mean()\ndf_sem = results_pruning[cols].sem()\ndf_mean.columns = mean_cols\ndf_sem.columns = sem_cols\nsummary_pruning = pd.concat([df_mean, df_sem])\nsummary_pruning.name = 'pruning'\n\n# lime\ndf_mean = results_lime[cols].mean()\ndf_sem = results_lime[cols].sem()\ndf_mean.columns = mean_cols\ndf_sem.columns = sem_cols\nsummary_lime = pd.concat([df_mean, df_sem])\nsummary_lime.name = 'lime'\n\n# weights\ndf_mean = results_weights[cols].mean()\ndf_sem = results_weights[cols].sem()\ndf_mean.columns = mean_cols\ndf_sem.columns = sem_cols\nsummary_weights = pd.concat([df_mean, df_sem])\nsummary_weights.name = 'weights'\n\n# psi\ndf_mean = results_psi[cols].mean()\ndf_sem = results_psi[cols].sem()\ndf_mean.columns = mean_cols\ndf_sem.columns = sem_cols\nsummary_psi = pd.concat([df_mean, df_sem])\nsummary_psi.name = 'psi'\n\n# tree\ndf_mean = results_tree[cols].mean()\ndf_sem = results_tree[cols].sem()\ndf_mean.columns = mean_cols\ndf_sem.columns = sem_cols\nsummary_tree = pd.concat([df_mean, df_sem])\nsummary_tree.name = 'tree'\n\nsummary = pd.concat([summary_pruning, \n summary_lime, \n summary_weights, \n summary_psi, \n summary_tree], axis=1).T\nsummary.columns = mean_cols + sem_cols\nsummary",
"_____no_output_____"
],
[
"summary.to_csv(os.path.join(results_dir, 'summary.csv'))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7492b11af87c98e4906f6f60da19ceefd432066 | 89,947 | ipynb | Jupyter Notebook | lib/examples/CliMT -- Radiative-Convective Equilibrium with Simple Physics.ipynb | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | lib/examples/CliMT -- Radiative-Convective Equilibrium with Simple Physics.ipynb | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | lib/examples/CliMT -- Radiative-Convective Equilibrium with Simple Physics.ipynb | CliMT/climt-legacy | adbd4fe77426c90deb8d2c046a2f3dc3b72df89e | [
"BSD-3-Clause"
] | null | null | null | 93.988506 | 51,609 | 0.758869 | [
[
[
"# Radiative Convective Equilibrium with Simple Physics\n----------------------------------\n\nThis demo steps the grey radiation code forward in time to get a radiative equilibrium\nprofile. The Simple Physics module is available to add a boundary layer and\nsurface fluxes. The Emanuel convection scheme provides moist convection.\n\nIn the current setup, the Emanuel scheme displays a sensitivity to the time-step. If you\nchange things and if convection (TdotConv) seems to remain at zero, or the simulation blows\nup, just reduce the time step!\n\nThis configuration results in a tropopause around 550 mb, which is a consequence of the\nradiative heating being insensitive to shortwave and water vapour.",
"_____no_output_____"
]
],
[
[
"%matplotlib notebook\nimport numpy as np\nimport climt\nfrom climt.simple_physics_custom import simple_physics_custom\n\nglobal_time_step = 100.\n\n#Initialise radiation\n\nkwargs = {}\n#kwargs['UpdateFreq'] = 3600.\n\nrad = climt.radiation(scheme='newgreygas', **kwargs)\n\n#Initialise simple physics\nkwargs = {}\n#kwargs['qflux'] = False\n#kwargs['tflux'] = False\nkwargs['lsc'] = False\nkwargs['use_ext_ts'] = True\n\nkwargs['dt'] = global_time_step\n\nsolar_in = 400.\nTs = (solar_in/5.67e-8)**0.25*np.ones((1,1))\nkwargs['Ts'] = Ts\n\nphys = simple_physics_custom(**kwargs)\n\n#Initialise convection\nkwargs = {}\nkwargs['dt'] = global_time_step\n\nconv = climt.convection(scheme='emanuelnew', **kwargs)\nprint conv.FromExtension",
"Using netCDF4 interface for IO\nFinished initialising emanuel convection scheme\n['Tinc', 'qinc', 'precc', 'TdotConv', 'qdotConv', 'cape']\n"
],
[
"#Initialise federation\nkwargs = {}\nkwargs['MonitorFields'] = ['T','qdotConv','TdotConv','q']\nT = np.zeros((1,1,rad.nlev)) + (solar_in/2./5.67e-8)**0.25\nU = 10.*np.ones((1,1,rad.nlev))\n\nkwargs['dt'] = global_time_step\nkwargs['T'] = T\nkwargs['U'] = U\nkwargs['Ts'] = Ts\n\nprint Ts",
"[[ 289.81391463]]\n"
],
[
"fed = climt.federation(rad, phys, conv, **kwargs)\n\nfor i in range(100000):\n\n climt_U = fed['U']\n dU = -(1./86400)*global_time_step*(climt_U - U)\n fed.step(Inc={'U':dU})\n #fed.step()\n #print conv.Inc['q'], conv.Inc['T']\n q = fed['q']\n q[q<0] = 0\n fed['q'] = q",
"_____no_output_____"
],
[
"print conv['T']\nprint conv['p']",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e74932e33021172fef8e69b73ea7f4beb5174028 | 16,944 | ipynb | Jupyter Notebook | content/python/pandas/.ipynb_checkpoints/DataFrame_object-checkpoint.ipynb | mmblack4/mynotes | eef4ffb13f42e966b716c63b3dcb25b17971be47 | [
"MIT"
] | null | null | null | content/python/pandas/.ipynb_checkpoints/DataFrame_object-checkpoint.ipynb | mmblack4/mynotes | eef4ffb13f42e966b716c63b3dcb25b17971be47 | [
"MIT"
] | null | null | null | content/python/pandas/.ipynb_checkpoints/DataFrame_object-checkpoint.ipynb | mmblack4/mynotes | eef4ffb13f42e966b716c63b3dcb25b17971be47 | [
"MIT"
] | null | null | null | 24.808199 | 214 | 0.379308 | [
[
[
"---\ntitle: \"DataFrame object\"\nauthor: \"TACT\"\ndate: 2019-04-20\ndescription: \"-\"\ntype: technical_note\ndraft: false\n---",
"_____no_output_____"
],
[
"The pandas DataFrame object\n a pandas series represents a single array of values, with an index label for each value.if you want to have more than one series of data that is aligned by a common index, then a Pandas DataFrame is used.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nfrom pandas import DataFrame, Series",
"_____no_output_____"
],
[
"dates = pd.date_range('2019-05-18', '2019-05-25')\ntemp_chennai = Series([36, 37, 36, 37, 37, 37, 37, 37],\n index = dates)\ntemp_delhi = Series([34, 39, 41, 41, 41, 41, 41, 42],\n index = dates)",
"_____no_output_____"
],
[
"# create a DataFrame from the two Series objects temp_chennai and temp_delhi\n# and give them column names\ntemps_df = DataFrame({\n \"chennai\" : temp_chennai,\n \"Delhi\" : temp_delhi\n}) \ntemps_df",
"_____no_output_____"
],
[
"temps_df['chennai'] # get the column with the name chennai",
"_____no_output_____"
],
[
"temps_df['Delhi'] # get the column with the name Delhi",
"_____no_output_____"
],
[
"temps_df.chennai # gretrieve the chennai column through property syntax",
"_____no_output_____"
],
[
"temp_diffs = abs(temps_df.chennai - temps_df.Delhi)\ntemps_df['Difference'] = temp_diffs\ntemps_df",
"_____no_output_____"
],
[
"temps_df.columns # get columns ",
"_____no_output_____"
],
[
"temps_df.Difference[1:4]",
"_____no_output_____"
],
[
"temps_df.iloc[0] # get the row array postition 0",
"_____no_output_____"
],
[
"temps_df.ix[1].index",
"/home/mmblack/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: \n.ix is deprecated. Please use\n.loc for label based indexing or\n.iloc for positional indexing\n\nSee the documentation here:\nhttp://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated\n \"\"\"Entry point for launching an IPython kernel.\n"
],
[
"temps_df.loc['2019-05-19']",
"_____no_output_____"
],
[
"temps_df.iloc[[1, 3, 5]] # select only rows 1, 3, 5",
"_____no_output_____"
],
[
"temps_df.Delhi >40",
"_____no_output_____"
],
[
"temps_df[temps_df.Delhi > 40]",
"_____no_output_____"
]
]
] | [
"raw",
"code"
] | [
[
"raw",
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e749341f0228c3bf13837d5469c1421d3bed9ed9 | 222,574 | ipynb | Jupyter Notebook | data_preprocessing/toy-data-generation.ipynb | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 | [
"MIT"
] | null | null | null | data_preprocessing/toy-data-generation.ipynb | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 | [
"MIT"
] | null | null | null | data_preprocessing/toy-data-generation.ipynb | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 | [
"MIT"
] | null | null | null | 450.554656 | 94,324 | 0.941157 | [
[
[
"import networkx as nx\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\nnp.random.seed(23) \nfrom scipy import stats\nfrom pathlib import Path\nimport json\n",
"_____no_output_____"
],
[
"label_graph = nx.DiGraph()\nlabel_graph.add_edge('B','A')",
"_____no_output_____"
],
[
"label_graph.add_edge('C','A')\nlabel_graph.add_edge('D','E')\nlabel_graph.add_edge('F','E')\nlabel_graph.add_edge('E','G')\nlabel_graph.add_node('H')",
"_____no_output_____"
],
[
"#Visualize Graph\nnx.draw_networkx(label_graph, arrows=True)\nlabel_graph_tc = nx.algorithms.transitive_closure(label_graph)\n",
"_____no_output_____"
],
[
"\nwith open(data_dir/'hierarchy_tc.edgelist', 'wb') as f:\n nx.write_edgelist(label_graph_tc, f)",
"_____no_output_____"
],
[
"centres = [(60, -40), (50, -10), (65, -40), (-60,60), (-40,40), (-20,20), (-40,40), (-40,-50)]\nradii = [40, 10, 5, 10, 45, 15, 60, 20]\nlabels = sorted(label_graph.nodes())\nlen(centres)==len(radii)",
"_____no_output_____"
],
[
"label_names = np.array(labels)\nlabel_names",
"_____no_output_____"
],
[
"plt.figure()\nfor i in range(len(labels)):\n circle=plt.Circle(centres[i],radii[i], fill=False)\n plt.gcf().gca().add_artist(circle)\n plt.gca().annotate(labels[i], xy=(centres[i][0]+radii[i], centres[i][1]), fontsize=10)\nplt.xlim(-100,100)\nplt.ylim(-100,100)\nplt.legend()\nplt.show()",
"No handles with labels found to put in legend.\n"
],
[
"#Generate Gaussian Parameters\nmean = centres.copy()\nSigma = []\nfor i in range(len(labels)):\n #r1 = np.random.uniform(0,radii[i], 1)\n #r2 = np.random.uniform(0, radii[i], 1)\n r1 = radii[i]\n r2 = radii[i]\n corr = np.random.uniform(-0.5,0.5,1)\n Sigma.append(np.array([[r1**2, corr*r1*r2], [corr*r1*r2, r2**2]], dtype=np.float))",
"_____no_output_____"
],
[
"print(Sigma[0])",
"[[1600. -281.82369645]\n [-281.82369645 1600. ]]\n"
],
[
"delta = 0.025\nx = np.arange(-3.0, 3.0, delta)\ny = np.arange(-2.0, 2.0, delta)\nX, Y = np.meshgrid(x, y)\nZ1 = np.exp(-X**2 - Y**2)\nZ2 = np.exp(-(X - 1)**2 - (Y - 1)**2)\nZ = (Z1 - Z2) * 2",
"_____no_output_____"
],
[
"# Generate Data Samples\ndata = []\nplt.figure(figsize=(10,10))\ndistributions = []\nthresholds=[0.00005, 0.0002, 0.0001, 0.0001, 0.00003, 0.0001, 0.00003,0.00005]\nfor i in tqdm(range(len(labels))):\n #data_sampled = np.random.multivariate_normal(mean[i], Sigma[i], 1000)\n distribution = stats.multivariate_normal(mean=mean[i], cov=Sigma[i])\n distributions.append(distribution)\n #samples = distribution.rvs(size=1000)\n x = np.arange(-100.0, 100.0, 0.1)\n y = np.arange(-100.0, 100.0, 0.1)\n X, Y = np.meshgrid(x, y)\n #X, Y = np.meshgrid(samples[:,0], samples[:,1])\n z = distribution.pdf(np.array([X.reshape(-1), Y.reshape(-1)]).T).reshape(2000,2000)\n cs = plt.contour(X,Y,z, levels=[thresholds[i]])\n plt.clabel(cs, inline=1, fontsize=10, fmt='%1.6f')\n \nplt.show() ",
"100%|██████████| 8/8 [00:02<00:00, 2.69it/s]\n"
],
[
"thresholds=[0.00005, 0.0002, 0.0001, 0.0001, 0.00003, 0.0001, 0.00003,0.00001]",
"_____no_output_____"
],
[
"data_samples = np.random.uniform(-100, 100, size=(10000, 2))",
"_____no_output_____"
],
[
"classes = np.array([d.pdf(data_samples) > threshold for threshold, d in zip(thresholds,distributions)]).T",
"_____no_output_____"
],
[
"data = [{'x':ds, 'labels': label_names[l].tolist() or ['I']} for ds, l in zip(data_samples.tolist(), classes.tolist())]",
"_____no_output_____"
],
[
"labels_with_i = np.concatenate([label_names, ['I']])\nfig, axs = plt.subplots(1,9)\nfor sample in tqdm(data):\n for l in sample['labels']:\n idx = np.where(labels_with_i==l)[0][0]\n #breakpoint()\n axs[idx].scatter([sample['x'][0]],[sample['x'][1]], c='r')\nfor i, val in enumerate(labels_with_i):\n axs[i].set_title(val)\nplt.show()",
"100%|██████████| 10000/10000 [01:37<00:00, 102.50it/s]\n"
],
[
"labels_with_i = np.concatenate([label_names, ['I']])\nfig, axs = plt.subplots(3,3, figsize=(12,12))\nplot_data = [[] for i in range(labels_with_i.shape[0])]\nfor sample in tqdm(data):\n for l in sample['labels']:\n idx = np.where(labels_with_i==l)[0][0]\n #breakpoint()\n plot_data[idx].append(sample['x'])\naxs = axs.reshape(-1) \nfor i, val in enumerate(labels_with_i):\n pdata = np.array(plot_data[i])\n axs[i].scatter(pdata[:, 0], pdata[:, 1])\n axs[i].set_title(val)\n axs[i].set_xlim(-100,100)\n axs[i].set_ylim(-100,100)\nplt.show()",
"100%|██████████| 10000/10000 [00:00<00:00, 260436.51it/s]\n"
],
[
"np.random.shuffle(data)",
"_____no_output_____"
],
[
"data_new = []\nfor d in data:\n d['x'] = list(np.array(d['x'])/100)\n data_new.append(d)",
"_____no_output_____"
],
[
"train_len = int(0.5*len(data_new))\ndev_len = int(0.5*(len(data_new)-train_len))\ntrain = data_new[:train_len]\ndev = data_new[train_len:train_len+dev_len]\ntest = data_new[train_len+dev_len:]",
"_____no_output_____"
],
[
"# with open(data_dir/'train.json', 'w') as outfile:\n# json.dump(train, outfile)\n# with open(data_dir/'dev.json', 'w') as outfile:\n# json.dump(dev, outfile)\n# with open(data_dir/'test.json', 'w') as outfile:\n# json.dump(test, outfile)",
"_____no_output_____"
],
[
"# with open(data_dir/'hierarchy.edgelist', 'wb') as f:\n# nx.write_edgelist(label_graph, f)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7495c1e56c34c909a82745d8bf3349d416d0bc9 | 3,907 | ipynb | Jupyter Notebook | Dynamic Programming/1005/516. Longest Palindromic Subsequence.ipynb | YuHe0108/Leetcode | 90d904dde125dd35ee256a7f383961786f1ada5d | [
"Apache-2.0"
] | 1 | 2020-08-05T11:47:47.000Z | 2020-08-05T11:47:47.000Z | Dynamic Programming/1005/516. Longest Palindromic Subsequence.ipynb | YuHe0108/LeetCode | b9e5de69b4e4d794aff89497624f558343e362ad | [
"Apache-2.0"
] | null | null | null | Dynamic Programming/1005/516. Longest Palindromic Subsequence.ipynb | YuHe0108/LeetCode | b9e5de69b4e4d794aff89497624f558343e362ad | [
"Apache-2.0"
] | null | null | null | 26.046667 | 99 | 0.419503 | [
[
[
"第二类区间型DP:\n 给定字符串s,找到最长回文子序列的长度s。您可以假设s的最大长度为1000。\n\nExample 1:\n Input:\n \"bbbab\"\n Output:\n 4\n One possible longest palindromic subsequence is \"bbbb\".\n \nExample 2:\n Input:\n \"cbbd\"\n Output:\n 2\n One possible longest palindromic subsequence is \"bb\".\n\nConstraints:\n 1、1 <= s.length <= 1000\n 2、s consists only of lowercase English letters.",
"_____no_output_____"
]
],
[
[
"class Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n s = '0' + s\n len_s = len(s)\n dp = [[0] * len_s for _ in range(len_s)]\n \n for i in range(1, len_s):\n dp[1][i] = 1\n \n for sub_len in range(2, len_s + 1):\n start = 1\n while start + sub_len - 1 < len_s:\n end = start + sub_len\n print(s[start:end])\n if s[start] == s[end]:\n dp[sub_len][end] = max(dp[sub_len][end], dp[sub_len])\n else:\n dp[sub_len][end] = max(dp[sub_len - 1][end-1], dp[sub_len - 1][start])\n start += 1\n print(dp)\n return dp[-1][-1]",
"_____no_output_____"
],
[
"class Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n s = '0' + s\n len_s = len(s)\n \n # dp[i][j] 代表了从s的第 i 个 idx 到 j 个idx,回文数最大长度\n dp = [[0] * len_s for _ in range(len_s)]\n for i in range(1, len_s):\n dp[i][i] = 1\n \n for sub_len in range(2, len_s + 1): # 代表了从s中截取不同长度的sub_s\n start = 1\n while start + sub_len - 1 < len_s:\n end = start + sub_len - 1 # 末尾字符串的索引\n print(s[start:end+1], sub_len, start, end)\n if s[start] == s[end]:\n dp[start][end] = max(dp[start][end], dp[start+1][end-1] + 2)\n else:\n dp[start][end] = max(dp[start+1][end], dp[start][end-1])\n start += 1\n return dp[1][-1]",
"_____no_output_____"
],
[
"solution = Solution()\nsolution.longestPalindromeSubseq('cbbd')",
"cb 2 1 2\nbb 2 2 3\nbd 2 3 4\ncbb 3 1 3\nbbd 3 2 4\ncbbd 4 1 4\n"
]
]
] | [
"raw",
"code"
] | [
[
"raw"
],
[
"code",
"code",
"code"
]
] |
e7496a0da6d0ad747a2eec82c8b1175867937254 | 7,256 | ipynb | Jupyter Notebook | Lab2/.ipynb | keshavpadiyar/Bigdata-Analytics | 0369d0165cecc088593f6a4fe22bb5e4e877ec2d | [
"MIT"
] | null | null | null | Lab2/.ipynb | keshavpadiyar/Bigdata-Analytics | 0369d0165cecc088593f6a4fe22bb5e4e877ec2d | [
"MIT"
] | null | null | null | Lab2/.ipynb | keshavpadiyar/Bigdata-Analytics | 0369d0165cecc088593f6a4fe22bb5e4e877ec2d | [
"MIT"
] | null | null | null | 42.934911 | 126 | 0.50565 | [
[
[
"from pyspark import SparkContext",
"_____no_output_____"
],
[
"spark = SparkContext(appName = \"BDA - Lab2\")",
"_____no_output_____"
],
[
"#using RDD\nrdd_tempReadings = sc.textFile(\"file:///home/x_kesma/Lab1/input_data/temperature-readings.csv\") \\\n .map(lambda line: line.split(\";\"))\n\nrdd_filtered_1 = rdd_tempReadings.filter(lambda line: (int(line[1][0:4]))>=1950 and int(line[1][0:4])<=2014) \\\n .map(lambda line: (line[1][0:4],(line[0],float(line[3]))))\n\nprint(rdd_filtered_1.reduceByKey(max)\\\n .sortBy(keyfunc=lambda k: k[0],ascending = False).collect())\n\nprint(rdd_filtered_1.reduceByKey(min)\\\n .sortBy(keyfunc=lambda k: k[0],ascending = False).collect())\n",
"_____no_output_____"
],
[
"# 2_1 Count the number of readings for each month in the period of 1950-2014 which are higher than 10 degrees \n\nrdd_filtered_2_1 = rdd_tempReadings.filter(lambda line: ((int(line[1][0:4]))>=1950\\\n and int(line[1][0:4])<=2014)\\\n and float(line[3]) >10 )\\\n .map(lambda line: ((line[1][0:4], line[1][5:7]),(line[0],float(line[3]))))\\\n .countByKey()\nprint(sorted(rdd_filtered_2_1.items(), key = lambda v:v[1], reverse = True))",
"_____no_output_____"
],
[
"# 2_2 Repeat the exercise,this time taking only distinct readings from each station.\n# That is, if a station reported a reading above 10 degrees in some month, then itappears only\n# once in the count for that month\n\nrdd_filtered_2_2 = rdd_tempReadings.filter(lambda line: ((int(line[1][0:4]))>=1950\\\n and int(line[1][0:4])<=2014)\\\n and float(line[3]) >10 )\\\n .map(lambda line: (line[1][0:4], line[1][5:7],line[0]))\\\n .distinct()\\\n .map(lambda line: ((line[0],line[1]),(line[2])))\\\n .countByKey()\nprint(sorted(rdd_filtered_2_2.items(), key = lambda v:v[1], reverse = True))",
"_____no_output_____"
],
[
"# 3 Find the average monthly temperature for each available station in Sweden. Your result\n#should include average temperature for each station for each month in the period of 1960-\n#2014. Bear in mind that not every station has the readings for each month in this timeframe.\n\nrdd_filtered_3 = rdd_tempReadings.filter(lambda line: (int(line[1][0:4]))>=1950 and int(line[1][0:4])<=2014) \\\n .map(lambda line: ((line[1][0:4], line[1][5:7], line[0]),(float(line[3]))))\\\n .groupByKey()\\\n .mapValues(lambda val: sum(val)/len(val))\nprint(rdd_filtered_3\\\n .sortBy(keyfunc=lambda k: (k[0][2],k[0][0],k[0][1]),ascending = False).collect())",
"_____no_output_____"
],
[
"# 4 Provide a list of stations with their associated maximum measured temperatures and\n# maximum measured daily precipitation. Show only those stations where the maximum\n# temperature is between 25 and 30 degrees and maximum daily precipitation is between 100mm and 200mm.\n\nrdd_precReadings = sc.textFile(\"file:///home/x_kesma/Lab1/input_data/precipitation-readings.csv\") \\\n .map(lambda line: line.split(\";\"))\n\nrdd_tempReadings_4_1 = rdd_tempReadings.filter(lambda line: ((int(line[1][0:4])>=1950 and int(line[1][0:4])<=2014))\\\n and (float(line[3]) >= 10 and float(line[3]) >= 20))\\\n .map(lambda line: ((line[1],line[0]),(float(line[3]))))\\\n .reduceByKey(max)\n\nrdd_precReadings_4_2 = rdd_precReadings.filter(lambda line: float(line[3])>=10.0 and float(line[3])<=20.0)\\\n .map(lambda line:((line[1],line[0]),(float(line[3]))))\\\n .reduceByKey(max)\n\n\nrdd_joins = rdd_tempReadings_4_1.join(rdd_precReadings_4_2)\\\n .map(lambda line:(int(line[0][1]),(line[1][0], line[1][1])))",
"_____no_output_____"
],
[
"# 5 Calculate the average monthly precipitation for the Östergotland region (list of stations is\n# provided in the separate file) for the period 1993-2016. In orderto dothis, you willfirstneed to\n# calculate the total monthly precipitation for each station before calculating the monthly\n# average (by averaging over stations).\n\nrdd_ostStations = sc.textFile(\"file:///home/x_kesma/Lab1/input_data/precipitation-readings.csv\") \\\n .map(lambda line: line.split(\";\"))\n\nrdd_ostStations_5_1 = rdd_ostStations.map(lambda line: int (line[0])).distinct().collect()\n\nrdd_precReadings_5_2 = rdd_precReadings.filter(lambda line: ((int(line[1][0:4])>=1993 and int(line[1][0:4])<=2016))\\\n and int(line[0]) in rdd_ostStations_5_1)\\\n .map(lambda line: ((line[1][0:4],line[1][5:7]),(float(line[3]))))\\\n .groupByKey()\\\n .mapValues(lambda val: sum(val)/len(val))\\\n .sortBy(keyfunc=lambda k: (k[0][0],k[0][1]),ascending = False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7496a911d2ff6876c3fd643d5c688b4d19fa1d6 | 782 | ipynb | Jupyter Notebook | essential/a14.ipynb | azuryne/cv | e384c2fa5a46a57bca9d3147e0fdb436b18b5a49 | [
"MIT"
] | 10 | 2021-10-03T12:37:08.000Z | 2022-03-04T04:00:23.000Z | essential/a14.ipynb | azuryne/cv | e384c2fa5a46a57bca9d3147e0fdb436b18b5a49 | [
"MIT"
] | null | null | null | essential/a14.ipynb | azuryne/cv | e384c2fa5a46a57bca9d3147e0fdb436b18b5a49 | [
"MIT"
] | 7 | 2021-10-03T12:36:52.000Z | 2021-12-01T00:24:26.000Z | 19.073171 | 106 | 0.537084 | [
[
[
"# Assignment\n\n1. Apply the following threshold method to image ca[.ext].\n- Binary inverse\n- Truncate\n- Tozero\n- Tozero inverse\n\n2. Apply the two adaptive threshold method to image ca[.ext].\n3. Apply the bilateral filter before applying the two adaptive threshold method to image ca[.ext].\n",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e7497abf048a96f1f56182cfceebdcddc808cd03 | 16,652 | ipynb | Jupyter Notebook | panda_practice.ipynb | swcislo/School_District_Analysis | eab263cfa0872821a6ed873b1dcb614e253755ff | [
"MIT"
] | null | null | null | panda_practice.ipynb | swcislo/School_District_Analysis | eab263cfa0872821a6ed873b1dcb614e253755ff | [
"MIT"
] | null | null | null | panda_practice.ipynb | swcislo/School_District_Analysis | eab263cfa0872821a6ed873b1dcb614e253755ff | [
"MIT"
] | null | null | null | 27.120521 | 386 | 0.437545 | [
[
[
"#List of high schools\n#high_schools=['Hernandez High School', 'Figueroa High School', 'Wilson High School', 'Wright High School']\n#for school in high_schools:\n #print(school)",
"_____no_output_____"
],
[
"# A dictionary of high schools and the type of school.\nhigh_school_types = [{\"High School\": \"Griffin\", \"Type\":\"District\"},\n {\"High School\": \"Figueroa\", \"Type\": \"District\"},\n {\"High School\": \"Wilson\", \"Type\": \"Charter\"},\n {\"High School\": \"Wright\", \"Type\": \"Charter\"}]\n",
"_____no_output_____"
],
[
"print(*high_school_types,sep=\"\\n\")",
"{'High School': 'Griffin', 'Type': 'District'}\n{'High School': 'Figueroa', 'Type': 'District'}\n{'High School': 'Wilson', 'Type': 'Charter'}\n{'High School': 'Wright', 'Type': 'Charter'}\n"
],
[
"# List of high schools\nhigh_schools = [\"Huang High School\", \"Figueroa High School\", \"Shelton High School\", \"Hernandez High School\",\"Griffin High School\",\"Wilson High School\", \"Cabrera High School\", \"Bailey High School\", \"Holden High School\", \"Pena High School\", \"Wright High School\",\"Rodriguez High School\", \"Johnson High School\", \"Ford High School\", \"Thomas High School\"]",
"_____no_output_____"
],
[
"# Add the Pandas dependency\nimport pandas as pd",
"_____no_output_____"
],
[
"# Create a Panda Series from a list.\nschool_series = pd.Series(high_schools)\nschool_series",
"_____no_output_____"
],
[
"# Iterate through the school series and print out each high school\nschool_series = pd.Series(high_schools)\nfor index, value in school_series.items():\n print({value})",
"{'Huang High School'}\n{'Figueroa High School'}\n{'Shelton High School'}\n{'Hernandez High School'}\n{'Griffin High School'}\n{'Wilson High School'}\n{'Cabrera High School'}\n{'Bailey High School'}\n{'Holden High School'}\n{'Pena High School'}\n{'Wright High School'}\n{'Rodriguez High School'}\n{'Johnson High School'}\n{'Ford High School'}\n{'Thomas High School'}\n"
],
[
"# A dictionary of high schools\nhigh_school_dicts = [{\"School ID\": 0, \"school_name\": \"Huang High School\", \"type\": \"District\"},\n {\"School ID\": 1, \"school_name\": \"Figueroa High School\", \"type\": \"District\"},\n {\"School ID\": 2, \"school_name\":\"Shelton High School\", \"type\": \"Charter\"},\n {\"School ID\": 3, \"school_name\":\"Hernandez High School\", \"type\": \"District\"},\n {\"School ID\": 4, \"school_name\":\"Griffin High School\", \"type\": \"Charter\"}]",
"_____no_output_____"
],
[
"school_df = pd.DataFrame(high_school_dicts)\nschool_df",
"_____no_output_____"
],
[
"# Three separate lists of information on high schools\nschool_id = [0, 1, 2, 3, 4]\n\nschool_name = [\"Huang High School\", \"Figueroa High School\",\n\"Shelton High School\", \"Hernandez High School\",\"Griffin High School\"]\n\ntype_of_school = [\"District\", \"District\", \"Charter\", \"District\",\"Charter\"]",
"_____no_output_____"
],
[
"# Initialize a new DataFrame.\nschools_df = pd.DataFrame()",
"_____no_output_____"
],
[
"# Add the list to a new DataFrame.\nschools_df['School ID'] = school_id\nschools_df['School Name'] = school_name\nschools_df['Type'] = type_of_school\n\n# Print the DataFrame.\nschools_df",
"_____no_output_____"
],
[
"# Create a dictionary of information on high schools.\nhigh_schools_dict = {'School ID': school_id, 'School_Name':school_name, 'Type':type_of_school}",
"_____no_output_____"
],
[
"school_df = pd.DataFrame(high_schools_dict)\nschool_df",
"_____no_output_____"
],
[
"school_df.columns",
"_____no_output_____"
],
[
"school_df.index",
"_____no_output_____"
],
[
"school_df.values",
"_____no_output_____"
],
[
"# Create a Panda Series from a list.\nschool_series_df = pd.DataFrame(high_schools)\nschool_series",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7497cc8ddd38fc69cd8768a7894ea712a656dce | 157,143 | ipynb | Jupyter Notebook | Code/Adult_non_binary.ipynb | georgeyiasemis/Fairness-Regression | 757df7e12a721d3c5ee4c730dfbaa29353ac4bbf | [
"MIT"
] | 1 | 2021-01-20T11:41:35.000Z | 2021-01-20T11:41:35.000Z | Code/Adult_non_binary.ipynb | georgeyiasemis/Fairness-Regression | 757df7e12a721d3c5ee4c730dfbaa29353ac4bbf | [
"MIT"
] | null | null | null | Code/Adult_non_binary.ipynb | georgeyiasemis/Fairness-Regression | 757df7e12a721d3c5ee4c730dfbaa29353ac4bbf | [
"MIT"
] | null | null | null | 79.970992 | 47,076 | 0.712555 | [
[
[
"Install ```AIF360``` with minimum requirements:",
"_____no_output_____"
]
],
[
[
"!pip install aif360",
"_____no_output_____"
]
],
[
[
"Install packages that we will use:",
"_____no_output_____"
]
],
[
[
"import aif360\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\nfrom aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions \\\n import load_preproc_data_adult\nfrom aif360.algorithms.preprocessing.reweighing import Reweighing\nfrom aif360.metrics import ClassificationMetric\nfrom aif360.datasets.adult_dataset import AdultDataset\nfrom sklearn.preprocessing import StandardScaler\n#from sklearn.linear_model import LogisticRegression\n\nimport torch\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nimport torch.utils.data as Data\n\n# These 2 functions will help us save and load objects\npath = \"/content/drive/My Drive/Colab Notebooks/Ethics/\"\ndef save_obj(obj, name ):\n with open(path+ name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name ):\n with open(path + name + '.pkl', 'rb') as f:\n return pickle.load(f)",
"_____no_output_____"
],
[
"adult_dataset_orig = load_preproc_data_adult()",
"_____no_output_____"
]
],
[
[
"Define privileged and unprivileged groups:",
"_____no_output_____"
]
],
[
[
"# Assume privieleged group is the White Males\nprivileged_groups = [{'sex': 1, 'race': 1}]\nunprivileged_groups = [{'sex': 1, 'race': 0}, {'sex': 0, 'race': 0}, {'sex': 0, 'race': 1}]",
"_____no_output_____"
]
],
[
[
"Visualise Adult data with respect to the taget label ('Income Binary'; >50k or <=50k) and the sensitive attributes (sex and race):",
"_____no_output_____"
]
],
[
[
"df = adult_dataset_orig.metadata['params']['df'].copy()\n# Number of White Men with Income <=50 k\nwhite_male_less_50k = sum(df[((df['sex'] == 1.0) & (df['race'] == 1.0))]['Income Binary'] == 0.0)\n\n# Number of Nonwhite Men with Income <=50 k\nnonwhite_male_less_50k = sum(df[((df['sex'] == 1.0) & (df['race'] == 0.0))]['Income Binary'] == 0.0)\n\n# Number of white Men with Income >50 k\nwhite_male_more_50k = sum(df[((df['sex'] == 1.0) & (df['race'] == 1.0))]['Income Binary'] == 1.0)\n\n# Number of Nonwhite Men with Income >50 k\nnonwhite_male_more_50k = sum(df[((df['sex'] == 1.0) & (df['race'] == 0.0))]['Income Binary'] == 1.0)\n\n# Number of white Women with Income <=50 k\nwhite_female_less_50k = sum(df[((df['sex'] == 0.0) & (df['race'] == 1.0))]['Income Binary'] == 0.0)\n\n# Number of Nonwhite Women with Income <=50 k\nnonwhite_female_less_50k = sum(df[((df['sex'] == 0.0) & (df['race'] == 0.0))]['Income Binary'] == 0.0)\n\n# Number of white women with Income >50 k\nwhite_female_more_50k = sum(df[((df['sex'] == 0.0) & (df['race'] == 1.0))]['Income Binary'] == 1.0)\n\n# Number of Nonwhite women with Income >50 k\nnonwhite_female_more_50k = sum(df[((df['sex'] == 0.0) & (df['race'] == 0.0))]['Income Binary'] == 1.0)\n\nprint('Male')\nprint('white <=50k:', white_male_less_50k,'\\twhite >50k:', \n white_male_more_50k, '\\tTotal:', white_male_less_50k + white_male_more_50k)\nprint('nonwhite <=50k:', nonwhite_male_less_50k,'\\tnonwhite >50k:', \n nonwhite_male_more_50k, '\\tTotal:', nonwhite_male_less_50k + nonwhite_male_more_50k)\n\nprint('Female')\nprint('white <=50k:', white_female_less_50k, '\\t>white 50k:', \n white_female_more_50k, '\\tTotal:', white_female_less_50k + white_female_more_50k)\nprint('nonwhite <=50k:', nonwhite_female_less_50k, '\\t>nonwhite 50k:', \n nonwhite_female_more_50k, '\\tTotal:', nonwhite_female_less_50k + nonwhite_female_more_50k)\n\nprint('\\n\\t\\t\\t\\tTotal:', white_male_less_50k + white_male_more_50k + nonwhite_male_less_50k + \\\n nonwhite_male_more_50k + white_female_less_50k + white_female_more_50k + nonwhite_female_less_50k + nonwhite_female_more_50k)\n# Plot a bar graph:\nlabels = ['White Female', 'NonWhite Female','White Male', 'NonWhite Male']\nless_50k = [white_female_less_50k,nonwhite_female_less_50k, white_male_less_50k, nonwhite_male_less_50k]\nmore_50k = [white_female_more_50k, nonwhite_female_more_50k, white_male_more_50k, nonwhite_male_more_50k]\n\nx = np.arange(len(labels)) # the label locations\nwidth = 0.4 # the width of the bars\n\nfig, ax = plt.subplots(figsize=(7,5))\nrects1 = ax.bar(x - width/2, less_50k, width, label=' Income <= 50k')\nrects2 = ax.bar(x + width/2, more_50k, width, label='Income > 50k')\n\nax.set_ylabel('Counts')\nax.set_title('Binary Income by group')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\nfig.tight_layout()\n\nplt.show()",
"Male\nwhite <=50k: 19670 \twhite >50k: 9065 \tTotal: 28735\nnonwhite <=50k: 3062 \tnonwhite >50k: 853 \tTotal: 3915\nFemale\nwhite <=50k: 11485 \t>white 50k: 1542 \tTotal: 13027\nnonwhite <=50k: 2938 \t>nonwhite 50k: 227 \tTotal: 3165\n\n\t\t\t\tTotal: 48842\n"
]
],
[
[
"Split Dataset into training and test data:",
"_____no_output_____"
]
],
[
[
"ad_train, ad_test = adult_dataset_orig.split([0.75], shuffle=True)",
"_____no_output_____"
],
[
"# Preprocess data\nscale_orig = StandardScaler()\n\nX_train = scale_orig.fit_transform(ad_train.features)\ny_train = ad_train.labels.ravel()\n\nX_test = scale_orig.transform(ad_test.features)\ny_test = ad_test.labels.ravel()",
"_____no_output_____"
]
],
[
[
"Reweigh data:",
"_____no_output_____"
]
],
[
[
"labels = ad_train.labels.ravel()\nfeatures = ad_train.features[:,:2]\nWEIGHTS = {}\n# W_(y,a) = count(Y=y) * count(A=a) / (count(Y=y, A=a) * N)\n# In total 8 unique weights\nfor Y in range(2):\n WEIGHTS[Y] = {}\n for A in [(0,0), (0,1), (1,0), (1,1)]:\n NY = sum(ad_train.labels.ravel() == Y)\n NA = sum((ad_train.features[:,0] == A[0]) & (ad_train.features[:,1] == A[1]))\n NYA = sum((ad_train.labels.ravel() == Y) & (ad_train.features[:,0] == A[0]) & (ad_train.features[:,1] == A[1]))\n WEIGHTS[Y][A] = {}\n WEIGHTS[Y][A] = (NY * NA / (NYA * ad_train.labels.shape[0]))\n\nW = torch.zeros(ad_train.labels.shape[0])\nfor i in range(len(labels)):\n W[i] = WEIGHTS[labels[i]][(features[i,0], features[i,1])]\n\nrw_train_dataset = Data.TensorDataset(torch.tensor(X_train).float(), \n torch.Tensor(y_train).float(), \n W.float())",
"_____no_output_____"
]
],
[
[
"Create a Logistic Regression class with pytorch:",
"_____no_output_____"
]
],
[
[
"class LogisticRegression_torch(torch.nn.Module):\n def __init__(self, input_dim, output_dim):\n super(LogisticRegression_torch, self).__init__()\n self.linear = torch.nn.Linear(input_dim, output_dim)\n\n def forward(self, x):\n outputs = torch.sigmoid(self.linear(x))\n return outputs",
"_____no_output_____"
],
[
"GPU = True\ndevice_idx = 0\nif GPU:\n device = torch.device(\"cuda:\" + str(device_idx) if torch.cuda.is_available() else \"cpu\")\nelse:\n device = torch.device(\"cpu\")\nBATCH_SIZE = 32\nlearning_rate = 0.0001\n\n# Data Tensor\ntrain_dataset = Data.TensorDataset(torch.tensor(X_train).float(), torch.Tensor(y_train).float())\n\nif device == 0:\n num_workers = 2\nelse:\n num_workers = 0\n\n# Data Loader\nloader_train = Data.DataLoader(\n dataset=rw_train_dataset, \n batch_size=BATCH_SIZE, \n shuffle=False, num_workers=num_workers)",
"_____no_output_____"
],
[
"from sklearn.metrics import confusion_matrix as CM\nepochs = 4\n\nmetrics_rw = {}\n\nlambdas = [0.0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.5, 0.7, 0.9]\nlambdas = np.concatenate((np.array(lambdas), np.linspace(1, 100, num=150)))\nfor reg_lambda in lambdas:\n model = LogisticRegression_torch(X_train.shape[1], 1)\n optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)\n print('Lambda:', reg_lambda,'\\n')\n for epoch in range(epochs):\n train_loss = 0.0\n for i, (x, y, w) in enumerate(loader_train):\n # Converting inputs and labels to Variable\n\n inputs = Variable(x.to(device))\n labels = Variable(y.to(device))\n\n # Clear gradient buffers because we don't want any gradient \n # from previous epoch to carry forward, dont want to cummulate gradients\n optimizer.zero_grad()\n\n # get output from the model, given the inputs\n outputs = model(inputs)\n\n # Regularization\n reg = 0\n for param in model.parameters():\n reg += 0.5 * (param ** 2).mean()\n \n # criterion\n criterion = torch.nn.BCELoss(weight=w, reduction='sum')\n # get loss for the predicted output\n loss = criterion(outputs.reshape(outputs.shape[0]), labels) + \\\n reg_lambda * reg\n \n \n train_loss += loss.item()\n \n # get gradients w.r.t to parameters\n loss.backward()\n \n # update parameters\n optimizer.step()\n \n\n print('epoch [{}/{}], Training loss:{:.6f}'.format(\n epoch + 1, \n epochs, \n train_loss / len(loader_train.dataset)))\n with torch.no_grad():\n model.eval()\n out = model(Variable(torch.Tensor(X_test).to(device))).detach().cpu()\n pred = (out >= 0.5).int().numpy().squeeze()\n \n accuracy = sum((y_test == pred))/len(y_test)\n print('Accuracy: ', accuracy,'\\n')\n\n test_pred = ad_test.copy()\n test_pred.labels = pred.reshape(-1,1)\n #White Men \n white_male = ad_test.labels[((ad_test.features[:,1] == 1.0) & (ad_test.features[:,0] == 1.0))]\n white_male_pred = pred[((ad_test.features[:,1] == 1.0) & (ad_test.features[:,0] == 1.0))]\n #Nonwhite Men\n nonwhite_male = ad_test.labels[((ad_test.features[:,1] == 1.0) & (ad_test.features[:,0] == 0.0))]\n nonwhite_male_pred = pred[((ad_test.features[:,1] == 1.0) & (ad_test.features[:,0] == 0.0))]\n\n #White Female\n white_female = ad_test.labels[((ad_test.features[:,1] == 0.0) & (ad_test.features[:,0] == 1.0))]\n white_female_pred = pred[((ad_test.features[:,1] == 0.0) & (ad_test.features[:,0] == 1.0))]\n #Nonwhite Men\n nonwhite_female = ad_test.labels[((ad_test.features[:,1] == 0.0) & (ad_test.features[:,0] == 0.0))]\n nonwhite_female_pred = pred[((ad_test.features[:,1] == 0.0) & (ad_test.features[:,0] == 0.0))]\n \n metrics_rw[reg_lambda] = {}\n metrics_rw[reg_lambda]['accuracy'] = accuracy\n metrics_rw[reg_lambda]['privilaged'] = {}\n metrics_rw[reg_lambda]['unprivilaged'] = {}\n\n # GET TPR and TNR for each unique group\n tn, fp, fn, tp = CM(white_male, white_male_pred).ravel()\n metrics_rw[reg_lambda]['privilaged']['White Male'] = (tp/(tp+fn), tn/(tn+fp))\n\n\n tn, fp, fn, tp = CM(nonwhite_male, nonwhite_male_pred).ravel()\n metrics_rw[reg_lambda]['unprivilaged']['Nonwhite Male'] = (tp/(tp+fn), tn/(tn+fp))\n\n\n tn, fp, fn, tp = CM(nonwhite_female, nonwhite_female_pred).ravel()\n metrics_rw[reg_lambda]['unprivilaged']['Nonwhite Female'] = (tp/(tp+fn), tn/(tn+fp))\n\n\n tn, fp, fn, tp = CM(white_female, white_female_pred).ravel()\n metrics_rw[reg_lambda]['unprivilaged']['White Female'] = (tp/(tp+fn), tn/(tn+fp))\n\n",
"Lambda: 0.0 \n\nepoch [1/4], Training loss:0.563622\nepoch [2/4], Training loss:0.481069\nepoch [3/4], Training loss:0.460172\nepoch [4/4], Training loss:0.452398\nAccuracy: 0.7847842109573335 \n\nLambda: 1e-05 \n\nepoch [1/4], Training loss:0.613272\nepoch [2/4], Training loss:0.490675\nepoch [3/4], Training loss:0.463506\nepoch [4/4], Training loss:0.453964\nAccuracy: 0.7856850380804193 \n\nLambda: 0.0001 \n\nepoch [1/4], Training loss:0.571538\nepoch [2/4], Training loss:0.483293\nepoch [3/4], Training loss:0.460988\nepoch [4/4], Training loss:0.452743\nAccuracy: 0.7851936778314634 \n\nLambda: 0.001 \n\nepoch [1/4], Training loss:0.548967\nepoch [2/4], Training loss:0.477175\nepoch [3/4], Training loss:0.458556\nepoch [4/4], Training loss:0.451576\nAccuracy: 0.7847842109573335 \n\nLambda: 0.01 \n\nepoch [1/4], Training loss:0.576060\nepoch [2/4], Training loss:0.483930\nepoch [3/4], Training loss:0.461542\nepoch [4/4], Training loss:0.453288\nAccuracy: 0.7856850380804193 \n\nLambda: 0.1 \n\nepoch [1/4], Training loss:0.580828\nepoch [2/4], Training loss:0.483083\nepoch [3/4], Training loss:0.462389\nepoch [4/4], Training loss:0.455222\nAccuracy: 0.7847842109573335 \n\nLambda: 0.2 \n\nepoch [1/4], Training loss:0.595029\nepoch [2/4], Training loss:0.491228\nepoch [3/4], Training loss:0.466920\nepoch [4/4], Training loss:0.458792\nAccuracy: 0.7847842109573335 \n\nLambda: 0.3 \n\nepoch [1/4], Training loss:0.625869\nepoch [2/4], Training loss:0.497715\nepoch [3/4], Training loss:0.470640\nepoch [4/4], Training loss:0.461965\nAccuracy: 0.7847842109573335 \n\nLambda: 0.5 \n\nepoch [1/4], Training loss:0.588961\nepoch [2/4], Training loss:0.491678\nepoch [3/4], Training loss:0.472258\nepoch [4/4], Training loss:0.466030\nAccuracy: 0.7847842109573335 \n\nLambda: 0.7 \n\nepoch [1/4], Training loss:0.607957\nepoch [2/4], Training loss:0.499839\nepoch [3/4], Training loss:0.478039\nepoch [4/4], Training loss:0.471358\nAccuracy: 0.7847842109573335 \n\nLambda: 0.9 \n\nepoch [1/4], Training loss:0.621087\nepoch [2/4], Training loss:0.504848\nepoch [3/4], Training loss:0.482470\nepoch [4/4], Training loss:0.476002\nAccuracy: 0.7847842109573335 \n\nLambda: 1.0 \n\nepoch [1/4], Training loss:0.572592\nepoch [2/4], Training loss:0.495442\nepoch [3/4], Training loss:0.480892\nepoch [4/4], Training loss:0.476774\nAccuracy: 0.7847842109573335 \n\nLambda: 1.6644295302013423 \n\nepoch [1/4], Training loss:0.624894\nepoch [2/4], Training loss:0.513952\nepoch [3/4], Training loss:0.495344\nepoch [4/4], Training loss:0.490590\nAccuracy: 0.7847842109573335 \n\nLambda: 2.3288590604026846 \n\nepoch [1/4], Training loss:0.603850\nepoch [2/4], Training loss:0.515547\nepoch [3/4], Training loss:0.502993\nepoch [4/4], Training loss:0.500276\nAccuracy: 0.7847842109573335 \n\nLambda: 2.9932885906040267 \n\nepoch [1/4], Training loss:0.587186\nepoch [2/4], Training loss:0.521715\nepoch [3/4], Training loss:0.511816\nepoch [4/4], Training loss:0.509588\nAccuracy: 0.7847023175825075 \n\nLambda: 3.6577181208053693 \n\nepoch [1/4], Training loss:0.573856\nepoch [2/4], Training loss:0.525111\nepoch [3/4], Training loss:0.518567\nepoch [4/4], Training loss:0.517226\nAccuracy: 0.7829006633363361 \n\nLambda: 4.322147651006712 \n\nepoch [1/4], Training loss:0.580835\nepoch [2/4], Training loss:0.531543\nepoch [3/4], Training loss:0.524948\nepoch [4/4], Training loss:0.523655\nAccuracy: 0.7812627958398165 \n\nLambda: 4.9865771812080535 \n\nepoch [1/4], Training loss:0.606506\nepoch [2/4], Training loss:0.537961\nepoch [3/4], 
Training loss:0.530873\nepoch [4/4], Training loss:0.529732\nAccuracy: 0.7826549832118581 \n\nLambda: 5.651006711409396 \n\nepoch [1/4], Training loss:0.636781\nepoch [2/4], Training loss:0.546349\nepoch [3/4], Training loss:0.536763\nepoch [4/4], Training loss:0.535172\nAccuracy: 0.7808533289656867 \n\nLambda: 6.315436241610739 \n\nepoch [1/4], Training loss:0.625773\nepoch [2/4], Training loss:0.548458\nepoch [3/4], Training loss:0.541050\nepoch [4/4], Training loss:0.539831\nAccuracy: 0.7778232740971256 \n\nLambda: 6.979865771812081 \n\nepoch [1/4], Training loss:0.626314\nepoch [2/4], Training loss:0.552315\nepoch [3/4], Training loss:0.545527\nepoch [4/4], Training loss:0.544366\nAccuracy: 0.7767586602243879 \n\nLambda: 7.644295302013424 \n\nepoch [1/4], Training loss:0.632158\nepoch [2/4], Training loss:0.556885\nepoch [3/4], Training loss:0.549291\nepoch [4/4], Training loss:0.548064\nAccuracy: 0.7751207927278683 \n\nLambda: 8.308724832214764 \n\nepoch [1/4], Training loss:0.617710\nepoch [2/4], Training loss:0.558182\nepoch [3/4], Training loss:0.552697\nepoch [4/4], Training loss:0.551818\nAccuracy: 0.7726639914830891 \n\nLambda: 8.973154362416107 \n\nepoch [1/4], Training loss:0.622096\nepoch [2/4], Training loss:0.561642\nepoch [3/4], Training loss:0.555989\nepoch [4/4], Training loss:0.555159\nAccuracy: 0.7726639914830891 \n\nLambda: 9.63758389261745 \n\nepoch [1/4], Training loss:0.613363\nepoch [2/4], Training loss:0.563680\nepoch [3/4], Training loss:0.558829\nepoch [4/4], Training loss:0.557959\nAccuracy: 0.7726639914830891 \n\nLambda: 10.302013422818792 \n\nepoch [1/4], Training loss:0.625180\nepoch [2/4], Training loss:0.566575\nepoch [3/4], Training loss:0.561505\nepoch [4/4], Training loss:0.560659\nAccuracy: 0.7723364179837852 \n\nLambda: 10.966442953020135 \n\nepoch [1/4], Training loss:0.616219\nepoch [2/4], Training loss:0.568462\nepoch [3/4], Training loss:0.563871\nepoch [4/4], Training loss:0.563193\nAccuracy: 0.7730734583572189 \n\nLambda: 11.630872483221477 \n\nepoch [1/4], Training loss:0.608068\nepoch [2/4], Training loss:0.571355\nepoch [3/4], Training loss:0.566515\nepoch [4/4], Training loss:0.565612\nAccuracy: 0.7730734583572189 \n\nLambda: 12.29530201342282 \n\nepoch [1/4], Training loss:0.623529\nepoch [2/4], Training loss:0.573793\nepoch [3/4], Training loss:0.569267\nepoch [4/4], Training loss:0.568499\nAccuracy: 0.7720088444844813 \n\nLambda: 12.959731543624162 \n\nepoch [1/4], Training loss:0.642499\nepoch [2/4], Training loss:0.577578\nepoch [3/4], Training loss:0.571405\nepoch [4/4], Training loss:0.570391\nAccuracy: 0.7684874293669642 \n\nLambda: 13.624161073825505 \n\nepoch [1/4], Training loss:0.596993\nepoch [2/4], Training loss:0.575001\nepoch [3/4], Training loss:0.572460\nepoch [4/4], Training loss:0.571998\nAccuracy: 0.7674228154942265 \n\nLambda: 14.288590604026847 \n\nepoch [1/4], Training loss:0.614159\nepoch [2/4], Training loss:0.577516\nepoch [3/4], Training loss:0.574295\nepoch [4/4], Training loss:0.573791\nAccuracy: 0.7674228154942265 \n\nLambda: 14.953020134228188 \n\nepoch [1/4], Training loss:0.635873\nepoch [2/4], Training loss:0.581244\nepoch [3/4], Training loss:0.576419\nepoch [4/4], Training loss:0.575712\nAccuracy: 0.7674228154942265 \n\nLambda: 15.61744966442953 \n\nepoch [1/4], Training loss:0.653255\nepoch [2/4], Training loss:0.584581\nepoch [3/4], Training loss:0.578312\nepoch [4/4], Training loss:0.577383\nAccuracy: 0.7674228154942265 \n\nLambda: 16.281879194630875 \n\nepoch [1/4], Training loss:0.646741\nepoch 
[2/4], Training loss:0.583935\nepoch [3/4], Training loss:0.579391\nepoch [4/4], Training loss:0.578862\nAccuracy: 0.7607075587584964 \n\nLambda: 16.946308724832214 \n\nepoch [1/4], Training loss:0.619674\nepoch [2/4], Training loss:0.584034\nepoch [3/4], Training loss:0.581076\nepoch [4/4], Training loss:0.580645\nAccuracy: 0.7607075587584964 \n\nLambda: 17.610738255033556 \n\nepoch [1/4], Training loss:0.627562\nepoch [2/4], Training loss:0.586570\nepoch [3/4], Training loss:0.582444\nepoch [4/4], Training loss:0.581863\nAccuracy: 0.7519449676521169 \n\nLambda: 18.2751677852349 \n\nepoch [1/4], Training loss:0.621836\nepoch [2/4], Training loss:0.587000\nepoch [3/4], Training loss:0.583740\nepoch [4/4], Training loss:0.583254\nAccuracy: 0.7519449676521169 \n\nLambda: 18.93959731543624 \n\nepoch [1/4], Training loss:0.634915\nepoch [2/4], Training loss:0.589488\nepoch [3/4], Training loss:0.585642\nepoch [4/4], Training loss:0.585046\nAccuracy: 0.7519449676521169 \n\nLambda: 19.604026845637584 \n\nepoch [1/4], Training loss:0.628320\nepoch [2/4], Training loss:0.589917\nepoch [3/4], Training loss:0.586388\nepoch [4/4], Training loss:0.585876\nAccuracy: 0.7519449676521169 \n\nLambda: 20.268456375838927 \n\nepoch [1/4], Training loss:0.651468\nepoch [2/4], Training loss:0.591900\nepoch [3/4], Training loss:0.587624\nepoch [4/4], Training loss:0.587039\nAccuracy: 0.7519449676521169 \n\nLambda: 20.93288590604027 \n\nepoch [1/4], Training loss:0.627348\nepoch [2/4], Training loss:0.592233\nepoch [3/4], Training loss:0.588612\nepoch [4/4], Training loss:0.588125\nAccuracy: 0.7519449676521169 \n\nLambda: 21.59731543624161 \n\nepoch [1/4], Training loss:0.660199\nepoch [2/4], Training loss:0.596017\nepoch [3/4], Training loss:0.590737\nepoch [4/4], Training loss:0.589862\nAccuracy: 0.7519449676521169 \n\nLambda: 22.261744966442954 \n\nepoch [1/4], Training loss:0.628889\nepoch [2/4], Training loss:0.594675\nepoch [3/4], Training loss:0.591177\nepoch [4/4], Training loss:0.590592\nAccuracy: 0.7519449676521169 \n\nLambda: 22.926174496644297 \n\nepoch [1/4], Training loss:0.633812\nepoch [2/4], Training loss:0.596978\nepoch [3/4], Training loss:0.593139\nepoch [4/4], Training loss:0.592315\nAccuracy: 0.7519449676521169 \n\nLambda: 23.59060402684564 \n\nepoch [1/4], Training loss:0.630275\nepoch [2/4], Training loss:0.594945\nepoch [3/4], Training loss:0.592620\nepoch [4/4], Training loss:0.592307\nAccuracy: 0.7519449676521169 \n\nLambda: 24.255033557046982 \n\nepoch [1/4], Training loss:0.632420\nepoch [2/4], Training loss:0.597590\nepoch [3/4], Training loss:0.594490\nepoch [4/4], Training loss:0.593878\nAccuracy: 0.7519449676521169 \n\nLambda: 24.919463087248324 \n\nepoch [1/4], Training loss:0.636380\nepoch [2/4], Training loss:0.598733\nepoch [3/4], Training loss:0.595063\nepoch [4/4], Training loss:0.594504\nAccuracy: 0.7519449676521169 \n\nLambda: 25.583892617449667 \n\nepoch [1/4], Training loss:0.629624\nepoch [2/4], Training loss:0.598344\nepoch [3/4], Training loss:0.595733\nepoch [4/4], Training loss:0.595353\nAccuracy: 0.7519449676521169 \n\nLambda: 26.24832214765101 \n\nepoch [1/4], Training loss:0.648579\nepoch [2/4], Training loss:0.600692\nepoch [3/4], Training loss:0.596582\nepoch [4/4], Training loss:0.596081\nAccuracy: 0.7519449676521169 \n\nLambda: 26.912751677852352 \n\nepoch [1/4], Training loss:0.660648\nepoch [2/4], Training loss:0.601860\nepoch [3/4], Training loss:0.597553\nepoch [4/4], Training loss:0.597009\nAccuracy: 0.7519449676521169 \n\nLambda: 27.577181208053695 
\n\nepoch [1/4], Training loss:0.630199\nepoch [2/4], Training loss:0.600707\nepoch [3/4], Training loss:0.598215\nepoch [4/4], Training loss:0.597858\nAccuracy: 0.7492424862828597 \n\nLambda: 28.241610738255034 \n\nepoch [1/4], Training loss:0.648434\nepoch [2/4], Training loss:0.603898\nepoch [3/4], Training loss:0.599980\nepoch [4/4], Training loss:0.599229\nAccuracy: 0.7492424862828597 \n\nLambda: 28.906040268456376 \n\nepoch [1/4], Training loss:0.643013\nepoch [2/4], Training loss:0.603830\nepoch [3/4], Training loss:0.600308\nepoch [4/4], Training loss:0.599717\nAccuracy: 0.7492424862828597 \n\nLambda: 29.57046979865772 \n\nepoch [1/4], Training loss:0.637376\nepoch [2/4], Training loss:0.602869\nepoch [3/4], Training loss:0.600229\nepoch [4/4], Training loss:0.599892\nAccuracy: 0.7492424862828597 \n\nLambda: 30.23489932885906 \n\nepoch [1/4], Training loss:0.671159\nepoch [2/4], Training loss:0.606020\nepoch [3/4], Training loss:0.601592\nepoch [4/4], Training loss:0.600993\nAccuracy: 0.7330275980673163 \n\nLambda: 30.899328859060404 \n\nepoch [1/4], Training loss:0.645502\nepoch [2/4], Training loss:0.604697\nepoch [3/4], Training loss:0.601766\nepoch [4/4], Training loss:0.601438\nAccuracy: 0.7300794365735812 \n\nLambda: 31.563758389261746 \n\nepoch [1/4], Training loss:0.645219\nepoch [2/4], Training loss:0.605309\nepoch [3/4], Training loss:0.602468\nepoch [4/4], Training loss:0.602136\nAccuracy: 0.7330275980673163 \n\nLambda: 32.22818791946309 \n\nepoch [1/4], Training loss:0.644147\nepoch [2/4], Training loss:0.607403\nepoch [3/4], Training loss:0.604176\nepoch [4/4], Training loss:0.603481\nAccuracy: 0.7300794365735812 \n\nLambda: 32.89261744966443 \n\nepoch [1/4], Training loss:0.662143\nepoch [2/4], Training loss:0.607013\nepoch [3/4], Training loss:0.603777\nepoch [4/4], Training loss:0.603414\nAccuracy: 0.7300794365735812 \n\nLambda: 33.557046979865774 \n\nepoch [1/4], Training loss:0.635786\nepoch [2/4], Training loss:0.607833\nepoch [3/4], Training loss:0.605114\nepoch [4/4], Training loss:0.604557\nAccuracy: 0.7300794365735812 \n\nLambda: 34.22147651006711 \n\nepoch [1/4], Training loss:0.631147\nepoch [2/4], Training loss:0.607058\nepoch [3/4], Training loss:0.605071\nepoch [4/4], Training loss:0.604759\nAccuracy: 0.7297518630742773 \n\nLambda: 34.88590604026846 \n\nepoch [1/4], Training loss:0.660654\nepoch [2/4], Training loss:0.609911\nepoch [3/4], Training loss:0.605687\nepoch [4/4], Training loss:0.605220\nAccuracy: 0.7152567357300794 \n\nLambda: 35.5503355704698 \n\nepoch [1/4], Training loss:0.628998\nepoch [2/4], Training loss:0.609594\nepoch [3/4], Training loss:0.607361\nepoch [4/4], Training loss:0.606672\nAccuracy: 0.7155843092293833 \n\nLambda: 36.214765100671144 \n\nepoch [1/4], Training loss:0.650194\nepoch [2/4], Training loss:0.612164\nepoch [3/4], Training loss:0.608394\nepoch [4/4], Training loss:0.607462\nAccuracy: 0.7152567357300794 \n\nLambda: 36.87919463087248 \n\nepoch [1/4], Training loss:0.647469\nepoch [2/4], Training loss:0.610718\nepoch [3/4], Training loss:0.607341\nepoch [4/4], Training loss:0.606926\nAccuracy: 0.7155843092293833 \n\nLambda: 37.54362416107383 \n\nepoch [1/4], Training loss:0.651970\nepoch [2/4], Training loss:0.610084\nepoch [3/4], Training loss:0.607705\nepoch [4/4], Training loss:0.607464\nAccuracy: 0.7152567357300794 \n\nLambda: 38.20805369127517 \n\nepoch [1/4], Training loss:0.670937\nepoch [2/4], Training loss:0.612958\nepoch [3/4], Training loss:0.608492\nepoch [4/4], Training loss:0.608017\nAccuracy: 
0.7152567357300794 \n\nLambda: 38.872483221476514 \n\nepoch [1/4], Training loss:0.647981\nepoch [2/4], Training loss:0.612125\nepoch [3/4], Training loss:0.609035\nepoch [4/4], Training loss:0.608632\nAccuracy: 0.7150929489804275 \n\nLambda: 39.53691275167785 \n\nepoch [1/4], Training loss:0.651550\nepoch [2/4], Training loss:0.612854\nepoch [3/4], Training loss:0.609960\nepoch [4/4], Training loss:0.609457\nAccuracy: 0.7150929489804275 \n\nLambda: 40.2013422818792 \n\nepoch [1/4], Training loss:0.649817\nepoch [2/4], Training loss:0.612139\nepoch [3/4], Training loss:0.609775\nepoch [4/4], Training loss:0.609527\nAccuracy: 0.7127999344853001 \n\nLambda: 40.86577181208054 \n\nepoch [1/4], Training loss:0.657838\nepoch [2/4], Training loss:0.613144\nepoch [3/4], Training loss:0.610577\nepoch [4/4], Training loss:0.610229\nAccuracy: 0.7152567357300794 \n\nLambda: 41.530201342281885 \n\nepoch [1/4], Training loss:0.656946\nepoch [2/4], Training loss:0.614182\nepoch [3/4], Training loss:0.610996\nepoch [4/4], Training loss:0.610627\nAccuracy: 0.7127999344853001 \n\nLambda: 42.19463087248322 \n\nepoch [1/4], Training loss:0.664993\nepoch [2/4], Training loss:0.614777\nepoch [3/4], Training loss:0.611317\nepoch [4/4], Training loss:0.611000\nAccuracy: 0.7127999344853001 \n\nLambda: 42.85906040268457 \n\nepoch [1/4], Training loss:0.651350\nepoch [2/4], Training loss:0.615365\nepoch [3/4], Training loss:0.612369\nepoch [4/4], Training loss:0.611848\nAccuracy: 0.7127999344853001 \n\nLambda: 43.52348993288591 \n\nepoch [1/4], Training loss:0.655072\nepoch [2/4], Training loss:0.615140\nepoch [3/4], Training loss:0.612278\nepoch [4/4], Training loss:0.611941\nAccuracy: 0.7127999344853001 \n\nLambda: 44.18791946308725 \n\nepoch [1/4], Training loss:0.661907\nepoch [2/4], Training loss:0.614844\nepoch [3/4], Training loss:0.612564\nepoch [4/4], Training loss:0.612368\nAccuracy: 0.7127999344853001 \n\nLambda: 44.852348993288594 \n\nepoch [1/4], Training loss:0.666853\nepoch [2/4], Training loss:0.617462\nepoch [3/4], Training loss:0.613956\nepoch [4/4], Training loss:0.613304\nAccuracy: 0.7150929489804275 \n\nLambda: 45.51677852348993 \n\nepoch [1/4], Training loss:0.643108\nepoch [2/4], Training loss:0.616836\nepoch [3/4], Training loss:0.613717\nepoch [4/4], Training loss:0.613293\nAccuracy: 0.7115715338629105 \n\nLambda: 46.18120805369128 \n\nepoch [1/4], Training loss:0.663892\nepoch [2/4], Training loss:0.616610\nepoch [3/4], Training loss:0.613919\nepoch [4/4], Training loss:0.613680\nAccuracy: 0.7115715338629105 \n\nLambda: 46.84563758389262 \n\nepoch [1/4], Training loss:0.639405\nepoch [2/4], Training loss:0.616922\nepoch [3/4], Training loss:0.614874\nepoch [4/4], Training loss:0.614432\nAccuracy: 0.7127999344853001 \n\nLambda: 47.510067114093964 \n\nepoch [1/4], Training loss:0.646519\nepoch [2/4], Training loss:0.618320\nepoch [3/4], Training loss:0.615640\nepoch [4/4], Training loss:0.615024\nAccuracy: 0.7115715338629105 \n\nLambda: 48.1744966442953 \n\nepoch [1/4], Training loss:0.652783\nepoch [2/4], Training loss:0.617259\nepoch [3/4], Training loss:0.615048\nepoch [4/4], Training loss:0.614877\nAccuracy: 0.7127999344853001 \n\nLambda: 48.83892617449665 \n\nepoch [1/4], Training loss:0.633333\nepoch [2/4], Training loss:0.616697\nepoch [3/4], Training loss:0.615396\nepoch [4/4], Training loss:0.615273\nAccuracy: 0.7115715338629105 \n\nLambda: 49.50335570469799 \n\nepoch [1/4], Training loss:0.649445\nepoch [2/4], Training loss:0.618074\nepoch [3/4], Training loss:0.615916\nepoch [4/4], 
Training loss:0.615705\nAccuracy: 0.7111620669887806 \n\nLambda: 50.167785234899334 \n\nepoch [1/4], Training loss:0.673486\nepoch [2/4], Training loss:0.620115\nepoch [3/4], Training loss:0.616737\nepoch [4/4], Training loss:0.616295\nAccuracy: 0.7127999344853001 \n\nLambda: 50.83221476510067 \n\nepoch [1/4], Training loss:0.646575\nepoch [2/4], Training loss:0.618907\nepoch [3/4], Training loss:0.616848\nepoch [4/4], Training loss:0.616575\nAccuracy: 0.7115715338629105 \n\nLambda: 51.49664429530202 \n\nepoch [1/4], Training loss:0.642434\nepoch [2/4], Training loss:0.618972\nepoch [3/4], Training loss:0.617124\nepoch [4/4], Training loss:0.616893\nAccuracy: 0.7115715338629105 \n\nLambda: 52.16107382550336 \n\nepoch [1/4], Training loss:0.663488\nepoch [2/4], Training loss:0.620752\nepoch [3/4], Training loss:0.617529\nepoch [4/4], Training loss:0.617205\nAccuracy: 0.7127999344853001 \n\nLambda: 52.825503355704704 \n\nepoch [1/4], Training loss:0.674842\nepoch [2/4], Training loss:0.620851\nepoch [3/4], Training loss:0.617792\nepoch [4/4], Training loss:0.617550\nAccuracy: 0.7115715338629105 \n\nLambda: 53.48993288590604 \n\nepoch [1/4], Training loss:0.662042\nepoch [2/4], Training loss:0.620554\nepoch [3/4], Training loss:0.618181\nepoch [4/4], Training loss:0.617959\nAccuracy: 0.7127999344853001 \n\nLambda: 54.15436241610739 \n\nepoch [1/4], Training loss:0.674591\nepoch [2/4], Training loss:0.621861\nepoch [3/4], Training loss:0.618613\nepoch [4/4], Training loss:0.618314\nAccuracy: 0.7115715338629105 \n\nLambda: 54.81879194630873 \n\nepoch [1/4], Training loss:0.659021\nepoch [2/4], Training loss:0.621784\nepoch [3/4], Training loss:0.619258\nepoch [4/4], Training loss:0.618854\nAccuracy: 0.709933666366391 \n\nLambda: 55.48322147651007 \n\nepoch [1/4], Training loss:0.649080\nepoch [2/4], Training loss:0.623310\nepoch [3/4], Training loss:0.620526\nepoch [4/4], Training loss:0.619679\nAccuracy: 0.709933666366391 \n\nLambda: 56.14765100671141 \n\nepoch [1/4], Training loss:0.650848\nepoch [2/4], Training loss:0.622641\nepoch [3/4], Training loss:0.620019\nepoch [4/4], Training loss:0.619557\nAccuracy: 0.7115715338629105 \n\nLambda: 56.81208053691275 \n\nepoch [1/4], Training loss:0.654278\nepoch [2/4], Training loss:0.622492\nepoch [3/4], Training loss:0.619976\nepoch [4/4], Training loss:0.619696\nAccuracy: 0.7115715338629105 \n\nLambda: 57.4765100671141 \n\nepoch [1/4], Training loss:0.673127\nepoch [2/4], Training loss:0.622686\nepoch [3/4], Training loss:0.620154\nepoch [4/4], Training loss:0.619962\nAccuracy: 0.7115715338629105 \n\nLambda: 58.14093959731544 \n\nepoch [1/4], Training loss:0.651247\nepoch [2/4], Training loss:0.623104\nepoch [3/4], Training loss:0.620801\nepoch [4/4], Training loss:0.620448\nAccuracy: 0.7115715338629105 \n\nLambda: 58.805369127516784 \n\nepoch [1/4], Training loss:0.644544\nepoch [2/4], Training loss:0.621938\nepoch [3/4], Training loss:0.620712\nepoch [4/4], Training loss:0.620604\nAccuracy: 0.709933666366391 \n\nLambda: 59.46979865771812 \n\nepoch [1/4], Training loss:0.642961\nepoch [2/4], Training loss:0.622905\nepoch [3/4], Training loss:0.621122\nepoch [4/4], Training loss:0.620923\nAccuracy: 0.7115715338629105 \n\nLambda: 60.13422818791947 \n\nepoch [1/4], Training loss:0.656354\nepoch [2/4], Training loss:0.623261\nepoch [3/4], Training loss:0.621367\nepoch [4/4], Training loss:0.621221\nAccuracy: 0.7100155597412169 \n\nLambda: 60.79865771812081 \n\nepoch [1/4], Training loss:0.641387\nepoch [2/4], Training loss:0.623392\nepoch [3/4], 
Training loss:0.621947\nepoch [4/4], Training loss:0.621671\nAccuracy: 0.709933666366391 \n\nLambda: 61.463087248322154 \n\nepoch [1/4], Training loss:0.654794\nepoch [2/4], Training loss:0.624242\nepoch [3/4], Training loss:0.622193\nepoch [4/4], Training loss:0.621926\nAccuracy: 0.7115715338629105 \n\nLambda: 62.12751677852349 \n\nepoch [1/4], Training loss:0.658210\nepoch [2/4], Training loss:0.624303\nepoch [3/4], Training loss:0.622357\nepoch [4/4], Training loss:0.622161\nAccuracy: 0.7115715338629105 \n\nLambda: 62.79194630872484 \n\nepoch [1/4], Training loss:0.653540\nepoch [2/4], Training loss:0.624561\nepoch [3/4], Training loss:0.622690\nepoch [4/4], Training loss:0.622475\nAccuracy: 0.709933666366391 \n\nLambda: 63.45637583892618 \n\nepoch [1/4], Training loss:0.651099\nepoch [2/4], Training loss:0.624787\nepoch [3/4], Training loss:0.622997\nepoch [4/4], Training loss:0.622781\nAccuracy: 0.709933666366391 \n\nLambda: 64.12080536912752 \n\nepoch [1/4], Training loss:0.663198\nepoch [2/4], Training loss:0.625454\nepoch [3/4], Training loss:0.623368\nepoch [4/4], Training loss:0.623107\nAccuracy: 0.709933666366391 \n\nLambda: 64.78523489932886 \n\nepoch [1/4], Training loss:0.648136\nepoch [2/4], Training loss:0.625669\nepoch [3/4], Training loss:0.623876\nepoch [4/4], Training loss:0.623501\nAccuracy: 0.709933666366391 \n\nLambda: 65.4496644295302 \n\nepoch [1/4], Training loss:0.658097\nepoch [2/4], Training loss:0.626004\nepoch [3/4], Training loss:0.623972\nepoch [4/4], Training loss:0.623692\nAccuracy: 0.709933666366391 \n\nLambda: 66.11409395973155 \n\nepoch [1/4], Training loss:0.661047\nepoch [2/4], Training loss:0.625858\nepoch [3/4], Training loss:0.624011\nepoch [4/4], Training loss:0.623862\nAccuracy: 0.709933666366391 \n\nLambda: 66.7785234899329 \n\nepoch [1/4], Training loss:0.660375\nepoch [2/4], Training loss:0.626238\nepoch [3/4], Training loss:0.624400\nepoch [4/4], Training loss:0.624189\nAccuracy: 0.7100155597412169 \n\nLambda: 67.44295302013423 \n\nepoch [1/4], Training loss:0.688090\nepoch [2/4], Training loss:0.627794\nepoch [3/4], Training loss:0.624752\nepoch [4/4], Training loss:0.624465\nAccuracy: 0.709933666366391 \n\nLambda: 68.10738255033557 \n\nepoch [1/4], Training loss:0.658872\nepoch [2/4], Training loss:0.628566\nepoch [3/4], Training loss:0.625729\nepoch [4/4], Training loss:0.625062\nAccuracy: 0.709933666366391 \n\nLambda: 68.77181208053692 \n\nepoch [1/4], Training loss:0.672751\nepoch [2/4], Training loss:0.627029\nepoch [3/4], Training loss:0.625036\nepoch [4/4], Training loss:0.624920\nAccuracy: 0.7100155597412169 \n\nLambda: 69.43624161073826 \n\nepoch [1/4], Training loss:0.662545\nepoch [2/4], Training loss:0.627784\nepoch [3/4], Training loss:0.625607\nepoch [4/4], Training loss:0.625309\nAccuracy: 0.7100155597412169 \n\nLambda: 70.1006711409396 \n\nepoch [1/4], Training loss:0.662860\nepoch [2/4], Training loss:0.627859\nepoch [3/4], Training loss:0.625861\nepoch [4/4], Training loss:0.625572\nAccuracy: 0.7100155597412169 \n\nLambda: 70.76510067114094 \n\nepoch [1/4], Training loss:0.672087\nepoch [2/4], Training loss:0.627722\nepoch [3/4], Training loss:0.625814\nepoch [4/4], Training loss:0.625698\nAccuracy: 0.7100155597412169 \n\nLambda: 71.42953020134229 \n\nepoch [1/4], Training loss:0.657657\nepoch [2/4], Training loss:0.627409\nepoch [3/4], Training loss:0.626152\nepoch [4/4], Training loss:0.626006\nAccuracy: 0.7100155597412169 \n\nLambda: 72.09395973154363 \n\nepoch [1/4], Training loss:0.664460\nepoch [2/4], Training 
loss:0.628240\nepoch [3/4], Training loss:0.626372\nepoch [4/4], Training loss:0.626227\nAccuracy: 0.703136516255835 \n\nLambda: 72.75838926174497 \n\nepoch [1/4], Training loss:0.669770\nepoch [2/4], Training loss:0.629156\nepoch [3/4], Training loss:0.626868\nepoch [4/4], Training loss:0.626576\nAccuracy: 0.7100155597412169 \n\nLambda: 73.42281879194631 \n\nepoch [1/4], Training loss:0.664059\nepoch [2/4], Training loss:0.629034\nepoch [3/4], Training loss:0.627044\nepoch [4/4], Training loss:0.626798\nAccuracy: 0.7005159282614036 \n\nLambda: 74.08724832214766 \n\nepoch [1/4], Training loss:0.681602\nepoch [2/4], Training loss:0.629399\nepoch [3/4], Training loss:0.627084\nepoch [4/4], Training loss:0.626948\nAccuracy: 0.709933666366391 \n\nLambda: 74.751677852349 \n\nepoch [1/4], Training loss:0.677877\nepoch [2/4], Training loss:0.629351\nepoch [3/4], Training loss:0.627338\nepoch [4/4], Training loss:0.627200\nAccuracy: 0.7100155597412169 \n\nLambda: 75.41610738255034 \n\nepoch [1/4], Training loss:0.662568\nepoch [2/4], Training loss:0.629627\nepoch [3/4], Training loss:0.627779\nepoch [4/4], Training loss:0.627526\nAccuracy: 0.7003521415117517 \n\nLambda: 76.08053691275168 \n\nepoch [1/4], Training loss:0.650048\nepoch [2/4], Training loss:0.628846\nepoch [3/4], Training loss:0.627848\nepoch [4/4], Training loss:0.627715\nAccuracy: 0.7005159282614036 \n\nLambda: 76.74496644295303 \n\nepoch [1/4], Training loss:0.667298\nepoch [2/4], Training loss:0.629750\nepoch [3/4], Training loss:0.628014\nepoch [4/4], Training loss:0.627904\nAccuracy: 0.7005159282614036 \n\nLambda: 77.40939597315436 \n\nepoch [1/4], Training loss:0.652810\nepoch [2/4], Training loss:0.629483\nepoch [3/4], Training loss:0.628286\nepoch [4/4], Training loss:0.628162\nAccuracy: 0.7032184096306608 \n\nLambda: 78.0738255033557 \n\nepoch [1/4], Training loss:0.683570\nepoch [2/4], Training loss:0.631688\nepoch [3/4], Training loss:0.628911\nepoch [4/4], Training loss:0.628521\nAccuracy: 0.703136516255835 \n\nLambda: 78.73825503355705 \n\nepoch [1/4], Training loss:0.656582\nepoch [2/4], Training loss:0.630108\nepoch [3/4], Training loss:0.628664\nepoch [4/4], Training loss:0.628577\nAccuracy: 0.7032184096306608 \n\nLambda: 79.4026845637584 \n\nepoch [1/4], Training loss:0.651350\nepoch [2/4], Training loss:0.630487\nepoch [3/4], Training loss:0.629037\nepoch [4/4], Training loss:0.628857\nAccuracy: 0.6996151011383179 \n\nLambda: 80.06711409395973 \n\nepoch [1/4], Training loss:0.660704\nepoch [2/4], Training loss:0.631025\nepoch [3/4], Training loss:0.629244\nepoch [4/4], Training loss:0.629069\nAccuracy: 0.702727049381705 \n\nLambda: 80.73154362416108 \n\nepoch [1/4], Training loss:0.669701\nepoch [2/4], Training loss:0.632889\nepoch [3/4], Training loss:0.630148\nepoch [4/4], Training loss:0.629551\nAccuracy: 0.7005159282614036 \n\nLambda: 81.39597315436242 \n\nepoch [1/4], Training loss:0.658859\nepoch [2/4], Training loss:0.630911\nepoch [3/4], Training loss:0.629592\nepoch [4/4], Training loss:0.629491\nAccuracy: 0.6997788878879698 \n\nLambda: 82.06040268456377 \n\nepoch [1/4], Training loss:0.660996\nepoch [2/4], Training loss:0.631431\nepoch [3/4], Training loss:0.629826\nepoch [4/4], Training loss:0.629707\nAccuracy: 0.7005159282614036 \n\nLambda: 82.7248322147651 \n\nepoch [1/4], Training loss:0.661803\nepoch [2/4], Training loss:0.632264\nepoch [3/4], Training loss:0.630298\nepoch [4/4], Training loss:0.630010\nAccuracy: 0.703136516255835 \n\nLambda: 83.38926174496645 \n\nepoch [1/4], Training 
loss:0.660844\nepoch [2/4], Training loss:0.631565\nepoch [3/4], Training loss:0.630336\nepoch [4/4], Training loss:0.630175\nAccuracy: 0.7005159282614036 \n\nLambda: 84.0536912751678 \n\nepoch [1/4], Training loss:0.653575\nepoch [2/4], Training loss:0.632410\nepoch [3/4], Training loss:0.630845\nepoch [4/4], Training loss:0.630489\nAccuracy: 0.7005159282614036 \n\nLambda: 84.71812080536914 \n\nepoch [1/4], Training loss:0.653556\nepoch [2/4], Training loss:0.631766\nepoch [3/4], Training loss:0.630673\nepoch [4/4], Training loss:0.630567\nAccuracy: 0.7005159282614036 \n\nLambda: 85.38255033557047 \n\nepoch [1/4], Training loss:0.661053\nepoch [2/4], Training loss:0.632365\nepoch [3/4], Training loss:0.630985\nepoch [4/4], Training loss:0.630809\nAccuracy: 0.7005159282614036 \n\nLambda: 86.04697986577182 \n\nepoch [1/4], Training loss:0.645873\nepoch [2/4], Training loss:0.632007\nepoch [3/4], Training loss:0.631125\nepoch [4/4], Training loss:0.630997\nAccuracy: 0.7002702481369257 \n\nLambda: 86.71140939597316 \n\nepoch [1/4], Training loss:0.677230\nepoch [2/4], Training loss:0.633551\nepoch [3/4], Training loss:0.631434\nepoch [4/4], Training loss:0.631219\nAccuracy: 0.7005159282614036 \n\nLambda: 87.3758389261745 \n\nepoch [1/4], Training loss:0.668497\nepoch [2/4], Training loss:0.632858\nepoch [3/4], Training loss:0.631424\nepoch [4/4], Training loss:0.631356\nAccuracy: 0.7005159282614036 \n\nLambda: 88.04026845637584 \n\nepoch [1/4], Training loss:0.665486\nepoch [2/4], Training loss:0.634658\nepoch [3/4], Training loss:0.632267\nepoch [4/4], Training loss:0.631769\nAccuracy: 0.7005159282614036 \n\nLambda: 88.70469798657719 \n\nepoch [1/4], Training loss:0.679336\nepoch [2/4], Training loss:0.633398\nepoch [3/4], Training loss:0.631841\nepoch [4/4], Training loss:0.631761\nAccuracy: 0.6997788878879698 \n\nLambda: 89.36912751677853 \n\nepoch [1/4], Training loss:0.675835\nepoch [2/4], Training loss:0.634031\nepoch [3/4], Training loss:0.632175\nepoch [4/4], Training loss:0.632002\nAccuracy: 0.6997788878879698 \n\nLambda: 90.03355704697987 \n\nepoch [1/4], Training loss:0.650042\nepoch [2/4], Training loss:0.633007\nepoch [3/4], Training loss:0.632200\nepoch [4/4], Training loss:0.632149\nAccuracy: 0.6996151011383179 \n\nLambda: 90.69798657718121 \n\nepoch [1/4], Training loss:0.668419\nepoch [2/4], Training loss:0.634034\nepoch [3/4], Training loss:0.632520\nepoch [4/4], Training loss:0.632377\nAccuracy: 0.7005159282614036 \n\nLambda: 91.36241610738256 \n\nepoch [1/4], Training loss:0.664127\nepoch [2/4], Training loss:0.634811\nepoch [3/4], Training loss:0.632953\nepoch [4/4], Training loss:0.632645\nAccuracy: 0.6997788878879698 \n\nLambda: 92.0268456375839 \n\nepoch [1/4], Training loss:0.679865\nepoch [2/4], Training loss:0.636519\nepoch [3/4], Training loss:0.633425\nepoch [4/4], Training loss:0.632913\nAccuracy: 0.695848005896323 \n\nLambda: 92.69127516778524 \n\nepoch [1/4], Training loss:0.669088\nepoch [2/4], Training loss:0.634601\nepoch [3/4], Training loss:0.633061\nepoch [4/4], Training loss:0.632938\nAccuracy: 0.6965031528949308 \n\nLambda: 93.35570469798658 \n\nepoch [1/4], Training loss:0.670263\nepoch [2/4], Training loss:0.634705\nepoch [3/4], Training loss:0.633274\nepoch [4/4], Training loss:0.633136\nAccuracy: 0.6997788878879698 \n\nLambda: 94.02013422818793 \n\nepoch [1/4], Training loss:0.668747\nepoch [2/4], Training loss:0.634718\nepoch [3/4], Training loss:0.633404\nepoch [4/4], Training loss:0.633304\nAccuracy: 0.6960117926459749 \n\nLambda: 
94.68456375838927 \n\nepoch [1/4], Training loss:0.668751\nepoch [2/4], Training loss:0.634727\nepoch [3/4], Training loss:0.633529\nepoch [4/4], Training loss:0.633471\nAccuracy: 0.6997788878879698 \n\nLambda: 95.3489932885906 \n\nepoch [1/4], Training loss:0.676586\nepoch [2/4], Training loss:0.635568\nepoch [3/4], Training loss:0.633860\nepoch [4/4], Training loss:0.633694\nAccuracy: 0.695848005896323 \n\nLambda: 96.01342281879195 \n\nepoch [1/4], Training loss:0.672108\nepoch [2/4], Training loss:0.636183\nepoch [3/4], Training loss:0.634194\nepoch [4/4], Training loss:0.633923\nAccuracy: 0.695848005896323 \n\nLambda: 96.6778523489933 \n\nepoch [1/4], Training loss:0.660231\nepoch [2/4], Training loss:0.635600\nepoch [3/4], Training loss:0.634189\nepoch [4/4], Training loss:0.634046\nAccuracy: 0.7005159282614036 \n\nLambda: 97.34228187919463 \n\nepoch [1/4], Training loss:0.671765\nepoch [2/4], Training loss:0.635661\nepoch [3/4], Training loss:0.634300\nepoch [4/4], Training loss:0.634207\nAccuracy: 0.695848005896323 \n\nLambda: 98.00671140939598 \n\nepoch [1/4], Training loss:0.667545\nepoch [2/4], Training loss:0.635468\nepoch [3/4], Training loss:0.634435\nepoch [4/4], Training loss:0.634376\nAccuracy: 0.695848005896323 \n\nLambda: 98.67114093959732 \n\nepoch [1/4], Training loss:0.675443\nepoch [2/4], Training loss:0.636210\nepoch [3/4], Training loss:0.634665\nepoch [4/4], Training loss:0.634561\nAccuracy: 0.695848005896323 \n\nLambda: 99.33557046979867 \n\nepoch [1/4], Training loss:0.668946\nepoch [2/4], Training loss:0.636302\nepoch [3/4], Training loss:0.634862\nepoch [4/4], Training loss:0.634742\nAccuracy: 0.6965031528949308 \n\nLambda: 100.0 \n\nepoch [1/4], Training loss:0.667289\nepoch [2/4], Training loss:0.636630\nepoch [3/4], Training loss:0.635104\nepoch [4/4], Training loss:0.634934\nAccuracy: 0.695848005896323 \n\n"
],
[
"save_obj(metrics_rw, 'adult_nonbinary_met')",
"_____no_output_____"
]
],
[
[
"\nPlot TPR and NPR for each sensitive class with respect to $\\lambda$:\n\n\n",
"_____no_output_____"
]
],
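The TPR/TNR values plotted below are ordinary confusion-matrix rates evaluated separately per sensitive group. A minimal, self-contained sketch of that computation is shown here; the array names (`y_true`, `y_pred`, `group`) are illustrative placeholders and are not taken from this notebook.

```python
import numpy as np

def group_rates(y_true, y_pred, group, group_name):
    """True-positive and true-negative rate for one sensitive group (illustrative helper)."""
    mask = (group == group_name)
    yt, yp = y_true[mask], y_pred[mask]
    tpr = np.mean(yp[yt == 1] == 1) if np.any(yt == 1) else np.nan
    tnr = np.mean(yp[yt == 0] == 0) if np.any(yt == 0) else np.nan
    return tpr, tnr

# tiny example with made-up labels
y_true = np.array([1, 0, 1, 0, 1, 0])
y_pred = np.array([1, 0, 0, 0, 1, 1])
group  = np.array(['A', 'A', 'A', 'B', 'B', 'B'])
print(group_rates(y_true, y_pred, group, 'A'))  # (0.5, 1.0)
print(group_rates(y_true, y_pred, group, 'B'))  # (1.0, 0.5)
```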
[
[
"TPR_priv = []\nTNR_priv = []\n\nTPR_non_priv1 = []\nTNR_non_priv1 = []\nTPR_non_priv2 = []\nTNR_non_priv2 = []\nTPR_non_priv3 = []\nTNR_non_priv3 = []\nlambdas = []\nfor l in metrics_rw:\n lambdas.append(l)\n TPR_priv.append(metrics_rw[l]['privilaged']['White Male'][0])\n TNR_priv.append(metrics_rw[l]['privilaged']['White Male'][1])\n TPR_non_priv1.append(metrics_rw[l]['unprivilaged']['Nonwhite Male'][0])\n TNR_non_priv1.append(metrics_rw[l]['unprivilaged']['Nonwhite Male'][1])\n TPR_non_priv2.append(metrics_rw[l]['unprivilaged']['White Female'][0])\n TNR_non_priv2.append(metrics_rw[l]['unprivilaged']['White Female'][1])\n TPR_non_priv3.append(metrics_rw[l]['unprivilaged']['Nonwhite Female'][0])\n TNR_non_priv3.append(metrics_rw[l]['unprivilaged']['Nonwhite Female'][1])\n\nfig, axs = plt.subplots(1, 2, figsize=(10,5))\nfig.suptitle('Investigating Equalized Odds')\naxs[0].plot(lambdas, TPR_priv)\naxs[0].plot(lambdas, TPR_non_priv1)\naxs[0].plot(lambdas, TPR_non_priv2)\naxs[0].plot(lambdas, TPR_non_priv3)\naxs[0].set_title('TPR')\naxs[0].set(xlabel='Reg-lambda', ylabel='TPR')\naxs[0].legend(['White Male', 'Nonwhite Male', 'White Female', 'Nonwhite Female'])\naxs[0].set(ylim=(0.2,0.9))\n\naxs[1].plot(lambdas, TNR_priv)\naxs[1].plot(lambdas, TNR_non_priv1)\naxs[1].plot(lambdas, TNR_non_priv2)\naxs[1].plot(lambdas, TNR_non_priv3)\naxs[1].set_title('TNR')\naxs[1].set(xlabel='Reg-lambda', ylabel='TNR')\naxs[1].legend(['White Male', 'Nonwhite Male', 'White Female', 'Nonwhite Female'])\naxs[1].set(ylim=(0.5,1))",
"_____no_output_____"
]
],
[
[
"Plot accuracy with respect to $\\lambda$:",
"_____no_output_____"
]
],
[
[
"ACC = []\nfor l in metrics_rw:\n ACC.append(metrics_rw[l]['accuracy'])\n\n\nfig, axs = plt.subplots(1, 1, figsize=(6,3))\naxs.plot(lambdas, ACC)\naxs.set_title('Accuracy')\naxs.set(xlabel='Reg-lambda', ylabel = 'Accuracy')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7497e236b6f436de3e40d26a407543d3d1587f8 | 76,751 | ipynb | Jupyter Notebook | Week 6 - Regularized Linear Regression and Bias v.s. Variance/Polynomial Regression.ipynb | Nikronic/Coursera-Machine-Learning | 4bc5de16446b664f9995eba95762b730547b1fce | [
"MIT"
] | 16 | 2018-09-11T10:55:57.000Z | 2022-01-31T21:08:18.000Z | Week 6 - Regularized Linear Regression and Bias v.s. Variance/Polynomial Regression.ipynb | Nikronic/Coursera-Machine-Learning | 4bc5de16446b664f9995eba95762b730547b1fce | [
"MIT"
] | 1 | 2018-10-14T18:40:21.000Z | 2018-11-08T20:06:03.000Z | Week 6 - Regularized Linear Regression and Bias v.s. Variance/Polynomial Regression.ipynb | Nikronic/Coursera-Machine-Learning | 4bc5de16446b664f9995eba95762b730547b1fce | [
"MIT"
] | 6 | 2019-11-21T02:59:22.000Z | 2022-01-31T21:08:26.000Z | 77.919797 | 32,360 | 0.775143 | [
[
[
"# Polynomial Regression\nThe problem with our linear model was that it was too simple for the data and resulted in underfitting (high bias). In this part of the exercise, you will address this problem by adding more features.<br><br>\n\nThe file <strong><em>ex5data1</em></strong> contains a data set which includes train set, test set, validation set.<br>\nThe structure of the dataset described blow:<br>\n1. x = **Water level (a single feature)**\n2. y = **Value of flowed water**\n3. xval = **x validation set**\n4. yval = **y validation set**\n5. xtest = **x test set**\n6. ytest = **y test set**\n\n\n<br><br>\n<strong>\nOur assignment has these sections:\n1. Visualizing the Data\n 1. Converting .mat to .csv\n 2. Loading Dataset\n 3. Visualizing The Dataset\n2. Adding Polynomial Features\n 1. Adding Features\n 2. Normalizing Features\n3. Learning Polynomial Regression\n 1. Regularized Linear Regression Cost Function\n 2. Regularized Linear Regression Gradient\n 3. Fitting Linear Regression\n 4. Visualization of Fitted Model\n4. Adjusting The Regularization Parameter\n 1. \n5. Selecting λ Using a Cross Validation Set\n 1. \n6. Computing Test Set Error\n 1. \n7. Plotting Learning Curves With Randomly Selected Examples\n \n</strong>\n\nIn each section full description provided.",
"_____no_output_____"
],
[
"## 1. Visualizing the Dataset\nBefore starting on any task, it is often useful to understand the data by visualizing it.<br>",
"_____no_output_____"
],
[
"### 1.A Converting .mat to .csv\nIn this specific assignment, the instructor added a .mat file as training set and weights of trained neural network. But we have to convert it to .csv to use in python.<br>\nAfter all we now ready to import our new csv files to pandas dataframes and do preprocessing on it and make it ready for next steps.",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport scipy.io\nimport numpy as np\n\ndata = scipy.io.loadmat(\"ex5data1\")",
"_____no_output_____"
]
],
[
[
"Now we **extract** `x`, `y`, `xval`, `yval`, `xtest` and `ytest`</strong> variables from the .mat file and save them into .csv file for further usage. After running the below code you should see:\n1. X.csv\n2. y.csv \n3. Xtest.csv\n4. ytest.csv\n5. Xval.csv\n6. yval.csv\n\nfiles in your directory.",
"_____no_output_____"
]
],
[
[
"for i in data:\n if '__' not in i and 'readme' not in i:\n np.savetxt((i+\".csv\"),data[i],delimiter=',')",
"_____no_output_____"
]
],
[
[
"### 1.B Loading Dataset\nFirst we import .csv files into pandas dataframes then save them into numpy arrays.<br><br>",
"_____no_output_____"
]
],
[
[
"# import library\nimport pandas as pd\n\n# saving .csv files to pandas dataframes\nx_df = pd.read_csv('X.csv',names= ['x'])\nxtest_df = pd.read_csv('Xtest.csv',names= ['xtest'])\nxval_df = pd.read_csv('Xval.csv',names= ['xval'])\ny_df = pd.read_csv('y.csv',names=['y'])\nytest_df = pd.read_csv('ytest.csv',names= ['ytest'])\nyval_df = pd.read_csv('yval.csv',names= ['yval'])",
"_____no_output_____"
],
[
"x_df.head(3)",
"_____no_output_____"
],
[
"y_df.head(3)",
"_____no_output_____"
]
],
[
[
"Now we convert all **pandas dataframes** to **numpy arrays** for calculations.",
"_____no_output_____"
]
],
[
[
"# saving x, y, xval, yval, xtest and ytest into numpy arrays\nx = x_df.iloc[:,:].values\nxval = xval_df.iloc[:,:].values\nxtest = xtest_df.iloc[:,:].values\n\ny = y_df.iloc[:,:].values\nyval = yval_df.iloc[:,:].values\nytest = ytest_df.iloc[:,:].values\n\n# number of examples and number of features\nm, n = x.shape\n\nm_val = xval.shape[0]\nm_test = xtest.shape[0]\n\nprint('#{} Number of training samples, #{} features per sample'.format(m,n))\nprint('#{} Number of validation samples, #{} features per sample'.format(m_val,n))\nprint('#{} Number of test samples, #{} features per sample'.format(m_test,n))",
"#12 Number of training samples, #1 features per sample\n#21 Number of validation samples, #1 features per sample\n#21 Number of test samples, #1 features per sample\n"
],
[
"# define some hypter parameters\n\n# define theta as zero\ntheta = None\n\n# define hyperparameter λ\nlambda_ = None\n\n# reshape (-1,1) because we just have one feature in y column\ny = y.reshape(-1,1)",
"_____no_output_____"
]
],
[
[
"### 1.C Ploting Dataset\nWe will begin by visualizing the dataset containing historical records on **the change in the water level**, `x`, and **the amount of water flowing out of the dam**, `y`.<br><br>\nThis dataset is divided into three parts:<br>\n • A **training set** that your model will learn on: `x`, `y`<br>\n • A **cross validation set** for determining the regularization parameter: `xval`, `yval`<br>\n • A **test set** for evaluating performance. These are **\"unseen\" examples** which your model did not see during training: `xtest`, `ytest`<br>",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.scatter(x, y, color='red', marker='x')\nplt.title('Training Set')\nplt.xlabel('Change in water level (x)')\nplt.ylabel('Water flowing out of the dam (y)')\nplt.grid()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 2. Adding Polynomial Features\n\nFor use polynomial regression, our hypothesis has the form:<img src='img/poly.jpg'>\nNotice that by defining `x1 = (waterLevel)`, `x2 = (waterLevel)`<sup>2</sup>, ... , `xp = (waterLevel)`<sup>p</sup>, we obtain a linear regression model where the features are the various powers of the original value (waterLevel).<br><br>\nNow, you will **add more features using the higher powers** of the existing feature `x` in the dataset. Your task write a function so that the function **maps the original training set `x` of size `m × 1` into its higher powers**. Specifically, when a training set `x` of size `m × 1` is passed into the function, the **function should return a `m × p` matrix `x_poly`**, where column `1` holds the original values of `x`, column `2` holds the values of `x^2`, column `3` holds the values of `x^3`, and so on.<br> \nNote that **you don’t have to account for the zero-eth power in this function**.",
"_____no_output_____"
],
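The hypothesis referenced above (shown in the notebook as the image `img/poly.jpg`) can be written out explicitly; this is the standard polynomial-regression form and matches the feature mapping described next:

$$h_\theta(x) = \theta_0 + \theta_1(\text{waterLevel}) + \theta_2(\text{waterLevel})^2 + \dots + \theta_p(\text{waterLevel})^p = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \dots + \theta_p x_p$$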
[
"## 2.A Adding Features\n`poly_features(x, p)` takes a data matrix `x` (size `m x 1`) and **maps each example into its polynomial features** where <br>\n`x_poly[i, :] = [x(i) x(i)^2 x(i)^3 ... x(i)^p]`",
"_____no_output_____"
]
],
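For reference, the same feature mapping can also be written directly in NumPy; this is only an illustrative sketch equivalent to the sklearn-based implementation used below, assuming `x` is an `m × 1` array:

```python
import numpy as np

def poly_features_np(x, p):
    """Map an (m, 1) column x to [x, x^2, ..., x^p] of shape (m, p)."""
    x = np.asarray(x).reshape(-1, 1)
    return np.hstack([x ** k for k in range(1, p + 1)])

# quick check: column k holds x**(k+1)
x_demo = np.array([[1.0], [2.0], [3.0]])
print(poly_features_np(x_demo, 3))
# [[ 1.  1.  1.]
#  [ 2.  4.  8.]
#  [ 3.  9. 27.]]
```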
[
[
"x_poly = None # the output of polu_features\np = 8 # order of polynomial features\n\nfrom sklearn.preprocessing import PolynomialFeatures # import libraries\ndef poly_features(x,p): \n polynomial_features = PolynomialFeatures(degree=8, include_bias=False)\n x_poly = polynomial_features.fit_transform(x)\n return x_poly\n ",
"_____no_output_____"
],
[
"x_poly = poly_features(x,p)\nx_poly_df = pd.DataFrame(x_poly,columns=None)\nx_poly_df.head(3)",
"_____no_output_____"
]
],
[
[
"#### Now we add these features to `xtest` and `xval`",
"_____no_output_____"
]
],
[
[
"xval_poly = poly_features(xval,p)\nxval_poly_df = pd.DataFrame(xval_poly,columns=None)\nxval_poly_df.head(3)",
"_____no_output_____"
],
[
"xtest_poly = poly_features(xtest,p)\nxtest_poly_df = pd.DataFrame(xtest_poly,columns=None)\nxtest_poly_df.head(3)",
"_____no_output_____"
]
],
[
[
"## 2.B Normalize Features\nIt turns out that if we run the training directly on the projected data, will not work well as the **features** would be **badly scaled** (e.g., an example with **x = 40 will now have a feature x<sub>8</sub> = 40<sup>8</sup> = 6.5 × 10<sup>12</sup>**). <br>\nTherefore, you will need to use **feature normalization**.\n`feature_normalize(x)` returns a **normalized version** of `x` where the **mean value of each feature is 0** and the **standard deviation is 1**. This is often a **good preprocessing step** to do when working with learning algorithms.",
"_____no_output_____"
]
],
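As a cross-check, the same z-score scaling (fit on the training features only, then reused for the validation and test features) can be done with scikit-learn's `StandardScaler`. This is only an illustrative alternative to the manual implementation below; the arrays here are random stand-ins, not the notebook's data:

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = np.random.randn(12, 8)        # stand-in for x_poly
X_val = np.random.randn(21, 8)          # stand-in for xval_poly

X_train_norm = scaler.fit_transform(X_train)  # fit mean/std on the training set
X_val_norm = scaler.transform(X_val)          # reuse the training statistics
```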
[
[
"def feature_normalize(x, xtest, xval):\n sigma = x.std()\n mean = x.mean()\n x_norm = (x-mean)/sigma\n xtest_norm = (xtest-mean)/sigma\n xval_norm = (xval-mean)/sigma\n return (x_norm, xtest_norm, xval_norm)",
"_____no_output_____"
],
[
"x_poly_norm, xtest_poly_norm, xval_poly_norm = feature_normalize(x_poly,xtest_poly, xval_poly)",
"_____no_output_____"
],
[
"print('x_poly_norm : mean= {}, std={}.'.format(x_poly_norm.mean(),x_poly_norm.std()))\nprint('xval_poly_norm : mean= {}, std={}.'.format(xval_poly_norm.mean(),xval_poly_norm.std()))\nprint('xtest_poly_norm : mean= {}, std={}.'.format(xtest_poly_norm.mean(),xtest_poly_norm.std()))",
"x_poly_norm : mean= -1.850371707708594e-17, std=0.9999999999999999.\nxval_poly_norm : mean= 0.044848630228004685, std=1.2247731967735336.\nxtest_poly_norm : mean= 0.1606768875137592, std=2.5245490041714636.\n"
]
],
[
[
"Look at this [link](https://stackoverflow.com/questions/40405803/mean-of-data-scaled-with-sklearn-standardscaler-is-not-zero) if you have any question why after using **scaling**, we still do not have **mean = 0** and **std = 1**.<br>\nActually the values are zero.",
"_____no_output_____"
]
],
[
[
"# add 1's to the features of x as bias\nx_poly_norm = np.append(np.ones(shape=(m,1)),x_poly_norm,axis = 1)\nxval_poly_norm = np.append(np.ones(shape=(xval_poly_norm.shape[0],1)),xval_poly_norm,axis = 1)\nxtest_poly_norm = np.append(np.ones(shape=(xtest_poly_norm.shape[0],1)),xtest_poly_norm,axis = 1)",
"_____no_output_____"
]
],
[
[
"# 3. Learning Polynomial Regression\nWe will proceed to **train polynomial regression using your linear regression cost function**.<br><br>\nKeep in mind that even though **we have polynomial terms** in our feature vector, we are **still solving a linear regression** optimization problem. The **polynomial terms have simply turned into features** that we can use for linear regression. We are using the **same cost function and gradient** that you wrote for the earlier part of this exercise.<br>",
"_____no_output_____"
],
[
"## 3.A Regularized Linear Regression Cost Function\nRecall that regularized linear regression has the following cost function:<img src='img/rlrc.jpg'><br><br>\nwhere `lambda` is a regularization parameter which controls the degree of regularization (thus, **help preventing overfitting**). The **regularization** term puts a **penalty** on the overal **cost J**.<br><br>\nAs the magnitudes of the model parameters **θ<sub>j</sub>**\nincrease, the penalty increases as well. Note that you should not regularize the **θ<sub>0</sub>** term.<br><br>\nFor this part of the exercise, you will be using a **polynomial of degree 8**.<br>\nAfter learning the parameters **θ**, you should draw two plots generated for polynomial regression with **λ = 0**.<br>",
"_____no_output_____"
],
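For reference, the regularized cost shown above as an image (`img/rlrc.jpg`) is the standard form:

$$J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)^2 + \frac{\lambda}{2m}\sum_{j=1}^{n}\theta_j^2$$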
[
"### Implementation\n`linear_reg_cost(x, y, theta, lambda_)` computes the cost of using `theta` as the parameter for linear regression to fit the data points in `x` and `y`. Returns the cost in `j` as cost.",
"_____no_output_____"
]
],
[
[
"def hypothesis(x,theta):\n return np.dot(x,theta)",
"_____no_output_____"
],
[
"def linear_reg_cost(theta_flatten, x_flatten, y, lambda_, num_of_samples, num_of_features):\n x = x_flatten.reshape(num_of_samples, num_of_features)\n theta = theta_flatten.reshape(n,1)\n loss = hypothesis(x,theta)-y\n regularizer = lambda_*np.sum(theta[1:,:]**2)/(2*m)\n j = np.sum(loss ** 2)/(2*m) \n return j",
"_____no_output_____"
]
],
[
[
"## 3.B Regularized Linear Regression Gradient\nCorrespondingly, the **partial derivative of regularized linear regression’s cost for θ<sub>j</sub>** is defined as:<img src='img/rlrg.jpg'>",
"_____no_output_____"
],
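Written out, the gradient shown above as an image (`img/rlrg.jpg`) is:

$$\frac{\partial J(\theta)}{\partial \theta_0} = \frac{1}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x_0^{(i)}, \qquad
\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x_j^{(i)} + \frac{\lambda}{m}\theta_j \quad (j \ge 1)$$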
[
"### Implementation\n`linear_reg_grad(x, y, theta, lambda_)` computes the gradient of cost of using `theta` as the parameter for linear regression to fit the data points in `x` and `y`. Returns the gradient in `grad`.",
"_____no_output_____"
]
],
[
[
"def linear_reg_grad(theta_flatten, x_flatten, y, lambda_, num_of_samples, num_of_features):\n x = x_flatten.reshape(num_of_samples, num_of_features)\n m,n = x.shape\n theta = theta_flatten.reshape(n,1)\n new_theta = np.zeros(shape=(theta.shape))\n loss = hypothesis(x,theta)-y\n gradient = np.dot(x.T,loss)\n new_theta[0:,:] = gradient/m\n new_theta[1:,:] = gradient[1:,:]/m + lambda_*(theta[1:,]/m)\n return new_theta.flatten()",
"_____no_output_____"
]
],
[
[
"## 3.C Fitting Linear Regression\nOnce your cost function and gradient are working correctly, the next part is to **compute the optimal values** of **θ**.<br>\nThis training function uses `fmin_cg` to optimize the cost function. <a href='https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.optimize.fmin_cg.html'>See official doc</a><br>",
"_____no_output_____"
],
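`fmin_cg` only needs the objective, an initial point, and optionally the gradient and extra arguments. A tiny standalone example of the `f` / `fprime` / `args` interface (a toy least-squares problem, unrelated to the exercise data):

```python
import numpy as np
from scipy.optimize import fmin_cg

def f(w, A, b):
    r = A @ w - b
    return 0.5 * np.dot(r, r)

def fprime(w, A, b):
    return A.T @ (A @ w - b)

A = np.array([[1.0, 0.0], [0.0, 2.0]])
b = np.array([1.0, 4.0])
w_opt = fmin_cg(f=f, x0=np.zeros(2), fprime=fprime, args=(A, b), disp=False)
print(w_opt)  # approximately [1., 2.]
```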
[
"### Implementation\nOnce you have implemented the cost and gradient correctly, the `fmin_cg` function will use your cost function to train regularized linear regression and update theta each time.",
"_____no_output_____"
]
],
[
[
"m,n = x_poly_norm.shape\nlambda_ = 0\ntheta = np.ones(n)\n\nfrom scipy.optimize import fmin_cg\nnew_theta = fmin_cg(f=linear_reg_cost, x0=theta, fprime=linear_reg_grad, args=(x_poly_norm.flatten(), y, lambda_, m,n))",
"Optimization terminated successfully.\n Current function value: 15.795185\n Iterations: 198\n Function evaluations: 454\n Gradient evaluations: 454\n"
],
[
"new_theta",
"_____no_output_____"
]
],
[
[
"## 3.D Visualization of Fitted Model\nFinally, you should also **plot the best fit line**. The best fit line tells us that the model is a good fit to the data because the **data has a non-linear pattern**. While **visualizing the best fit** as shown is **one possible way to debug** your learning algorithm, it is not always easy to visualize the data and model.<br><br>\nIn the next section, you will implement a function to **generate learning curves** that can help you debug your learning algorithm even if it is **not easy to visualize** the data.",
"_____no_output_____"
]
],
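Because the training points are not sorted by water level, plotting the fitted values directly against `x` produces a jagged line. A common alternative is to evaluate the model on a dense, sorted grid. The sketch below reuses `x`, `y`, `x_poly`, `p`, `new_theta` and `poly_features` from earlier cells and recomputes the per-feature training mean/std to mirror `feature_normalize`; it is illustrative, not part of the original exercise:

```python
# evaluate the fitted polynomial on a dense, sorted grid of water levels
x_grid = np.linspace(x.min() - 15, x.max() + 15, 200).reshape(-1, 1)
x_grid_poly = poly_features(x_grid, p)
x_grid_norm = (x_grid_poly - x_poly.mean(axis=0)) / x_poly.std(axis=0)
x_grid_norm = np.append(np.ones((x_grid_norm.shape[0], 1)), x_grid_norm, axis=1)

plt.scatter(x, y, color='red', marker='x', label='train data')
plt.plot(x_grid, x_grid_norm @ new_theta.reshape(-1, 1), label='fitted polynomial')
plt.legend(); plt.grid(); plt.show()
```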
[
[
"# import libraries\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.scatter(x, y, color='red', marker='x', label= 'train data')\nplt.plot(x.flatten(),np.dot(x_poly_norm,new_theta.reshape(n,1).flatten()), label = 'best fit model')\n#plt.axis([-40,50,-75,75])\nplt.title('Training Set')\nplt.xlabel('Change in water level (x)')\nplt.ylabel('Water flowing out of the dam (y)')\nplt.grid()\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7497faa938867f79d97436b1f634361b59e17d3 | 937,537 | ipynb | Jupyter Notebook | Karplus Strong Algorithm/Karplus Strong Algorithm.ipynb | Nil69420/Digital-Signal-Processing | c322192224fee9515bb1ab5a47aabace2abd7b81 | [
"Apache-2.0"
] | null | null | null | Karplus Strong Algorithm/Karplus Strong Algorithm.ipynb | Nil69420/Digital-Signal-Processing | c322192224fee9515bb1ab5a47aabace2abd7b81 | [
"Apache-2.0"
] | null | null | null | Karplus Strong Algorithm/Karplus Strong Algorithm.ipynb | Nil69420/Digital-Signal-Processing | c322192224fee9515bb1ab5a47aabace2abd7b81 | [
"Apache-2.0"
] | null | null | null | 1,792.613767 | 170,820 | 0.942734 | [
[
[
"def KS_1(x, N): #x is initial buffer and N is sample output\n y = x #concatenate identical copies of buffer\n while len(y) < N:\n #keep appending until required length is reached\n y = np.append(y, x)\n #trim the excess\n y = y[0:N+1]\n return y",
"_____no_output_____"
],
[
"%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport IPython",
"_____no_output_____"
],
[
"plt.rcParams[\"figure.figsize\"] = (14,4)",
"_____no_output_____"
],
[
"#Sampling Rate\nFs = 16000",
"_____no_output_____"
],
[
"b = np.random.randn(50)\nplt.stem(b);",
"_____no_output_____"
],
[
"y = KS_1(b, Fs*2)\nplt.stem(y[0:500]);",
"_____no_output_____"
],
[
"IPython.display.Audio(y, rate = Fs)",
"_____no_output_____"
],
[
"#lower octave\nIPython.display.Audio(KS_1(np.random.randn(100), Fs * 2), rate = Fs)",
"_____no_output_____"
],
[
"#KS Algorithm as a signal processing loop\ndef KS_2(x, N):\n M = len(x) #length of the input\n y = np.zeros(N)\n #assuming y[n] = 0 for n < 0\n for n in range(0, N):\n y[n] = (x[n] if n < M else 0) + (y[n-M] if n-M >= 0 else 0)\n return y",
"_____no_output_____"
],
[
"IPython.display.Audio(KS_2(np.random.rand(50), Fs * 2), rate = Fs)",
"_____no_output_____"
],
[
"def KS_3(x, N, alpha = 0.99):\n M = len(x)\n y = np.zeros(N)\n for n in range(0, N):\n y[n] = (x[n] if n < M else 0) + alpha * (y[n-M] if n-M >= 0 else 0)\n return y",
"_____no_output_____"
],
[
"y = KS_3(b, Fs * 2)\nplt.stem(y[0:2000]);",
"_____no_output_____"
],
[
"IPython.display.Audio(y, rate = Fs)",
"_____no_output_____"
],
[
"IPython.display.Audio(KS_3(np.random.rand(50), Fs * 2), rate=Fs)",
"_____no_output_____"
],
[
"IPython.display.Audio(KS_3(np.random.rand(10), Fs * 2), rate=Fs)",
"_____no_output_____"
],
[
"def KS(x, N, alpha = 0.99):\n #adjusting alpha so that all notes have a decay\n REF_LEN = 50\n M = len(x)\n a = alpha ** (float(M)/REF_LEN)\n y = np.zeros(N)\n for n in range(0, N):\n y[n] = (x[n] if n < M else 0) + a * (y[n-M] if n-M >= 0 else 0)\n return y",
"_____no_output_____"
],
[
"IPython.display.Audio(KS(np.random.rand(50), Fs * 2), rate=Fs)",
"_____no_output_____"
],
[
"IPython.display.Audio(KS(np.random.rand(10), Fs * 2), rate=Fs)",
"_____no_output_____"
],
[
"def freq(note):\n # general purpose function to convert a note in standard notation \n # to corresponding frequency\n if len(note) < 2 or len(note) > 3 or \\\n note[0] < 'A' or note[0] > 'G':\n return 0\n if len(note) == 3:\n if note[1] == 'b':\n acc = -1\n elif note[1] == '#':\n acc = 1\n else:\n return 0\n octave = int(note[2])\n else:\n acc = 0\n octave = int(note[1])\n SEMITONES = {'A': 0, 'B': 2, 'C': -9, 'D': -7, 'E': -5, 'F': -4, 'G': -2}\n n = 12 * (octave - 4) + SEMITONES[note[0]] + acc\n f = 440 * (2 ** (float(n) / 12.0))\n #print note, f\n return f\n\n\ndef ks_chord(chord, N, alpha):\n y = np.zeros(N)\n # the chord is a dictionary: pitch => gain\n for note, gain in chord.items():\n # create an initial random-filled KS buffer the note\n x = np.random.randn(int(np.round(float(Fs) / freq(note))))\n y = y + gain * KS(x, N, alpha)\n return y",
"_____no_output_____"
],
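The pitch produced by the Karplus-Strong loop is set by the buffer length: with sampling rate Fs and buffer length M, the fundamental is roughly Fs / M, which is why `ks_chord` sizes each buffer as `round(Fs / freq(note))`. A small check of that relation (illustrative values):

```python
# A4 = 440 Hz at Fs = 16000 Hz needs a buffer of about Fs / 440 ≈ 36 samples
Fs = 16000
M = int(round(Fs / 440.0))
print(M, Fs / M)   # 36, ~444 Hz (rounding M to an integer limits tuning accuracy)
```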
[
"# A Hard Day's Night's chord\nhdn_chord = {\n 'D2' : 2.2, \n 'D3' : 3.0, \n 'F3' : 1.0, \n 'G3' : 3.2, \n 'F4' : 1.0, \n 'A4' : 1.0, \n 'C5' : 1.0, \n 'G5' : 3.5,\n}\n \nIPython.display.Audio(ks_chord(hdn_chord, Fs * 4, 0.995), rate=Fs)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e749a01e2cf589df355bdfc68c24469b14883b08 | 106,935 | ipynb | Jupyter Notebook | notebooks/results_downstream_fully_obs.ipynb | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
] | 2 | 2022-01-18T09:59:01.000Z | 2022-02-02T10:01:45.000Z | notebooks/results_downstream_fully_obs.ipynb | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
] | null | null | null | notebooks/results_downstream_fully_obs.ipynb | se-jaeger/data-imputation-paper | 498d2d871302d917f58ecf6a9576e3a3451c5faa | [
"Apache-2.0"
] | null | null | null | 109.340491 | 73,944 | 0.798532 | [
[
[
"# Visualize Results: Downstream Performance - \"Fully Observed\" Experiment\n\nThis notebook should answer the questions: *Does imputation lead to better downstream performances?*\n\n## Notebook Structure \n\n* Application Scenario 2 - Downstream Performance \n * Categorical Columns (Classification)\n * Numerical Columns (Regression)\n * Heterogenous Columns (Classification and Regression Combined)",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport re\nimport seaborn as sns\n\nfrom pathlib import Path\nfrom data_imputation_paper.experiment import read_experiment, read_csv_files\nfrom data_imputation_paper.plotting import draw_cat_box_plot\n\n%matplotlib inline\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"## Settings",
"_____no_output_____"
]
],
[
[
"sns.set(style=\"whitegrid\")\nsns.set_context('paper', font_scale=1.5)\nmpl.rcParams['lines.linewidth'] = '2'",
"_____no_output_____"
],
[
"EXPERIMENT = \"fully_observed_fix\"\n\nEXPERIMENT_PATH = Path(f\"../data/experiments/{EXPERIMENT}/\")\n\nCLF_METRIC = \"Classification Tasks\"\nREG_METRIC = \"Regression Tasks\"\n\nDOWNSTREAM_RESULT_TYPE = \"downstream_performance_mean\"\nIMPUTE_RESULT_TYPE = \"impute_performance_mean\"\n\nFIGURES_PATH = Path(f\"../paper/figures/\")",
"_____no_output_____"
]
],
[
[
"## Import the data",
"_____no_output_____"
]
],
[
[
"%%time\n\nresults = read_csv_files(read_experiment(EXPERIMENT_PATH), read_details=False)\nresults.head()",
"CPU times: user 1min 26s, sys: 4.33 s, total: 1min 30s\nWall time: 1min 40s\n"
],
[
"na_impute_results = results[\n (results[\"result_type\"] == IMPUTE_RESULT_TYPE) & \n (results[\"metric\"].isin([\"F1_macro\", \"RMSE\"]))\n]\nna_impute_results.drop([\"baseline\", \"corrupted\", \"imputed\"], axis=1, inplace=True)\nna_impute_results = na_impute_results[na_impute_results.isna().any(axis=1)]\nna_impute_results.shape",
"_____no_output_____"
],
[
"downstream_results = results[\n (results[\"result_type\"] == DOWNSTREAM_RESULT_TYPE) & \n (results[\"metric\"].isin([\"F1_macro\", \"RMSE\"]))\n]\n\n# remove experiments where imputation failed\ndownstream_results = downstream_results.merge(\n na_impute_results,\n how = \"left\",\n validate = \"one_to_one\",\n indicator = True,\n suffixes=(\"\", \"_imp\"),\n on = [\"experiment\", \"imputer\", \"task\", \"missing_type\", \"missing_fraction\", \"strategy\", \"column\"]\n)\ndownstream_results = downstream_results[downstream_results[\"_merge\"]==\"left_only\"]\n\nassert len(results[\"strategy\"].unique()) == 1\ndownstream_results.drop([\"experiment\", \"strategy\", \"result_type_imp\", \"metric_imp\", \"train\", \"test\", \"train_imp\", \"test_imp\", \"_merge\"], axis=1, inplace=True)\n\ndownstream_results = downstream_results.rename(\n {\n \"imputer\": \"Imputation Method\",\n \"task\": \"Task\",\n \"missing_type\": \"Missing Type\",\n \"missing_fraction\": \"Missing Fraction\",\n \"column\": \"Column\",\n \"baseline\": \"Baseline\",\n \"imputed\": \"Imputed\",\n \"corrupted\": \"Corrupted\"\n },\n axis = 1\n)",
"_____no_output_____"
],
[
"rename_imputer_dict = {\n \"ModeImputer\": \"Mean/Mode\",\n \"KNNImputer\": \"$k$-NN\",\n \"ForestImputer\": \"Random Forest\",\n \"AutoKerasImputer\": \"Discriminative DL\",\n \"VAEImputer\": \"VAE\",\n \"GAINImputer\": \"GAIN\" \n}\n\nrename_metric_dict = {\n \"F1_macro\": CLF_METRIC,\n \"RMSE\": REG_METRIC\n}\n\ndownstream_results = downstream_results.replace(rename_imputer_dict)\ndownstream_results = downstream_results.replace(rename_metric_dict)\n\ndownstream_results",
"_____no_output_____"
]
],
[
[
"### Robustness: check which imputers yielded `NaN`values",
"_____no_output_____"
]
],
[
[
"for col in downstream_results.columns:\n na_sum = downstream_results[col].isna().sum()\n if na_sum > 0:\n print(\"-----\" * 10) \n print(col, na_sum)\n print(\"-----\" * 10) \n na_idx = downstream_results[col].isna()\n print(downstream_results.loc[na_idx, \"Imputation Method\"].value_counts(dropna=False))\n print(\"\\n\")",
"_____no_output_____"
]
],
[
[
"## Compute Downstream Performance relative to Baseline",
"_____no_output_____"
]
],
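A small worked example of the improvement measure computed below (illustrative numbers, not experiment results): for a classification task with baseline F1 = 0.85, corrupted F1 = 0.75 and imputed F1 = 0.80, the improvement is (0.80 − 0.75) / 0.85 ≈ 0.059. For regression the sign is flipped because a lower RMSE is better, so an imputed RMSE below the corrupted RMSE still counts as a positive improvement.

```python
# illustrative numbers only
baseline, corrupted, imputed = 0.85, 0.75, 0.80          # classification (F1, higher is better)
improvement_clf = (imputed - corrupted) / baseline        # ≈ 0.059

baseline, corrupted, imputed = 10.0, 14.0, 12.0           # regression (RMSE, lower is better)
improvement_reg = -(imputed - corrupted) / baseline       # = 0.2
```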
[
[
"clf_row_idx = downstream_results[\"metric\"] == CLF_METRIC\nreg_row_idx = downstream_results[\"metric\"] == REG_METRIC",
"_____no_output_____"
],
[
"downstream_results[\"Improvement\"] = (downstream_results[\"Imputed\"] - downstream_results[\"Corrupted\"] ) / downstream_results[\"Baseline\"]\ndownstream_results.loc[reg_row_idx, \"Improvement\"] = downstream_results.loc[reg_row_idx, \"Improvement\"] * -1\n\ndownstream_results",
"_____no_output_____"
]
],
[
[
"## Application Scenario 2 - Downstream Performance",
"_____no_output_____"
],
[
"### Categorical Columns (Classification)",
"_____no_output_____"
]
],
[
[
"draw_cat_box_plot(\n downstream_results,\n \"Improvement\",\n (-0.15, 0.3),\n FIGURES_PATH,\n \"fully_observed_downstream_boxplot.eps\",\n hue_order=list(rename_imputer_dict.values()),\n row_order=list(rename_metric_dict.values())\n)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e749a8dd3cdca1d46097a170fcc4657be3495a1b | 96,077 | ipynb | Jupyter Notebook | Pandas/DataAggregations.ipynb | EstebanBrito/TallerIA-SISI2019 | 282cab1cc42bafdb38b8535367a1cee3b561db2d | [
"MIT"
] | 1 | 2019-09-25T14:13:57.000Z | 2019-09-25T14:13:57.000Z | Pandas/DataAggregations.ipynb | EstebanBrito/TallerIA-SISI2019 | 282cab1cc42bafdb38b8535367a1cee3b561db2d | [
"MIT"
] | null | null | null | Pandas/DataAggregations.ipynb | EstebanBrito/TallerIA-SISI2019 | 282cab1cc42bafdb38b8535367a1cee3b561db2d | [
"MIT"
] | 3 | 2019-09-27T14:28:42.000Z | 2019-09-29T23:15:56.000Z | 28.877968 | 104 | 0.35151 | [
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"from IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"",
"_____no_output_____"
]
],
[
[
"# Agrupación (groupby)",
"_____no_output_____"
],
[
"## Lo Básico",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({'key1': ['a','a','b','b','a'],\n 'key2': ['one','two','one','two','one'],\n 'data1': np.random.randn(5),\n 'data2': np.random.randn(5)})\ndf",
"_____no_output_____"
],
[
"# Calcula la media por cada grupo\ngrouped = df['data1'].groupby(df['key1']) # La columna llave será 'data'\ngrouped # Objeto GroupBy\ngrouped.mean() # Calcula media",
"_____no_output_____"
],
[
"# Pasar arrays múltiples arrojará un objeto groupBy jerarquizado\nmeans = df['data1'].groupby([df['key1'], df['key2']]).mean()\nmeans # Nested grouping",
"_____no_output_____"
],
[
"# Podemos convertir ese objeto Series jerarquizado en un DF\nmeans.unstack()",
"_____no_output_____"
],
[
"# Agrupando por columnas criterio externas y diferentes\nstates = np.array(['Ohio','California','California','Ohio','Ohio'])\nyears = np.array([2005, 2005, 2006, 2005, 2006])\n# Cada combinación de valores entre las llaves se toma como identificador\ndf['data1'].groupby([states, years]).mean()",
"_____no_output_____"
],
[
"# Si la llave (columna) de agrupación ya está en el DF, no necesitamos declararla\ndf.groupby('key1').mean() # Nótese que 'key 2' no aparece (nuisance column, datos no numéricos)\ndf.groupby(['key1', 'key2']).mean()",
"_____no_output_____"
],
[
"# Un uso común de la agrupación es calcular el tamaño de grupos\ndf.groupby(['key1', 'key2']).size()",
"_____no_output_____"
]
],
[
[
"## Iterando sobre grupos",
"_____no_output_____"
]
],
[
[
"# Generemos los grupos uno por uno (primero nombre, luego valores)\ndf\nfor name, group in df.groupby('key1'):\n print(name)\n print(group)",
"_____no_output_____"
],
[
"# Para agrupaciones por llave múltiple, el resultado de iterar es una tupla\nfor (k1, k2), group in df.groupby(['key1', 'key2']):\n print((k1, k2)) # tupla(tupla de combinación, valores)\n print(group)",
"('a', 'one')\n key1 key2 data1 data2\n0 a one 0.496714 -0.234137\n4 a one -0.234153 0.542560\n('a', 'two')\n key1 key2 data1 data2\n1 a two -0.138264 1.579213\n('b', 'one')\n key1 key2 data1 data2\n2 b one 0.647689 0.767435\n('b', 'two')\n key1 key2 data1 data2\n3 b two 1.52303 -0.469474\n"
],
[
"# Convertir objeto GroupBy a diccionario y acceder a los grupos\npieces = dict(list(df.groupby('key1')))\npieces\npieces['b']",
"_____no_output_____"
],
[
"# Agrupando a lo largo de columnas\ndf.dtypes # Agrupando por tipo de datos (object, float64)\ngrouped = df.groupby(df.dtypes, axis=1)\n# Veamos los grupos\nfor dtype, group in grouped:\n print(dtype)\n print(group)",
"_____no_output_____"
]
],
[
[
"## Selección",
"_____no_output_____"
]
],
[
[
"# Azúcar sintáctica para df['data1'].groupby(df['key1'])\ndf.groupby('key1')['data1'] # Obj GrpBy como Series\ndf.groupby('key1')[['data2']] # Obj GrpBy como DF",
"_____no_output_____"
],
[
"# Resulta útil para revisar sólo una columna\ndf.groupby(['key1', 'key2'])[['data2']].mean()",
"_____no_output_____"
],
[
"# Obtenemos un Df si selccionamos dos o más columnas para revisar\ns_grouped = df.groupby(['key1', 'key2'])['data2'] # Series jerarquizado\nd_grouped = df.groupby(['key1', 'key2'])['data1', 'data2'] # DF\ns_grouped\nd_grouped\ns_grouped.mean()\nd_grouped.mean()",
"_____no_output_____"
]
],
[
[
"## Agrupando con diccionarios",
"_____no_output_____"
]
],
[
[
"people = pd.DataFrame(np.random.randn(5,5),\n columns=['a','b','c','d','e'],\n index=['Joe','Steve','Wes','Jim','Travis'])",
"_____no_output_____"
],
[
"# Añadimos unos cuentos valores nulos\npeople.iloc[2:3, [1, 2]] = np.nan\npeople",
"_____no_output_____"
],
[
"# Secuencia grupo-valores\nmapping = {'a': 'red', 'b': 'red', 'c': 'blue',\n 'd': 'blue', 'e': 'red', 'f': 'orange'}",
"_____no_output_____"
],
[
"# Método eficiente si ya tenemos los valores únicos de la columna criterio\nby_column = people.groupby(mapping, axis=1)\nby_column.sum() # Suma a lo largo de las columnas (axis=1)",
"_____no_output_____"
],
[
"# En lugar de un diccionario, podemos utilizar un Series\nmap_series = pd.Series(mapping)\nmap_series\npeople.groupby(map_series, axis=1).count()",
"_____no_output_____"
]
],
[
[
"## Agrupando con funciones",
"_____no_output_____"
]
],
[
[
"# Agrupación basada en el largo de los valores de columnas clave\n# Aplicamos len() a cada valor y utilizamos el entero como valor criterio\npeople\npeople.groupby(len).sum() ",
"_____no_output_____"
],
[
"# Un ejemplo más difícil\nkey_list = ['one','one','one','two','two']\npeople.groupby([len, key_list]).min()",
"_____no_output_____"
]
],
[
[
"# Agregaciones (agg)",
"_____no_output_____"
],
[
"#### Aggregations: Tranformación que convierta arrays en números (escalares)\n- count: ¿Cuántos existen?\n- sum: ¿Cuánto representan?\n- mean: Media\n- median: Mediana\n- std, var: Desv. est., varianza\n- min, max: Mínimos máximos\n- prod: Product de valores no nulos\n- first, last: Primer o último valore\n\nCada una de estas funciones ignora los valores NaN y tiene implmentaciones optimizadas",
"_____no_output_____"
],
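As a quick illustration of the list above (an added sketch, not part of the original notebook), these optimized aggregations can be called directly on a GroupBy object and silently skip missing values:

```python
import numpy as np
import pandas as pd

# Tiny frame with one missing value to show the NaN-skipping behaviour
demo = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
                     'val': [1.0, np.nan, 3.0, 5.0]})

g = demo.groupby('key')['val']
print(g.count())  # a: 1, b: 2  -> the NaN is not counted
print(g.sum())    # a: 1.0, b: 8.0
print(g.prod())   # product of the non-NA values per group
print(g.first())  # first non-NA value per group
```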
[
"## Lo Básico",
"_____no_output_____"
]
],
[
[
"# Podemos usar funciones que no estén definidos en un GrpObj, pero sí en otras variables\n# quantile() está definido en Series, no en GroupBy\ndf\ngrouped = df.groupby('key1')\ngrouped['data1'].quantile(0.9) # Posible valor en posicion 90 del rango ",
"_____no_output_____"
],
[
"# No es una agregación, pero ilustra el uso de funciones no definidas\ngrouped.describe()",
"_____no_output_____"
],
[
"# Agregamos usando agg(). Podemos agregar pasando nuestras propies funciones incluso\ndef peak_to_peak(arr):\n return arr.max() - arr.min()\n\n# Llama función personalizada con agg()\ngrouped.agg(peak_to_peak)",
"_____no_output_____"
]
],
[
[
"## Más cosas sobre agregaciones",
"_____no_output_____"
]
],
[
[
"# Cargamos un dataset\ntips = pd.read_csv('../examples/tips.csv')",
"_____no_output_____"
],
[
"# Añadimos columna de propinas como % de la cuenta total\ntips['tip_pct'] = tips['tip'] / tips['total_bill']",
"_____no_output_____"
],
[
"tips[:6]",
"_____no_output_____"
],
[
"# Tal vez queramos usar diferentes agregaciones en diferentes columans\n# ... o utilizar múltiples agregaciones en una columna\n\n# Agrupamos por combinación (day, smoker)\ngrouped = tips.groupby(['day', 'smoker'])\n# Creamos columa % de propina\ngrouped_pct = grouped['tip_pct']\n# Si la agregación es una estadística descriptiva, sólo pasamos el nombre\ngrouped_pct.agg('mean') # --> Series",
"_____no_output_____"
],
[
"# Podemos hacer agregaciones múltiples (incluso agregaciones personalizadas)\ngrouped_pct.agg(['mean', 'std', peak_to_peak]) # -->DF",
"_____no_output_____"
],
[
"# Podemos personalizar el nombre de la columna agregada.\n# Usamos la sintáxis (nombre_columna, func_agregacion)\ngrouped_pct.agg([('MEAN VALUE', 'mean'), ('STA.ND. DEV.', np.std)])",
"_____no_output_____"
],
[
"# Si agregamos en múltiples columnas, obtenemos un DF jerárquico\nfunctions = ['count', 'mean', 'max']\nresult = grouped['tip_pct', 'total_bill'].agg(functions) # Agregación en dos columnas\nresult # --> DF jerarquizado\nresult['tip_pct'] # -->DF",
"_____no_output_____"
],
[
"# Algo más difícil: Múltiples agregaciones (con nombres) a múltiples columnas\nftuples = [('PROMEDIO', 'mean'), ('VARIACION', np.var)]\ngrouped['tip_pct', 'total_bill'].agg(ftuples)",
"_____no_output_____"
]
],
[
[
"## Mapeo de Agregaciones",
"_____no_output_____"
]
],
[
[
"# Different agregaciones para diferentes columnas\n# --> Obtenemos un DF jerárquico si al menos a una columna se le aplican múltiples agregaciones\ngrouped.agg({'tip': np.max, 'size': 'sum'}) # DF\ngrouped.agg({'tip_pct': ['min', 'max', 'mean', 'std'], 'size': 'sum'}) # DF jerárquico ",
"_____no_output_____"
]
],
[
[
"## apply() en objetos GroupBy",
"_____no_output_____"
],
[
"De forma más general, podemos ejecutar la función apply sobre un objeto GroupBy.",
"_____no_output_____"
]
],
[
[
"# Función que trae el top N de filas con alto porcentaje propina/cuenta total\ndef top(df, n=5, column='tip_pct'):\n return df.sort_values(by=column)[-n:]",
"_____no_output_____"
],
[
"# Las 6 filas con % propina/cuenta más grandes\ntop(tips, n=6)",
"_____no_output_____"
],
[
"# Trae las top 5 filas con mayor % de propinas, tanto de fumadores como de no fumadores\n# Agrupa, luego aplica la función\ntips.groupby('smoker').apply(top)",
"_____no_output_____"
],
[
"# Si necesitamos pasar agumentos a nuestra función dentro de apply()...\n# podemos hacerlo de la forma apply(función, arg1=x, arg2=y, ..., argN=z)\ntips.groupby(['smoker', 'day']).apply(top, n=1, column='total_bill')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e749b6d77d68011ad8f6d578ab46499bfb08d9a1 | 11,999 | ipynb | Jupyter Notebook | docs/physics/montecarlo/initialization.ipynb | svenkat19/tardis | ae4e954d8037bf2bdd71d03bdb60e681d9851f12 | [
"BSD-3-Clause"
] | 1 | 2020-03-04T07:49:00.000Z | 2020-03-04T07:49:00.000Z | docs/physics/montecarlo/initialization.ipynb | svenkat19/tardis | ae4e954d8037bf2bdd71d03bdb60e681d9851f12 | [
"BSD-3-Clause"
] | 1 | 2021-04-10T07:39:32.000Z | 2021-04-10T07:39:32.000Z | docs/physics/montecarlo/initialization.ipynb | svenkat19/tardis | ae4e954d8037bf2bdd71d03bdb60e681d9851f12 | [
"BSD-3-Clause"
] | null | null | null | 36.141566 | 565 | 0.615718 | [
[
[
".. _initialization:\n\n****************************\nEnergy Packet Initialization\n****************************",
"_____no_output_____"
]
],
[
[
"While it is instructive to think about tracking the propagation history of\nindividual photons when illustrating the basic idea behind Monte Carlo radiative transfer\ntechniques, there are important numerical reasons for using a different\ndiscretization scheme. Instead of thinking in the photon picture, it brings\nsignificant advantages to follow the idea of <strong data-cite=\"Abbott1985\">[]</strong> and\n<strong data-cite=\"Lucy1999\">[]</strong> and consider parcels of radiant energy as the fundamental\nbuilding blocks of the Monte Carlo calculation. These basic Monte Carlo quanta\nare commonly referred to as \"energy packets\" or simply \"packets\", and are composed of many photons with the same frequency.\n\nDuring a Monte Carlo calculation, $N$ (a large number) packets, all with a certain\nenergy $\\varepsilon$, are created at the inner boundary of the computational domain (which is discussed in [Model of Supernova Domain](../setup/model.rst)) known as the photosphere. Currently, the photosphere is modeled as a spherical blackbody with a radius $R_\\mathrm{phot}$ and temperature $T_\\mathrm{phot}$. In TARDIS, all packets are assigned identical energies, and the total energy of the packets is 1 erg (and thus each packet has an energy of $\\frac{1}{N}$ ergs).\n\n.. note:: The indivisible energy packet scheme does not require that all packets have the same energy. This is just a convenient and simple choice adopted in TARDIS.\n\nSince the photosphere is modeled as a blackbody, its total luminosity $L$ (recall that luminosity is energy emitted divided by the time in which it is emitted) is\n$$L=\\frac{N\\varepsilon}{\\Delta t}=4 \\pi R_{\\mathrm{phot}}^2 \\sigma_{\\mathrm{R}} T_{\\mathrm{phot}}^4$$\nwhere $\\sigma_\\mathrm{R}$ is the Stefan-Boltzmann constant and $\\Delta t$ is the physical duration of the simulation. In order to make this relationship hold (remembering that $N\\varepsilon = 1$ erg), we use\n$$\\Delta t = \\frac{1}{L}=\\frac{1}{4 \\pi R_{\\mathrm{phot}}^2 \\sigma_{\\mathrm{R}} T_{\\mathrm{phot}}^4}.$$\n\nDuring packet initialization, each packet is assigned an initial propagation direction $\\mu$ which is the cosine of the angle $\\theta$ which the packet's path makes with the radial direction. Using a pseudo-random number generator which generates numbers $z$ uniformly distributed on the interval $[0,1]$, the propagation direction is determined according to\n$$\\mu = \\sqrt{z}.$$\nThis sampling is demonstrated in the code below.\n\nFinally, each packet is assigned an initial frequency (or more precisely, the initial frequency of its consitiuent photons). Note that since each packet has the same energy, each packet will represent a different number of real photons. The sampling on packet frequencies is more involved than that of the propagation direction, as it involves sampling the Planck distribution (see below). TARDIS uses the technique described in <strong data-cite=\"Carter1975\">[]</strong> and summarized in <strong data-cite=\"Bjorkman2001\">[]</strong> for this purpose.\n\nDuring the simulation, the energy of the packet remains constant in the local\nco-moving frame (see [Reference Frames](propagation.rst#reference-frames)). This naturally ensures energy\nconservation and constitutes the main advantage of this discretization scheme. **However, while the energy of the packets is conserved in the co-moving frame, the frequency of the constituent photons (in the local co-moving frame) may vary over the course of the simulation. 
Thus, a packet may represent several different numbers of real photons throughout their lifetimes.**\n\nWe now demonstrate the TARDIS packet initialization framework:",
"_____no_output_____"
]
],
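Before turning to the TARDIS machinery below, here is a minimal stand-alone sketch (an addition to this page, not TARDIS code) of the two rules just described — equal-energy packets and the $\mu=\sqrt{z}$ direction sampling:

```python
import numpy as np

rng = np.random.default_rng(seed=1)

# Equal-energy packets sharing a total of 1 erg
n_packets = 100_000
packet_energy = 1.0 / n_packets  # erg per packet

# Direction sampling: z ~ U[0, 1)  ->  mu = sqrt(z), so the density of mu is 2*mu
z = rng.random(n_packets)
mu = np.sqrt(z)

# Sanity check: the analytic mean of mu is 2/3
print(mu.mean())
```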
[
[
"import numpy as np\nfrom tardis.montecarlo.packet_source import BlackBodySimpleSource\nfrom astropy import units as u\nfrom tardis import constants as const\nimport matplotlib.pyplot as plt\n\n#The random number generator that will be used\nrng = np.random.default_rng()",
"_____no_output_____"
]
],
[
[
"The following cell contains values that you can change to see how it affects the spectrum:",
"_____no_output_____"
]
],
[
[
"# Seed for the pseudo-random number generator\nseed = 1\n\n# Radius of the supernova's photosphere in cm\nr_phot = 1e15 * u.cm\n\n# Number of packets generated\nn_packets = 40000",
"_____no_output_____"
]
],
[
[
"You can either set a temperatature of the photosphere, which will determine its luminosity; or you can set the luminosity of the photosphere, which will determine its temperature.",
"_____no_output_____"
]
],
[
[
"# Temperature in K\ntemperature = 10000 * u.K\n\nluminosity = 4 * np.pi * (r_phot**2) * const.sigma_sb * (temperature**4)\n\n# Makes sure the luminosity is given in erg/s\nluminosity = luminosity.to('erg/s')\n\nprint('Luminosity:', luminosity)",
"_____no_output_____"
],
[
"# Luminosity in erg/s\nluminosity = 7e42 * u.erg / u.s\n\ntemperature = (luminosity / (4 * np.pi * const.sigma_sb))**0.25 / np.sqrt(r_phot)\n\n# Makes sure the termperature is given in K\ntemperature = temperature.to('K')\n\nprint('Temperature:', temperature)",
"_____no_output_____"
]
],
[
[
"We now generate the ensemble of packets. The array of packet energies and radii are also shown.",
"_____no_output_____"
]
],
[
[
"# We define our packet source\npacket_source = BlackBodySimpleSource(seed)\n\nradii, nus, mus, energies = packet_source.create_packets(\n temperature.value, \n n_packets, \n rng, \n r_phot)\n\n# Sets the energies in units of ergs\nenergies *= u.erg\n\n# Sets the frequencies in units of Hz\nnus *= u.Hz\n\nprint('Energies:', energies)\nprint('Radii:', radii)",
"_____no_output_____"
]
],
[
[
"We set the timespan of the simulation so that each packet contributes the appropriate luminosity to the spectrum.",
"_____no_output_____"
]
],
[
[
"# Time of simulation\nt_simulation = 1 * u.erg / luminosity\nprint('Time of simulation:', t_simulation)\n\n# Array of luminosity contribution by each packet\nlumin_per_packet = energies / t_simulation\nprint('Luminosity per packet:', lumin_per_packet)",
"_____no_output_____"
]
],
[
[
"We define important constants, and for comparison's sake, we code the Planck distribution function\n$$L_\\nu (\\nu)=\\frac{8\\pi R_\\mathrm{phot}^2 h\\nu^3}{c^2}\\frac{1}{\\exp\\left(\\frac{h\\nu}{k_BT_\\mathrm{phot}}\\right)-1}$$\nwhere $L_\\nu$ is the luminosity density with respect to frequency, $\\nu$ is frequency, $h$ is Planck's constant, $c$ is the speed of light, and $k_B$ is Boltzmann's constant:",
"_____no_output_____"
]
],
[
[
"h = const.h.cgs\nc2 = const.c.cgs**2\nkB = const.k_B.cgs\n\ndef planck_function(nu):\n return 8 * np.pi**2 * r_phot**2 * h * nu**3 / (c2 * (np.exp(h * nu / (kB * temperature)) - 1))",
"_____no_output_____"
]
],
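As an optional sanity check (an addition, assuming the cells above have been run so that `planck_function`, `temperature` and `luminosity` are defined), integrating the luminosity density over frequency should recover the photospheric luminosity $4\pi R_\mathrm{phot}^2\sigma_\mathrm{R}T_\mathrm{phot}^4$:

```python
# Frequency grid wide enough to contain essentially all of the blackbody emission
nu_grid = np.linspace(1e12, 1e16, 400_000) * u.Hz

# Numerical integral of L_nu over nu (values taken in CGS to keep the units simple)
l_nu = planck_function(nu_grid)
total = np.trapz(l_nu.cgs.value, nu_grid.cgs.value) * u.erg / u.s

print(total)       # should be very close to ...
print(luminosity)  # ... the luminosity set earlier
```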
[
[
"We plot the Planck distribution and a histogram of the generated packet distribution:",
"_____no_output_____"
]
],
[
[
"# We set important quantites for making our histogram\nbins = 200\nnus_planck = np.linspace(min(nus), max(nus), bins)\nbin_width = nus_planck[1] - nus_planck[0]\n\n# In the histogram plot below, the weights argument is used \n# to make sure our plotted spectrum has the correct y-axis scale\nplt.hist(nus.value,\n bins=bins,\n weights=lumin_per_packet/bin_width)\n\n# We plot the planck function for comparison\nplt.plot(nus_planck, planck_function(nus_planck))\n\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Luminosity density w.r.t. frequency (erg/s/Hz)')\nplt.show()",
"_____no_output_____"
]
],
[
[
"We finally plot the generated $\\mu$ density distribution, followed by the generated $\\theta=\\arccos (\\mu)$ density distribution, compared with the respective curves $\\rho = 2\\mu$ and $\\rho = \\sin(2\\theta)$:",
"_____no_output_____"
]
],
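For reference (an added note, not part of the original page), these comparison curves follow directly from the sampling rule: since $\mu=\sqrt{z}$ with $z$ uniform on $[0,1]$, we have $P(\mu\le m)=P(z\le m^2)=m^2$, so
$$\rho(\mu)=\frac{\mathrm{d}}{\mathrm{d}\mu}\mu^2=2\mu,\qquad \rho(\theta)=\rho(\mu)\left|\frac{\mathrm{d}\mu}{\mathrm{d}\theta}\right|=2\cos\theta\,\sin\theta=\sin(2\theta).$$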
[
[
"x = np.linspace(0, 1, 1000)\n\nplt.hist(mus, bins=bins, density=True)\nplt.plot(x, 2*x)\nplt.xlabel('Propagation direction')\nplt.ylabel('Probability density')\nplt.show()",
"_____no_output_____"
],
[
"thetas = np.linspace(0, np.pi/2, 1000)\n\nplt.hist(np.arccos(mus), bins=bins, density=True)\nplt.plot(thetas, np.sin(2*thetas))\nplt.xlabel('Angle with normal (rad)')\nplt.ylabel('Probability density')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Custom Packet Source\n\nTARDIS allows for the user to input a custom function that generates energy packets instead of the basic blackbody source described here. See [Running TARDIS with a Custom Packet Source](../../io/optional/custom_source.ipynb) for more information.",
"_____no_output_____"
]
]
] | [
"raw",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"raw"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e749b91ab50bfb42995aeac814e68825684b839d | 116,607 | ipynb | Jupyter Notebook | training_model_WCONV.ipynb | Rene-Michel99/Hand-Sign-Detector | 49f29a745bb1b50272e4b35608df4c7e8d358834 | [
"MIT"
] | 1 | 2021-11-28T23:35:20.000Z | 2021-11-28T23:35:20.000Z | training_model_WCONV.ipynb | Rene-Michel99/Hand-Sign-Detector | 49f29a745bb1b50272e4b35608df4c7e8d358834 | [
"MIT"
] | null | null | null | training_model_WCONV.ipynb | Rene-Michel99/Hand-Sign-Detector | 49f29a745bb1b50272e4b35608df4c7e8d358834 | [
"MIT"
] | null | null | null | 227.748047 | 51,464 | 0.890821 | [
[
[
"import numpy as np\nimport cv2 as cv\nimport os\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport time\n%matplotlib inline",
"_____no_output_____"
],
[
"def process_img(img):\n img = cv.bilateralFilter(img,9+7,75+7,75+7)\n img = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n \n dst = cv.Laplacian(img, cv.CV_16S, ksize=5)\n abs_dst = cv.convertScaleAbs(dst)\n \n kernel = np.ones(shape=(3,3))\n abs_dst = cv.dilate(abs_dst,kernel)\n abs_dst = cv.medianBlur(abs_dst,5)\n\n return abs_dst",
"_____no_output_____"
],
[
"pastas = sorted(os.listdir(\"dataset\"))\npastas.remove(\"None\")\n\ntemplate = [0 for i in range(len(pastas))]\ndic = {}\n\nfor i,item in enumerate(pastas):\n copy = template.copy()\n copy[i] = 1\n \n dic[item] = copy\nprint(dic)",
"{'A': [1, 0, 0, 0, 0, 0], 'B': [0, 1, 0, 0, 0, 0], 'C': [0, 0, 1, 0, 0, 0], 'D': [0, 0, 0, 1, 0, 0], 'E': [0, 0, 0, 0, 1, 0], 'F': [0, 0, 0, 0, 0, 1]}\n"
],
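As a side note (an added sketch, not part of the original notebook), the same one-hot templates can be produced with scikit-learn's `LabelBinarizer` instead of building the vectors by hand; the label list below simply stands in for the sorted folder names:

```python
from sklearn.preprocessing import LabelBinarizer

labels = ['A', 'B', 'C', 'D', 'E', 'F']  # illustrative stand-in for the sorted folders (minus "None")

binarizer = LabelBinarizer()
one_hot = binarizer.fit_transform(labels)  # one row per label, columns in alphabetical order
dic_alt = {label: row.tolist() for label, row in zip(labels, one_hot)}
print(dic_alt)  # same mapping as the hand-built dictionary above for these labels
```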
[
"dataset = []\nclasses = []\n\ninit = time.time()\n\nfor pasta in pastas:\n for file in os.listdir(\"dataset/\"+pasta):\n img = cv.imread(\"dataset/\"+pasta+\"/\"+file)\n \n laplacian = process_img(img).reshape(img.shape[0],img.shape[1],1)\n \n dataset.append(laplacian)\n classes.append(dic[pasta])\n\ndataset = np.array(dataset)\nclasses = np.array(classes)\n\nend = time.time()\n\nprint(dataset.shape,classes.shape)\nprint(\"Ended in\",(end-init)/60)",
"(4475, 250, 150, 1) (4475, 6)\nEnded in 3.502507742245992\n"
],
[
"plt.imshow(dataset[4200])",
"_____no_output_____"
],
[
"plt.imshow(dataset[3900])",
"_____no_output_____"
],
[
"SHAPE_CNN = dataset[0].shape\nOUTPUT_SHAPE = len(classes[0])\nLR = 0.00092\nprint(SHAPE_CNN,OUTPUT_SHAPE)",
"(250, 150, 1) 6\n"
],
[
"def hidden_layers(inputs):\n x = tf.keras.layers.Flatten()(inputs)\n x = tf.keras.layers.Dense(16,activation='relu')(x)\n x = tf.keras.layers.Dense(32,activation='relu')(x)\n x = tf.keras.layers.Dense(64,activation='relu')(x)\n x = tf.keras.layers.Dense(128,activation='relu')(x)\n \n return x",
"_____no_output_____"
],
[
"def classifier(inputs):\n classification = tf.keras.layers.Dense(OUTPUT_SHAPE,activation='sigmoid',name='classification')(inputs)\n \n return classification",
"_____no_output_____"
],
[
"def final_model(inputs):\n dense_layers = hidden_layers(inputs)\n classification_layer = classifier(dense_layers)\n \n model = tf.keras.Model(inputs=inputs, outputs=[classification_layer])\n return model",
"_____no_output_____"
],
[
"def define_and_compile_model(inputs):\n from tensorflow.keras.optimizers import SGD\n \n model = final_model(inputs)\n model.compile(\n optimizer=SGD(learning_rate=LR),\n loss={\"classification\":\"binary_crossentropy\"},\n metrics={\"classification\":\"accuracy\"}\n )\n return model",
"_____no_output_____"
],
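Since the labels are mutually exclusive one-hot vectors, a common alternative (shown here only as a hedged sketch reusing `hidden_layers`, `OUTPUT_SHAPE` and `LR` from the cells above — it is not the configuration used in this notebook) is a softmax head trained with categorical cross-entropy:

```python
def define_and_compile_softmax_model(inputs):
    from tensorflow.keras.optimizers import SGD

    # Same hidden stack, but a softmax output so the class scores sum to 1
    x = hidden_layers(inputs)
    outputs = tf.keras.layers.Dense(OUTPUT_SHAPE, activation='softmax',
                                    name='classification')(x)

    model = tf.keras.Model(inputs=inputs, outputs=[outputs])
    model.compile(optimizer=SGD(learning_rate=LR),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
```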
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(dataset, classes, test_size=0.3)\nprint(X_train.shape,X_test.shape)",
"(3132, 250, 150, 1) (1343, 250, 150, 1)\n"
],
[
"inputs = tf.keras.layers.Input(shape=(SHAPE_CNN[0],SHAPE_CNN[1],1))\nmodel = define_and_compile_model(inputs)\n\nmodel.summary()",
"Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_2 (InputLayer) [(None, 250, 150, 1)] 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 37500) 0 \n_________________________________________________________________\ndense_4 (Dense) (None, 16) 600016 \n_________________________________________________________________\ndense_5 (Dense) (None, 32) 544 \n_________________________________________________________________\ndense_6 (Dense) (None, 64) 2112 \n_________________________________________________________________\ndense_7 (Dense) (None, 128) 8320 \n_________________________________________________________________\nclassification (Dense) (None, 6) 774 \n=================================================================\nTotal params: 611,766\nTrainable params: 611,766\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"init = time.time()\n\nmodel.fit(X_train, y_train,\n batch_size=32, epochs=50, verbose=1)\n\nend = time.time()\nprint(\"Ended in\",(end-init)/60)",
"Epoch 1/50\n98/98 [==============================] - 1s 7ms/step - loss: 1.9576 - accuracy: 0.1675\nEpoch 2/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.5952 - accuracy: 0.2509\nEpoch 3/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.5282 - accuracy: 0.2055\nEpoch 4/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.5025 - accuracy: 0.3247\nEpoch 5/50\n98/98 [==============================] - 1s 9ms/step - loss: 0.4933 - accuracy: 0.3451\nEpoch 6/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.4816 - accuracy: 0.3235\nEpoch 7/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.4752 - accuracy: 0.3253\nEpoch 8/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4762 - accuracy: 0.3154\nEpoch 9/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.4785 - accuracy: 0.3303\nEpoch 10/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4665 - accuracy: 0.3171\nEpoch 11/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4719 - accuracy: 0.3225\nEpoch 12/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.4603 - accuracy: 0.3184\nEpoch 13/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.4629 - accuracy: 0.3146\nEpoch 14/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4639 - accuracy: 0.3189\nEpoch 15/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.4601 - accuracy: 0.3148\nEpoch 16/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4546 - accuracy: 0.2950\nEpoch 17/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.4446 - accuracy: 0.3245\nEpoch 18/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4185 - accuracy: 0.3746\nEpoch 19/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.4042 - accuracy: 0.3532\nEpoch 20/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3965 - accuracy: 0.3254\nEpoch 21/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3794 - accuracy: 0.3659\nEpoch 22/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.3668 - accuracy: 0.3870\nEpoch 23/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3595 - accuracy: 0.4097\nEpoch 24/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.3592 - accuracy: 0.4122\nEpoch 25/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.3523 - accuracy: 0.4207\nEpoch 26/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3323 - accuracy: 0.4369\nEpoch 27/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3275 - accuracy: 0.4427\nEpoch 28/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3239 - accuracy: 0.4534\nEpoch 29/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.3167 - accuracy: 0.4447\nEpoch 30/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3050 - accuracy: 0.4795\nEpoch 31/50\n98/98 [==============================] - 1s 6ms/step - loss: 0.3130 - accuracy: 0.4773\nEpoch 32/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3066 - accuracy: 0.4694\nEpoch 33/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3238 - accuracy: 0.4456\nEpoch 34/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3193 - accuracy: 0.4864\nEpoch 35/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3004 - accuracy: 0.4844\nEpoch 36/50\n98/98 
[==============================] - 1s 7ms/step - loss: 0.3029 - accuracy: 0.4702\nEpoch 37/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.3000 - accuracy: 0.4905\nEpoch 38/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.2885 - accuracy: 0.4941\nEpoch 39/50\n98/98 [==============================] - 1s 9ms/step - loss: 0.3216 - accuracy: 0.4690\nEpoch 40/50\n98/98 [==============================] - 1s 9ms/step - loss: 0.2916 - accuracy: 0.4809\nEpoch 41/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.2879 - accuracy: 0.4940\nEpoch 42/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.3229 - accuracy: 0.4917\nEpoch 43/50\n98/98 [==============================] - 1s 9ms/step - loss: 0.2925 - accuracy: 0.4903\nEpoch 44/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.2892 - accuracy: 0.4866\nEpoch 45/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.2834 - accuracy: 0.5087\nEpoch 46/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.2784 - accuracy: 0.5072\nEpoch 47/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.2818 - accuracy: 0.5032\nEpoch 48/50\n98/98 [==============================] - 1s 8ms/step - loss: 0.2733 - accuracy: 0.5122\nEpoch 49/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.2740 - accuracy: 0.5182\nEpoch 50/50\n98/98 [==============================] - 1s 7ms/step - loss: 0.2771 - accuracy: 0.5028\nEnded in 0.5994028369585673\n"
],
[
"from sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score\n\npredictions = model.predict(X_test)\npredictions = [np.argmax(item) for item in predictions]\nfor i in range(len(predictions)):\n temp = [0,0,0,0,0,0]\n temp[predictions[i]] += 1\n predictions[i] = temp\n\nprint(classification_report(y_test,predictions))\nprint(accuracy_score(y_test,predictions))",
" precision recall f1-score support\n\n 0 0.27 0.99 0.43 243\n 1 0.99 0.80 0.89 289\n 2 0.00 0.00 0.00 191\n 3 0.00 0.00 0.00 183\n 4 0.76 0.74 0.75 243\n 5 0.00 0.00 0.00 194\n\n micro avg 0.48 0.48 0.48 1343\n macro avg 0.34 0.42 0.34 1343\nweighted avg 0.40 0.48 0.40 1343\n samples avg 0.48 0.48 0.48 1343\n\n0.4847356664184661\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |