path: string, lengths 7–265
concatenated_notebook: string, lengths 46–17M
solar_system.ipynb
###Markdown Create a simple solar system model ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np from collections import namedtuple ###Output _____no_output_____ ###Markdown Define a planet class ###Code class planet(): "A planet in our solar system" def __init__(self,semimajor,eccentricity): self.x = np.array(2) # x and y position self.v = np.array(2) # x and y velocity self.a_g = np.array(2) # x and y acceleration self.t = 0.0 # current time self.dt = 0.0 # current timestep self.a = semimajor # semimajor axis of the orbit self.e = eccentricity # eccentricity of the orbit self.istep = 0 # current integer timestep self.name = "" ###Output _____no_output_____ ###Markdown Define a dictionary with some constants ###Code solar_system = { "M_sun":1.0, "G": 39.4784176043574320} ###Output _____no_output_____ ###Markdown Define some functions for setting circular velocity, and acceleration ###Code def solar_circular_velocity(p,solar_system): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # return the circular velocity return (G*M/r)**0.5 def solar_gravitational_acceleration(p, solar_system): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # acceleration in AU/yr/yr a_grav = -1.0*G*M/r**2 # find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.ataon(p.x[1],p.x[0]) # set the x and y components of the velocity p.a_g[0] = a_grav * np.cos(theta) p.a_g[1] = a_grav * np.sin(theta) ###Output _____no_output_____ ###Markdown Compute the timestep ###Code def calc_dt(p): # integration tolerance ETA_TIME_STEP = 0.0004 # compute timestep eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Define the initial conditions ###Code def SetPlanet(p, i): AU_in_km = 1.495979e+8 # an astronomical unit in kilometers # circular velocity v_c = 0.0 # circular velocity in AU/yr v_e = 0.0 # velocity at perihelion in AU/yr # planet-by-planet intial conditions # Mercury if(i==0): # semi-major axis in AU p.a = 57909227.0 / AU_in_km # eccentricity p.e = 0.20563593 # Venus elif(i==1): # semi-major axis in AU p.a = 108209475.0 / AU_in_km # eccentricity p.e = 0.00677672 # Earth elif(i==2): # semi-major axis in AU p.a = 1.0 # eccentricity p.e = 0.01671123 # set remaining properties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 # get equivalent circular velocity v_c = solar_circular_velocity(p) # velocity at perihelion v_e = v_c*(1+p.e)**0.5 # set velocity p.v[0] = 0.0 # no x velocity at perihelion p.v[1] = v_e # y velocity at perihelion (counter clockwise) # calculate gravitational acceleration from Sun solar_gravitational_acceleration(p) # set timestamp p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown Define leapfrog integrator ###Code def x_first_step(x_i, v_i, a_i, dt): # x_1/2 = x_0 + 1/2 v_0 Delta_t + 1/4 a_0 Delta_t^2 return x_i + 0.5*v_i*dt + 0.25*a_i*dt**2 def v_full_step(x_i, v_i, a_ipoh, dt): # v_i+1 = v_i + a_i+1/2 Delta_t return v_i + a_ipoh*dt def x_full_step(x_ipoh, v_ip1, a_ipoh, dt): # x_3/2 = x_1/2 + v_i+1 Delta_t return x_ipoh + v_ip1*dt ###Output _____no_output_____ ###Markdown Define a function to save the data to file ###Code def SaveSolarSystem(p, n_planets, t , dt, istep, ndim): # loop over the number of planets for i in range(n_planents): # define a filename fname = "planet.%s.txt" % p[i].name 
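# note: one output file is written per planet (e.g. "planet.Mercury.txt");
# p[i].name must be assigned before this runs, otherwise the filename degenerates to "planet..txt"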
if(istep==0): # create the file on first timestep fp = open(fname,"w") else: # append the file on subsequent timesteps fp = open(fname,"a") # compute the drifted properties of the planet v_drift = np.zeros(ndim) for k in range(ndim): v_drift[k] = p[i].v[k] + 0.5*p[i].a_g[k]*p[i].dt # write the data to file s = "%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\n" % \ (istep,t,dt,p[i].istep,p[i].t,p[i].dt,p[i].x[0],p[i].x[1],v_drift[0],v_drift[1],p[i].a_g[0],p[i].a_g[1]) fp.write(s) # close the file fp.close() ###Output _____no_output_____ ###Markdown Write function to evolve the solar system ###Code def EvolveSolarSystem(p, n_planets, t_max): # set number of spatial dimensions ndim = 2 # define the first timestep dt = 0.5/365.25 # define the start time t = 0.0 # define the start timestep istep = 0 # save the initial conditions SaveSolarSystem(p,n_planets, t, dt, istep, ndim) # begin a loop over the global timestep while(t<t_max): # check to see if the next step exceeds the max time, if so take smaller step if(t+dt>t_max): dt = t_max - t # limit to t_max # evolve each planet for i in range(n_planets): ###Output _____no_output_____ ###Markdown Create a simple solar system model ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown Define a planet class ###Code class planet(): "A plante in our solar system" def __init__(self, semimajor, eccentricity): self.x = np.zeros(2) # x and y position self.v = np.zeros(2) # x and y velocity self.a_g = np.zeros(2) # x and y acceleration self.t = 0.0 # current time self.dt = 0.0 # current timestep self.a = semimajor # semimajor axis of the orbit self.e = eccentricity # eccentricity of the object self.istep = 0 # current interger timestep self.name = "" # name for the planet ###Output _____no_output_____ ###Markdown Define a dictionary with some constants ###Code solar_system = { "M_sun": 1.0, "G": 39.4784176043574320} ###Output _____no_output_____ ###Markdown Define some function for setting circular velocity, and acceleration ###Code def SolarCilcularVelocity(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # return the circular velocity return (G*M/r)**0.5 ###Output _____no_output_____ ###Markdown Write a function to compute the gravitational acceleration on each planet form the Sun ###Code def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # acceleration in AU/yr/yr a_grav = -1.0*G*M/r**2 # find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) # set the x and y components of the velocity # p.a_g[0] = a_grav * np.cos(theta) # p.a_g[1] = a_grav * np.sin(theta) return a_grav*np.cos(theta), a_grav*np.sin(theta) ###Output _____no_output_____ ###Markdown Compute the timestep ###Code def calc_dt(p): # integration tolerance ETA_TIME_STEP = 0.0004 #compute timestep eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v), 1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Define the initial conditions ###Code def SetPlanet(p, i): AU_in_hm = 1.495979e+8 # an AU in km # circular velocity v_c = 0.0 # circular velocity in AU/yr v_e = 0.0 # velocity at perihelion in AU/yr # planet-by-planet initial conditions # Mercury if(i==0): # semi-major axis in AU p.a = 57909227.0/AU_in_km # 
eccentricity p.e = 0.20563593 # name p.name = "Mercury" # Venus elif(i==1): # semi-major axis in AU p.a = 108209475.0/AU_in_km # eccentricity p.e = 0.00677672 # name p.name = "Venus" # Earth elif(i==2): # semi-major axis in AU p.a = 1.0 # eccentricity p.e = 0.01671123 # name p.name = "Earth" # set remaining properties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 # get equiv circular velocity v_c = SolarCircularVelocity(p) # velocity at perihelion v_e = v_c*(1 + p.e)**0.5 # set velocity p.v[0] = 0.0 # no x velocity at perihelion p.v[1] = v_e # y velocity at perihelion (counter clockwise) # calculate gravitational acceleration from Sun p.a_g = SolarGravitationalAccelertation(p) # set timestep p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown Write leapfrog integrator ###Code def x_first_step(x_i, v_i, a_i, dt): # x_1/2 = x_0 _ 1/2 v_0 Delta_t + 1/4 a_0 Delta t^2 return x_i + 0.5*v_i*dt + 0.25*a_i*dt**2 def v_full_step(v_i, a_ipoh, dt): # v_i+1 = v_i + a_i+1/2 Delta t return v_i + a_ipoh*dt; def x_full_step(x_ipoh, v_ip1, a_ipoh, dt): # x_3/2 = x_1/2 + v_i+1 Delta t return x_ipoh + v_ipl*dt ###Output _____no_output_____ ###Markdown Write a function to save the data to file ###Code def SaveSolarSystem(p, n_planets, t, dt, istep, ndim): # loop over the number of planets for i in range(n_planets): # define a filename fname = "planet.%s.txt" % p[i].name if(istep==0): # create the file on the first timestep fp = open(fname, "w") else: # append the file on the subsequent tiemstep fp = open(fname,"a") # compute the drifted properties of the planet v_drift = np.zeros(ndim) ###Output _____no_output_____ ###Markdown A Simple Solar System Model ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Define a planet class ###Code class planet(): "A planet in our solar system" def __init__(self,semimajor,eccentricity): self.x = np.zeros(2) #x and y position self.v = np.zeros(2) #x and y velocity self.a_g = np.zeros(2) #x and y acceleration self.t = 0.0 #current time self.dt = 0.0 #current timestep self.a = semimajor #semimajor axis of the orbit self.e = eccentricity #ecc of the orbit self.istep = 0. 
#current integer timestep self.name = "" #name for planet ###Output _____no_output_____ ###Markdown dictionary with constants ###Code solar_system = {"M-sun": 1.0, "G":39.4784176043574320} ###Output _____no_output_____ ###Markdown define some functions for setting circular velocity and acceleration ###Code def SolarCircularVelocity(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**5 #return the circular velocity return (G*M/r)**0.5 def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 #acceleration in AU/yr/yr a_grav = -1.0*G*M/r**2 #find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) #set x and y components of the velocity #p.a_g[0] = a_grav * np.cos(theta) #p.a_g[1] = a_grav * np.sin(theta) return a_grav*np.cos(theta), a_grav*np.sin(theta) def calc_dt(p): #integration tolerance ETA_TIME_STEP = 0.0004 #compute timestep eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a= (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown create a simple solar syste model ###Code import numpy as np import matplotlib.pyplot as plt %matplotlib inline class planet(): "A planet in our solar system" def _init_(self,semimajpr,eccentricity): self.x = np.zeor(2) self.v = no.zeros(2) self.a_g = np.zeors(2) self.t = 0.0 self.dt = 0.0 self.a = semimajpr self.e = eccentricity self.isetp = 0 self.name = "" ###Output _____no_output_____ ###Markdown Define a dictinary with some constants ###Code solar_system = { "M_sun":1.0, "G":39.4784176043574320} def SolarCircularVelocity(P): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 return(G*M/r)**0.5 def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 #acceleration in AU/yr/yr a_grav = -1.0*G*m/r**2 #fine the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) return a_grav*np.cos(theta), a_grav*np.sin(theta) def calc_dt(p): ETA_TIMESTEP = 0.0004 eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**.5) return dt def SetPlanet(p, i): AU_in_km = 1.495979e+8 #an AU in km #circular velocity v_c = 0.0 v_e = 0.0 #planet by planet initial conditions #Mercury if(i==0): #semi-major axis in AU p.a = 57909227.0/AU_in_km p.e = 0.20563593 p.name = "Mercury" #Venus elif(i==1): #semi-major axis in AU p.a = 108209475.0/AU_in_km p.e = 0.00677672 p.name = "Venus" #Earth elif(i==2): #semi-major axis in AU p.a = 1.0 p.e = 0.01671123 p.name = "Earth" #set remaining porperties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 #get equiv circular velocity v_c = SolarCircularVelocity(p) #velocity at perihelion v_e = v_c*(1 + p.e)**0.5 #set velocity p.v[0] = 0.0 p.v[1] = v_e p.a_g = SolarGravitationalAcceleration(p) #set timestep p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown define a planet class ###Code class planet(): "A planet in our solar system" def __init__(self, semimajor, eccentricity): self.x = np.zeros(2) #x and y position self.v = np.zeros(2) #x and y velocity self.a_g = np.zeros(2) #x and y accel self.t = 0.0 ##current time self.dt = 0.0 #crrent timestep self.a = semimajor self.e = eccentricity self.istep = 0 self.name = "" 
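#units convention for the whole notebook: lengths in AU, times in yr, masses in M_sun,
#so G = 4*pi^2 = 39.4784... in AU^3 M_sun^-1 yr^-2 and accelerations come out in AU/yr^2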
###Output _____no_output_____ ###Markdown Define a dicionary with some constants ###Code solar_system = { "M_sun": 1.0, "G":39.4784176043574320} ###Output _____no_output_____ ###Markdown Define some functions for setting circular velocity and acceleration ###Code def SolarCircularVelocity(p): G = solar_system["G"] M = solar_system["M_sun"] r = (p.x[0]**2 + p.x[1]**2)**0.5 #return the circular velocity return (G*M/r)**0.5 ###Output _____no_output_____ ###Markdown write a function to compute the grav. accel. on each planet from the Sun ###Code def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = (p.x[0]**2 + p.x[1]**2)**0.5 #accel in AU/yr/yr a_grav = -1.0*G*M/r**2 #find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1], p.x[0]) #set the x and y components of the velocity #p.a_g[0] = a_grav *np.cos(theta) return a_grav*np.cos(theta), a_grav*np.sin(theta) ###Output _____no_output_____ ###Markdown Compute timestep ###Code def calc_dt(p): #integration tolerance ETA_TIME_STEP = 0.0004 #compute timestep eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v), 1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Define the initial conditions ###Code def SetPlanet(p, i): AU_in_km = 1.495979e+8 #cirular velocity v_c = 0.0 v_e = 0.0 #planet by planet initial conditions #Mercury if(i==0): #semimajor axis in AU p.a = 57909227.0/AU_in_km #eccentricity p.e = 0.20563593 #name p.name = "Mercury" #Venus elif(i==1): #semimajor axis in AU p.a = 108209475.0/AU_in_km #eccentricity p.e = 0.00677672 #name p.name = "Venus" #Earth elif(i==2): #semimajor axis in AU p.a = 1.0 #eccentricity p.e = 0.01671123 #name p.name = "Earth" #set remaining properties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 #get equiv. 
circular velocity v_c = SolarCircularVelocity(p) #velocity at perihelion v_e = v_c*(1 + p.e)**0.5 #et velocity p.v[0]= 0.0 #no x velocity at perihelion p.v[1]= v_e #y velocity at perihelion (counter clockwise) #calc gravitational accel from sun p.a_g = SolarGravitationalAcceleration(p) #set timestep p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown write leapfrog integrator ###Code def x_first_step(x_i, v_i, a_i, dt): #x_i/2 = x_0 + 1/2 v_0 Delta_t + 1/4 a_0 Delta t^2 return x_i + 0.5*v_i*dt + 0.25*a_i*dt**2 def v_full_step(v_i, a_ipoh, dt): #v_i+1 = v_i + a_i+1/2 Delta t return v_i + a_ipoh*dt def x_full_step(x_ipoh, v_ip1, a_ipoh, dt): #x_3/2 = x_1/2 + v_i+1 Delta t return x_ipoh + v_ip1*dt ###Output _____no_output_____ ###Markdown write a function to save the data to file ###Code def SaveSolarSystem(p,n_planets, t, dt, istep, ndim): #loop over the number of planets for i in range(n_planets): #define filename fname = "planet.%s.txt" % p[i].name if(istep==0): #create the file on the first timestep fp = open(fname, "w") else: #append the file on subsequent timesteps fp = open(fname, "a") #compute the drifted properties of the planet v_drift = np.zeros(ndim) for k in range(ndim): v_drift[k] =p[i].v[k] + 0.5*p[i].a_g[k]*p[i].dt #write the data to file s = "%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\n" % \ (istep, t, dt, p[i].istep, p[i].t, p[i].dt, p[i].x[0], p[i].x[1], v_drift[0], v_drift[1], \ p[i].a_g[0], p[i].a_g[1]) fp.write(s) #close the file fp.close() ###Output _____no_output_____ ###Markdown write function to evolve the solar system ###Code def EvolveSolarSystem(p, n_planets, t_max): #nmber of spatial dimensions ndim = 2 #define the first timestep dt = 0.5/365.25 #define the starting time t = 0.0 #define the starting timestep istep = 0 #save the initial conditions SaveSolarSystem(p, n_planets, t ,dt, istep, ndim) #begin a loop over the global timescale while(t<t_max): #check to see if the next step exceeds the macimum time #if so take a smaller step if(t+dt>t_max): dt = t_max - t #limit the step to align with t_max #evolve each planet for i in range(n_planets): while(p[i].t<t+dt): #special case for istep == 0 if(p[i].istep == 0): #take the first step according o a verlet scheme for k in range(ndim): p[i].x[k] = x_first_step(p[i].x[k], p[i].v[k], p[i].a_g[k], p[i].dt) #update the acceleration p[i].a_g = SolarGravitationalAcceleration(p[i]) #update the time by 1/2dt p[i].t += 0.5*p[i].dt #update the timestep p[i].dt = calc_dt(p[i]) #continue with a normal step #limit to align with the global timestep if (p[i].t + p[i].dt > t + dt): p[i].dt = t+dt-p[i].t #evolve the velocity for k in range(ndim): p[i].v[k] = v_full_step(p[i].v[k], p[i].a_g[k], p[i].dt) #evolve the position for k in range(ndim): p[i].x[k] = x_full_step(p[i].x[k], p[i].v[k], p[i].a_g[k], p[i].dt) #update the acceleration p[i].a_g = SolarGravitationalAcceleration(p[i]) #update by dt p[i].t += p[i].dt #compute the new timestep p[i].dt = calc_dt(p[i]) #update the planets timestep p[i].istep+=1 #now update the global system time t+=dt #update the global step number istep += 1 #output the current state SaveSolarSystem(p, n_planets, t, dt, istep, ndim) #print the final steps and time print("Time t =", t) print("Maximum t = ",t_max) print("Maximum number of steps =",istep) #end of evolution ###Output _____no_output_____ ###Markdown create a routine to read in the data ###Code def read_twelve_arrays(fname): fp = open(fname,"r") fl = fp.readlines() n = len(fl) a = np.zeros(n) b 
= np.zeros(n) c = np.zeros(n) d = np.zeros(n) f = np.zeros(n) g = np.zeros(n) h = np.zeros(n) j = np.zeros(n) k = np.zeros(n) l = np.zeros(n) m = np.zeros(n) p = np.zeros(n) for i in range(n): a[i] = float(fl[i].split()[0]) b[i] = float(fl[i].split()[1]) c[i] = float(fl[i].split()[2]) d[i] = float(fl[i].split()[3]) f[i] = float(fl[i].split()[4]) g[i] = float(fl[i].split()[5]) h[i] = float(fl[i].split()[6]) j[i] = float(fl[i].split()[7]) k[i] = float(fl[i].split()[8]) l[i] = float(fl[i].split()[9]) m[i] = float(fl[i].split()[10]) p[i] = float(fl[i].split()[11]) return a, b, c, d, f, g, h, j, k, l, m, p ###Output _____no_output_____ ###Markdown perform the integration of the S.S. ###Code #set the number of planets n_planets = 3 #set the max time of the simulation t_max = 2.0 #create empy list of planets p = [] #set the planets for i in range(n_planets): #create an empty planet ptmp = planet(0.0, 0.0) #set the planet properties SetPlanet(ptmp,i) #remember the planet p.append(ptmp) #evolve the solar system EvolveSolarSystem( p, n_planets, t_max) ###Output _____no_output_____ ###Markdown Create a simple solar system model ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np from collections import namedtuple ###Output _____no_output_____ ###Markdown Define a planet class ###Code class planet(): "A planet in our solar system" def __init__(self,semimajor,eccentricity): self.x = np.zeros(2) # x and y position self.v = np.zeros(2) # x and y velocity self.a_g = np.zeros(2) # x and y acceleration self.t = 0.0 # current time self.dt = 0.0 # current timestep self.a = semimajor # semimajor axis of the orbit self.e = eccentricity # eccentricity of the orbit self.istep = 0 # current integer timestep self.name = "" ###Output _____no_output_____ ###Markdown Define a dictionary with some constants ###Code solar_system = { "M_sun":1.0, "G": 39.4784176043574320} ###Output _____no_output_____ ###Markdown Define some functions for setting circular velocity, and acceleration ###Code def solar_circular_velocity(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # return the circular velocity return (G*M/r)**0.5 def solar_gravitational_acceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # acceleration in AU/yr/yr a_grav = -1.0*G*M/r**2 # find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) # return the x and y components of the velocity return a_grav * np.cos(theta), a_grav * np.sin(theta) ###Output _____no_output_____ ###Markdown Compute the timestep ###Code def calc_dt(p): # integration tolerance ETA_TIME_STEP = 0.0004 # compute timestep eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Define the initial conditions ###Code def SetPlanet(p, i): AU_in_km = 1.495979e+8 # an astronomical unit in kilometers # circular velocity v_c = 0.0 # circular velocity in AU/yr v_e = 0.0 # velocity at perihelion in AU/yr # planet-by-planet intial conditions # Mercury if(i==0): # semi-major axis in AU p.a = 57909227.0 / AU_in_km # eccentricity p.e = 0.20563593 # name p.name = "Mercury" # Venus elif(i==1): # semi-major axis in AU p.a = 108209475.0 / AU_in_km # eccentricity p.e = 0.00677672 # name p.name = "Venus" # Earth elif(i==2): # semi-major axis in AU p.a = 1.0 # eccentricity p.e = 
0.01671123 # name p.name = "Earth" # set remaining properties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 # get equivalent circular velocity v_c = solar_circular_velocity(p) # velocity at perihelion v_e = v_c*(1+p.e)**0.5 # set velocity p.v[0] = 0.0 # no x velocity at perihelion p.v[1] = v_e # y velocity at perihelion (counter clockwise) # calculate gravitational acceleration from Sun solar_gravitational_acceleration(p) # set timestamp p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown Define leapfrog integrator ###Code def x_first_step(x_i, v_i, a_i, dt): # x_1/2 = x_0 + 1/2 v_0 Delta_t + 1/4 a_0 Delta_t^2 return x_i + 0.5*v_i*dt + 0.25*a_i*dt**2 def v_full_step(v_i, a_ipoh, dt): # v_i+1 = v_i + a_i+1/2 Delta_t return v_i + a_ipoh*dt def x_full_step(x_ipoh, v_ip1, a_ipoh, dt): # x_3/2 = x_1/2 + v_i+1 Delta_t return x_ipoh + v_ip1*dt ###Output _____no_output_____ ###Markdown Define a function to save the data to file ###Code def SaveSolarSystem(p, n_planets, t , dt, istep, ndim): # loop over the number of planets for i in range(n_planets): # define a filename fname = "planet.%s.txt" % p[i].name if(istep==0): # create the file on first timestep fp = open(fname,"w") else: # append the file on subsequent timesteps fp = open(fname,"a") # compute the drifted properties of the planet v_drift = np.zeros(ndim) for k in range(ndim): v_drift[k] = p[i].v[k] + 0.5*p[i].a_g[k]*p[i].dt # write the data to file s = "%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\n" % \ (istep,t,dt,p[i].istep,p[i].t,p[i].dt,p[i].x[0],p[i].x[1],v_drift[0],v_drift[1],p[i].a_g[0],p[i].a_g[1]) fp.write(s) # close the file fp.close() ###Output _____no_output_____ ###Markdown Write function to evolve the solar system ###Code def EvolveSolarSystem(p, n_planets, t_max): # set number of spatial dimensions ndim = 2 # define the first timestep dt = 0.5/365.25 # define the start time t = 0.0 # define the start timestep istep = 0 # save the initial conditions SaveSolarSystem(p,n_planets, t, dt, istep, ndim) # begin a loop over the global timestep while(t<t_max): # check to see if the next step exceeds the max time, if so take smaller step if(t+dt>t_max): dt = t_max - t # limit to t_max # evolve each planet for i in range(n_planets): while(p[i].t<t+dt): # special case for istep==0 if(p[i].istep==0): # take the first step according to a verlet scheme for k in range(ndim): p[i].x[k] = x_first_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt) # update the acceleration p[i].a_g = solar_gravitational_acceleration(p[i]) # update the time by 1/2 dt p[i].t += 0.5*p[i].dt # update the timestep p[i].dt = calc_dt(p[i]) # continue with a normal step # limit to align with the global timestep if(p[i].t + p[i].dt > t+dt): p[i].dt = t+dt-p[i].t # evolve the velocity for k in range(ndim): p[i].v[k] = v_full_step(p[i].v[k],p[i].a_g[k],p[i].dt) # evolve the position for k in range(ndim): p[i].x[k] = x_full_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt) # update the acceleration p[i].a_g = solar_gravitational_acceleration(p[i]) # update by dt p[i].t += p[i].dt # compute the new timestep p[i].dt = calc_dt(p[i]) # update the planet's timestep p[i].istep+=1 # now update the global system time t+=dt # output the current state SaveSolarSystem(p,n_planets,t,dt,istep,ndim) # update the global step number istep += 1 # print the final steps and time print("Time t = ",t) print("Maximum t = ",t_max) print("Maximum number of steps = ",istep) # end of evolution ###Output _____no_output_____ ###Markdown Create a routine to 
read in the data ###Code def read_twelve_arrays(fname): fp = open(fname,"r") f1 = fp.readlines() n = len(f1) a = np.zeros(n) b = np.zeros(n) c = np.zeros(n) d = np.zeros(n) f = np.zeros(n) g = np.zeros(n) h = np.zeros(n) j = np.zeros(n) k = np.zeros(n) l = np.zeros(n) m = np.zeros(n) p = np.zeros(n) for i in range(n): a[i] = float(f1[i].split()[0]) b[i] = float(f1[i].split()[0]) c[i] = float(f1[i].split()[0]) d[i] = float(f1[i].split()[0]) f[i] = float(f1[i].split()[0]) g[i] = float(f1[i].split()[0]) h[i] = float(f1[i].split()[0]) j[i] = float(f1[i].split()[0]) k[i] = float(f1[i].split()[0]) l[i] = float(f1[i].split()[0]) m[i] = float(f1[i].split()[0]) p[i] = float(f1[i].split()[0]) return a,b,c,d,f,g,h,j,k,l,m,p ###Output _____no_output_____ ###Markdown Perform the integration of the solar system ###Code # set the number of planets n_planets = 3 # set the maximum time of the simulation t_max = 2.0 # create empty list of planets plist = [] # set the planets for i in range(n_planets): # create an empty planet ptmp = planet(0.0,0.0) # set the planet properties SetPlanet(ptmp,i) # remember the planet plist.append(ptmp) # evolve the solar system EvolveSolarSystem(plist,n_planets,t_max) ###Output <ipython-input-153-7df1e0b0a1ac>:10: RuntimeWarning: divide by zero encountered in double_scalars dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5) ###Markdown Read the data back in for every planet ###Code fname = "planet.Mercury.txt" istepMg,tMg,dtMg,istepM,tM,dtM,xM,yM,vxM,vyM,axM,ayM = read_twelve_arrays(fname) fname = "planet.Earth.txt" istepEg,tEg,dtEg,istepE,tE,dtE,xE,yE,vxE,vyE,axE,ayE = read_twelve_arrays(fname) fname = "planet.Venus.txt" istepVg,tVg,dtVg,istepV,tV,dtV,xV,yV,vxV,vyV,axV,ayV = read_twelve_arrays(fname) ###Output _____no_output_____ ###Markdown Plot the data ###Code fig = plt.figure(figsize=(7,7)) xSun = [0.0] ySun = [0.0] plt.plot(xSun,ySun,'o',color="0.5",label="Sun") plt.plot(xM,yM,color="red") plt.plot(xM[-1],yM[-1],'o',color="red",label="Mercury") plt.plot(xV,yV,color="green") plt.plot(xV[-1],yV[-1],'o',color="green",label="Venus") plt.plot(xE,yE,color="blue") plt.plot(xE[-1],yE[-1],'o',color="blue",label="Earth") plt.xlim([-1.25,1.25]) plt.ylim([-1.25,1.25]) plt.xlabel('x [AU]') plt.ylabel('y [AU]') plt.axes().set_aspect('equal') plt.legend(frameon=False,loc=2) ###Output <ipython-input-165-5804f8699059>:21: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. plt.axes().set_aspect('equal') ###Markdown Evolución del Sistema Solar *(si no tienes Python y/o Jupyter instalado localmente, considera usar Google Colab para ejecutar este cuadernillo; una cuenta Google Drive puede ser necesaria)* Preliminares: Instalación de libreriasProbablemente, necesitaras instalar el paquete rebound. Si es así, descomenta la siguiente celda: ###Code # %pip install rebound ###Output _____no_output_____ ###Markdown 1. Cargar librerías relevantes ###Code import rebound import numpy as np ###Output _____no_output_____ ###Markdown 2. Inicializar simulación (creación del objeto sim) ###Code sim = rebound.Simulation() sim.G = 1.4880826e-34 sim.integrator = "whfast" ###Output _____no_output_____ ###Markdown 3. 
Condiciones iniciales para el Sistema Solar (el Sol más nueve planetas) ###Code # r0 es una lista de 10 vectores de posición (x,y,z), i.e , una matriz 10x3 r0=np.array([[3.256101656448802E-03 , -1.951205394420489E-04 , -1.478264728548705E-04], [-1.927589645545195E-01 , 2.588788361485397E-01 , 3.900432597062033E-02 ], [-5.976537074581466E-01 , 3.918678996109574E-01 , 3.990356741282203E-02 ], [-7.986189029000561E-01 , -6.086873314992410E-01 , -1.250824315650566E-04], [7.897942807177173E-01 , 1.266671734964037E+00 , 7.092292179885432E-03 ], [-4.314503046344270E+00 , 3.168094294126697E+00 , 8.331048545353310E-02 ], [-4.882304833383455E+00 , -8.689263067189865E+00 , 3.453930436208210E-01 ], [1.917757033372740E+01 , 5.671738750949031E+00 , -2.273858614425555E-01], [2.767031517959636E+01 , -1.150331645280942E+01 , -4.008018419157927E-01], [7.765250227278298E+00 , -3.190996242617413E+01 , 1.168394015703735E+00 ]]) # v0 es una lista de 10 vectores de velocidad (vx,vy,vz) v0=np.array([[3.039963463108432E-06 , 6.030576499910942E-06 , -7.992931269075703E-08], [-2.811550184725887E-02, -1.586532995282261E-02, 1.282829413699522E-03 ], [-1.113090630745269E-02, -1.703310700277280E-02, 4.089082927733997E-04 ], [1.012305635253317E-02 , -1.376389620972473E-02, 3.482505080431706E-07 ], [-1.135279609707971E-02, 8.579013475676980E-03 , 4.582774369441005E-04 ], [-4.555986691913995E-03, -5.727124269621595E-03, 1.257262404884127E-04 ], [4.559352462922572E-03 , -2.748632232963112E-03, -1.337915989241807E-04], [-1.144087185031310E-03, 3.588282323722787E-03 , 2.829006644043203E-05 ], [1.183702780101068E-03 , 2.917115980784960E-03 , -8.714411604869349E-05], [3.112825364672655E-03 , 1.004673400082409E-04 , -9.111652976208292E-04]]) # m is una lista de 10 masas m = np.array([ 1.988544e30, 3.302e23, 48.685e23, 6.0477246e24, 6.4185e23, 1898.13e24, 5.68319e26, 86.8103e24, 102.41e24, 1.4639248e+22]) ###Output _____no_output_____ ###Markdown 4. Inicializar la simulación computacional, agregando cada cuerpo a objeto sim ###Code # Agregamos un planeta a la vez for kk in range(len(m)): sim.add(m=m[kk],x=r0[kk,0],y=r0[kk,1],z=r0[kk,2],vx=v0[kk,0],vy=v0[kk,1],vz=v0[kk,2]) #...y nos aseguramos que el centro de masa está en reposo en el origen sim.move_to_com() ###Output _____no_output_____ ###Markdown 5. Integración numérica en el tiempo ###Code tmax = 50*365.24 Nout = 1500 aearth,amars = np.zeros(Nout),np.zeros(Nout) x,y,z = np.zeros([Nout,len(m)]), np.zeros([Nout,len(m)]), np.zeros([Nout,len(m)]) vx,vy,vz = np.zeros([Nout,len(m)]), np.zeros([Nout,len(m)]), np.zeros([Nout,len(m)]) times = np.linspace(0.,tmax,Nout) for i,time in enumerate(times): sim.integrate(time) for kk in range(len(m)): x[i,kk], y[i,kk], z[i,kk] = sim.particles[kk].x, sim.particles[kk].y, sim.particles[kk].z vx[i,kk], vy[i,kk], vz[i,kk] = sim.particles[kk].vx, sim.particles[kk].vy, sim.particles[kk].vz aearth[i], amars[i] = sim.particles[3].a, sim.particles[4].a ###Output _____no_output_____ ###Markdown 6. Graficar solución en coordenadas baricéntricas (i.e., respecto al centro de masa) ###Code import matplotlib.pyplot as plt plt.plot(x[::1,3],y[::1,3],'.') plt.plot(x[::1,4],y[::1,4],'.') plt.plot(x[::1,5],y[::1,5],'.') plt.xlim(-6,6) plt.ylim(-6,6) plt.axis('equal') ###Output _____no_output_____ ###Markdown 7. Trasladar todos los vectores posición a un sistema relativo a la Tierra ###Code xrel,yrel, zrel = x-x[:,3][:,None], y-y[:,3][:,None], z-z[:,3][:,None] ###Output _____no_output_____ ###Markdown 8. 
Graficar solución en coordenadas geocéntricas ###Code plt.plot(xrel[::1,3],yrel[::1,3]) plt.plot(xrel[::1,4],yrel[::1,4]) plt.plot(xrel[::1,5],yrel[::1,5]) plt.xlim(-6,6) plt.ylim(-6,6) plt.axis('equal') ###Output _____no_output_____ ###Markdown Create a simple solar system model ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np from collections import namedtuple ###Output _____no_output_____ ###Markdown Define a planet class ###Code class planet(): "A plante in our solar system" def __init__(self, semimajor, eccentricity): self.x = np.zeros(2) # x and y position self.v = np.zeros(2) # x and y velocity self.a_g = np.zeros(2) # x and y acceleration self.t = 0.0 # current time self.dt = 0.0 # current timestep self.a = semimajor # semimajor axis of the orbit self.e = eccentricity # eccentricity of the object self.istep = 0 # current interger timestep self.name = "" # name for the planet ###Output _____no_output_____ ###Markdown Define a dictionary with some constants ###Code solar_system = { "M_sun": 1.0, "G": 39.4784176043574320} ###Output _____no_output_____ ###Markdown Define some function for setting circular velocity, and acceleration ###Code def SolarCircularVelocity(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # return the circular velocity return (G*M/r)**0.5 ###Output _____no_output_____ ###Markdown Write a function to compute the gravitational acceleration on each planet form the Sun ###Code def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 # acceleration in AU/yr/yr a_grav = -1.0*G*M/r**2 # find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) # set the x and y components of the velocity # p.a_g[0] = a_grav * np.cos(theta) # p.a_g[1] = a_grav * np.sin(theta) return a_grav*np.cos(theta), a_grav*np.sin(theta) ###Output _____no_output_____ ###Markdown Compute the timestep ###Code def calc_dt(p): # integration tolerance ETA_TIME_STEP = 0.0004 #compute timestep eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v), 1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Define the initial conditions ###Code def SetPlanet(p, i): AU_in_km = 1.495979e+8 # an AU in km # circular velocity v_c = 0.0 # circular velocity in AU/yr v_e = 0.0 # velocity at perihelion in AU/yr # planet-by-planet initial conditions # Mercury if(i==0): # semi-major axis in AU p.a = 57909227.0/AU_in_km # eccentricity p.e = 0.20563593 # name p.name = "Mercury" # Venus elif(i==1): # semi-major axis in AU p.a = 108209475.0/AU_in_km # eccentricity p.e = 0.00677672 # name p.name = "Venus" # Earth elif(i==2): # semi-major axis in AU p.a = 1.0 # eccentricity p.e = 0.01671123 # name p.name = "Earth" # set remaining properties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 # get equiv circular velocity v_c = SolarCircularVelocity(p) # velocity at perihelion v_e = v_c*(1 + p.e)**0.5 # set velocity p.v[0] = 0.0 # no x velocity at perihelion p.v[1] = v_e # y velocity at perihelion (counter clockwise) # calculate gravitational acceleration from Sun p.a_g = SolarGravitationalAcceleration(p) # set timestep p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown Write leapfrog integrator ###Code def x_first_step(x_i, v_i, a_i, dt): # x_1/2 = x_0 _ 1/2 v_0 Delta_t + 1/4 a_0 Delta t^2 return x_i + 0.5*v_i*dt + 
0.25*a_i*dt**2 def v_full_step(v_i, a_ipoh, dt): # v_i+1 = v_i + a_i+1/2 Delta t return v_i + a_ipoh*dt; def x_full_step(x_ipoh, v_ipl, a_ipoh, dt): # x_3/2 = x_1/2 + v_i+1 Delta t return x_ipoh + v_ipl*dt ###Output _____no_output_____ ###Markdown Write a function to save the data to file ###Code def SaveSolarSystem(p, n_planets, t, dt, istep, ndim): # loop over the number of planets for i in range(n_planets): # define a filename fname = "planet.%s.txt" % p[i].name if(istep==0): # create the file on the first timestep fp = open(fname, "w") else: # append the file on the subsequent tiemstep fp = open(fname,"a") # compute the drifted properties of the planet v_drift = np.zeros(ndim) for k in range(ndim): v_drift[k] = p[i].v[k] + 0.5*p[i].a_g[k]*p[i].dt # write the data to file #s = "%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t s = "%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\n" % \ (istep, t, dt, p[i].istep, p[i].t, p[i].dt, p[i].x[0], p[i].x[1], v_drift[0], v_drift[1], p[i].a_g[0], p[i].a_g[1]) fp.write(s) # close the file fp.close() ###Output _____no_output_____ ###Markdown Write a function to evolve the solar system ###Code def EvolveSolarSystem(p, n_planets, t_max): # number of spatial dimemsions ndim = 2 # define the first timestep dt = 0.5/365.25 # define the starting time t = 0.0 # define the starting timestep istep = 0 # save the initial conditions SaveSolarSystem(p, n_planets, t, dt, istep, ndim) # begin a loop over the global timescale while(t < t_max): # check to see if the next step exceeds the maximum time. # If so, take a smaller step if(t + dt > t_max): dt = t_max - t # limit the step to align with t_max # evolve each planet for i in range(n_planets): while(p[i].t < t + dt): # special case for istep == 0 if(p[i].istep == 0): # take the first step according to a verlet scheme for k in range(ndim): p[i].x[k] = x_first_step(p[i].x[k], p[i].v[k], p[i].a_g[k], p[i].dt) # update the acceleration p[i].a_g = SolarGravitationalAcceleration(p[i]) # update the time by 1/2dt p[i].t += 0.5 * p[i].dt # update the timestpe p[i].dt = calc_dt(p[i]) # continue with a norlam step # limit to align with the global timestep if(p[i].t + p[i].dt > t + dt): p[i].dt = t + dt - p[i].t # evolve the velocity for k in range(ndim): p[i].v[k] = v_full_step(p[i].v[k], p[i].a_g[k], p[i].dt) # evolve the position for k in range(ndim): p[i].x[k] = x_full_step(p[i].x[k], p[i].v[k], p[i].a_g[k], p[i].dt) # udpat the acceleration p[i].a_g = SolarGravitationalAcceleration(p[i]) # update by dt p[i].t += p[i].dt # compute the new timestep p[i].dt = calc_dt(p[i]) # udpate the planet's timestep p[i].istep += 1 # now update the global system time t += dt # update the global step number istep += 1 # output the current state SaveSolarSystem(p, n_planets, t, dt, istep, ndim) # print the final steps and time print("Time t = ", t) print("Maximum t = ", t_max) print("Maximum number of steps = ", istep) # end of evolution ###Output _____no_output_____ ###Markdown Create a routine to read in the data ###Code def read_twelve_arrays(fname): fp = open(fname, "r") fl = fp.readlines() n = len(fl) a = np.zeros(n) b = np.zeros(n) c = np.zeros(n) d = np.zeros(n) f = np.zeros(n) g = np.zeros(n) h = np.zeros(n) j = np.zeros(n) k = np.zeros(n) l = np.zeros(n) m = np.zeros(n) p = np.zeros(n) for i in range(n): a[i] = float(fl[i].split()[0]) b[i] = float(fl[i].split()[1]) c[i] = float(fl[i].split()[2]) d[i] = float(fl[i].split()[3]) f[i] = float(fl[i].split()[4]) g[i] = 
float(fl[i].split()[5]) h[i] = float(fl[i].split()[6]) j[i] = float(fl[i].split()[7]) k[i] = float(fl[i].split()[8]) l[i] = float(fl[i].split()[9]) m[i] = float(fl[i].split()[10]) p[i] = float(fl[i].split()[11]) return a, b, c, d, f, g, h, j, k, l, m, p ###Output _____no_output_____ ###Markdown Perform the integration of the solar system ###Code # set the number of planets n_planets = 3 # set the maximum time of the simulation t_max = 2.0 # create empty list of planets p = [] # set the planets for i in range(n_planets): # create an empty planet ptmp = planet(0.0,0.0) # set the planet properties SetPlanet(ptmp, i) # remember the planet p.append(ptmp) # evolve the solar system EvolveSolarSystem(p, n_planets, t_max) ###Output _____no_output_____ ###Markdown Read the data back in for every planet ###Code fname = "planet.Mercury.txt" istepMg, tMg, dtMg, istepM, tM, dtM, xM, yM, vxM, vyM, axM, ayM = read_twelve_arrays(fname) fname = "planet.Earth.txt" istepEg, tEg, dtEg, istepE, tE, dtE, xE, yE, vxE, vyE, axE, ayE = read_twelve_arrays(fname) fname = "planet.Venus.txt" istepVg, tVg, dtVg, istepV, tV, dtV, xV, yV, vxV, vyV, axV, ayV = read_twelve_arrays(fname) ###Output _____no_output_____ ###Markdown Plot the data ###Code fig = plt.figure(figsize = (7,7)) xSun = [0.0] ySun = [0.0] plt.plot(xSun, ySun, 'o', color = "0.5", label = "Sun") plt.plot(xM, yM, color = "red") plt.plot(xM[-1], yM[-1], 'o', color = "red", label = "Mercury") plt.plot(xV, yV, color = "green") plt.plot(xV[-1], yV[-1], 'o', color = "green", label = "Venus") plt.plot(xE, yE, color = "blue") plt.plot(xE[-1], yE[-1], 'o', color = "blue", label = "Earth") plt.xlim([-1.25, 1.25]) plt.ylim([-1.25, 1.25]) plt.xlabel('x [AU]') plt.ylabel('y [AU]') plt.axes().set_aspect('equal') plt.legend(frameon=False, loc=2) ###Output _____no_output_____ ###Markdown Solar System Model Create a simple solar system model ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np ###Output _____no_output_____ ###Markdown Define a planet class ###Code class planet(): "A planet in our solar system" def __init__(self,semimajor,eccentricity): self.x = np.zeros(2) #x and y position self.v = np.zeros(2) #x and y velocity self.a_g = np.zeros(2) #x and y acceleration self.t = 0.0 #current time self.dt = 0.0 #current time step self.a = semimajor #semimajor axis of the orbit self.e = eccentricity #eccentricity of the orbit self.istep = 0 #current integer time step self.name = "" #name for the planet ###Output _____no_output_____ ###Markdown Define a dictionary with some constants ###Code solar_system = { "M_sun":1.0, "G":39.4784176043574320} ###Output _____no_output_____ ###Markdown Define some functions for setting circular velocity, and acceleration ###Code def SolarCircularVelocity(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 #return circular velocity return (G*M/r)**0.5 ###Output _____no_output_____ ###Markdown Write a function to compute the gravitational acceleration on each planet from the Sun ###Code def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_sun"] r = ( p.x[0]**2 + p.x[1]**2 )**0.5 #acceleration in AU/yr/yr a_grav = -1.0*G*M/r**2 #find the angle at this position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) #set the x and y components of the velocity #p.a_g[0] = a_grav *np.cos(theta) #p.a_g[1] = a_grav *np.sin(theta) return a_grav*np.cos(theta), a_grav*np.sin(theta) ###Output 
_____no_output_____ ###Markdown Compute the time step ###Code def calc_dt(p): #integration tolerance ETA_TIME_STEP = 0.0004 #compute the time step eta = ETA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = eta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Define the initial conditions ###Code def SetPlanet(p, i): AU_in_km = 1.495979e+8 #an AU in km #circular velocity v_c = 0.0 #circular velocity in AU/yr v_e = 0.0 #veloctiy at perihelion in AU/yr #planet-by-planet initial conditions #Mercury if(i==0): #semi-major axis in AU p.a = 57909227.0/AU_in_km #eccentricity p.e = 0.20563593 #name p.name = "Mercury" #Venus elif(i==1): #semi-major axis in AU p.a = 108209475.0/AU_in_km #eccentricity p.e = 0.00677672 #name p.name = "Venus" #Earth elif(i==2): #semi-major axis in AU p.a = 1.0 #eccentricity p.e = 0.01671123 #name p.name = "Earth" #set remaining properties p.t = 0.0 p.x[0] = p.a*(1.0-p.e) p.x[1] = 0.0 #get equiv circular veloctiy v_c = SolarCircularVelocity(p) #velocity at perihelion v_e = v_c*(1 + p.e)**0.5 #set velocity p.v[0] = 0.0 #no x velocity at perihelion p.v[1] = v_e #y velocity at perihelion (counter clockwise) #calculate gravitational acceleration from Sun p.a_g = SolarGravitationalAcceleration(p) #set time step p.dt = calc_dt(p) ###Output _____no_output_____ ###Markdown Write leapfrog integrator ###Code def x_first_step(x_i, v_i, a_i, dt): #x_1/2 = x_0 + 1/2 v_0 Delta_t + 1/4 a_0 Delta t^2 return x_i +0.5*v_i*dt + 0.25*a_i*dt**2 def v_full_step(v_i, a_ipoh, dt): #v_i+1 = v_i + a_i+1/2 Delta_t return v_i + a_ipoh*dt; def x_full_step(x_ipoh, v_ipl, a_ipoh, dt): #x_3/2 = x_1/2 + v_i+1 Delta t return x_ipoh + v_ipl*dt; ###Output _____no_output_____ ###Markdown Write a function to save the data to file ###Code def SaveSolarSystem(p, n_planets, t, dt, istep, ndim): #loop over the number of planets for i in range(n_planets): #define a filename fname = "planet.%s.txt" %p[i].name if(istep==0): #create the file on the first timestep fp = open(fname,"w") else: #append the file on subsequent timesteps fp = open(fname,"a") #compute the drifted properties of the planet v_drift = np.zeros(ndim) for k in range(ndim): v_drift[k] = p[i].v[k] + 0.5*p[i].a_g[k]*p[i].dt #write the data to file s = "%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\n" % \ (istep,t,dt,p[i].istep,p[i].t,p[i].dt,p[i].x[0],p[i].x[1],v_drift[0],v_drift[1], \ p[i].a_g[0],p[i].a_g[1]) fp.write(s) #close the file fp.close() ###Output _____no_output_____ ###Markdown Write a function to evolve the solar system ###Code def EvolveSolarSystem(p,n_planets,t_max): #numberof spatial dimensions ndim = 2 #define the first timestep dt = 0.5/365.25 #define he starting time t = 0.0 #define the starting timestep istep = 0 #save the initial conditions SaveSolarSystem(p,n_planets,t,dt,istep,ndim) #begin a loop over the global timescale while(t<t_max): #check to see if the next step exceeds the #maximum time. 
If so, take a smaller step if(t+dt>t_max): dt = t_max - t #limit the step to allign with t_max #evolve each planet for i in range(n_planets): while(p[i].t<t+dt): #special case for istep==0 if(p[i].istep==0): #take the first step according to a verlet scheme for k in range(ndim): p[i].x[k] = x_first_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt) #update the acceleration p[i].a_g = SolarGravitationalAcceleration(p[i]) #update the time by 1/2dt p[i].t += 0.5*p[i].dt #update the timestep p[i].dt = calc_dt(p[i]) #continue with a normal step #limit to align with the global timestep if(p[i].t + p[i].dt > t+dt): p[i].dt = t+dt-p[i].t #evolve the velocity for k in range(ndim): p[i].v[k] = v_full_step(p[i].v[k],p[i].a_g[k],p[i].dt) #evolve the position for k in range(ndim): p[i].x[k] = x_full_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt) #update the acceleration p[i].a_g = SolarGravitationalAcceleration(p[i]) #update by dt p[i].t += p[i].dt #compute the new timestep p[i].dt = calc_dt(p[i]) #update the planet's timestep p[i].istep+=1 #now update the global system time t+=dt #update the global step number istep += 1 #output the current state SaveSolarSystem(p,n_planets,t,dt,istep,ndim) #print the final steps and time print("Time t = ",t) print("Maximum t = ",t_max) print("Maximum number of steps = ",istep) #end of evolution ###Output _____no_output_____ ###Markdown Create a routine to read in the data ###Code def read_twelve_arrays(fname): fp = open(fname,"r") fl = fp.readlines() n = len(fl) a = np.zeros(n) b = np.zeros(n) c = np.zeros(n) d = np.zeros(n) f = np.zeros(n) g = np.zeros(n) h = np.zeros(n) j = np.zeros(n) k = np.zeros(n) l = np.zeros(n) m = np.zeros(n) p = np.zeros(n) for i in range(n): a[i] = float(fl[i].split()[0]) b[i] = float(fl[i].split()[1]) c[i] = float(fl[i].split()[2]) d[i] = float(fl[i].split()[3]) f[i] = float(fl[i].split()[4]) g[i] = float(fl[i].split()[5]) h[i] = float(fl[i].split()[6]) j[i] = float(fl[i].split()[7]) k[i] = float(fl[i].split()[8]) l[i] = float(fl[i].split()[9]) m[i] = float(fl[i].split()[10]) p[i] = float(fl[i].split()[11]) return a,b,c,d,f,g,h,j,k,l,m,p ###Output _____no_output_____ ###Markdown Perform the integration of the solar system ###Code #set the number of planets n_planets = 3 #set the maximum time of the simulation t_max = 2.0 #create empty list of planets p = [] #set the planets for i in range(n_planets): #create an empty planet ptmp = planet(0.0,0.0) #set the planet properties SetPlanet(ptmp,i) #remember the planet p.append(ptmp) #evolve the solar system EvolveSolarSystem(p,n_planets,t_max) ###Output _____no_output_____ ###Markdown Read the data back in for every planet ###Code fname = "planet.Mercury.txt" istepMg,tMg,dtMg,istepM,tM,dtM,xM,yM,vxM,vyM,axM,ayM = read_twelve_arrays(fname) fname = "planet.Earth.txt" istepEg,tEg,dtEg,istepE,tE,dtE,xE,yE,vxE,vyE,axE,ayE = read_twelve_arrays(fname) fname = "planet.Venus.txt" istepVg,tVg,dtVg,istepv,tV,dtV,xV,yV,vxV,vyV,axV,ayV = read_twelve_arrays(fname) ###Output _____no_output_____ ###Markdown Plot the data ###Code fig = plt.figure(figsize=(7,7)) xSun = [0.0] ySun = [0.0] plt.plot(xSun,ySun,'o',color="0.5",label="Sun") plt.plot(xM,yM,color="red") plt.plot(xM[-1],yM[-1],'o',color="red",label="Mercury") plt.plot(xV,yV,color="green") plt.plot(xV[-1],yV[-1],'o',color="green",label="Venus") plt.plot(xE,yE,color="blue") plt.plot(xE[-1],yE[-1],'o',color="blue",label="Earth") plt.xlim([-1.25,1.25]) plt.ylim([-1.25,1.25]) plt.xlabel('x [AU]') plt.ylabel('y [AU]') plt.axes().set_aspect('equal') 
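#plt.axes() here re-acquires the already-created axes and can raise a MatplotlibDeprecationWarning
#on newer Matplotlib (as seen in an earlier run above); plt.gca().set_aspect('equal') is the
#equivalent, warning-free call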
plt.legend(frameon=False,loc=2) ###Output _____no_output_____ ###Markdown Define a dictionary with some constants ###Code solar_system = { "M_sun":1.0, "G":39.4784176043574320} ###Output _____no_output_____ ###Markdown DEfine some functions for setting circular velocity and acceleration ###Code def solarCircularVelocity(p): G = solar_system["G"] M = solar_system["M_Sun"] r = ( p.x[0]**2 + p.x[1]**2)**0.5 #return the circular velocity return (G*M/r)**0.5 ###Output _____no_output_____ ###Markdown Write a function to compute the gravitational acceleration on each planet from the sun ###Code def SolarGravitationalAcceleration(p): G = solar_system["G"] M = solar_system["M_Sun"] r = ( p.x[0]**2 + p.x[1]**2)**0.5 #acceleration in AU/yr/yr #find the angle at ths position if(p.x[0]==0.0): if(p.x[1]>0.0): theta = 0.5*np.pi else: theta = 1.5*np.pi else: theta = np.arctan2(p.x[1],p.x[0]) #set the x and y components of the velocity #p.a_g[0] = a_grav * np.cos(theta) #p.a_g[1] = a_grav * np.sin(theta) return a_grav*np.cos(tehta), a_grav*np.sin(theta) ###Output _____no_output_____ ###Markdown Compute the timestep ###Code def calc_dt(p): #integration tolerance ETA_TIME_STEP = 0.0004 #compute the timestep eta = EPA_TIME_STEP v = (p.v[0]**2 + p.v[1]**2)**0.5 a = (p.a_g[0]**2 + p.a_g[1]**2)**0.5 dt = dta * np.fmin(1./np.fabs(v),1./np.fabs(a)**0.5) return dt ###Output _____no_output_____ ###Markdown Solar System Mark Croom, Hannah Gallamore, Dylan Gatlin, Cristo SanchezBased on the tests in python_tests.ipynb, the Fortran adaptive timestep model is ready for use, but in order to interact with the data, we will need to run it through Python using f2py. This will enable us to plot our outputs easily. A Word of CautionThis model doesn't throw out points mid-execution. An 64 bit integer (8 Bytes), stored in two 3x10x~ 2 Million sized arrays, as well as a time array, means that this model will crash with less than 10 GB of free memory. It will also need some overhead for plotting. It will also take 75s per 100 years. Only run it when absolutely necessary, with only things that are absolutely necessary open. ###Code import time import tqdm import numpy as np import matplotlib.pyplot as plt import seaborn as sns from astroquery.jplhorizons import Horizons from matplotlib.animation import FuncAnimation from astropy.time import Time import fortpy as fp sns.set(style='darkgrid') ###Output _____no_output_____ ###Markdown Fetching a DatasetMost of what we need can be found via Horizons' Python interface. However, mass is not accessible there (although it's in the actual database), so it was copied manually. 
###Code bodies = ["Sun", "Mercury Barycenter", "Venus Barycenter", "Earth-Moon Barycenter", "Mars Barycenter", "Jupiter Barycenter", "Saturn Barycenter", "Uranus Barycenter", "Neptune Barycenter", "Pluto Barycenter"] masses = np.array([1.989e33, 0.3302e27, 4.8685e27, 5.9736e27, 0.64185e27, 1898.6e27, 568.46e27, 86.832e27, 102.43e27, 0.01303e27]) ss_masses = masses / masses[0] ss_i_poses = [] ss_i_vels = [] today = Time.now() for body in bodies: qry = Horizons(id=body, epochs={'start':today.iso[:10], 'stop': (today + 1).iso[:10], 'step':'2d'}, id_type='majorbody') ss_i_poses.append([qry.vectors()[0]['x'], qry.vectors()[0]['y'], qry.vectors()[0]['z']]) ss_i_vels.append([qry.vectors()[0]['vx'], qry.vectors()[0]['vy'], qry.vectors()[0]['vz']]) ss_i_poses = np.array(ss_i_poses).astype(order='F', dtype=float) ss_i_vels = np.array(ss_i_vels).astype(order='F', dtype=float) ###Output _____no_output_____ ###Markdown Pure Fortran ###Code dt_0 = 1e-3 ss_times = [] ss_poses = [] now = time.time() years = 1000 ss_times, ss_poses, ss_vels, ss_status = fp.n_bodies.adaptive_n_body_model( "rkf45", 25000 * years, ss_masses, ss_i_poses, ss_i_vels, 365 * years, 1e-10, dt_0, 4, 0.00029591220828559, 1) new = time.time() ss_filt = ss_times > 0 ss_filt[0] = True lim = ss_filt.sum() ss_times = ss_times[:lim:2] ss_poses = ss_poses[:lim:2] ss_vels = ss_vels[:lim:2] print(f"Solar System Status: {ss_status.decode('utf-8')}") print(f"Length: {ss_times.size}") print(f"Time consumption: {new - now:.2f} s") fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=150) colors = ['orange', 'gray', 'yellow', 'green', 'red', 'orange', 'tan', 'turquoise', 'blue', 'tan'] frame_skip = 10000 for i, body in enumerate(bodies): ax.plot(ss_poses[::frame_skip, i, 0], ss_poses[::frame_skip, i, 1], # s=(np.log(1 + ss_masses[i]) * 50 + 8), c=colors[i], # edgecolor='k', linewidth=0.6) ax.set_xlabel("x (AU, relative to barycenter)") ax.set_ylabel("y (AU, relative to barycenter)") ax.set_title(f"Solar System started on {today.isot[:10]} for {ss_times[-1]:.0f} Days") fig.savefig("spatial_plog.png") indexes = [0] dt_lim = 25. 
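# downsample the saved snapshots for the animation: keep only those times that are
# at least dt_lim (25 days) apart, so the movie frames have a roughly even cadence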
for i, t in enumerate(ss_times): if t - ss_times[indexes[-1]] > dt_lim: indexes.append(i) fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=150) colors = ['orange', 'gray', 'yellow', 'green', 'red', 'orange', 'tan', 'turquoise', 'blue', 'tan'] pbar = tqdm.tqdm(total=len(indexes)//10) t = int(ss_times[-1] * 0.1) + 1 def init(): for i, body in enumerate(bodies): line = ax.scatter(ss_poses[0, i, 0], ss_poses[0, i, 1], s=(np.log(1 + ss_masses[i]) * 50 + 8), c=colors[i], edgecolor='k', linewidth=0.2) ax.set_xlabel("x (AU, relative to barycenter)") ax.set_ylabel("y (AU, relative to barycenter)") ax.set_title(f"Solar System for {t:.0f} Days") ax.set_xlim(-35, 50) ax.set_ylim(-40, 50) return line def update(i): pbar.update(1) ax.cla() for j, body in enumerate(bodies): line = ax.scatter(ss_poses[indexes[i], j, 0], ss_poses[indexes[i], j, 1], s=(np.log(1 + ss_masses[j]) * 50 + 8), c=colors[j], edgecolor='k', linewidth=0.2) ax.set_xlabel("x (AU, relative to barycenter)") ax.set_ylabel("y (AU, relative to barycenter)") ax.set_title(f"Solar System for {t:.0f} Days") ax.set_xlim(-35, 50) ax.set_ylim(-40, 50) ani = FuncAnimation(fig, update, frames=len(indexes)//10, init_func=init) ani.save("solar_system_animation.mp4", writer="ffmpeg", fps=24) fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=100) colors = ['orange', 'gray', 'yellow', 'green', 'red', 'orange', 'tan', 'turquoise', 'blue', 'tan'] frame_skip = 30000 for j, t in tqdm.tqdm(list(enumerate(ss_times[:100000:frame_skip]))): for i, body in enumerate(bodies): ax.scatter(ss_poses[j * frame_skip, i, 0], ss_poses[j * frame_skip, i, 1], s=(np.log(1 + ss_masses[i]) * 50 + 8), c=colors[i], edgecolor='k', linewidth=0.2) ax.set_xlabel("x (AU, relative to barycenter)") ax.set_ylabel("y (AU, relative to barycenter)") ax.set_title(f"Spatial Plot from 0-{t:.0f} Days") fig.savefig("spatial_plog.png") ###Output 0%| | 0/4 [00:00<?, ?it/s] 100%|██████████| 4/4 [00:00<00:00, 24.00it/s] ###Markdown Python/Fortran Hybrid ModelIn the early stages of this project, we weren't sure if the full-Fortran model would work, so we prepared a version with a Python wrapper around a Fortran integrator. Once we were sure the pure-Fortran method worked, we stopped work here, so this may be out of date. ###Code class Output(object): def __init__(self, masses=[], positions=[], velocities=[], times=[], coms=[], avvs=[], energies=[], amomentums=[], eccentricities=[], count=0, n_bodies=0): self.masses = masses self.positions = positions self.velocities = velocities self.times = times self.coms = coms self.avvs = avvs self.energies = energies self.amomentums = amomentums self.eccentricities = eccentricities self.count = count self.n_bodies = n_bodies self.model_out = [self.masses, self.positions, self.velocities, self.times, self.coms, self.avvs, self.n_bodies] def calculate_auxilary_values(self, com=None, avv=None): """Computes the energy and angular momentum of a system of particles. The energy is computed as the kinetic energy of a particle minus the potential energy of every other particle relative to it. The angular momentum is the cross product of positions and velocities, relative to the center of mass. 
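Nothing is returned; the results are stored on the object as self.energies,
self.amomentums, and self.eccentricities.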
""" if com is None: com = self.coms if avv is None: avv = self.avvs nrg_rel = np.zeros(self.positions.shape[0]) h_rel = np.zeros((self.positions.shape[0],) + (3,)) for i in range(self.positions.shape[1]): # if masses[i] != 0: nrg_rel += 0.5 * np.linalg.norm(self.velocities[:, i, :] - avv, axis=1)**2 for j in range(i+1, self.positions.shape[1]): # if masses[i] != 0: nrg_rel -= self.masses[i] / np.linalg.norm( self.positions[:, i, :] - self.positions[:, j, :], axis=1 ) h_rel += np.cross(self.positions[:, i, :] - com, self.velocities[:, i, :] - avv) e_rel = np.sqrt(1 + 2 * nrg_rel * np.linalg.norm(h_rel, axis=1) / np.sum(self.masses)**2) self.eccentricities = e_rel self.energies = nrg_rel self.amomentums = h_rel def calc_com(masses, pos): """A simple function that compute the center of mass of a system """ return (masses[:, None] * pos).sum(axis=0) / masses.sum() # return ((masses*pos.T).T).sum(axis = 0) / masses.sum() def calc_avv(masses, vel): """A simple function that computes the average velocity of a system """ return (masses[:, None] * vel).sum(axis=0) / masses.sum() # return ((masses*vel.T).T).sum(axis = 0) / masses.sum() def adjust_timestep(dt, err, q, t_tot): # facmax = 6. # facmin = 0.33 fac = 0.38**(1. / (1. + q)) dt_new = dt * fac * err**(-1. / (1. + q)) # if (dt_new > facmax * dt): # dt_new = facmax * dt # elif (dt_new < facmin * dt): # dt_new = facmin * dt if (dt_new / t_tot < 1e-12) or np.isnan(dt_new): dt_new = t_tot * 1e-12 return dt_new def adaptive_n_body(integrator, masses, i_positions, i_velocities, t_tot, tolerance, dt_0=1e-2, q=2, g=1, update_com=True, **kwargs): """ An n-body time integrator, that simulates the positions and velocities of an ensemble of particles in a gravity field. Inputs: integrator: A function that computes new positions and velocities, which takes masses, positions, velocities, and a timestep as inputs, and returns a pair of positions and velocities, the first two are the lower-order positions and velocities, the second pair is the higher-order check of the positions and velocities masses: A 1-D array of particle masses i_positions: A 2-D array of shape particles*dimensions of particle initial positions i_velocities: A 2-D array of shape particles*dimensions of particle initial velocities t_tot: A float of the total time of integration dt: The timestep Note: The integration is done using jacobi coordinates (ie relative to a center of mass reference frame. If your inputs are not given in a center of mass reference frame, your inputs may not look like the initial state of the outputs) Outputs: positions, velocities, times, center of masses, average velocities, all as arrays. Positions and velocities are 3-D arrays of shape times*particles*dimensions. Times is a 1-D array of shape times. Center of mass and average velocities are of shape times*dimensions and only represent the change of the center of mass and velocity in the previous time step """ # All the information we need to compute the times is given at the start times = [] positions = [] velocities = [] coms = [] avvs = [] errors = [] # compute the center of mass and average velocities and update the inputs # before they are added to our data com = calc_com(masses, i_positions) avv = calc_avv(masses, i_velocities) if update_com: pos2 = i_positions - com vel2 = i_velocities - avv else: pos2 = i_positions vel2 = i_velocities t = 0. dt = dt_0 err = 0. 
count = 0 pbar = tqdm.tqdm(total=t_tot,bar_format='{desc}: {percentage:3.0f}' '% {n:.3f}/{total:.3f} [{elapsed}<{remaining}, {rate_fmt}{postfix}]') n_recalculates = 0 while ((t < t_tot) and (count < 1000000)): pbar.update(dt) times.append(t) positions.append(pos2) velocities.append(vel2) coms.append(com) avvs.append(avv) errors.append(err) # second_count = 0 # while second_count < 100: pos1, vel1, pos2, vel2 = integrator(masses, pos2, vel2, dt, g=g, **kwargs) # To handle most critical bugs in any integrator function, this will # raise an error if the outputs are nonsense if np.any(np.isnan(pos2)): raise ValueError('nan encountered') com = calc_com(masses, pos2) avv = calc_avv(masses, vel2) if update_com: pos2 -= com vel2 -= avv # Adjust timestep err = np.concatenate((pos1 - pos2, vel1 - vel2)) err = np.linalg.norm(err / tolerance) / np.sqrt(err.size) dt = adjust_timestep(dt, err, q, t_tot) # if err <= 1: # Happens when the solution is accurate enough # break # # second_count += 1 # if second_count > 10: # print('Recalculated for more than 10 loops') # n_recalculates += second_count t += dt count += 1 pbar.close() times = np.array(times) positions = np.array(positions) velocities = np.array(velocities) coms = np.array(coms) avvs = np.array(avvs) errors = np.array(errors) out = Output(masses=masses, positions=positions, velocities=velocities, times=times, coms=coms, avvs=avvs, count=count, n_bodies=len(masses)) out.errors = errors return out def rk_pde(masses, positions, velocities, g=1): """Returns velocities and accelerations of a body at a given position. Used inside the Runge-Kutta Methods """ assert (positions.shape == velocities.shape)\ and (positions.shape[0] == masses.shape[0]), ("Shapes must match: {} {}" " {}".format( positions.shape, velocities.shape, masses.shape)) k_vels = np.zeros(positions.shape) k_poses = velocities.copy() for i, pos in enumerate(positions): for j, pos2 in enumerate(positions): if i != j: if masses[j] != 0: r = pos2 - pos k_vels[i] += g * masses[j] / np.linalg.norm(r)**3 * r return k_poses, k_vels def rkf45(masses, positions, velocities, dt, **kwargs): k_poses = np.zeros((6,) + positions.shape) k_vels = np.zeros((6,) + positions.shape) coefficients = np.array([[1. / 4., 0., 0., 0, 0.], [3. / 32., 9. / 32., 0., 0., 0.], [1932. / 2197., -7200. / 2197., 7296. / 2197., 0., 0.], [439. / 216., -8, 3680. / 513., -845. / 4104., 0.], [-8. / 27., 2., -3544. / 2565., 1859. / 4104., -11. / 40.]]) weights_4 = np.array([25. / 216, 0., 1408. / 2565., 2197. / 4104., -0.2, 0.]) weights_5 = np.array([16. / 135., 0.,6656. /12825., 28561. / 56430., -9. / 50., 2. 
/ 55.]) k_poses[0], k_vels[0] = rk_pde(masses, positions, velocities, **kwargs) for i, coeffs in enumerate(coefficients): k_poses[i + 1], k_vels[i + 1] = rk_pde(masses, positions + (k_poses[:-1] * coefficients[i][:, None, None] * dt ).sum(axis=0), velocities + (k_vels[:-1] * coefficients[i][:, None, None] * dt ).sum(axis=0), **kwargs) poses_4 = positions + (dt * weights_4[:, None, None] * k_poses).sum(axis=0) vels_4 = velocities + (dt * weights_4[:, None, None] * k_vels).sum(axis=0) poses_5 = positions + (dt * weights_5[:, None, None] * k_poses).sum(axis=0) vels_5 = velocities + (dt * weights_5[:, None, None] * k_vels).sum(axis=0) return poses_4, vels_4, poses_5, vels_5 now = time.time() ss_model = adaptive_n_body(fp.integrators.rkf45, ss_masses, ss_i_poses, ss_i_vels, 365*100, 1e-10, dt_0, 4, g=0.00029591220828559, update_com=True) new = time.time() print(f"Length: {ss_model.count}") print(f"Time consumption: {new - now:.2f} s") fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=100) # ax.plot(ss_poses[:, :, 0], ss_poses[:, :, 1]) colors = ['orange', 'gray', 'yellow', 'green', 'red', 'tan', 'orange', 'turquoise', 'blue', 'tan'] for j, t in tqdm.tqdm(list(enumerate(ss_model.times[::10000]))): for i, body in enumerate(bodies): ax.scatter(ss_model.positions[j * 10000, i, 0], ss_model.positions[j * 10000, i, 1], s=(np.log(1 + ss_masses[i]) * 50 + 8), c=colors[i], edgecolor='k', linewidth=0.2) ###Output 0%| | 0/91 [00:00<?, ?it/s] 3%|▎ | 3/91 [00:00<00:03, 26.42it/s] 7%|▋ | 6/91 [00:00<00:03, 26.42it/s] 10%|▉ | 9/91 [00:00<00:03, 26.30it/s] 13%|█▎ | 12/91 [00:00<00:03, 25.70it/s] 16%|█▋ | 15/91 [00:00<00:03, 25.08it/s] 20%|█▉ | 18/91 [00:00<00:02, 24.40it/s] 23%|██▎ | 21/91 [00:00<00:02, 23.64it/s] 26%|██▋ | 24/91 [00:01<00:03, 22.07it/s] 30%|██▉ | 27/91 [00:01<00:03, 21.27it/s] 33%|███▎ | 30/91 [00:01<00:03, 19.92it/s] 35%|███▌ | 32/91 [00:01<00:03, 18.11it/s] 37%|███▋ | 34/91 [00:01<00:03, 17.09it/s] 40%|███▉ | 36/91 [00:01<00:03, 16.66it/s] 42%|████▏ | 38/91 [00:01<00:04, 12.67it/s] 44%|████▍ | 40/91 [00:02<00:03, 13.65it/s] 46%|████▌ | 42/91 [00:02<00:03, 14.38it/s] 48%|████▊ | 44/91 [00:02<00:03, 15.02it/s] 51%|█████ | 46/91 [00:02<00:03, 14.90it/s] 53%|█████▎ | 48/91 [00:02<00:02, 15.11it/s] 55%|█████▍ | 50/91 [00:02<00:02, 15.11it/s] 57%|█████▋ | 52/91 [00:02<00:02, 14.96it/s] 59%|█████▉ | 54/91 [00:03<00:02, 14.17it/s] 62%|██████▏ | 56/91 [00:03<00:02, 14.45it/s] 64%|██████▎ | 58/91 [00:03<00:02, 14.61it/s] 66%|██████▌ | 60/91 [00:03<00:02, 14.55it/s] 68%|██████▊ | 62/91 [00:03<00:02, 14.25it/s] 70%|███████ | 64/91 [00:03<00:01, 14.12it/s] 73%|███████▎ | 66/91 [00:03<00:01, 13.80it/s] 75%|███████▍ | 68/91 [00:04<00:01, 13.78it/s] 77%|███████▋ | 70/91 [00:04<00:01, 13.72it/s] 79%|███████▉ | 72/91 [00:04<00:01, 13.45it/s] 81%|████████▏ | 74/91 [00:04<00:01, 13.20it/s] 84%|████████▎ | 76/91 [00:04<00:01, 12.61it/s] 86%|████████▌ | 78/91 [00:04<00:01, 12.68it/s] 88%|████████▊ | 80/91 [00:04<00:00, 12.58it/s] 90%|█████████ | 82/91 [00:05<00:00, 12.54it/s] 92%|█████████▏| 84/91 [00:05<00:00, 12.44it/s] 95%|█████████▍| 86/91 [00:05<00:00, 12.19it/s] 97%|█████████▋| 88/91 [00:05<00:00, 11.87it/s] 100%|██████████| 91/91 [00:05<00:00, 15.28it/s]
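###Markdown The behaviour of the hybrid integrator is driven by the error controller in `adjust_timestep`: the difference between the 4th- and 5th-order RKF45 solutions is normalized by the tolerance, and the next timestep is scaled by `err**(-1/(1+q))`. The cell below is a small standalone sketch, added for illustration and using the same `fac = 0.38**(1/(1+q))` convention as above, showing how the estimated error moves the timestep up or down.
###Code
import numpy as np

def demo_adjust_timestep(dt, err, q=4, t_tot=365.0):
    # same controller as adjust_timestep above: shrink dt when the normalized
    # error estimate exceeds 1, grow it when the estimate is below 1
    fac = 0.38**(1. / (1. + q))
    dt_new = dt * fac * err**(-1. / (1. + q))
    if (dt_new / t_tot < 1e-12) or np.isnan(dt_new):
        dt_new = t_tot * 1e-12
    return dt_new

for err in [0.1, 1.0, 10.0]:
    print(f"err = {err:4.1f} -> dt goes from 1.000e-03 to {demo_adjust_timestep(1e-3, err):.3e}")
###Output _____no_output_____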
Shelsy_Dalcide_week5().ipynb
###Markdown Capstone Project - The Battle of the Neighborhoods (Week 2) Applied Data Science Capstone by IBM/Coursera Table of contents* [Introduction: Business Problem](introduction)* [Data](data)* [Methodology](methodology)* [Analysis](analysis)* [Results and Discussion](results)* [Conclusion](conclusion) Introduction: Business Problem In this project we will try How to find a Dance School next to an Elementary school in **Chicago** with a **high score of Instruction** and where **the teachers score** and **the safety score** is also high. These three parameters are very important because it is important for a parent to know that his or her kid is safe at school, that the instruction level is great and the teachers score is good enough. Data Based on definition of our problem, factors that will influence our decission are:* Number of School Dance in Chicago* Number ###Code import pandas as pd import numpy as np from geopy.geocoders import Nominatim import json import requests from pandas.io.json import json_normalize import matplotlib.cm as cm import matplotlib.colors as colors import matplotlib.cm as cm from sklearn.cluster import KMeans import folium df_capstone = pd.read_csv("Chicago_Public_Schools.csv") neigh = pd.read_csv('Neighborhoods_2012b.csv') neig_list = list(set(neigh.PRI_NEIGH)) ###Output _____no_output_____ ###Markdown Methodology After having done our data processing import our libraries look at the types of each column on jupyter notebook calculate their average describes our numeric columns. our dataset was at about 78 columns after our analysis it was reduced to those columns which are necessary for us:* NAME_OF_SCHOOL* Elementary,Middle, or High School* SAFETY_SCORE* Instruction Score* Teachers Score* Latitude* Longitude* COMMUNITY_AREA_NAME* Location* Street Address**We requested data on dance schools located in chicago thanks to foursquare in order to achieve our goal of finding a primary school with a high level of education, high scoring teachers and a high safety score next to a school dance.****We have created maps to see the city of Chicago and the addresses of the Schools.** ###Code df_capstone.head() df_capstone.columns ###Output _____no_output_____ ###Markdown Analysis ###Code df_capstone.mean() df_capstone.describe() df_capstone2 = df_capstone [['NAME_OF_SCHOOL','Street Address','Elementary, Middle, or High School','SAFETY_SCORE','Instruction Score','Teachers Score','Latitude','Longitude','COMMUNITY_AREA_NAME','Location']] df_capstone2_ = df_capstone2[df_capstone2["Elementary, Middle, or High School"] == 'ES'] df_capstone2_ = df_capstone2_[df_capstone2_["SAFETY_SCORE"] >= 70.0] df_capstone2_= df_capstone2_[df_capstone2_["Instruction Score"] >= 70.0] df_capstone2_= df_capstone2_[df_capstone2_["Teachers Score"] >= '70.0'] df_capstone2_.describe() df_capstone2_.shape df_capstone2.describe() # the types of our dataset df_capstone2.dtypes # we can see the mean df_capstone2.mean() df_capstone2.COMMUNITY_AREA_NAME = df_capstone2.COMMUNITY_AREA_NAME.str.lower() df_capstone2 ###Output C:\Users\bootcamp\Anaconda3\lib\site-packages\pandas\core\generic.py:5208: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy self[name] = value ###Markdown Clustering ###Code longitudes=[] latitudes= [] neighs= [] for index in range(0,len(neig_list)): print(index) try: print(neig_list[index]) query = neig_list[index]+ " Chicago,IL" geolocator = Nominatim(user_agent="ny_explorer") location = geolocator.geocode(query) latitudes.append(location.latitude) longitudes.append(location.longitude) neighs.append(neig_list[index]) except: latitudes.append(np.nan) longitudes.append(np.nan) neighs.append(neig_list[index]) df_3 =pd.DataFrame(dict(Neighborhoods=neighs,latitude=latitudes,longitude=longitudes)) latitudes longitudes df_3 #delete null values df_3.dropna(inplace=True) df_3.Neighborhoods =df_3.Neighborhoods.str.lower() df_3.head() ###Output _____no_output_____ ###Markdown Foursquare ###Code client_id = '0KTKKCYIO0BFEWJMDQ4ZTOADCGOED1KYXWIVS1JO3DPQ0LG1' # our Foursquare ID client_secret = '5URQZPCQJT4U1ZOU1I1NX2AUYIWBFNVHC520MXQE3BFZRJYW' # our Foursquare Secret version = '20180604' radius = 3000 query= "Chicago,IL" limit=500 categoryId ="4bf58dd8d48988d1f2931735" url_root = " https://api.foursquare.com/v2/venues/search?" # convert an address into latitude and longitude values geolocator = Nominatim(user_agent="ny_explorer") location = geolocator.geocode(query) latitude = location.latitude longitude = location.longitude (latitude,longitude) url = 'https://api.foursquare.com/v2/venues/search?client_id={}&client_secret={}&ll={},{}&v={}&query={}&radius={}&limit={}&categoryId={}'.format(client_id, client_secret, latitude, longitude, version, query, radius, limit,categoryId) url #show the dataset found in foursquare results = requests.get(url).json() results['response']['venues'] #let's see our venues venues = results['response']['venues'] # tranform venues into a dataframe dataframe = json_normalize(venues) dataframe # keep only columns that include venue name, and anything that is associated with location filtered_columns = ['name', 'categories'] + [col for col in dataframe.columns if col.startswith('location.')] + ['id'] dataframe_filtered = dataframe.loc[:, filtered_columns] # function that extracts the category of the venue def get_category_type(row): try: categories_list = row['categories'] except: categories_list = row['venue.categories'] if len(categories_list) == 0: return None else: return categories_list[0]['name'] # filter the category for each row dataframe_filtered['categories'] = dataframe_filtered.apply(get_category_type, axis=1) # clean column names by keeping only last term dataframe_filtered.columns = [column.split('.')[-1] for column in dataframe_filtered.columns] dataframe_filtered # here we filtre our columns ['name','categories','address','lat','lng'] df_capstone3 = dataframe_filtered[['name','categories','address','lat','lng','neighborhood']] df_capstone3.head() ###Output _____no_output_____ ###Markdown Here the maps of all Elementary School at Chicago ###Code address = 'Chicago,IL' geolocator = Nominatim(user_agent="ny_explorer") location = geolocator.geocode(address) latitude = location.latitude longitude = location.longitude print('The geograpical coordinates of Chicago,IL are {}, {}.'.format(latitude, longitude)) #create map of CHicago using latitude and longitude values chicago_map = folium.Map(location=[latitude, longitude], zoom_start=10) # add markers to map for NAME_OF_SCHOOL, lat, 
lng,COMMUNITY_AREA_NAME in zip(df_capstone2_['NAME_OF_SCHOOL'], df_capstone2_['Latitude'], df_capstone2_['Longitude'],df_capstone2_['COMMUNITY_AREA_NAME']): label = '{},{}'.format(NAME_OF_SCHOOL,COMMUNITY_AREA_NAME) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=5, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(chicago_map) chicago_map df_capstone3 = dataframe_filtered[['name','categories','address','lat','lng']] df_capstone3 df_capstone4 = df_capstone3[df_capstone3["categories"] == 'Dance Studio'] df_capstone4 chicago_map2 = folium.Map(location=[latitude, longitude], zoom_start=13) # add markers to map for name, lat, lng in zip(df_capstone4['name'], df_capstone4['lat'], df_capstone4['lng']): label = '{}'.format(name) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=4, popup=label, color='red', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(chicago_map2) chicago_map2 df_capstone4.shape df_capstone2_.rename(columns={"Street Address": "address","Elementary, Middle, or High School":"Grade","COMMUNITY_AREA_NAME":"borough"}, errors="raise") df_capstone2_['NAME_OF_SCHOOL'].head() chicago_map3 = folium.Map(location=[latitude, longitude], zoom_start=11) # add markers to map for NAME_OF_SCHOOL, lat, lng in zip(df_capstone2_['NAME_OF_SCHOOL'], df_capstone2_['Latitude'], df_capstone2_['Longitude']): label = '{}'.format(NAME_OF_SCHOOL) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=4, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(chicago_map3) for name, lat2, lng2 in zip (df_capstone4['name'], df_capstone4['lat'], df_capstone4['lng']): label1 = '{}'.format(name) label1 = folium.Popup(label1, parse_html=True) folium.CircleMarker( [lat2, lng2], radius=4, popup=label1, color='red', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(chicago_map3) chicago_map3 ###Output _____no_output_____
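###Markdown The maps above show the two sets of points, but the business question is really about proximity. The cell below is a sketch of one possible next step, not part of the original analysis: `haversine_km` is a helper introduced here to attach, to every qualifying elementary school, the great-circle distance in kilometres to its nearest dance studio.
###Code
import numpy as np

def haversine_km(lat1, lon1, lat2, lon2):
    # great-circle distance in km between (lat1, lon1) and arrays (lat2, lon2)
    lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
    a = np.sin((lat2 - lat1) / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2)**2
    return 2 * 6371.0 * np.arcsin(np.sqrt(a))

nearest_studio_km = []
for _, school in df_capstone2_.iterrows():
    d = haversine_km(school['Latitude'], school['Longitude'],
                     df_capstone4['lat'].values, df_capstone4['lng'].values)
    nearest_studio_km.append(d.min())

df_capstone2_ = df_capstone2_.assign(nearest_studio_km=nearest_studio_km)
df_capstone2_.sort_values('nearest_studio_km').head()
###Output _____no_output_____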
D56_K-mean觀察_使用輪廓分析/Day_056_kmean.ipynb
###Markdown K-Mean 觀察 : 使用輪廓分析 [教學目標]- 載入鳶尾花(iris)資料集, 以輪廓分析 (Silhouette analysis) 來觀察 K-mean 分群時不同 K 值的比較 - 因為非監督模型的效果, 較難以簡單的範例看出來 所以後續非監督偶數日提供的範例與作業, 主要目的在於觀察非監督模型的效果, 同學只要能感受到模型效果即可, 不用執著於搞懂程式的每一個部分 [範例重點]- 使用輪廓分析的圖表, 以及實際的分群散佈圖, 觀察 K-Mean 分群法在 K 有所不同時, 分群的效果如何變化 (In[3], Out[3]) ###Code # 載入套件 import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn.cluster import KMeans from sklearn import datasets from sklearn.metrics import silhouette_samples, silhouette_score np.random.seed(5) %matplotlib inline # 載入 iris 資料集 iris = datasets.load_iris() X = iris.data y = iris.target # 設定需要計算的 K 值集合 range_n_clusters = [2, 3, 4, 5, 6, 7, 8] # 計算並繪製輪廓分析的結果 # 因下列為迴圈寫法, 無法再分拆為更小執行區塊, 請見諒 for n_clusters in range_n_clusters: # 設定小圖排版為 1 row 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # 左圖為輪廓分析(Silhouette analysis), 雖然輪廓係數範圍在(-1,1)區間, 但範例中都為正值, 因此我們把顯示範圍定在(-0.1,1)之間 ax1.set_xlim([-0.1, 1]) # (n_clusters+1)*10 這部分是用來在不同輪廓圖間塞入空白, 讓圖形看起來更清楚 ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # 宣告 KMean 分群器, 對 X 訓練並預測 clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # 計算所有點的 silhouette_score 平均 silhouette_avg = silhouette_score(X, cluster_labels) print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # 計算所有樣本的 The silhouette_score sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # 收集集群 i 樣本的輪廓分數,並對它們進行排序 ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.nipy_spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # 在每個集群中間標上 i 的數值 ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # 計算下一個 y_lower 的位置 y_lower = y_upper + 10 ax1.set_title("The silhouette plot for the various clusters.") ax1.set_xlabel("The silhouette coefficient values") ax1.set_ylabel("Cluster label") # 將 silhouette_score 平均所在位置, 畫上一條垂直線 ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # 清空 y 軸的格線 ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # 右圖我們用來畫上每個樣本點的分群狀態, 從另一個角度觀察分群是否洽當 colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters) ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k') # 在右圖每一群的中心處, 畫上一個圓圈並標註對應的編號 centers = clusterer.cluster_centers_ ax2.scatter(centers[:, 0], centers[:, 1], marker='o', c="white", alpha=1, s=200, edgecolor='k') for i, c in enumerate(centers): ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50, edgecolor='k') ax2.set_title("The visualization of the clustered data.") ax2.set_xlabel("Feature space for the 1st feature") ax2.set_ylabel("Feature space for the 2nd feature") plt.suptitle(("Silhouette analysis for KMeans clustering on sample data " "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold') plt.show() ###Output For n_clusters = 2 The average silhouette_score is : 0.681046169211746 For n_clusters = 3 The average silhouette_score is : 0.5528190123564091 For n_clusters = 4 The average silhouette_score is : 0.4980505049972867 For n_clusters = 5 The average silhouette_score is : 0.4887488870931048 For n_clusters = 6 The average silhouette_score is : 0.3648340039670018 For n_clusters = 7 The average silhouette_score is : 
0.35445799253167404 For n_clusters = 8 The average silhouette_score is : 0.34873453772193763
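###Markdown A short follow-up sketch, not in the original notebook: collect the average silhouette score for each K and pick the K that maximizes it. With the scores printed above this selects K = 2 for the iris data even though there are three species, a reminder that the silhouette criterion is a heuristic rather than ground truth.
###Code
# pick the K with the highest average silhouette score
avg_scores = []
for n_clusters in range_n_clusters:
    labels = KMeans(n_clusters=n_clusters, random_state=10).fit_predict(X)
    avg_scores.append(silhouette_score(X, labels))

best_k = range_n_clusters[int(np.argmax(avg_scores))]
print("Best K by average silhouette score:", best_k)
###Output _____no_output_____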
source_code/f2py/code_generation.ipynb
###Markdown The following Python function will create a string representation of a Fortran expression for a polynomial. ###Code def create_poly_expression(constant, *coeffs): expr = f'({constant})' for i, coeff in enumerate(coeffs): expr += f' &\n + ({coeff})*x**{i + 1}' return expr ###Output _____no_output_____ ###Markdown For example, we can now create a polynomial of degree 2. ###Code print(create_poly_expression(1.0, 3.0, -5.0)) ###Output (1.0) & + (3.0)*x**1 & + (-5.0)*x**2 ###Markdown The following function will return a Fortran function definition for given coefficients of the polynomial. ###Code def create_poly_function(name, constant, *coeffs): expr = create_poly_expression(constant, *coeffs) return f''' function {name}(x) result(y) implicit none real, value :: x real :: y y = {expr} end function {name} ''' print(create_poly_function('func1', 1.2, 3.5, -2.1)) ###Output function func1(x) result(y) implicit none real, value :: x real :: y y = (1.2) & + (3.5)*x**1 & + (-2.1)*x**2 end function func1 ###Markdown The next function will create a random polynomial with a given name. ###Code def create_random_poly_function(name): degree = random.randint(0, 10) coeffs = np.random.normal(size=degree + 1) return create_poly_function(name, coeffs[0], *coeffs[1:]) print(create_random_poly_function('func1')) ###Output function func1(x) result(y) implicit none real, value :: x real :: y y = (1.4265728749067677) & + (-1.2457570237626667)*x**1 & + (1.3201876548848241)*x**2 & + (-0.8790831303443152)*x**3 & + (0.8247735882571433)*x**4 & + (-0.4385351609008346)*x**5 & + (0.4112613698619335)*x**6 & + (-0.6587906293050799)*x**7 & + (-1.0842532329414618)*x**8 & + (0.08736515053718635)*x**9 end function func1 ###Markdown Now we create a Fortran source file with 5 random polynomial functions. ###Code with open('functions.f90', 'w') as src_file: for i in range(5): print(create_random_poly_function(f'func{i + 1:03d}'), file=src_file) ###Output _____no_output_____ ###Markdown Using f2py, this Fortran file can be converted into a Python module. ###Code !f2py -c -m functions functions.f90 ###Output running build running config_cc unifing config_cc, config, build_clib, build_ext, build commands --compiler options running config_fc unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options running build_src build_src building extension "functions" sources f2py options: [] f2py:> /tmp/tmpdgrbvska/src.linux-x86_64-3.8/functionsmodule.c creating /tmp/tmpdgrbvska/src.linux-x86_64-3.8 Reading fortran codes... Reading file 'functions.f90' (format:free) Post-processing... Block: functions Block: func001 Block: func002 Block: func003 Block: func004 Block: func005 Post-processing (stage 2)... Building modules... Building module "functions"... Creating wrapper for Fortran function "func001"("func001")... Constructing wrapper function "func001"... y = func001(x) Creating wrapper for Fortran function "func002"("func002")... Constructing wrapper function "func002"... y = func002(x) Creating wrapper for Fortran function "func003"("func003")... Constructing wrapper function "func003"... y = func003(x) Creating wrapper for Fortran function "func004"("func004")... Constructing wrapper function "func004"... y = func004(x) Creating wrapper for Fortran function "func005"("func005")... Constructing wrapper function "func005"... 
y = func005(x) Wrote C/API module "functions" to file "/tmp/tmpdgrbvska/src.linux-x86_64-3.8/functionsmodule.c" Fortran 77 wrappers are saved to "/tmp/tmpdgrbvska/src.linux-x86_64-3.8/functions-f2pywrappers.f"  adding '/tmp/tmpdgrbvska/src.linux-x86_64-3.8/fortranobject.c' to sources.  adding '/tmp/tmpdgrbvska/src.linux-x86_64-3.8' to include_dirs. copying /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/f2py/src/fortranobject.c -> /tmp/tmpdgrbvska/src.linux-x86_64-3.8 copying /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/f2py/src/fortranobject.h -> /tmp/tmpdgrbvska/src.linux-x86_64-3.8  adding '/tmp/tmpdgrbvska/src.linux-x86_64-3.8/functions-f2pywrappers.f' to sources. build_src: building npy-pkg config files running build_ext customize UnixCCompiler customize UnixCCompiler using build_ext get_default_fcompiler: matching types: '['gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor']' customize Gnu95FCompiler Found executable /usr/bin/gfortran customize Gnu95FCompiler customize Gnu95FCompiler using build_ext building 'functions' extension compiling C sources C compiler: gcc -pthread -B /home/gjb/miniconda3/envs/fortran_mooc/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC  creating /tmp/tmpdgrbvska/tmp creating /tmp/tmpdgrbvska/tmp/tmpdgrbvska creating /tmp/tmpdgrbvska/tmp/tmpdgrbvska/src.linux-x86_64-3.8 compile options: '-I/tmp/tmpdgrbvska/src.linux-x86_64-3.8 -I/home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include -I/home/gjb/miniconda3/envs/fortran_mooc/include/python3.8 -c' gcc: /tmp/tmpdgrbvska/src.linux-x86_64-3.8/functionsmodule.c gcc: /tmp/tmpdgrbvska/src.linux-x86_64-3.8/fortranobject.c In file included from /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h:1822, from /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h:12, from /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h:4, from /tmp/tmpdgrbvska/src.linux-x86_64-3.8/fortranobject.h:13, from /tmp/tmpdgrbvska/src.linux-x86_64-3.8/functionsmodule.c:15: /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h:17:2: warning: #warning "Using deprecated NumPy API, disable it with " "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wcpp-Wcpp]8;;] 17 | #warning "Using deprecated NumPy API, disable it with " \ | ^~~~~~~ In file included from /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h:1822, from /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h:12, from /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h:4, from /tmp/tmpdgrbvska/src.linux-x86_64-3.8/fortranobject.h:13, from /tmp/tmpdgrbvska/src.linux-x86_64-3.8/fortranobject.c:2: /home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h:17:2: warning: #warning "Using deprecated NumPy API, disable it with " "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wcpp-Wcpp]8;;] 17 | #warning "Using 
deprecated NumPy API, disable it with " \ | ^~~~~~~ compiling Fortran sources Fortran f77 compiler: /usr/bin/gfortran -Wall -g -ffixed-form -fno-second-underscore -fPIC -O3 -funroll-loops Fortran f90 compiler: /usr/bin/gfortran -Wall -g -fno-second-underscore -fPIC -O3 -funroll-loops Fortran fix compiler: /usr/bin/gfortran -Wall -g -ffixed-form -fno-second-underscore -Wall -g -fno-second-underscore -fPIC -O3 -funroll-loops compile options: '-I/tmp/tmpdgrbvska/src.linux-x86_64-3.8 -I/home/gjb/miniconda3/envs/fortran_mooc/lib/python3.8/site-packages/numpy/core/include -I/home/gjb/miniconda3/envs/fortran_mooc/include/python3.8 -c' gfortran:f90: functions.f90 functions.f90:19:22: 19 | function func002(x) result(y) | 1 Warning: Unused dummy argument ‘x’ at (1) []8;;https://gcc.gnu.org/onlinedocs/gfortran/Error-and-Warning-Options.html#index-Wunused-dummy-argument-Wunused-dummy-argument]8;;] gfortran:f77: /tmp/tmpdgrbvska/src.linux-x86_64-3.8/functions-f2pywrappers.f /usr/bin/gfortran -Wall -g -Wall -g -shared /tmp/tmpdgrbvska/tmp/tmpdgrbvska/src.linux-x86_64-3.8/functionsmodule.o /tmp/tmpdgrbvska/tmp/tmpdgrbvska/src.linux-x86_64-3.8/fortranobject.o /tmp/tmpdgrbvska/functions.o /tmp/tmpdgrbvska/tmp/tmpdgrbvska/src.linux-x86_64-3.8/functions-f2pywrappers.o -L/usr/lib/gcc/x86_64-pc-linux-gnu/10.2.0/../../../../lib -L/usr/lib/gcc/x86_64-pc-linux-gnu/10.2.0/../../../../lib -lgfortran -o ./functions.cpython-38-x86_64-linux-gnu.so Removing build directory /tmp/tmpdgrbvska ###Markdown If everything went well, f2py has created a shared object that contains the functions, and a module that we can import. ###Code !ls import functions ###Output _____no_output_____ ###Markdown The module contains the functions that were generated. ###Code dir(functions) ###Output _____no_output_____ ###Markdown Note that some bare-bones documentation has been generated automatically. ###Code ?functions.func001 ###Output _____no_output_____ ###Markdown We can collect the function names from the module and store the actual functions in a list. ###Code funcs = list() for name in dir(functions): if not name.startswith('_'): funcs.append(getattr(functions, name)) funcs[1](5.3) ###Output _____no_output_____ ###Markdown Now we can call the functions on some value of $x$. ###Code x = 1.2 for func in funcs: print(f'{func.__name__}({x}) = {func(x)}') ###Output func001(1.2) = -10.370649337768555 func002(1.2) = 1.4554624557495117 func003(1.2) = -1.170980453491211 func004(1.2) = -7.09548282623291 func005(1.2) = 5.6847968101501465
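###Markdown Because each generated function declares a single scalar `real` argument, the f2py wrapper expects a plain float from Python. A small follow-up sketch, added here and assuming `matplotlib` is available, uses `np.vectorize` to evaluate the five compiled polynomials over a grid of x values and plot them.
###Code
import numpy as np
import matplotlib.pyplot as plt

xs = np.linspace(-1.0, 1.0, 201)
for func in funcs:
    # np.vectorize calls the scalar Fortran wrapper once per element of xs
    plt.plot(xs, np.vectorize(func)(xs), label=func.__name__)
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
###Output _____no_output_____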
lecture/lec20/lec20.ipynb
###Markdown Lecture 20 – Grouping and Pivoting Data 94, Spring 2021 ###Code from datascience import * import numpy as np import matplotlib.pyplot as plt %matplotlib inline import plotly.express as px ###Output _____no_output_____ ###Markdown Run the following cell to load in our full dataset. ###Code cars = Table.read_table('data/models-2021.csv') ###Output _____no_output_____ ###Markdown Here we'll take a subset of the rows and columns for illustration. ###Code gm = cars.where('Manufacturer', 'General Motors').select('Brand', 'Model', 'Cylinders', 'MPG').take([0, 1, 9, 16, 20, 30, 31, 35, -1]).take([1, 2, 4, 8, 5, 6, 3, 7, 0]) gm ###Output _____no_output_____ ###Markdown `group` ###Code gm ###Output _____no_output_____ ###Markdown Default behavior ###Code gm.group('Brand') gm.group('Cylinders') # shuffles the rows in the table; returns a new table cars.shuffle() cars.group('Brand').sort('count', descending = True) ###Output _____no_output_____ ###Markdown Specifying a `collect` function ###Code gm.group('Brand', np.mean) ###Output _____no_output_____ ###Markdown How does this work under the hood? ###Code gm.where('Brand', 'Buick') print('mean of Cylinders: ', gm.where('Brand', 'Buick').column('Cylinders').mean()) print('mean of MPG: ', gm.where('Brand', 'Buick').column('MPG').mean()) gm.where('Brand', 'Cadillac') print('mean of Cylinders: ', gm.where('Brand', 'Cadillac').column('Cylinders').mean()) print('mean of MPG: ', gm.where('Brand', 'Cadillac').column('MPG').mean()) gm.where('Brand', 'Chevrolet') print('mean of Cylinders: ', gm.where('Brand', 'Chevrolet').column('Cylinders').mean()) print('mean of MPG: ', gm.where('Brand', 'Chevrolet').column('MPG').mean()) gm.where('Brand', 'GMC') print('mean of Cylinders: ', gm.where('Brand', 'GMC').column('Cylinders').mean()) print('mean of MPG: ', gm.where('Brand', 'GMC').column('MPG').mean()) ###Output _____no_output_____ ###Markdown If you want a more concise way of doing the above: ###Code for brand in np.unique(gm.column('Brand')): brand_only = gm.where('Brand', brand) print(brand) print('mean of Cylinders: ', brand_only.column('Cylinders').mean()) print('mean of MPG: ', brand_only.column('MPG').mean()) print('\n') ###Output _____no_output_____ ###Markdown What if we use other `collect` functions? ###Code gm gm.group('Brand', sum) gm.group('Brand', list) gm.group('Brand', len) gm.group('Brand', max) ###Output _____no_output_____ ###Markdown Quick Check 1 ###Code cars.shuffle() # cars.group('Cylinders', ____) \ # .where(____, 6) \ # .column(____) \ # .____ cars.group('Cylinders', np.mean).bar('Cylinders', 'MPG mean') ###Output _____no_output_____ ###Markdown `group`ing by multiple columns ###Code cars cars.group(['Manufacturer', 'Brand']).show() cars.group(['Brand', 'Cylinders'], np.mean) cars.group(['Manufacturer', 'Brand', 'Displacement']) ###Output _____no_output_____ ###Markdown `pivot` ###Code cars.group(['Brand', 'Cylinders']).show() cars.pivot('Cylinders', 'Brand', 'MPG', np.mean) ###Output _____no_output_____ ###Markdown Quick Check 2 ###Code cars # cars.pivot(___, ___, ___, ___) ###Output _____no_output_____
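###Markdown To see that `pivot` is essentially a reshaped two-column `group`, compare the two calls below on the small `gm` table; this is a sketch added for illustration. The numbers in the wide grid are the same `MPG mean` values as in the long table, just arranged with Brands as rows and Cylinder counts as columns.
###Code
# long format: one row per (Brand, Cylinders) combination that occurs
gm.select('Brand', 'Cylinders', 'MPG').group(['Brand', 'Cylinders'], np.mean).show()

# wide format: Brands as rows, Cylinder counts as columns, mean MPG in the cells
gm.pivot('Cylinders', 'Brand', 'MPG', np.mean)
###Output _____no_output_____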
NeuralNetworks/RNN/RNNProj.ipynb
###Markdown Recurrent Neural Networks with Keras IntroductionA RNN layer uses a `for` loop to iterate over the timesteps of a sequence, while maintaining an internal state that encodes information about the timestes it has seen so far. Imports ###Code import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds # GPU Test if tf.test.gpu_device_name(): print('Default GPU Device:{}'.format(tf.test.gpu_device_name())) tf.config.list_physical_devices("GPU") ###Output _____no_output_____ ###Markdown Dataset ###Code dataset, info = tfds.load('imdb_reviews/subwords8k', with_info=True, as_supervised=True) train_dataset, test_dataset = dataset['train'], dataset['test'] encoder = info.features['text'].encoder print('Vocabulary size: {}'.format(encoder.vocab_size)) sample_string = 'Hello TensorFlow.' encoded_string = encoder.encode(sample_string) print('Encoded string is {}'.format(encoded_string)) original_string = encoder.decode(encoded_string) print('The original string: "{}"'.format(original_string)) assert original_string == sample_string for index in encoded_string: print('{} ----> {}'.format(index, encoder.decode([index]))) BUFFER_SIZE = 10000 BATCH_SIZE = 64 train_dataset = train_dataset.shuffle(BUFFER_SIZE) train_dataset = train_dataset.padded_batch(BATCH_SIZE) test_dataset = test_dataset.padded_batch(BATCH_SIZE) ###Output _____no_output_____ ###Markdown Model ###Code model = keras.Sequential([ layers.Embedding(encoder.vocab_size, 64), layers.Bidirectional(layers.LSTM(64)), layers.Dense(64, activation='relu'), layers.Dense(1) ]) model.summary() ###Output _____no_output_____ ###Markdown Training ###Code EPOCHS = 10 VALIDAITON_STEPS = 30 model.compile(loss=keras.losses.BinaryCrossentropy(from_logits=True), optimizer=keras.optimizers.Adam(1e-4), metrics=['accuracy']) history = model.fit(train_dataset, epochs=EPOCHS, validation_data=test_dataset, validation_steps=VALIDAITON_STEPS) ###Output _____no_output_____ ###Markdown Evaluate ###Code test_loss, test_acc = model.evaluate(test_dataset) print('Test Loss: {}'.format(test_loss)) print('Test Accuracy: {}'.format(test_acc)) #Helper function def plot_graphs(history, metric): plt.plot(history.history[metric]) plt.plot(history.history['val_'+metric], '') plt.xlabel("Epochs") plt.ylabel(metric) plt.legend([metric, 'val_'+metric]) plt.show() plot_graphs(history, 'accuracy') ###Output _____no_output_____
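###Markdown A short inference sketch, not part of the original notebook: `pad_to_size` and `predict_sentiment` are helpers introduced here to run the trained model on a new review. The model ends in a linear `Dense(1)` layer and was trained with `from_logits=True`, so a positive logit suggests positive sentiment and a negative logit suggests negative sentiment.
###Code
def pad_to_size(vec, size):
    # right-pad the encoded review with zeros to a fixed length
    return vec + [0] * (size - len(vec))

def predict_sentiment(text, pad_size=64):
    encoded = encoder.encode(text)
    padded = pad_to_size(encoded, pad_size)
    logit = model.predict(np.array([padded]))[0][0]
    return logit

print(predict_sentiment("The movie was cool. The animation and the graphics were out of this world."))
###Output _____no_output_____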
m2_demo5 - Box Plot.ipynb
###Markdown Module 2 - Exploratory Data AnalysisDemo 5: Visualize Data using Box PlotIn this demo, you will be shown how to visualize simple data using a box plot and its parameters. ###Code #Step1: Import the required libraries import pandas as pd import seaborn as sns import matplotlib.pyplot as plt #Step2: Read data from the uploaded csv file df = pd.read_csv('cancer.csv') #Step3: Fetch the preview of dataset df.head() ###Output _____no_output_____ ###Markdown This data is sourced from Breast Cancer Wisconsin (Diagnostic) Data Set. ###Code #Step4: Look at all the columns in the dataframe df.columns #Step5: Graph a boxplot for a specific numerical column plt.boxplot(df.radius_mean) #Step6: boxplot parameters plt.boxplot(df.radius_mean, showmeans=True, vert=False, sym='b+') ###Output _____no_output_____ ###Markdown - showmeans parameter shows the mean line. In this case, it is overlapping with the median (you can confirm it using meanline parameter).- vert parameter is true by default. You can change the orientation of the boxplot to horizontal by making it false.- sym parameter changed the outlier symbol to (+). 'b' changed the color to blue. If you specify it to none, it will remove the outliers.You can check out all the paramters for plotting the boxplot by pressing shift+tab+tab. Now, let us use this boxplot to analyze the relationship between a categorical feature (diagnosis: malignant or benign tumor) and a continuous feature (area_mean). We will be using three ways to graph the boxplot: ###Code #Step7: Graph the boxplot using pandas df.boxplot(column = 'area_mean', by = 'diagnosis'); plt.title('') #Step8: Graph the boxplot using seaborn sns.boxplot(x='diagnosis', y='area_mean', data=df) #Step9: Graph the boxplot using matplotlib malignant = df[df['diagnosis']=='M']['area_mean'] benign = df[df['diagnosis']=='B']['area_mean'] fig = plt.figure() ax = fig.add_subplot(111) ax.boxplot([malignant,benign], labels=['M', 'B']) ###Output _____no_output_____
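###Markdown A quick numeric check, added here, of what the box plot is drawing: by default matplotlib places the whiskers at the most extreme data points within 1.5×IQR of the quartiles, and anything beyond those fences is drawn with the outlier symbol.
###Code
#Step10: Reproduce the box plot numbers - quartiles, IQR and the 1.5*IQR outlier fences
q1 = df.radius_mean.quantile(0.25)
q3 = df.radius_mean.quantile(0.75)
iqr = q3 - q1
lower_fence = q1 - 1.5 * iqr
upper_fence = q3 + 1.5 * iqr
outliers = df.radius_mean[(df.radius_mean < lower_fence) | (df.radius_mean > upper_fence)]
print('Q1:', q1, ' Q3:', q3, ' IQR:', iqr)
print('Fences:', lower_fence, upper_fence, ' points shown as outliers:', outliers.count())
###Output _____no_output_____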
Lectures/Lecture04 -- Intro to CLIMLAB.ipynb
###Markdown [ATM 623: Climate Modeling](../index.ipynb)[Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany Lecture 4: Building simple climate models using `climlab` Warning: content out of date and not maintainedYou really should be looking at [The Climate Laboratory book](https://brian-rose.github.io/ClimateLaboratoryBook) by Brian Rose, where all the same content (and more!) is kept up to date.***Here you are likely to find broken links and broken code.*** About these notes:This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways:- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)- A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html).[Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab ###Code # Ensure compatibility with Python 2 and 3 from __future__ import print_function, division ###Output _____no_output_____ ###Markdown Contents1. [Introducing `climlab`](section1)2. [Using `climlab` to implement the zero-dimensional energy balance model](section2)3. [Run the zero-dimensional EBM out to equilibrium](section3)4. [A climate change scenario in the EBM](section4)5. [Further `climlab` resources](section5) ____________ 1. Introducing `climlab`____________`climlab` is a python package for process-oriented climate modeling.It is based on a very general concept of a model as a collection of individual, interacting processes. `climlab` defines a base class called `Process`, whichcan contain an arbitrarily complex tree of sub-processes (each also some sub-class of `Process`). Every climate process (radiative, dynamical, physical, turbulent, convective, chemical, etc.) can be simulated as a stand-aloneprocess model given appropriate input, or as a sub-process of a more complex model. New classes of model can easily be defined and run interactively by putting together anappropriate collection of sub-processes.`climlab` is an open-source community project. The latest code can always be found on `github`:https://github.com/brian-rose/climlabYou can install `climlab` by doing```conda install -c conda-forge climlab``` ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt import climlab ###Output _____no_output_____ ###Markdown ____________ 2. Using `climlab` to implement the zero-dimensional energy balance model____________ Recall that we have worked with a zero-dimensional Energy Balance Model$$ C \frac{dT_s}{dt} = (1-\alpha) Q - \tau \sigma T_s^4 $$ Here we are going to implement this exact model using `climlab`.Yes, we have already written code to implement this model, but we are going to repeat this effort here as a way of learning how to use `climlab`.There are tools within `climlab` to implement much more complicated models, but the basic interface will be the same. 
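Before building the model it is worth writing down where it should settle. Setting $dT_s/dt = 0$ in the equation above gives the equilibrium temperature $$ T_{eq} = \left[ \frac{(1-\alpha) Q}{\tau \sigma} \right]^{1/4} $$ With the parameter values used in the code below ($\alpha = 0.299$, $Q = 341.3$ W m$^{-2}$, $\tau = 0.612$, $\sigma = 5.67 \times 10^{-8}$ W m$^{-2}$ K$^{-4}$) this works out to roughly 288 K, or about 15°C, a useful number to keep in mind when we let the model run to equilibrium later.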
###Code # create a zero-dimensional domain with a single surface temperature state = climlab.surface_state(num_lat=1, # a single point water_depth = 100., # 100 meters slab of water (sets the heat capacity) ) state ###Output _____no_output_____ ###Markdown Here we have created a dictionary called `state` with a single item called `Ts`: ###Code state['Ts'] ###Output _____no_output_____ ###Markdown This dictionary holds the state variables for our model -- which is this case is a single number! It is a **temperature in degrees Celsius**. For convenience, we can access the same data as an attribute (which lets us use tab-autocomplete when doing interactive work): ###Code state.Ts ###Output _____no_output_____ ###Markdown It is also possible to see this `state` dictionary as an `xarray.Dataset` object: ###Code climlab.to_xarray(state) # create the longwave radiation process olr = climlab.radiation.Boltzmann(name='OutgoingLongwave', state=state, tau = 0.612, eps = 1., timestep = 60*60*24*30.) # Look at what we just created print(olr) # create the shortwave radiation process asr = climlab.radiation.SimpleAbsorbedShortwave(name='AbsorbedShortwave', state=state, insolation=341.3, albedo=0.299, timestep = 60*60*24*30.) # Look at what we just created print(asr) # couple them together into a single model ebm = olr + asr # Give the parent process name ebm.name = 'EnergyBalanceModel' # Examine the model object print(ebm) ###Output climlab Process of type <class 'climlab.process.time_dependent_process.TimeDependentProcess'>. State variables and domain shapes: Ts: (1, 1) The subprocess tree: EnergyBalanceModel: <class 'climlab.process.time_dependent_process.TimeDependentProcess'> OutgoingLongwave: <class 'climlab.radiation.boltzmann.Boltzmann'> AbsorbedShortwave: <class 'climlab.radiation.absorbed_shorwave.SimpleAbsorbedShortwave'> ###Markdown The object called `ebm` here is the entire model -- including its current state (the temperature `Ts`) as well as all the methods needed to integrated forward in time! The current model state, accessed two ways: ###Code ebm.state ebm.Ts ###Output _____no_output_____ ###Markdown Here is some internal information about the timestep of the model: ###Code print(ebm.time['timestep']) print(ebm.time['steps']) ###Output 2592000.0 0 ###Markdown This says the timestep is 2592000 seconds (30 days!), and the model has taken 0 steps forward so far. To take a single step forward: ###Code ebm.step_forward() ebm.Ts ###Output _____no_output_____ ###Markdown The model got colder!To see why, let's look at some useful diagnostics computed by this model: ###Code ebm.diagnostics ###Output _____no_output_____ ###Markdown This is another dictionary, now with two items. They should make sense to you.Just like the `state` variables, we can access these `diagnostics` variables as attributes: ###Code ebm.OLR ebm.ASR ###Output _____no_output_____ ###Markdown So why did the model get colder in the first timestep?What do you think will happen next? ____________ 3. Run the zero-dimensional EBM out to equilibrium____________ Let's look at how the model adjusts toward its equilibrium temperature.Exercise:- Using a `for` loop, take 500 steps forward with this model- Store the current temperature at each step in an array- Make a graph of the temperature as a function of time ____________ 4. A climate change scenario ____________ Suppose we want to investigate the effects of a small decrease in the transmissitivity of the atmosphere `tau`. 
Previously we used the zero-dimensional model to investigate a **hypothetical climate change scenario** in which:- the transmissitivity of the atmosphere `tau` decreases to 0.57- the planetary albedo increases to 0.32How would we do that using `climlab`? Recall that the model is comprised of two sub-components: ###Code for name, process in ebm.subprocess.items(): print(name) print(process) ###Output OutgoingLongwave climlab Process of type <class 'climlab.radiation.boltzmann.Boltzmann'>. State variables and domain shapes: Ts: (1, 1) The subprocess tree: OutgoingLongwave: <class 'climlab.radiation.boltzmann.Boltzmann'> AbsorbedShortwave climlab Process of type <class 'climlab.radiation.absorbed_shorwave.SimpleAbsorbedShortwave'>. State variables and domain shapes: Ts: (1, 1) The subprocess tree: AbsorbedShortwave: <class 'climlab.radiation.absorbed_shorwave.SimpleAbsorbedShortwave'> ###Markdown The parameter `tau` is a property of the `OutgoingLongwave` subprocess: ###Code ebm.subprocess['OutgoingLongwave'].tau ###Output _____no_output_____ ###Markdown and the parameter `albedo` is a property of the `AbsorbedShortwave` subprocess: ###Code ebm.subprocess['AbsorbedShortwave'].albedo ###Output _____no_output_____ ###Markdown Let's make an exact clone of our model and then change these two parameters: ###Code ebm2 = climlab.process_like(ebm) print(ebm2) ebm2.subprocess['OutgoingLongwave'].tau = 0.57 ebm2.subprocess['AbsorbedShortwave'].albedo = 0.32 ###Output _____no_output_____ ###Markdown Now our model is out of equilibrium and the climate will change!To see this without actually taking a step forward: ###Code # Computes diagnostics based on current state but does not change the state ebm2.compute_diagnostics() ebm2.ASR - ebm2.OLR ###Output _____no_output_____ ###Markdown Shoud the model warm up or cool down? Well, we can find out: ###Code ebm2.Ts ebm2.step_forward() ebm2.Ts ###Output _____no_output_____ ###Markdown Automatic timestepping Often we want to integrate a model forward in time to equilibrium without needing to store information about the transient state.`climlab` offers convenience methods to do this easily: ###Code ebm3 = climlab.process_like(ebm2) ebm3.integrate_years(50) # What is the current temperature? ebm3.Ts # How close are we to energy balance? ebm3.ASR - ebm3.OLR # We should be able to accomplish the exact same thing with explicit timestepping for n in range(608): ebm2.step_forward() ebm2.Ts ebm2.ASR - ebm2.OLR ###Output _____no_output_____ ###Markdown ____________ 5. Further `climlab` resources____________ We will be using `climlab` extensively throughout this course. Lots of examples of more advanced usage are found here in the course notes. Here are some links to other resources:- The documentation is hosted at - Source code (for both software and docs) are at - [A video of a talk I gave about `climlab` at the 2018 AMS Python symposium](https://ams.confex.com/ams/98Annual/videogateway.cgi/id/44948?recordingid=44948) (January 2018)- [Slides from a talk and demonstration that I gave in Febrary 2018](https://livealbany-my.sharepoint.com/:f:/g/personal/brose_albany_edu/EuA2afxy5-hNkzNhHgkp_HYBYcJumR3l6ukRVIEl4W3MmA?e=sbXN0d) (The Apple Keynote version contains some animations that will not show up in the pdf version) ____________ Version information____________ ###Code %load_ext version_information %version_information numpy, matplotlib, climlab ###Output Loading extensions from ~/.ipython/extensions is deprecated. 
We recommend managing extensions like any other Python packages, in site-packages. ###Markdown [ATM 623: Climate Modeling](../index.ipynb)[Brian E. J. Rose](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany Lecture 4: Building simple climate models using `climlab` About these notes:This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways:- The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware- The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)- A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html).[Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab ###Code # Ensure compatibility with Python 2 and 3 from __future__ import print_function, division ###Output _____no_output_____ ###Markdown Contents1. [Introducing `climlab`](section1)2. [Using `climlab` to implement the zero-dimensional energy balance model](section2)3. [Run the zero-dimensional EBM out to equilibrium](section3)4. [A climate change scenario in the EBM](section4)5. [Further `climlab` resources](section5) ____________ 1. Introducing `climlab`____________`climlab` is a python package for process-oriented climate modeling.It is based on a very general concept of a model as a collection of individual, interacting processes. `climlab` defines a base class called `Process`, whichcan contain an arbitrarily complex tree of sub-processes (each also some sub-class of `Process`). Every climate process (radiative, dynamical, physical, turbulent, convective, chemical, etc.) can be simulated as a stand-aloneprocess model given appropriate input, or as a sub-process of a more complex model. New classes of model can easily be defined and run interactively by putting together anappropriate collection of sub-processes.`climlab` is an open-source community project. The latest code can always be found on `github`:https://github.com/brian-rose/climlabYou can install `climlab` by doing```conda install -c conda-forge climlab``` ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt import climlab ###Output _____no_output_____ ###Markdown ____________ 2. Using `climlab` to implement the zero-dimensional energy balance model____________ Recall that we have worked with a zero-dimensional Energy Balance Model$$ C \frac{dT_s}{dt} = (1-\alpha) Q - \tau \sigma T_s^4 $$ Here we are going to implement this exact model using `climlab`.Yes, we have already written code to implement this model, but we are going to repeat this effort here as a way of learning how to use `climlab`.There are tools within `climlab` to implement much more complicated models, but the basic interface will be the same. 
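###Markdown Before handing the model over to `climlab`, it can help to see the same budget stepped forward by hand. The sketch below integrates $C \frac{dT_s}{dt} = (1-\alpha) Q - \tau \sigma T_s^4$ with a simple forward-Euler step, reusing the parameter values that appear later in this notebook (`tau = 0.612`, `albedo = 0.299`, `insolation = 341.3`, a 100 m water slab, a 30-day timestep). The conversion of the slab depth into a heat capacity (density times specific heat of water) is an assumption made here for illustration and is not taken from `climlab`; note also that this sketch works in Kelvin, whereas the `climlab` state variable `Ts` is in degrees Celsius. ###Code
# Hand-rolled zero-dimensional EBM, for comparison with the climlab version below
Q = 341.3            # insolation (W/m2), same value passed to climlab below
albedo = 0.299       # planetary albedo
tau = 0.612          # atmospheric transmissivity
sigma = 5.67E-8      # Stefan-Boltzmann constant (W/m2/K4)

# Assumed heat capacity of a 100 m slab of water: density * specific heat * depth
C = 1000. * 4184. * 100.    # J/m2/K

dt = 60 * 60 * 24 * 30.     # 30-day timestep in seconds, as in the climlab processes below

def euler_step_forward(Ts, n_steps=1):
    """Forward-Euler integration of C dTs/dt = (1-albedo)*Q - tau*sigma*Ts**4 (Ts in K)."""
    for _ in range(n_steps):
        ASR = (1 - albedo) * Q        # absorbed shortwave radiation
        OLR = tau * sigma * Ts**4     # outgoing longwave radiation
        Ts = Ts + dt / C * (ASR - OLR)
    return Ts

# One 30-day step starting from 15 degrees C (288.15 K)
Ts_after_one_step = euler_step_forward(288.15)
###Output _____no_output_____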
###Code # create a zero-dimensional domain with a single surface temperature state = climlab.surface_state(num_lat=1, # a single point water_depth = 100., # 100 meters slab of water (sets the heat capacity) ) state ###Output _____no_output_____ ###Markdown Here we have created a dictionary called `state` with a single item called `Ts`: ###Code state['Ts'] ###Output _____no_output_____ ###Markdown This dictionary holds the state variables for our model -- which is this case is a single number! It is a **temperature in degrees Celsius**. For convenience, we can access the same data as an attribute (which lets us use tab-autocomplete when doing interactive work): ###Code state.Ts ###Output _____no_output_____ ###Markdown It is also possible to see this `state` dictionary as an `xarray.Dataset` object: ###Code climlab.to_xarray(state) # create the longwave radiation process olr = climlab.radiation.Boltzmann(name='OutgoingLongwave', state=state, tau = 0.612, eps = 1., timestep = 60*60*24*30.) # Look at what we just created print(olr) # create the shortwave radiation process asr = climlab.radiation.SimpleAbsorbedShortwave(name='AbsorbedShortwave', state=state, insolation=341.3, albedo=0.299, timestep = 60*60*24*30.) # Look at what we just created print(asr) # couple them together into a single model ebm = olr + asr # Give the parent process name ebm.name = 'EnergyBalanceModel' # Examine the model object print(ebm) ###Output climlab Process of type <class 'climlab.process.time_dependent_process.TimeDependentProcess'>. State variables and domain shapes: Ts: (1, 1) The subprocess tree: EnergyBalanceModel: <class 'climlab.process.time_dependent_process.TimeDependentProcess'> OutgoingLongwave: <class 'climlab.radiation.boltzmann.Boltzmann'> AbsorbedShortwave: <class 'climlab.radiation.absorbed_shorwave.SimpleAbsorbedShortwave'> ###Markdown The object called `ebm` here is the entire model -- including its current state (the temperature `Ts`) as well as all the methods needed to integrated forward in time! The current model state, accessed two ways: ###Code ebm.state ebm.Ts ###Output _____no_output_____ ###Markdown Here is some internal information about the timestep of the model: ###Code print(ebm.time['timestep']) print(ebm.time['steps']) ###Output 2592000.0 0 ###Markdown This says the timestep is 2592000 seconds (30 days!), and the model has taken 0 steps forward so far. To take a single step forward: ###Code ebm.step_forward() ebm.Ts ###Output _____no_output_____ ###Markdown The model got colder!To see why, let's look at some useful diagnostics computed by this model: ###Code ebm.diagnostics ###Output _____no_output_____ ###Markdown This is another dictionary, now with two items. They should make sense to you.Just like the `state` variables, we can access these `diagnostics` variables as attributes: ###Code ebm.OLR ebm.ASR ###Output _____no_output_____ ###Markdown So why did the model get colder in the first timestep?What do you think will happen next? ____________ 3. Run the zero-dimensional EBM out to equilibrium____________ Let's look at how the model adjusts toward its equilibrium temperature.Exercise:- Using a `for` loop, take 500 steps forward with this model- Store the current temperature at each step in an array- Make a graph of the temperature as a function of time ____________ 4. A climate change scenario ____________ Suppose we want to investigate the effects of a small decrease in the transmissitivity of the atmosphere `tau`. 
Previously we used the zero-dimensional model to investigate a **hypothetical climate change scenario** in which:- the transmissitivity of the atmosphere `tau` decreases to 0.57- the planetary albedo increases to 0.32How would we do that using `climlab`? Recall that the model is comprised of two sub-components: ###Code for name, process in ebm.subprocess.items(): print(name) print(process) ###Output OutgoingLongwave climlab Process of type <class 'climlab.radiation.boltzmann.Boltzmann'>. State variables and domain shapes: Ts: (1, 1) The subprocess tree: OutgoingLongwave: <class 'climlab.radiation.boltzmann.Boltzmann'> AbsorbedShortwave climlab Process of type <class 'climlab.radiation.absorbed_shorwave.SimpleAbsorbedShortwave'>. State variables and domain shapes: Ts: (1, 1) The subprocess tree: AbsorbedShortwave: <class 'climlab.radiation.absorbed_shorwave.SimpleAbsorbedShortwave'> ###Markdown The parameter `tau` is a property of the `OutgoingLongwave` subprocess: ###Code ebm.subprocess['OutgoingLongwave'].tau ###Output _____no_output_____ ###Markdown and the parameter `albedo` is a property of the `AbsorbedShortwave` subprocess: ###Code ebm.subprocess['AbsorbedShortwave'].albedo ###Output _____no_output_____ ###Markdown Let's make an exact clone of our model and then change these two parameters: ###Code ebm2 = climlab.process_like(ebm) print(ebm2) ebm2.subprocess['OutgoingLongwave'].tau = 0.57 ebm2.subprocess['AbsorbedShortwave'].albedo = 0.32 ###Output _____no_output_____ ###Markdown Now our model is out of equilibrium and the climate will change!To see this without actually taking a step forward: ###Code # Computes diagnostics based on current state but does not change the state ebm2.compute_diagnostics() ebm2.ASR - ebm2.OLR ###Output _____no_output_____ ###Markdown Shoud the model warm up or cool down? Well, we can find out: ###Code ebm2.Ts ebm2.step_forward() ebm2.Ts ###Output _____no_output_____ ###Markdown Automatic timestepping Often we want to integrate a model forward in time to equilibrium without needing to store information about the transient state.`climlab` offers convenience methods to do this easily: ###Code ebm3 = climlab.process_like(ebm2) ebm3.integrate_years(50) # What is the current temperature? ebm3.Ts # How close are we to energy balance? ebm3.ASR - ebm3.OLR # We should be able to accomplish the exact same thing with explicit timestepping for n in range(608): ebm2.step_forward() ebm2.Ts ebm2.ASR - ebm2.OLR ###Output _____no_output_____ ###Markdown ____________ 5. Further `climlab` resources____________ We will be using `climlab` extensively throughout this course. Lots of examples of more advanced usage are found here in the course notes. Here are some links to other resources:- The documentation is hosted at - Source code (for both software and docs) are at - [A video of a talk I gave about `climlab` at the 2018 AMS Python symposium](https://ams.confex.com/ams/98Annual/videogateway.cgi/id/44948?recordingid=44948) (January 2018)- [Slides from a talk and demonstration that I gave in Febrary 2018](https://livealbany-my.sharepoint.com/:f:/g/personal/brose_albany_edu/EuA2afxy5-hNkzNhHgkp_HYBYcJumR3l6ukRVIEl4W3MmA?e=sbXN0d) (The Apple Keynote version contains some animations that will not show up in the pdf version) ____________ Version information____________ ###Code %load_ext version_information %version_information numpy, scipy, matplotlib ###Output Loading extensions from ~/.ipython/extensions is deprecated. 
We recommend managing extensions like any other Python packages, in site-packages.
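###Markdown A closed-form cross-check of the equilibria that the time-stepping above converges to: setting $ASR = OLR$ in the model equation gives $T_{eq} = \left[ \frac{(1-\alpha) Q}{\tau \sigma} \right]^{1/4}$. The short sketch below evaluates this for the reference parameters (`tau = 0.612`, `albedo = 0.299`) and the perturbed ones (`tau = 0.57`, `albedo = 0.32`) used above; the Stefan-Boltzmann constant is the only value introduced here, and the result is in Kelvin. ###Code
sigma = 5.67E-8   # Stefan-Boltzmann constant (W/m2/K4)
Q = 341.3         # insolation (W/m2), as used in the models above

def equilibrium_Ts(tau, albedo):
    """Equilibrium surface temperature (K) from (1 - albedo) * Q = tau * sigma * Ts**4."""
    return ((1 - albedo) * Q / (tau * sigma)) ** 0.25

Teq_reference = equilibrium_Ts(0.612, 0.299)
Teq_perturbed = equilibrium_Ts(0.57, 0.32)
equilibrium_warming = Teq_perturbed - Teq_reference
###Output _____no_output_____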
1. Machine Learning Foundations - A Case Study Approach/Assignments/3. Classification/Analyzing product sentiment.ipynb
###Markdown Predicting sentiment from product reviews Fire up GraphLab Create ###Code import graphlab ###Output _____no_output_____ ###Markdown Read some product review dataLoading reviews for a set of baby products. ###Code products = graphlab.SFrame('amazon_baby.gl/') ###Output This non-commercial license of GraphLab Create for academic use is assigned to [email protected] and will expire on January 02, 2018. ###Markdown Let's explore this data togetherData includes the product name, the review text and the rating of the review. ###Code products.head() ###Output _____no_output_____ ###Markdown Build the word count vector for each review ###Code products['word_count'] = graphlab.text_analytics.count_words(products['review']) products.head() graphlab.canvas.set_target('ipynb') products['name'].show() ###Output _____no_output_____ ###Markdown Examining the reviews for most-sold product: 'Vulli Sophie the Giraffe Teether' ###Code giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether'] len(giraffe_reviews) giraffe_reviews['rating'].show(view='Categorical') ###Output _____no_output_____ ###Markdown Build a sentiment classifier ###Code products['rating'].show(view='Categorical') ###Output _____no_output_____ ###Markdown Define what's a positive and a negative sentimentWe will ignore all reviews with rating = 3, since they tend to have a neutral sentiment. Reviews with a rating of 4 or higher will be considered positive, while the ones with rating of 2 or lower will have a negative sentiment. ###Code #ignore all 3* reviews products = products[products['rating'] != 3] #positive sentiment = 4* or 5* reviews products['sentiment'] = products['rating'] >=4 products.head() ###Output _____no_output_____ ###Markdown Let's train the sentiment classifier ###Code train_data,test_data = products.random_split(.8, seed=0) sentiment_model = graphlab.logistic_classifier.create(train_data, target='sentiment', features=['word_count'], validation_set=test_data) ###Output _____no_output_____ ###Markdown Evaluate the sentiment model ###Code sentiment_model.evaluate(test_data, metric='roc_curve') sentiment_model.show(view='Evaluation') ###Output _____no_output_____ ###Markdown Applying the learned model to understand sentiment for Giraffe ###Code giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability') giraffe_reviews.head() ###Output _____no_output_____ ###Markdown Sort the reviews based on the predicted sentiment and explore ###Code giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False) giraffe_reviews.head() ###Output _____no_output_____ ###Markdown Most positive reviews for the giraffe ###Code giraffe_reviews[0]['review'] giraffe_reviews[1]['review'] ###Output _____no_output_____ ###Markdown Show most negative reviews for giraffe ###Code giraffe_reviews[-1]['review'] giraffe_reviews[-2]['review'] selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate'] def awesome_count(d): if 'awesome' in d: return d['awesome'] else: return 0 products['awesome'] = products['word_count'].apply(awesome_count) def great_count(d): if 'great' in d: return d['great'] else: return 0 #return count products['great'] = products['word_count'].apply(great_count) def fantastic_count(d): if 'fantastic' in d: return d['fantastic'] else: return 0 products['fantastic'] = products['word_count'].apply(fantastic_count) products['awesome'].show('Categorical') def amazing_count(d): 
if 'amazing' in d: return d['amazing'] else: return 0 products['amazing'] = products['word_count'].apply(amazing_count) def love_count(d): if 'love' in d: return d['love'] else: return 0 products['love'] = products['word_count'].apply(love_count) def horrible_count(d): if 'horrible' in d: return d['horrible'] else: return 0 products['horrible'] = products['word_count'].apply(horrible_count) def bad_count(d): if 'bad' in d: return d['bad'] else: return 0 products['bad'] = products['word_count'].apply(bad_count) def terrible_count(d): if 'terrible' in d: return d['terrible'] else: return 0 products['terrible'] = products['word_count'].apply(terrible_count) def awful_count(d): if 'awful' in d: return d['awful'] else: return 0 products['awful'] = products['word_count'].apply(awful_count) def wow_count(d): if 'wow' in d: return d['wow'] else: return 0 products['wow'] = products['word_count'].apply(wow_count) def hate_count(d): if 'hate' in d: return d['hate'] else: return 0 products['hate'] = products['word_count'].apply(hate_count) products for i in selected_words: print products[i].sum() train_data,test_data = products.random_split(.8, seed=0) selected_words_model = graphlab.logistic_classifier.create(train_data, target='sentiment', features=selected_words, validation_set=test_data) selected_words_model['coefficients'].sort('value', ascending=True) diaper_champ_reviews = products[products['name'] == "Baby Trend Diaper Champ"] len(diaper_champ_reviews) diaper_champ_reviews['predicted_sentiment'] = sentiment_model.predict(diaper_champ_reviews, output_type='probability') diaper_champ_reviews.head() diaper_champ_reviews = diaper_champ_reviews.sort('predicted_sentiment', ascending=False) diaper_champ_reviews.head() selected_words_model.evaluate(test_data) selected_words_model.predict(diaper_champ_reviews[0:1], output_type='probability') sentiment_model.predict(diaper_champ_reviews[0:1], output_type='probability') diaper_champ_reviews[0:1]['review'] ###Output _____no_output_____
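###Markdown The eleven `*_count` helpers above all repeat the same pattern. A more compact sketch (not part of the original assignment code) builds the same per-word columns with one generic helper and a loop over `selected_words`; it assumes the `products` SFrame and the `selected_words` list defined above. ###Code
# One generic counter instead of eleven near-identical functions
def count_word(word_counts, word):
    # word_counts is the per-review dictionary stored in the 'word_count' column
    return word_counts.get(word, 0)

for word in selected_words:
    # bind `word` via a default argument so the result does not depend on lazy evaluation order
    products[word] = products['word_count'].apply(lambda d, w=word: count_word(d, w))
###Output _____no_output_____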
docs/notebook/mindspore_loading_text_dataset.ipynb
###Markdown Loading Text Datasets Overview The `mindspore.dataset` module provided by MindSpore helps users build dataset objects and read text data in batches. Each dataset class also comes with built-in data-processing and tokenization operators, so that during training the data flows continuously into the training system like water through a pipeline, improving training performance. MindSpore also supports data loading in distributed scenarios: the number of shards can be specified when a dataset is loaded, see [Loading Datasets in Data Parallel Mode](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/distributed_training_ascend.htmlid6) for details. This tutorial briefly demonstrates how to load and process text data with MindSpore. Overall Workflow - Preparation. - Loading the dataset. - Data processing. - Tokenization. Preparation Importing Modules Import the `mindspore.dataset` and `mindspore.dataset.text` modules. ###Code
import mindspore.dataset as ds
import mindspore.dataset.text as text
###Output _____no_output_____ ###Markdown Preparing the Required Dataset Create a text file with the following content:```Welcome to Beijing北京欢迎您!我喜欢English!``` ###Code
import os

if not os.path.exists('./datasets'):
    os.mkdir('./datasets')
file_handle=open('./datasets/tokenizer.txt',mode='w')
file_handle.write('Welcome to Beijing \n北京欢迎您! \n我喜欢English! \n')
file_handle.close()
! tree ./datasets
###Output ./datasets
└── tokenizer.txt

0 directories, 1 file
###Markdown Loading the Dataset MindSpore supports loading the classic datasets commonly used in the text domain as well as datasets stored in a variety of formats; a dataset can also be loaded in a custom way by writing a custom dataset class. The detailed loading methods for the different datasets are described in the [Dataset Loading](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/dataset_loading.html) chapter of the programming guide. The following demonstrates loading a dataset with the `TextFileDataset` class of the `MindSpore.dataset` module. 1. Configure the dataset directory and create the dataset object. ###Code
DATA_FILE = './datasets/tokenizer.txt'
dataset = ds.TextFileDataset(DATA_FILE, shuffle=False)
###Output _____no_output_____ ###Markdown 2. Create a dictionary iterator and fetch the data through it. ###Code
for data in dataset.create_dict_iterator(output_numpy=True):
    print(text.to_str(data['text']))
###Output Welcome to Beijing
北京欢迎您!
我喜欢English!
###Markdown Data Processing The data-processing operators currently supported by MindSpore and their detailed usage are described in the [Data Processing](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/pipeline.html) chapter of the programming guide. Once the `dataset` object has been created, data-processing operations such as `SlidingWindow` and `shuffle` can be applied to it. - SlidingWindow The following demonstrates slicing text data with `SlidingWindow`. 1. Load the dataset. ###Code
inputs = [["大","家","早","上","好"]]
dataset_slide = ds.NumpySlicesDataset(inputs, column_names=['text'], shuffle=False)
###Output _____no_output_____ ###Markdown 2. Output of the original data. ###Code
for data in dataset_slide.create_dict_iterator(output_numpy=True):
    print(text.to_str(data['text']).tolist())
###Output ['大', '家', '早', '上', '好'] ###Markdown 3. Apply the slicing operation. ###Code
dataset_slide = dataset_slide.map(operations=text.SlidingWindow(2,0),input_columns=['text'])
###Output _____no_output_____ ###Markdown 4. Output after the operation. ###Code
for data in dataset_slide.create_dict_iterator(output_numpy=True):
    print(text.to_str(data['text']).tolist())
###Output [['大', '家'], ['家', '早'], ['早', '上'], ['上', '好']] ###Markdown - shuffle The following demonstrates shuffling text data with `shuffle` while the dataset is being loaded. 1. Load the dataset. ###Code
inputs = ["a","b","c","d"]
dataset_shuffle = ds.NumpySlicesDataset(inputs, column_names=['text'], shuffle=True)
###Output _____no_output_____ ###Markdown 2. Output of the data. ###Code
for data in dataset_shuffle.create_dict_iterator(output_numpy=True):
    print(text.to_str(data['text']).tolist())
###Output a
b
c
d
###Markdown Tokenization The tokenization operators currently supported by MindSpore and their detailed usage are described in the [Tokenizer](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/tokenizer.html) chapter of the programming guide. The following demonstrates tokenization with the `WhitespaceTokenizer`, which splits text on whitespace. 1. Create the `tokenizer`. ###Code
tokenizer = text.WhitespaceTokenizer()
###Output _____no_output_____ ###Markdown 2. Apply the `tokenizer`. ###Code
dataset = dataset.map(operations=tokenizer)
###Output _____no_output_____ ###Markdown 3. Create a dictionary iterator and fetch the data through it. ###Code
for data in dataset.create_dict_iterator(num_epochs=1,output_numpy=True):
    print(text.to_str(data['text']).tolist())
###Output ['Welcome', 'to', 'Beijing'] ['北京欢迎您!'] ['我喜欢English!']
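###Markdown The `SlidingWindow(2, 0)` operator used above is easy to reason about in plain Python: along axis 0 it turns a token sequence into every consecutive window of width 2. The snippet below is only an illustration of that behaviour and does not use MindSpore itself. ###Code
def sliding_window(tokens, width=2):
    # All consecutive windows of `width` tokens, mirroring text.SlidingWindow(width, 0)
    return [tokens[i:i + width] for i in range(len(tokens) - width + 1)]

windows = sliding_window(["大", "家", "早", "上", "好"], width=2)
# windows == [['大', '家'], ['家', '早'], ['早', '上'], ['上', '好']]
###Output _____no_output_____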
Optuna + XgBoost.ipynb
###Markdown Search algorithms within OptunaWe can select the search algorithm from the [optuna.study.create_study()](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.htmloptuna.study.create_study) class. ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler import xgboost as xgb from sklearn.datasets import load_breast_cancer from sklearn.metrics import accuracy_score, roc_auc_score from sklearn.model_selection import cross_val_score, train_test_split from sklearn.ensemble import RandomForestClassifier import optuna ###Output _____no_output_____ ###Markdown Data Pre-processingThe data is related with direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact to the same client was required, in order to access if the product (bank term deposit) would be ('yes') or not ('no') subscribed. ###Code data = pd.read_csv('balanced_bank.csv', index_col = 0) data data.info() data.head() data.columns data.loan.value_counts() data.isnull().sum() ###Output _____no_output_____ ###Markdown Categorical Encoding ###Code categorical_cols = ['job','marital','education','month','day_of_week'] data_encoded = pd.get_dummies(data, drop_first=True,columns = categorical_cols) ###Output _____no_output_____ ###Markdown Binary encoding ###Code binary_cat_cols = ['contact','housing','loan','poutcome','default'] for i in binary_cat_cols: if len(data_encoded[i].unique())==2: data_encoded[i].replace({data_encoded[i].unique()[0]: 0, data_encoded[i].unique()[1]: 1, 'unknown':-1}, inplace=True) else: data_encoded[i].replace({data_encoded[i].unique()[0]: 0, data_encoded[i].unique()[1]: 1, data_encoded[i].unique()[2]: 2, 'unknown':-1}, inplace=True) ###Output _____no_output_____ ###Markdown Label encoding ###Code from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() data_encoded['y'] = label_encoder.fit_transform(data_encoded['y']) data_encoded.dtypes ###Output _____no_output_____ ###Markdown Scaling ###Code print(data_encoded.shape) X = data_encoded.loc[:,data_encoded.columns!='y'] print(X.shape) y = data_encoded.loc[:,'y'] print(y.shape) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1) scaler = MinMaxScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) ###Output _____no_output_____ ###Markdown Baseline model ###Code model = xgb.XGBClassifier() model.fit(X_train, y_train) preds = model.predict(X_test) accuracy = accuracy_score(y_test, preds) accuracy ###Output _____no_output_____ ###Markdown HyperParameter optimization with OptunaIn this notebook, I will demo how to select the search algorithm with Optuna. We will compare the use of:- Randomized search- Tree-structured Parzen Estimators- CMA-ES Define the objective functionThis is the hyperparameter response space, the function we want to minimize. 
###Code def objective(trial): param = { "objective": 'binary:logistic', 'reg_lambda': trial.suggest_loguniform('lambda', 1e-3, 10.0), 'reg_alpha': trial.suggest_loguniform('alpha', 1e-3, 10.0), 'gamma':trial.suggest_loguniform('gamma', 1e-3,1), 'colsample_bytree': trial.suggest_categorical('colsample_bytree', [i/10.0 for i in range(4,11)]), 'subsample': trial.suggest_categorical('subsample', [i/10.0 for i in range(4,11)]), 'learning_rate': trial.suggest_categorical('learning_rate', [0.008,0.009,0.01,0.012,0.014,0.016,0.018, 0.02,0.300000012]), 'n_estimators': trial.suggest_int('n_estimators',100,500), 'max_depth': trial.suggest_categorical('max_depth', [5,6,7,9,11,13,15,17,20]), 'min_child_weight': trial.suggest_int('min_child_weight', 1, 300), } model = xgb.XGBClassifier(**param) model.fit(X_train, y_train) preds = model.predict(X_test) accuracy = accuracy_score(y_test, preds) return accuracy random_study = optuna.create_study( direction="maximize", sampler=optuna.samplers.RandomSampler(), ) tpe_study = optuna.create_study( direction="maximize", sampler=optuna.samplers.TPESampler(), ) cmaes_study = optuna.create_study( direction="maximize", sampler=optuna.samplers.CmaEsSampler(), ) cmaes_study.optimize(objective, n_trials=100) tpe_study.optimize(objective, n_trials=100) random_study.optimize(objective, n_trials=100) search_space = { "n_estimators": [100, 500, 1000], "max_depth": [5,6,7,9,11,13,15,17,20], "min_samples_split": [0.1, 1.0], 'min_child_weight': [1, 5, 10], 'gamma': [0.5, 1, 1.5, 2, 5], 'subsample': [0.6, 0.8, 1.0], 'colsample_bytree': [0.6, 0.8, 1.0], } def objective(trial): param = { 'n_estimators': trial.suggest_int('n_estimators',100,500), 'max_depth': trial.suggest_categorical('max_depth', [5,6,7,9,11,13,15,17,20]), } model = xgb.XGBClassifier(**param) model.fit(X_train, y_train) preds = model.predict(X_test) accuracy = accuracy_score(y_test, preds) return accuracy grid_study = optuna.create_study( direction="maximize", sampler=optuna.samplers.GridSampler(search_space), ) grid_study.optimize(objective) tstamp1 = cmaes_study.trials_dataframe()['datetime_complete'].min() tstamp2 = grid_study.trials_dataframe()['datetime_complete'].max() temp = tstamp1 - tstamp2 if tstamp1 > tstamp2 else tstamp2 - tstamp1 temp print('GRID SEARCH: ',study.best_value) print('CMAES: ',cmaes_study.best_value) print('TPE: ',tpe_study.best_value) print('RANDOM SEARCH : ',random_study.best_value) print('BASELINE MODEL: ',accuracy) tstamp1 = tpe_study.trials_dataframe()['datetime_complete'].min() tstamp2 = tpe_study.trials_dataframe()['datetime_complete'].max() temp = tstamp1 - tstamp2 if tstamp1 > tstamp2 else tstamp2 - tstamp1 temp ###Output _____no_output_____
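###Markdown To compare the four studies side by side, their best values and rough wall-clock durations can be collected into one small table. This is only a sketch built from attributes already used above (`best_value` and the `datetime_complete` column of `trials_dataframe()`); the `studies` dictionary is introduced here purely for convenience. ###Code
import pandas as pd

studies = {
    'random': random_study,
    'tpe': tpe_study,
    'cmaes': cmaes_study,
    'grid': grid_study,
}

rows = []
for name, study_obj in studies.items():
    trials = study_obj.trials_dataframe()
    # rough duration: time between the first and last completed trial
    duration = trials['datetime_complete'].max() - trials['datetime_complete'].min()
    rows.append({'sampler': name,
                 'best_accuracy': study_obj.best_value,
                 'duration': duration})

sampler_summary = pd.DataFrame(rows).sort_values('best_accuracy', ascending=False)
###Output _____no_output_____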
R_scripts/ARIMA.ipynb
###Markdown The script `ARIMA_Model.R` starts with pre-processing the data. There was a problem in the code that I fixed based on a guess. We'll continue assuming that the fix was correct. First we store the data of the last two days of train data in `demand_list_ar`: ###Code n_tr = length(train) n_ts = length(test) demand_list_ar = lapply(1:2, function(i) train[[n_tr-2+i]]) ###Output _____no_output_____ ###Markdown Then we change these matrices into a long list of consecutive counts. We also do the same for the test data, and concatenate the two lists together. If we assume that test events have happened right after the train events, `vec_arima` is a log of counts of consecutive events starting from two weeks before the starting date of test events.![](./images/cells_train_test.png) ###Code demand_ar=c(sapply(1:2, function(i) c(t(demand_list_ar[[i]])))) demand_a=c(sapply(1:n_ts, function(x) c(t(test[[i]])))) vec_arima=c(demand_ar,demand_a) ###Output _____no_output_____ ###Markdown The train/test operation is repeated for each day in the test. Two weeks of data before the test date is concatenated to te first period of the test day. Depending on where the test day is located, the previous two weeks can contain data from train, test, or both datasets.![](images/cells_traindata.png)The first two weeks is sent to `calculaModelArima` function to train an ARIMA model. ###Code train_start = 48 * (target_day-1) twoweeks = 48 * 14 datatrain = vec_arima[(train_start+1):(train_start+twoweeks)] # send this data to train the ARIMA model o = calculaModelArima(datatrain) ###Output _____no_output_____ ###Markdown Let's assume that order `o` is computed and jump to the prediction step. This is how prediction for one period is performed: - take the series containing 24\*7 periods before the target- that series together with `o` is fed into function `predict`![](./images/pred_slide.png)This is repeated until all 48 periods in the target day are predicted![](./images/pred_slide_2.png) ###Code pred_oneday = function (x) { pred_start = x + 48*(target_day-1) datapred = vec_arima[pred_start:(pred_start+twoweeks-1)] r <- predict(arima(datapred, order=o), n.ahead =1)$pred return (r) } predictions = sapply(1:48, function(x) pred_oneday(x)) ###Output _____no_output_____
class9_apache_spark/3. grouping.ipynb
###Markdown Data Analysis ###Code # Below variables are to be set in the shell profile # export SPARK_HOME=/Users/pmacharl/spark-2.4.4-bin-hadoop2.7 # export PATH=$PATH:$SPARK_HOME/bin # export PYSPARK_SUBMIT_ARGS="pyspark-shell" # export PYSPARK_DRIVER_PYTHON=/usr/local/bin/python3 # export PYSPARK_PYTHON=/usr/local/bin/python3 ###Output _____no_output_____ ###Markdown Start cluster manually ###Code # https://spark.apache.org/docs/latest/spark-standalone.html # ./sbin/start-master.sh # Start your spark server by navigating to SPARK_HOME/sbin and executing ./start-all.sh # By default web Spark UI serves on :8080 in cluster mode. See all options for setting host, ip etc. in documentation from pyspark.sql import SparkSession from pyspark.conf import SparkConf # https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkConf config = SparkConf() config.set("spark.driver.memory", "2g") config.set("spark.executor.memory", "1g") #Because you are likely running in local mode, it is a good practice to set the number of shuffle partitions # to something that is going to fit local mode. By default, the value is 200, but there aren't many executors # on this machine, its worth reducing this to 5 config.set("spark.sql.shuffle.partitions", "5") spark = SparkSession.builder.config(conf=config).master("spark://192.168.0.6:7077").appName("Analyzing Real Estate Sales").getOrCreate() df = spark.read.format('csv').option("header", "true").load('../Real_Estate_Sales_2001-2017.csv') df.printSchema() ###Output root |-- ID: string (nullable = true) |-- SerialNumber: string (nullable = true) |-- ListYear: string (nullable = true) |-- DateRecorded: string (nullable = true) |-- Town: string (nullable = true) |-- Address: string (nullable = true) |-- AssessedValue: string (nullable = true) |-- SaleAmount: string (nullable = true) |-- SalesRatio: string (nullable = true) |-- PropertyType: string (nullable = true) |-- ResidentialType: string (nullable = true) |-- NonUseCode: string (nullable = true) |-- Remarks: string (nullable = true) ###Markdown Get total counts of properties transacted by ListYear ###Code property_by_year = df.groupBy("ListYear").agg({"ID":"count"}) property_by_year.show() ###Output +--------+---------+ |ListYear|count(ID)| +--------+---------+ | 2016| 49773| | 2012| 35973| | 2017| 45692| | 2014| 49563| | 2013| 39943| | 2005| 61602| | 2002| 106068| | 2009| 42508| | 2006| 48775| | 2004| 84056| | 2011| 31065| | 2008| 32734| | 2007| 35616| | 2015| 46651| | 2001| 59584| | 2010| 27755| | 2003| 64239| +--------+---------+ ###Markdown How much money got transacted by PropertyType ###Code df.groupBy("PropertyType").agg({"SaleAmount":"sum"}).show() ###Output +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ 0:00:01.052141 ###Markdown Rename column ###Code df.groupBy("PropertyType").agg({"SaleAmount":"sum"}).withColumnRenamed("sum(SaleAmount)","TotalMoneyTransacted").show() ###Output +--------------+--------------------+ | PropertyType|TotalMoneyTransacted| +--------------+--------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential| 2.0461496626929E11| | null| 
1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+--------------------+ ###Markdown Time your code to check efficiency ###Code # %%timeit Use this magic function if you want mean execution time, but beware it runs the code multiple times from datetime import datetime start = datetime.now() df.groupBy("PropertyType").agg({"SaleAmount":"sum"}).show() print("{0}".format(datetime.now() - start)) ###Output +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ +--------------+------------------+ | PropertyType| 
sum(SaleAmount)| +--------------+------------------+ | Apartments| 6.679886858E9| | Vacant Land| 8.648986096E9| | NA| 2.237281744E9| | Residential|2.0461496626929E11| | null| 1.7968785803E10| | Industrial| 3.698454639E9| | Condo| 2.530141781E10| |Public Utility| 2.9632347E7| |10 Mill Forest| 1830160.0| | Commercial| 3.0602284248E10| +--------------+------------------+ 974 ms ± 88.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) ###Markdown Remove spark application ###Code spark.stop() ###Output _____no_output_____
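###Markdown The renamed aggregation above can also be written with the `pyspark.sql.functions` API, which computes and names several aggregates in a single pass. The sketch below is an alternative form (it would need to run before `spark.stop()`); the explicit cast is added here because the CSV was read with every column as a string. ###Code
from pyspark.sql import functions as F

money_by_type = (
    df.groupBy("PropertyType")
      .agg(
          F.sum(F.col("SaleAmount").cast("double")).alias("TotalMoneyTransacted"),
          F.count("ID").alias("NumberOfSales"),
      )
)
###Output _____no_output_____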
DataSet_Meteo_Original_EnviDat/M1/.ipynb_checkpoints/04-M1-D2-DV-checkpoint.ipynb
###Markdown Notebook 2, Module 1, Data Aquisition and Data Management, CAS Applied Data Science, 2020-08-20, S. Haug, University of Bern. 1. Visualisation of Data - Examples**Learning outcomes:**Participants will be able to make good data science plots, with praxis on - plot line charts from series and dataframes- plot histograms - understand the effect of binning- plot scatter plots- plot box plots- plot error bars- formatting of plots- geoplotting**Introduction Slides**- https://docs.google.com/presentation/d/1HhRIIVq46DyVNm68WeTqr_vZvOgSMWBZa2XDwWNH8H4/edit?usp=sharing**Further sources**- Python: https://pandas.pydata.org/pandas-docs/stable/visualization.html- https://jakevdp.github.io/PythonDataScienceHandbook/04.00-introduction-to-matplotlib.html- Get inspired here : https://matplotlib.org/gallery/index.htmlHere you have examples on plotting possibilities with pandas. They make data science plotting very easy and fast. However, you may have special needs that are not supported. Then you can use the underlaying plotting module **matplotlib**. Plotting is an art and you can spend enourmous amounts of time doing plotting. There are many types of plots. You may invent your own type. We only show some examples and point out some important things. If you need to go further, you have to work indepentently. Some vocabulary and plots are only understandable with corresponding statistics background. This is part of module 2. 0. Load the modules ###Code import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown 1. Plot line charts (time series)First we use the data structure Series (one dimensional). ###Code # Generate 1000 random numbers for 1000 days from the normal distribution ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = ts.cumsum() ts.head() #ts.plot() #plt.show() ###Output _____no_output_____ ###Markdown We can generate 4 time series, keep them in a dataframe and plot them all four. ###Code df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=['All','Bin','C','D']) df_cumsum = df.cumsum() plt.figure() df_cumsum.plot() plt.show() ###Output _____no_output_____ ###Markdown 2. Plot histograms (frequency plots)For this we use our Iris dataset. ###Code df = pd.read_csv('iris.csv',names=['slength','swidth','plength','pwidth','species']) # data type is a string (str), i.e. not converted into numbers df.head() # print first five lines of data ###Output _____no_output_____ ###Markdown Plot two histograms with a legend in the same graph. ###Code df['slength'].plot(kind="hist",fill=True,histtype='step',label='slength') df['swidth'].plot(kind="hist",fill=False,histtype='step',label='swidth') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown The effect of binningWhen data is binned (or sampled) the bin size effects the amount of counts in each bin. Counts fluctuate like a normal distribution for counts above about 20. So depending on your bin size, the same data may look differently.Hard binning (small bin size) may introduce pure statistical structures without any other meaning. This is then overfitting. Too big bin sizes may wipe out structures in the data (underfitting). If known, a bin size guided by the physical resolution of the sensor is close to optimal. Plot the same histograms with a different binning. 
###Code df['slength'].plot(bins=10,range=(2,8), kind="hist",fill=False,histtype='step') plt.show() ###Output _____no_output_____ ###Markdown Always label the axes (also with units) ###Code ax = df['slength'].plot(kind="hist",fill=False,histtype='step',label='slength') df['swidth'].plot(kind="hist",fill=False,histtype='step',label='swidth') ax.set_xlabel('x / cm') ax.set_ylabel('Count / 0.3 cm') ax.set_title('Sepal Length and Width') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown 3. Scatter plotsScatter plots show how the data is distributed in two dimensions. They are good for finding (anti) correlations between two variables. We plot several plots in one figure. ###Code df.plot(x='slength',y='swidth',kind="scatter",c='c') plt.show() ###Output _____no_output_____ ###Markdown With the plotting module there are some nice tools. For example authomatic plotting of all scatter plots. ###Code from pandas.plotting import scatter_matrix scatter_matrix(df[df['species']=='Iris-setosa'], alpha=0.2, figsize=(8, 8), diagonal='hist') plt.show() ###Output _____no_output_____ ###Markdown Or plotting of Andrew curves. https://en.wikipedia.org/wiki/Andrews_plot ###Code from pandas.plotting import andrews_curves andrews_curves(df, 'species') plt.show() ###Output _____no_output_____ ###Markdown There are several other tools too. See https://pandas.pydata.org/pandas-docs/stable/visualization.html 4. Box plots Boxplot can be drawn calling Series.plot.box() and DataFrame.plot.box(), or DataFrame.boxplot() to visualize the distribution of values within each column. ###Code color = dict(boxes='DarkGreen', whiskers='DarkOrange', medians='DarkBlue', caps='Gray') df.plot.box(color=color) plt.show() ###Output _____no_output_____ ###Markdown Box plots are non-parametric. The box shows the first second and third quartiles. The whiskers may be standard deviations or other percentiles. 5. Plotting with error bars There is no science without error bars, or better, uncertainties. The meaning of uncertainties is discussed in module 2. Here we only show by example how to plot uncertainties. Plotting with error bars is supported in DataFrame.plot() and Series.plot().Horizontal and vertical error bars can be supplied to the xerr and yerr keyword arguments to plot(). The error values can be specified using a variety of formats:- As a DataFrame or dict of errors with column names matching the columns attribute of the plotting DataFrame or matching the name attribute of the Series.- As a str indicating which of the columns of plotting DataFrame contain the error values.- As raw values (list, tuple, or np.ndarray). Must be the same length as the plotting DataFrame/Series.Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a M length Series, a Mx2 array should be provided indicating lower and upper (or left and right) errors. For a MxN DataFrame, asymmetrical errors should be in a Mx2xN array.Here is an example using an error dataframe (symmetric uncertainties). ###Code my_df = pd.DataFrame([6,15,4,20,16,13]) # Some random data my_df_e = (my_df)**0.5 # The error dataframe my_df.plot(yerr=my_df_e) plt.show() ###Output _____no_output_____ ###Markdown 6. Formatting plots Plots can easily be formatted with keywords. One can adjust colors, types of shading, lines, axes, legends, titles, etc. Some formatting has been exemplified above. More examples are in the documentation. 
https://pandas.pydata.org/pandas-docs/stable/visualization.htmlWith the matplotlib module you are even more flexible. See https://matplotlib.org/gallery/index.html for inspirations. 7. Summary- Do you remember three important plot types?- What can the binning of a histogram do to the interpretation of it?- What are the three parts of the general communication process?- Can you mention three important points to include in plots and their figure legends? 8 Plotting GeodataOften is very nice to visualise data on geographical maps. Fortunately people have written packages helpin us with that. This is an example how the geopandas package can be used. You need to install geopandas (via the Anaconda Navigator in the environment you are using. Geopandas is part repository/channel conda-forge). This you may do at home or now or just follow Sigve showing you this example. First we need some data files. On Linux or MacOs you can do this in order to hace the files in a subfolder data of your current notebook folder:>wget http://biogeo.ucdavis.edu/data/diva/adm/CHE_adm.zip>mkdir -p data>mv CHE_adm.zip data>cd data>unzip CHE_adm.zipNow we can use this code: ###Code import pandas as pd import numpy as np import geopandas as gpd import matplotlib.pyplot as plt import datetime import os fp = "data/CHE_adm1.shp" map_df = gpd.read_file(fp) cantons_df = pd.read_csv("data/cantons.csv") #cantons_df merged_df = map_df.merge(cantons_df, how="left", left_on="NAME_1", right_on="CANTON") merged_df def plotmap(df, datacol, vmax, filename, title): sm = plt.cm.ScalarMappable(cmap='Blues', norm=plt.Normalize(vmin=0, vmax=vmax)) fig, ax = plt.subplots(1, figsize=(30, 10)) ax.axis("off") ax.set_title(title, fontdict={'fontsize': '25', 'fontweight' : '3'}) ax.annotate("Sources: BAG, WP, ZHAW SPLab", xy=(0.68, 0.11), xycoords='figure fraction', fontsize=12, color='#555555') sm.set_array([]) fig.colorbar(sm, ax=ax, extend="max") df['coords'] = df['geometry'].apply(lambda x: x.representative_point().coords[:]) df['coords'] = [coords[0] for coords in df['coords']] for idx, row in df.iterrows(): plt.annotate(s=row['NAME_1'], xy=row['coords'],horizontalalignment='center') df.plot(column=datacol, cmap='Blues', linewidth=0.8, ax=ax, edgecolor='0.8', vmax=vmax) fig.savefig(filename, dpi=150) merged_df["VIRUSCASESDENSITY"] = 100 * merged_df["VIRUSCASESCONFIRMED"] / merged_df["INHABITANTS"] print(merged_df[["ACR", "VIRUSCASESDENSITY"]]) if not os.path.isfile("map_absolute.png"): plotmap(merged_df, "VIRUSCASESCONFIRMED", 500, "map_absolute.png", "# of confirmed coronavirus cases per canton") plotmap(merged_df, "VIRUSCASESDENSITY", 0.1, "map_density.png", "% of coronavirus cases per cantonal population") os.makedirs("dailymaps", exist_ok=True) stamp = datetime.datetime.now().strftime("%Y%m%d") hdate = datetime.datetime.now().strftime("%d.%m.%Y") plotmap(merged_df, "VIRUSCASESCONFIRMED", 5000, f"dailymaps/map_abs_{stamp}.png", f"# of confirmed coronavirus cases per canton [{hdate}]") plotmap(merged_df, "VIRUSCASESDENSITY", 1, f"dailymaps/map_den_{stamp}.png", f"% of coronavirus cases per cantonal population [{hdate}]") ###Output _____no_output_____
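###Markdown For quick exploration, `geopandas` can also draw a choropleth directly from the merged GeoDataFrame without a custom plotting function. This is a sketch using standard `GeoDataFrame.plot` keywords and the `merged_df` built above; `legend=True` adds the colorbar automatically. ###Code
fig, ax = plt.subplots(1, figsize=(15, 8))
merged_df.plot(column='VIRUSCASESDENSITY',   # column computed above
               cmap='Blues',
               linewidth=0.8,
               edgecolor='0.8',
               legend=True,
               ax=ax)
ax.set_title('% of coronavirus cases per cantonal population')
ax.set_axis_off()
###Output _____no_output_____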
2020/day02-haskell.ipynb
###Markdown Day 2: Password Philosophy
https://adventofcode.com/2020/day/2 ###Code
inputLines = lines <$> readFile "input/day02.txt"
###Output _____no_output_____ ###Markdown We will use a regular expression to parse the input file. ###Code
import Text.Regex.PCRE -- install with 'stack install regex-pcre'

testInput = [ "1-3 a: abcde"
            , "1-3 b: cdefg"
            , "2-9 c: ccccccccc" ]
###Output _____no_output_____ ###Markdown `parseLine` reads a line in the specified format and extracts two `Int`s (separated by '`-`'), a `Char` (followed by a '`:`') and the password. ###Code
parseLine :: String -> (Int, Int, Char, String)
parseLine line =
    let match = (line =~ "(\\d+)-(\\d+) (\\w): (\\w+)" :: [[String]])
        [_, n1, n2, letterString, password] = head match
    in (read n1, read n2, head letterString, password)
###Output _____no_output_____ ###Markdown Part 1
First, we check whether the number of occurrences of the given letter in the password lies between the two numbers. ###Code
validate1 :: (Int, Int, Char, String) -> Bool
validate1 (minCount, maxCount, letter, password) = minCount <= count && count <= maxCount
    where count = length . filter (== letter) $ password

solution lineValidator = length . filter (lineValidator . parseLine)

solution1 = solution validate1

solution1 testInput
###Output _____no_output_____ ###Markdown Solution, part 1 ###Code
solution1 <$> inputLines
###Output _____no_output_____ ###Markdown Part 2
Then we check that exactly one of the two password letters selected by the one-based indices is equal to the given letter. ###Code
validate2 :: (Int, Int, Char, String) -> Bool
validate2 (pos1, pos2, letter, password) = matchLetter pos1 /= matchLetter pos2
    where matchLetter pos = letter == password !! pred pos

solution2 = solution validate2

solution2 testInput
###Output _____no_output_____ ###Markdown Solution, part 2 ###Code
solution2 <$> inputLines
###Output _____no_output_____
Visualization using Matplotlib and Pandas/Pandas Visualization.ipynb
###Markdown Pandas visualization ###Code import numpy as np import pandas as pd import seaborn as sns sns.set() %matplotlib inline df1 = pd.read_csv('df1',index_col=0) df1.head() df2=pd.read_csv('df2') df2.head() ###Output _____no_output_____ ###Markdown Visualising a dataframe directly ###Code df1['A'].hist(bins=20) df1['A'].plot(kind='hist') df1['A'].plot.hist() df2.plot.area(alpha=0.4) df2.plot.bar() df1['A'].plot.hist(bins=50) df1.reset_index().plot.line(x='index',y='B', figsize=(12,3),lw=1) df1.plot.scatter(x='A',y='B',c='C',cmap='coolwarm') df1.plot.scatter(x='A',y='B',s=df1['C']*10) df2.plot.box() df = pd.DataFrame(np.random.randn(1000,2),columns=['a','b']) df.plot.hexbin(x='a',y='b',gridsize=25,cmap='coolwarm') df2.plot.kde() ###Output _____no_output_____
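###Markdown All of these pandas plotting calls return a matplotlib `Axes` object, so the usual matplotlib formatting and saving functions apply on top of them. A short sketch (the labels and the output filename are made up for illustration): ###Code
import matplotlib.pyplot as plt

ax = df1['A'].plot.hist(bins=50, alpha=0.5)
ax.set_xlabel('A')                            # label the x axis
ax.set_ylabel('Frequency')                    # label the y axis
ax.set_title('Distribution of column A')
plt.savefig('histogram_A.png', dpi=150, bbox_inches='tight')
###Output _____no_output_____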
src/examples/nb_test/get_file.ipynb
###Markdown Process ###Code MNM_nb_folder = os.path.join('..', '..', '..', 'side_project', 'network_builder') sys.path.append(MNM_nb_folder) python_lib_folder = os.path.join('..', '..', 'pylib') sys.path.append(python_lib_folder) from MNM_nb import * # import MNMAPI data_folder = os.path.join('..', '..', '..', 'data', 'input_files_MckeesRocks_SPC_wei') nb = MNM_network_builder() nb.load_from_folder(data_folder) # for i, ID in enumerate(nb.path_table.ID2path.keys()): # path = nb.path_table.ID2path[ID] # path.route_portions = np.tile(path.route_portions, 5) for O in nb.demand.demand_dict.keys(): for D in nb.demand.demand_dict[O].keys(): nb.demand.demand_dict[O][D] = nb.demand.demand_dict[O][D][:20] f = nb.get_path_flow() new_folder = "wei_mcroad" nb.dump_to_folder(new_folder) ###Output _____no_output_____ ###Markdown Modify ###Code orig_f = f new_folder = "wei_mcroad" nb = MNM_network_builder() nb.load_from_folder(new_folder) f = nb.get_path_flow() f = orig_f * 2 nb.update_demand_path(f) nb.dump_to_folder('new_mcroad2') f.sum() ###Output _____no_output_____
1. Python Introductory Course/2.Python for Data Science/3. Introduction to Pandas/.ipynb_checkpoints/2_Indexing_and_Selecting_Data-checkpoint.ipynb
###Markdown Indexing and Selecting DataIn this section, you will:* Select rows from a dataframe* Select columns from a dataframe* Select subsets of dataframes Selecting RowsSelecting rows in dataframes is similar to the indexing you have seen in numpy arrays. The syntax ```df[start_index:end_index]``` will subset rows according to the start and end indices. ###Code import numpy as np import pandas as pd market_df = pd.read_csv("../global_sales_data/market_fact.csv") market_df.head() ###Output _____no_output_____ ###Markdown Notice that, by default, pandas assigns integer labels to the rows, starting at 0. ###Code # Selecting the rows from indices 2 to 6 market_df[2:7] # Selecting alternate rows starting from index = 5 market_df[5::2].head() ###Output _____no_output_____ ###Markdown Selecting ColumnsThere are two simple ways to select a single column from a dataframe - ```df['column_name']``` and ```df.column_name```. ###Code # Using df['column'] sales = market_df['Sales'] sales.head() # Using df.column sales = market_df.Sales sales.head() # Notice that in both these cases, the resultant is a Series object print(type(market_df['Sales'])) print(type(market_df.Sales)) ###Output <class 'pandas.core.series.Series'> <class 'pandas.core.series.Series'> ###Markdown Selecting Multiple Columns You can select multiple columns by passing the list of column names inside the ```[]```: ```df[['column_1', 'column_2', 'column_n']]```.For instance, to select only the columns Cust_id, Sales and Profit: ###Code # Select Cust_id, Sales and Profit: market_df[['Cust_id', 'Sales', 'Profit']].head() ###Output _____no_output_____ ###Markdown Notice that in this case, the output is itself a dataframe. ###Code type(market_df[['Cust_id', 'Sales', 'Profit']]) # Similarly, if you select one column using double square brackets, # you'll get a df, not Series type(market_df[['Sales']]) ###Output _____no_output_____ ###Markdown Selecting Subsets of DataframesUntil now, you have seen selecting rows and columns using the following ways:* Selecting rows: ```df[start:stop]```* Selecting columns: ```df['column']``` or ```df.column``` or ```df[['col_x', 'col_y']]``` * ```df['column']``` or ```df.column``` return a series * ```df[['col_x', 'col_y']]``` returns a dataframeBut pandas does not prefer this way of indexing dataframes, since it has some ambiguity. For instance, let's try and select the third row of the dataframe. ###Code # Trying to select the third row: Throws an error market_df[2] ###Output _____no_output_____ ###Markdown Pandas throws an error because it is confused whether the ```[2]``` is an *index* or a *label*. Recall from the previous section that you can change the row indices. ###Code # Changing the row indices to Ord_id market_df.set_index('Ord_id').head() ###Output _____no_output_____ ###Markdown Now imagine you had a column with entries ```[2, 4, 7, 8 ...]```, and you set that as the index. What should ```df[2]``` return?The second row, or the row with the index value = 2?Taking an example from this dataset, say you decide to assign the ```Order_Quantity``` column as the index. ###Code market_df.set_index('Order_Quantity').head() ###Output _____no_output_____
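###Markdown The ambiguity illustrated above (is `2` a position or a label?) is exactly why pandas provides the explicit indexers `iloc` (purely position-based) and `loc` (purely label-based). A brief sketch using the same dataframe; note that `.loc` slices include the end label, unlike positional slices. ###Code
# Position-based selection: the third row, regardless of what the index labels are
third_row = market_df.iloc[2]

# Position-based rows and columns
subset_by_position = market_df.iloc[2:7, 0:3]

# Label-based selection: with the default integer index the labels happen to
# coincide with positions, but the end label 6 is included in the result
subset_by_label = market_df.loc[2:6, ['Sales', 'Profit']]
###Output _____no_output_____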
week_8/marl_tictactoe_pt2.ipynb
###Markdown Multi-agent Reinforcement Learning using PettingZoo: Tic-tac-toe example part II*Gertjan Verhoeven**March 2021* Having learned the basics of PettingZoo, and how to use hashing with `defaultdict` to build up a Q-table, we are ready to implement Q-learning on the tic-tac-toe environment using PettingZoo. Before we get into the coding part, first some general remarks about Q-learning in a two-player alternating turn game. Training multi-agent games using single agent techniquesHere we will train a multi-agent game using single agent Q-learning. This means that from the perspective of each player, the other player is part of the environment. It follows that the learned strategy of the first player is tuned to the strategy used by the second player. The second player's strategy can be thought of forming a part of the environment the first player learns to perform optimally in.Therefore, if we use Q-learning against a player that uses a random strategy, The Q-learner optimizes for play in an evironment **that contains a player that uses a random strategy**.Here, we let **both** players learn using Q-learning with constant learning parameter $\alpha = 0.6$, and have them use the same decreasing $\epsilon$ exploration rate. So both players start off playing randomly ($\epsilon = 1$), and after 200.000 steps end up exploiting the learned policy for 97.5% of the time.We expect that this produces Q-tables for both players that perform well against random players and human players. Q-learning with two players: remembering previous states and actionsMany implementations of Q-learning for Tic-Tac-Toe play out a full game and memorize the sequence of states for both players. After a single game has ended, the "Q-learning" part is done in one go, starting from the end state en propagating back to the beginning. This is possible for Tic-Tac-Toe because no state is ever visited twice during a single game. However, this is not true for all games, so I think it is better to implement Q-learning in a more general way, where we learn each time the player "steps" through the game. In single agent RL using Gym, we start in a state, make a move, end up in a new state, and collect our reward. This allowed us to use variables like `current_state` and `next_state` within a single RL iteration for the Q-learning. In multi-agent (two player alternating turn, to be more precise), an agent observes a state, chooses an action, THEN hands over the turn, where the other agent observes the state, and chooses an action, then control returns to the first agent, which sees the result of its previous action, and finds itself in a new state.Thus, we need a way to store the previous state and previous action for each player to use Q-learning. We store these values in two simple dictionaries: ###Code prev_state = {'player_1': -1, 'player_2': -1} prev_action = {'player_1': -1, 'player_2': -1} ###Output _____no_output_____ ###Markdown I went for two independent Q-tables, and player 1 always starts first (This is how the environment works). So we will end up with two Q-tables, one for the player going first, and one for the player going second. 
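###Markdown With the previous state and action remembered per agent, the update that an agent performs when control returns to it can be pictured as the standard one-step Q-learning rule. The sketch below is only an illustration of how the stored values are used once the agent has already acted at least once; it is essentially what the `update_Q_value()` exercise further down asks you to implement. ###Code
# Illustration only: one-step Q-learning update for `agent`, using the values
# remembered from that agent's previous turn and the state observed on this turn.
def one_step_update(Q, agent, prev_state, prev_action, reward, state, alpha, gamma):
    old_value = Q[agent][prev_state[agent]][prev_action[agent]]
    target = reward + gamma * Q[agent][state].max()   # bootstrapped return estimate
    # at a terminal state the bootstrap term gamma * max(...) would be dropped
    Q[agent][prev_state[agent]][prev_action[agent]] = old_value + alpha * (target - old_value)
###Output _____no_output_____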
Coding Q-learning in PettingZoo step by step ###Code from pettingzoo.classic import tictactoe_v3 import random import numpy as np from collections import defaultdict import dill ###Output _____no_output_____ ###Markdown It is advisable to have the actual Q-learning and helper functions in a separate `marl_qlearning.py` python script, which we import and run code from in Jupyter notebook, i.e.: ###Code from marl_qlearning import * ###Output _____no_output_____ ###Markdown We build on the code from the previous notebook.We already have `encode_state()` to convert the observation into a unique state label. ###Code import hashlib def encode_state(observation): # encode observation as bytes obs_bytes = str(observation).encode('utf-8') # create md5 hash m = hashlib.md5(obs_bytes) # return hash as hex digest state = m.hexdigest() return(state) ###Output _____no_output_____ ###Markdown Exercise 1: Create epsilon_greedy_policy()We need a function that contains the epsilon-greedy policy functionality.Code a function `epsilon_greedy_policy()` that takes as arguments:* `nA`, number of actions* `Q`, the list of Q-table dictionaries for both players, indexed by agent and by state* `agent`, the agent currently acting* `action_mask`, containing the available actions in the current state* `state` , the hash of the current state* `eps`, the value of the exploration parameter epsilon(Hint: I used `Q[agent][state][action_mask == 1]` to select the Q-values of all available actions for an agent in a state) ###Code def epsilon_greedy_policy(nA, Q, agent, action_mask, \ state, eps): return action ###Output _____no_output_____ ###Markdown Exercise 2: Create update_Q_value()Code a function update_Q_value() that takes as arguments:* `Q`, the list of Q-table dictionaries for both players, indexed by agent and by state* `agent`, the agent currently acting* `previous_state`, the hash of the previous state* `previous_action`, the previous action* `reward`, the reward received since the previous action* `alpha`, the learning parameter* `gamma`, the discounting parameter* `current_state`, the hash value of the current state ###Code def update_Q_value(Q, agent, previous_state, previous_action,\ reward, alpha, gamma, current_state = None): return Q ###Output _____no_output_____ ###Markdown Exercise 3: create marl_q_learning()Finally, the main program where all the parts come together.For this function, we built on the Pettingzoo code from the previous notebook.Code a function `marl_q_learning()` that takes as arguments* multi_env, a pettingzoo multi-agent environment* num_episodes, the number of episodes to run* alpha, the learning parameter* gamma, the discounting parameter with default 1* eps_start, the starting value for epsilon* eps_decay, the decay factor for epsilon* eps_min, the minimal value for epsilon ###Code def marl_q_learning(multi_env, num_episodes, alpha, gamma=1.0, \ eps_start=1.0, eps_decay=.99999, \ eps_min=0.025): multi_env.reset() Q = {} for agent in multi_env.agents: nA = multi_env.action_spaces[agent].n Q[agent] = defaultdict(lambda: np.zeros(nA)) epsilon = eps_start i_episode = 1 prev_state = {'player_1': -1, 'player_2': -1} prev_action = {'player_1': -1, 'player_2': -1} # keeps iterating over active agents until num episode break while i_episode <= num_episodes: for agent in multi_env.agent_iter(): # get observation (state) for current agent: observation, reward, done, info = multi_env.last() # perform q-learning with update_Q_value() # your code here # store current state prev_state[agent] = state 
if not done: # choose action using epsilon_greedy_policy() # your code here multi_env.step(action) # store chosen action prev_action[agent] = action else: # agent is done multi_env.step(None) # reset env and memory multi_env.reset() prev_state = {'player_1': -1, 'player_2': -1} prev_action = {'player_1': -1, 'player_2': -1} # bump episode i_episode += 1 # decrease epsilon epsilon = max(epsilon*eps_decay, eps_min) return Q ###Output _____no_output_____ ###Markdown Learning the optimal policy for Tic-Tac-ToeWe run the Q-learning algorithm for 500.000 steps and use `dill` to save the Q dictionary to disk. ###Code from marl_qlearning import * env = tictactoe_v3.env() random.seed(123) fullrun = 0 if fullrun: Q, N = marl_q_learning(env, 500_000, alpha = 0.6, gamma = 0.95, \ decay = True, render = False) with open('cached/Q.pkl', 'wb') as f: dill.dump(Q, f) else: with open('cached/Q.pkl', 'rb') as f: Q = dill.load(f) ###Output _____no_output_____
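###Markdown A quick way to sanity-check the loaded Q-tables is to let both players act greedily for one game. The sketch below is not part of the original pipeline: it assumes the observation returned by `last()` carries an `action_mask` entry (as in PettingZoo's classic environments) and that `encode_state()` is applied to the same object your exercise code hashed during training; adjust to match your own implementation.
###Code
def greedy_action(Q, agent, observation):
    # Hash the observation the same way it was hashed during training.
    state = encode_state(observation)
    mask = observation['action_mask']
    # Hide illegal moves before taking the argmax.
    q_values = np.where(mask == 1, Q[agent][state], -np.inf)
    return int(np.argmax(q_values))

env.reset()
for agent in env.agent_iter():
    observation, reward, done, info = env.last()
    if done:
        env.step(None)
    else:
        env.step(greedy_action(Q, agent, observation))
###Output
 _____no_output_____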
sprint_03_data_analysis/data_analysis_03_data_07.ipynb
###Markdown data_07 (indicators 02) adjustment 1. Imports ###Code import pandas as pd import numpy as np import os ###Output _____no_output_____ ###Markdown 2. Load dataframe ###Code input_path_indicators_2 = os.path.join(os.getcwd(), '..', 'sprint_01_data_collection', 'data_07') input_file_indicators_2 = 'data_07_brazil_indicators.csv' df_indicators_2 = pd.read_csv(os.path.join(input_path_indicators_2, input_file_indicators_2)).drop('Unnamed: 0', axis=1) df_indicators_2.head() ###Output _____no_output_____ ###Markdown 3. split states from cities ###Code try: df_indicators_2['Municipio'] = df_indicators_2['Territorialidade'].apply(lambda city: city.split('(')[0].rstrip(' ')) df_indicators_2['Estado'] = df_indicators_2['Territorialidade'].apply(lambda city: city.split('(')[1].replace(')', '')) df_indicators_2.drop('Territorialidade', axis=1, inplace=True) except KeyError: print('Territorialidade had already been removed') df_indicators_2.head() ###Output Territorialidade had already been removed ###Markdown 4. Save ###Code output_path = os.path.join(os.getcwd(), 'output') output_file = 'output_04_data_07_brazil_indicators_02_with_states_and_cities.csv' df_indicators_2.to_csv(os.path.join(output_path, output_file)) ###Output _____no_output_____
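###Markdown Not part of the original workflow, but worth noting: the same split can be done in a single pass with a regular expression via `str.extract`, which also yields `NaN` instead of raising when a row does not match the pattern. The example values below are made up; they just follow the "City (UF)" pattern implied by the `split('(')` logic above.
###Code
sample = pd.DataFrame({'Territorialidade': ['Porto Alegre (RS)', 'Recife (PE)']})
sample[['Municipio', 'Estado']] = sample['Territorialidade'].str.extract(r'^(.*?)\s*\((\w+)\)$')
sample
###Output
 _____no_output_____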
issues/errors in thermo conversion.ipynb
###Markdown Table of Contents ###Code from rmgpy.thermo.wilhoit import Wilhoit from rmgpy import constants import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt wilhoit = Wilhoit( Cp0 = (4.0*constants.R,"J/(mol*K)"), CpInf = (21.5*constants.R,"J/(mol*K)"), a0 = 0.0977518, a1 = -16.3067, a2 = 26.2524, a3 = -12.6785, B = (1068.68,"K"), H0 = (-94.088*constants.R,"kJ/mol"), S0 = (-118.46*constants.R,"J/(mol*K)"), Tmin = (10,"K"), Tmax = (3000,"K"), comment = 'C2H6', ) nasa = wilhoit.toNASA(Tmin=10,Tmax=3000, Tint=1000) output_error = pd.DataFrame(index = ['cp','h','s']) Tlist = np.arange(10, 3000, 10) for T in Tlist: differences = {} Cp_wilhoit = wilhoit.getHeatCapacity(T) Cp_nasa = nasa.getHeatCapacity(T) differences['cp'] = (Cp_nasa-Cp_wilhoit) / Cp_wilhoit H_wilhoit = wilhoit.getEnthalpy(T) H_nasa = nasa.getEnthalpy(T) differences['h'] = (H_nasa - H_wilhoit) / H_wilhoit S_wilhoit = wilhoit.getEntropy(T) S_nasa = nasa.getEntropy(T) differences['s'] = (S_nasa - S_wilhoit) / S_wilhoit output_error[T] = pd.Series(differences) output_error output_error.T.plot() plt.xlabel('temperature (K)') plt.ylabel('relative error from conversion') ###Output _____no_output_____
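###Markdown Not in the original notebook, but a compact follow-up to the plot: collapse the error curves into a single worst-case number per property, plus the temperature at which it occurs.
###Code
# Worst-case absolute relative error per property over the whole temperature range,
# and the temperature at which it occurs.
print(output_error.abs().max(axis=1))
print(output_error.abs().idxmax(axis=1))
###Output
 _____no_output_____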
1 Data Prep/Data Preparation Workbook.ipynb
###Markdown Data Preparation Phase **Author:** Gabriel Lorenzo I. Santos ([email protected]) -------------------- The MIT License (MIT) Copyright (c) 2020 Gabriel Lorenzo I. Santos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ------------------- Description Most data mining activities require data preparation before analysis is undertaken. According to the CrowdFlower Data Science Report of 2016, 3 out of 5 data scientists spend most of their time cleaning and organizing data. **Data Preparation** is the process of selecting, cleaning, constructing, integrating, and formatting data so that it can be used for modeling and analysis. The output of this phase is an _Analytical Data Set_, which is used in the Modeling phase. This process usually involves domain knowledge, as domain experts define the parameters needed for a specific goal. In this activity, we will implement commonly used data preparation techniques presented by Hadley Wickham in his paper [*Tidy Data*](https://vita.had.co.nz/papers/tidy-data.pdf). Wickham's paper presented common causes of messiness in data and how to *tidy* them. As his techniques are implemented in R, we will implement the same techniques in Python using Pandas functions. What is Tidy Data? (Wickham, 2014) - A step along the road to clean data - Data that is easy to model, visualize, and aggregate (understandable by computers) - Characteristics: - 1 variable per column - 1 observation/entity per row - 1 value belongs to a variable and an observation (1 value per cell) For now, let's import the following libraries:
###Code
import pandas as pd
import numpy as np
###Output
 _____no_output_____
###Markdown In this example, we rely on datasets published by Wickham (2014). Let's take a look at our sample dataset:
###Code
df_preg = pd.read_csv('preg.csv')
df_preg
###Output
 _____no_output_____
###Markdown **Question:** What are the variables in this data set? **Answer:** Sex, Pregnant Identifier, Count To transform this dataset into its tidy form, we need to place the $Pregnant Identifier$ as one of its columns. In more technical terms, we need to _unpivot_ this data frame. In Pandas, we use the $melt()$ function to do the unpivoting.
###Code
df_preg_unpivot = df_preg.melt(id_vars=["sex"], var_name="is_pregnant", value_name="count")
df_preg_unpivot
###Output
 _____no_output_____
###Markdown $df\_preg\_unpivot$ complies with the characteristics of tidy data. This can now be used for further analysis and other data mining techniques.
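Since $melt()$ is the inverse of a pivot, one quick way to convince yourself that nothing was lost is to pivot the tidy frame back to the wide layout (a small round-trip check, not part of the original workbook):
###Code
# Round-trip check: the tidy frame pivoted back should match the original wide
# layout, apart from column ordering.
df_preg_unpivot.pivot(index='sex', columns='is_pregnant', values='count').reset_index()
###Output
 _____no_output_____
###Markdown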
Aside from the tidy data properties observed in $df\_preg\_unpivot$, we can also observe the following: - No duplicate dimensions with varying measures. - Each row is at the most granular level. _Remember data warehousing?_ This emphasizes the importance of having a data warehouse (and to an extent, data lakes) as the single source of trusted data in an organization. However, it is understood that not all data is captured or owned by an organization; hence, data scientists in corporations are heavily involved in data preparation or wrangling as they capture data of varying formats from various sources (external sources) and merge them with the structured data from their data warehouse (internal sources). There is a common link between the a data warehouse schema (for RDBMS) or cube (for OLAP systems) and the tidy data: _granularity_ . From the data warehousing modules, we learned that each row in fact table must be an instance of a distinct observation. In tidy data, each row has to be a distict observation. We can then change the way how we think of tidy data, in case you forget its characteristics:_**What is the entity that we are targetting to analyze?**__Entity_ may be a customer, a product, or a store, etc., and is usually identified by a unique identifier. By identifying the target entity to be analyzed, we also define the granularity of the data set. Common Causes of Messiness Wickham enumarated 5 scenarios where data can be considered as messy data:1. Column headers are values, not variables names.2. Multiple variables are stored in one column.3. Variables are stored in both rows and columns.4. Multiple types of experimental unit stored in the same table.5. One type of experimental unit stored in multiple tables.In this notebook, we will look at the first two types of messy data and how to tidy them. Mess 1: Column headers are values, not variable names. ###Code df_mess1 = pd.read_csv('mess1.csv') df_mess1 ###Output _____no_output_____ ###Markdown There are 3 variables in this data: **religion, income, count**Similar to $df\_preg$, we need to unpivot the income-labeled columns and place them in 1 column.In the original R package Wickham developed, this process is called *gather* - combining multiple columns into a single column with a key-value pair format. In Pandas, we use the $melt(id\_vars = ["religion"], var\_name = "income", value\_name = "count")$: - id_vars = \["religion"\] since every row is identified by religion - var_name = "income" since the variables at the column headers are income brackets - value_name = "count" since we are taking the frequency or count of households per religion per income bracket ###Code df_tidy1 = df_mess1.melt(id_vars=["religion"], var_name="income", value_name="count") df_tidy1.head() ###Output _____no_output_____ ###Markdown Mess 2: Multiple Variables are stored in one column. ###Code df_mess2 = pd.read_csv('mess2.csv') df_mess2 ###Output _____no_output_____ ###Markdown The data needs to be cleansed due to NaNs. Let's drop them and replace to zeros. ###Code df_mess2.fillna(0, inplace=True) df_mess2 ###Output _____no_output_____ ###Markdown Let's have a look at our columns. ###Code df_mess2.columns ###Output _____no_output_____ ###Markdown This dataset comes from the World Health Organization, and records the counts of confirmed tuberculosis cases by country (iso2), year (year), and demographic group (m04, m514, f04, f514, etc.). 
Looking at our list of columns, *how many total variables do we have?* **4 (country, year, sex, age range)**Now, how do we approach this? Before we transform the table into its tidy version, let's have another round of data understanding. We can see that there is a column named 'new_sp' which has no demographic information. Our assumption is that this is the totals column. Totals can be done on the fly but from granularity perspective, we only need 1 type of granularity and that includes demographics sex and age range. ###Code df_mess2.drop(columns=["new_sp"], inplace=True) df_mess2.head() ###Output _____no_output_____ ###Markdown Now we can further proceed in tidying this dataset. We need to melt the table to bring down the demographics variable into one column. ###Code df_mess2 = df_mess2.melt(id_vars=["iso2", "year"], var_name="demog", value_name="cases") df_mess2.head() ###Output _____no_output_____ ###Markdown Let's clean the elements under the $demog$ column. ###Code # From https://stackoverflow.com/questions/13682044/remove-unwanted-parts-from-strings-in-a-column df_mess2["demog"] = df_mess2["demog"].map(lambda x: x.lstrip('new_sp_')) df_mess2 ###Output _____no_output_____ ###Markdown How do we execute the split of the two variables? We know that the demography data is comprised of two parts: sex (1st letter) and age range (rest of string). Let's split this by getting the substrings and then drop the $demog$ column. ###Code df_mess2["sex"] = df_mess2.demog.str[0] df_mess2["age"] = df_mess2.demog.str[1:len(df_mess2.demog)] df_mess2.drop(columns=["demog"], inplace=True) df_mess2.head() ###Output _____no_output_____ ###Markdown To clean the age column, let's look at the values under it. ###Code df_mess2.age.unique() ###Output _____no_output_____ ###Markdown We can see that there are age groups 0-4, 5-14, and 0-14. To have a better approach here, a deeper look at the data must be done to see if the age groups 0-4 and 5-14 are relevant or not. Since the purpose of this notebook is to teach how to clean messy datasets, let's drop the rows consist of 0-4 and 5-14, for now. ###Code df_mess2 = df_mess2[(df_mess2["age"] != "04") & (df_mess2["age"] != "514")] df_mess2.head() ###Output _____no_output_____ ###Markdown Cleanse the data further to come up with our tidy version. ###Code # Replacing the age group data from "014" to "0-14" df_mess2.replace(["014", "1524", "2534", "3544", "4544", "5564", "65", "u"], ["0-14", "15-24", "25-34", "35-44", "45-44", "55-64", "65+", "unknown"], inplace=True) # Renaming columns df_mess2.columns = ["country", "year", "cases", "sex", "age"] # Rearranging columns and resetting the index df_tidy2 = df_mess2[["country", "year", "sex", "age", "cases"]].reset_index() df_tidy2.head() ###Output C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\frame.py:4172: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy method=method, ###Markdown ------------ Exercise Mess 3: Variables are stored in both rows and columns.From the 2 previous examples of messy data, we dealt with variables stored in columns. As an exercise, you will be doing the tidying of our third dataset, which includes variables stored in rows. Don't worry! If this is your first time dealing with messy data, it may look daunting. But in reality, this is still a simple scenario! 
*(Although according to Wickham, this is the most complicated form of messy data...you'll be the judge :D)*Let me help you on assessing the data set and I'll let you do the rest. ###Code df_mess3 = pd.read_csv('mess3.csv') df_mess3 ###Output _____no_output_____ ###Markdown The data comes from the Global Historical Climatology Network for one weather station (MX17004) in Mexico for five months. It has the minimum (tmin) and maximum (tmax) temperature spread out across columns (days). In variables stored in rows, the reverse of "unpivot" has to be performed. In this case, explore the $pivot()$ function in pandas.Your task is to tidy the data above. Do the necessary cleanup if needed. Write your code below and upload both the code and the output file in our Moodle page. See you on Saturday for the open consultation session! ###Code # Fill up the following information by replacing XXXXXX student_id = "XXXXXX" nickname = "XXXXXX" lastname = "XXXXXX" # If the data needs to be cleansed, write the code below # ------------------------------------------------------ # Write your script here for transforming the data set into its tidy version. # Name your final dataframe as df_tidy3. # -------------------------------------------------------------------------- # DO NOT TOUCH THE CODE BELOW WHEN TESTING YOUR SCRIPTS # Uncomment the code below only when you are done with the script to export the table for submission. # ------------------------------------------------------------------------------------ #filename = student_id + "_" + lastname + "_" + nickname + ".csv" #df_tidy3.to_csv(filename) ###Output _____no_output_____
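###Markdown In case `pivot()` is new to you, here is a tiny illustration of its mechanics on made-up data (deliberately not the weather data, so it does not give the exercise away): rows identified by a key column are turned back into columns.
###Code
toy = pd.DataFrame({
    'id':      [1, 1, 2, 2],
    'element': ['tmin', 'tmax', 'tmin', 'tmax'],
    'value':   [10, 20, 12, 25],
})
toy.pivot(index='id', columns='element', values='value')
###Output
 _____no_output_____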
plot_WR_data.ipynb
###Markdown Fantasy Points ###Code data = df[['Player', 'Team', 'FantasyPoints']].copy() chart = alt.Chart(data).mark_bar().encode( alt.Y( 'FantasyPoints:Q', axis = alt.Axis(title = 'Fantasy Points') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'FantasyPoints', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ), color = alt.Color( 'Team:N', scale = alt.Scale( domain = teams, range = color_codes ) ) ).properties(title = 'Week 1: 0.5 PPR Fantasy Points by Wide Receivers') chart chart.save(PATH + 'plots/WR_fantasypoints_color.png', scale_factor=3.0) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'FantasyPoints:Q', axis = alt.Axis(title = 'Fantasy Points') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'FantasyPoints', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ) ).properties(title = 'Week 1: 0.5 PPR Fantasy Points by Wide Receivers') chart chart.save(PATH + 'plots/WR_fantasypoints_plain.png', scale_factor=3.0) ###Output _____no_output_____ ###Markdown Receiving Yards ###Code data = df[['Player', 'Team', 'ReceivingYards']].copy() chart = alt.Chart(data).mark_bar().encode( alt.Y( 'ReceivingYards:Q', axis = alt.Axis(title = 'Receiving Yards') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'ReceivingYards', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ), color = alt.Color( 'Team:N', scale = alt.Scale( domain = teams, range = color_codes ) ) ).properties(title = 'Week 1: Receiving Yards by Wide Receivers (Top 50)') chart chart.save(PATH + 'plots/WR_receivingyards_color.png', scale_factor=3.0) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'ReceivingYards:Q', axis = alt.Axis(title = 'Receiving Yards') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'ReceivingYards', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ) ).properties(title = 'Week 1: Receiving Yards by Wide Receivers (Top 50)') chart chart.save(PATH + 'plots/WR_receivingyards_plain.png', scale_factor=3.0) ###Output _____no_output_____ ###Markdown Targets ###Code data = df[['Player', 'Team', 'Targets']].copy() # Make sure to run color_codes with updated data extra = pd.DataFrame({ 'Player': ['James Washington', 'Dede Westbrook'], 'Team': ['PIT', 'JAC'], 'Targets': [6, 6]}) data = data.append(extra, ignore_index = True) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'Targets:Q', axis = alt.Axis(title = 'Targets') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'Targets', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ), color = alt.Color( 'Team:N', scale = alt.Scale( domain = teams, range = color_codes ) ) ).properties(title = 'Week 1: Targets for Wide Receivers') chart chart.save(PATH + 'plots/WR_targets_color.png', scale_factor=3.0) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'Targets:Q', axis = alt.Axis(title = 'Targets') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'Targets', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ) ).properties(title = 'Week 1: Targets for Wide Receivers') chart chart.save(PATH + 'plots/WR_targets_plain.png', scale_factor=3.0) ###Output _____no_output_____ ###Markdown Receptions ###Code data = df[['Player', 'Team', 'Receptions']].copy() # Make sure to run color_codes with updated data extra = pd.DataFrame({ 'Player': ['Paul Richardson', 'Calvin Ridley', 'Marquez Valdes-Scantling'], 'Team': ['WAS', 'ATL', 
'GB'], 'Receptions': [4, 4, 4]}) data = data.append(extra, ignore_index = True) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'Receptions:Q', axis = alt.Axis(title = 'Receptions') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'Receptions', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ), color = alt.Color( 'Team:N', scale = alt.Scale( domain = teams, range = color_codes ) ) ).properties(title = 'Week 1: Receptions for Wide Receivers') chart chart.save(PATH + 'plots/WR_receptions_color.png', scale_factor=3.0) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'Receptions:Q', axis = alt.Axis(title = 'Receptions') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'Receptions', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ) ).properties(title = 'Week 1: Receptions for Wide Receivers') chart chart.save(PATH + 'plots/WR_receptions_plain.png', scale_factor=3.0) ###Output _____no_output_____ ###Markdown Yards Per Catch ###Code data = df[['Player', 'Team', 'Receptions', 'ReceivingYards']].copy() data['YPC'] = data['ReceivingYards'] / data['Receptions'] chart = alt.Chart(data).mark_bar().encode( alt.Y( 'YPC:Q', axis = alt.Axis(title = 'Yards Per Catch') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'YPC', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ), color = alt.Color( 'Team:N', scale = alt.Scale( domain = teams, range = color_codes ) ) ).properties(title = 'Week 1: Yards Per Catch for Wide Receivers (Top 50 in 0.5 PPR Points)') chart chart.save(PATH + 'plots/WR_ypc_color.png', scale_factor=3.0) chart = alt.Chart(data).mark_bar().encode( alt.Y( 'YPC:Q', axis = alt.Axis(title = 'Yards Per Catch') ), alt.X( 'Player:N', sort = alt.EncodingSortField( field = 'YPC', op = 'sum', order = 'descending' ), axis = alt.Axis(title = '', labelAngle = -45) ) ).properties(title = 'Week 1: Yards Per Catch for Wide Receivers (Top 50 in 0.5 PPR Points)') chart chart.save(PATH + 'plots/WR_ypc_plain.png', scale_factor=3.0) ###Output _____no_output_____
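###Markdown One caveat with the YPC calculation above (not addressed in the original notebook): a receiver with zero receptions would produce an infinite yards-per-catch value. A small defensive variant:
###Code
import numpy as np

# Turn any division-by-zero results into NaN and drop those rows before charting.
data['YPC'] = (data['ReceivingYards'] / data['Receptions']).replace([np.inf, -np.inf], np.nan)
data = data.dropna(subset=['YPC'])
###Output
 _____no_output_____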
src/NaiveBayes.ipynb
###Markdown Naive BayesThis was just a side-path: Naive Bayes is typically used for text processing with word counts in documents. I tried it with packet counts in a network log. This didn't work well enough to put in production, but for such a simple model, I was pretty impressed with how it did! ###Code import pandas as pd import numpy as np from sklearn.metrics import balanced_accuracy_score, confusion_matrix from sklearn.naive_bayes import MultinomialNB from sklearn.ensemble import BaggingClassifier from mlxtend.plotting import plot_confusion_matrix from data_read import data_read %matplotlib inline %load_ext watermark %watermark -iv -p sklearn,mlxtend train = data_read("train", "fixed") test = data_read("test", "fixed") X_train = train.drop(columns=['label', 'attack_cat']) y_train = train.label.astype(np.int) X_test = test.drop(columns=['label', 'attack_cat']) y_test = test.label.astype(np.int) clf = MultinomialNB() clf.fit(X_train, y_train) y_pred = clf.predict(X_test) balanced_accuracy_score(y_test, y_pred, adjusted=True) cm = confusion_matrix(y_test, y_pred) cm plot_confusion_matrix(cm, show_absolute=False, show_normed=True) # and of course, what do you do with an underperforming predictor? make a whole bunch of them! clf = BaggingClassifier(MultinomialNB(), n_estimators=100, max_features=.50, max_samples=.50, n_jobs=-1, random_state=22) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) balanced_accuracy_score(y_test, y_pred) cm = confusion_matrix(y_test, y_pred) cm plot_confusion_matrix(cm, show_absolute=False, show_normed=True) ###Output _____no_output_____ ###Markdown Naive BayesReferenece : https://www.freecodecamp.org/news/how-naive-bayes-classifiers-work/ ###Code class NaiveBayesClassifier: def __init__(self, X, y): self.X, self.y = X, y self.N = len(self.X) # Training set length self.dim = len(self.X[0]) # Dimension of the vector of features self.attrs = [[] for _ in range(self.dim)] self.output_dom = {} # Output classes with the number of ocurrences in the training set. In this case we have only 2 classes self.data = [] for i in range(len(self.X)): for j in range(self.dim): if not self.X[i][j] in self.attrs[j]: self.attrs[j].append(self.X[i][j]) if not self.y[i] in self.output_dom.keys(): self.output_dom[self.y[i]] = 1 else: self.output_dom[self.y[i]] += 1 self.data.append([self.X[i], self.y[i]]) def classify(self, entry): solve = None # Final result max_arg = -1 # partial maximum for y in self.output_dom.keys(): prob = self.output_dom[y]/self.N # P(y) for i in range(self.dim): cases = [x for x in self.data if x[0][i] == entry[i] and x[1] == y] # all rows with Xi = xi n = len(cases) prob *= n/self.N # P *= P(Xi = xi) # if we have a greater prob for this output than the partial maximum... 
if prob > max_arg: max_arg = prob solve = y return solve import pandas as pd data = pd.read_csv('train.csv') print(data.head()) y = list(map(lambda v: 'yes' if v == 1 else 'no', data['Survived'].values)) X = data[['Pclass', 'Sex', 'Age']].values # features values print(len(y)) # >> 887 # We'll take 600 examples to train and the rest to the validation process y_train = y[:700] y_val = y[700:] X_train = X[:700] X_val = X[700:] ## Creating the Naive Bayes Classifier instance with the training data nbc = NaiveBayesClassifier(X_train, y_train) total_cases = len(y_val) # size of validation set # Well classified examples and bad classified examples good = 0 bad = 0 for i in range(total_cases): predict = nbc.classify(X_val[i]) # print(y_val[i] + ' --------------- ' + predict) if y_val[i] == predict: good += 1 else: bad += 1 print('TOTAL EXAMPLES:', total_cases) print('RIGHT:', good) print('WRONG:', bad) print('ACCURACY:', good/total_cases) ###Output _____no_output_____
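###Markdown Two common refinements to the hand-rolled classifier above (not part of the original code): with raw probability products, a single zero count drives a class's score to zero, and multiplying many small numbers can underflow. The sketch below adds Laplace smoothing and works in log-space, reusing a trained `NaiveBayesClassifier` instance such as `nbc`; the helper name and the smoothing strength `k` are my own choices.
###Code
import math

def classify_smoothed(nbc, entry, k=1.0):
    # Same decision rule as nbc.classify(), but with Laplace smoothing and log-probs.
    best, best_score = None, -math.inf
    for y, n_y in nbc.output_dom.items():
        score = math.log(n_y / nbc.N)                          # log P(y)
        for i in range(nbc.dim):
            n_match = sum(1 for x, label in nbc.data
                          if label == y and x[i] == entry[i])  # count(Xi = xi, Y = y)
            n_values = len(nbc.attrs[i])                       # distinct values of feature i
            score += math.log((n_match + k) / (n_y + k * n_values))  # log P(xi | y)
        if score > best_score:
            best_score, best = score, y
    return best

# e.g. classify_smoothed(nbc, X_val[0])
###Output
 _____no_output_____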
Chapter 11 Introducing Python Statements.ipynb
###Markdown Chapter 10 Introducing Python Statements Python Program Structure Revisited At its core, Python syntax is composed of statements and expressions. Expressionsprocess objects and are embedded in statements. Statements code the larger logic of aprogram’s operation—they use and direct expressions to process the objects we studiedin the preceding chapters. Moreover, statements are where objects spring into existence(e.g., in expressions within assignment statements), and some statements create entirely new kinds of objects (functions, classes, and so on). Statements always exist inmodules, which themselves are managed with statements. This chapter climbs the hierarchy to the next level:1. Programs are composed of modules.2. Modules contain statements.3. Statements contain expressions.4. Expressions create and process objects. A Few Special Cases As mentioned previously, in Python’s syntax model:• The end of a line terminates the statement on that line (without semicolons).• Nested statements are blocked and associated by their physical indentation (without braces). ###Code a = 1; b = 2; print(a + b) # Three statements on one line mlist = [111, 222, 333] print(mlist) ###Output [111, 222, 333] ###Markdown Parentheses are the catchall device—because any expression can be wrapped up inthem, simply inserting a left parenthesis allows you to drop down to the next line andcontinue your statement: ###Code from sympy import * A, B, C, D = symbols('A, B , C , D') X = symbols('X') X = A + B + C + D if (A == 1 and B == 2 and C == 3): print('spam'*3) A, B, C = 1, 2, 3 exec('print("spam");'*3) ###Output spam spam spam ###Markdown An older rule also allows for continuation lines when the prior line ends in a backslash: ###Code X = A + B + \ C + D X ###Output _____no_output_____ ###Markdown This alternative technique is dated, though, and is frowned on today because it’s difficult to notice and maintain the backslashes, and it’s fairly brittle—there can be nospaces after the backslash, and omitting it can have unexpected effects if the next lineis mistaken to be a new statement. It’s also another throwback to the C language, whereit is commonly used in “define” macros; again, when in Pythonland, do as Pythonistasdo, not as C programmers do. Block rule special case ###Code x, y = 26,57 if x > y: print(x) ###Output _____no_output_____ ###Markdown A Simple Interactive Loop Doing Math on User Inputs ###Code while True: reply = input('Enter text:') if reply == 'stop': break print(int(reply) ** 2) print('Bye') ###Output Enter text: 4546 ###Markdown Handling Errors by Testing Inputs ###Code S = '123' T = 'xxx' S.isdigit(), T.isdigit() while True: reply = input('Enter text:') if reply == 'stop': break elif not reply.isdigit(): print('Bad!' * 8) else: print(int(reply) ** 2) print('Bye') ###Output Enter text: 24 ###Markdown Handling Errors with try Statements ###Code while True: reply = input('Enter text:') if reply == 'stop': break try: num = int(reply) except: print('Bad!' * 8) else: print(int(reply) ** 2) print('Bye') ###Output Enter text: 456 ###Markdown Nesting Code Three Levels Deep ###Code while True: reply = input('Enter text:') if reply == 'stop': break elif not reply.isdigit(): print('Bad!' * 8) else: num = int(reply) if num < 20: print('low') else: print(num ** 2) print('Bye') ###Output Enter text: 454 ###Markdown Chapter Summary That concludes our quick look at Python statement syntax. This chapter introducedthe general rules for coding statements and blocks of code. 
As you’ve learned, in Pythonwe normally code one statement per line and indent all the statements in a nested blockthe same amount (indentation is part of Python’s syntax). However, we also looked ata few exceptions to these rules, including continuation lines and single-line tests andloops. Finally, we put these ideas to work in an interactive script that demonstrated ahandful of statements and showed statement syntax in action.In the next chapter, we’ll start to dig deeper by going over each of Python’s basic procedural statements in depth. As you’ll see, though, all statements follow the same general rules introduced here. ###Code import sys from tkinter import * class HelloClass: def __init__(self): widget = Button(None, text = 'Hello event world', command = self.quit) widget.pack() def quit(self): print('Hello class method world') sys.exit() HelloClass() mainloop() # coding:utf-8 import turtle as t import time #皮卡丘 #基础设置 t.screensize(800,600) t.pensize(2) # 设置画笔的大小 t.speed(10) # 设置画笔速度为10 #画左偏曲线函数 def radian_left(ang,dis,step,n): for i in range(n): dis+=step #dis增大step t.lt(ang) #向左转ang度 t.fd(dis) #向前走dis的步长 def radian_right(ang,dis,step,n): for i in range(n): dis+=step t.rt(ang) #向左转ang度 t.fd(dis) #向前走dis的步长 #画耳朵 def InitEars(): t.color("black","yellow") #左耳朵曲线 t.pu() # 提笔 t.goto(-50,100) # 笔头初始位置 t.pd() # 下笔 t.setheading(110)#画笔角度 t.begin_fill() radian_left(1.2,0.4,0.1,40) t.setheading(270) #画笔角度 radian_left(1.2,0.4,0.1,40) t.setheading(44) #画笔角度 t.forward(32) t.end_fill() #右耳朵曲线 t.pu() # 提笔 t.goto(50,100) # 笔头初始位置 t.pd() # 下笔 t.setheading(70)#画笔角度 t.begin_fill() radian_right(1.2,0.4,0.1,40) t.setheading(270) #画笔角度 radian_right(1.2,0.4,0.1,40) t.setheading(136) #画笔角度 t.forward(32) t.end_fill() #耳朵黑 t.begin_fill() t.fillcolor("black") t.pu() # 提笔 t.goto(88,141) # 笔头初始位置 t.pd() # 下笔 t.setheading(35)#画笔角度 radian_right(1.2,1.6,0.1,16) t.setheading(270) #画笔角度 radian_right(1.2,0.4,0.1,25) t.setheading(132) #画笔角度 t.forward(31) t.end_fill() t.begin_fill() t.fillcolor("black") t.pu() # 提笔 t.goto(-88,141) # 笔头初始位置 t.pd() # 下笔 t.setheading(145)#画笔角度 radian_left(1.2,1.6,0.1,16) t.setheading(270) #画笔角度 radian_left(1.2,0.4,0.1,25) t.setheading(48) #画笔角度 t.forward(31) t.end_fill() #画尾巴 def InitTail(): #尾巴 t.begin_fill() t.fillcolor("yellow") t.pu() # 提笔 t.goto(64,-140) # 笔头初始位置 t.pd() # 下笔 t.setheading(10) #画笔角度 t.forward(20) t.setheading(90) #画笔角度 t.forward(20) t.setheading(10) #画笔角度 t.forward(10) t.setheading(80) #画笔角度 t.forward(100) t.setheading(35) #画笔角度 t.forward(80) t.setheading(260) #画笔角度 t.forward(100) t.setheading(205) #画笔角度 t.forward(40) t.setheading(260) #画笔角度 t.forward(37) t.setheading(205) #画笔角度 t.forward(20) t.setheading(260) #画笔角度 t.forward(25) t.setheading(175) #画笔角度 t.forward(30) t.setheading(100) #画笔角度 t.forward(13) t.end_fill() #画脚 def InitFoots(): #脚 t.begin_fill() t.fillcolor("yellow") t.pensize(2) t.pu() # 提笔 t.goto(-70,-200) # 笔头初始位置 t.pd() # 下笔 t.setheading(225) #画笔角度 radian_left(0.5,1.2,0,12) radian_left(35,0.6,0,4) radian_left(1,1.2,0,18) t.setheading(160) #画笔角度 t.forward(13) t.end_fill() t.begin_fill() t.fillcolor("yellow") t.pensize(2) t.pu() # 提笔 t.goto(70,-200) # 笔头初始位置 t.pd() # 下笔 t.setheading(315) #画笔角度 radian_right(0.5,1.2,0,12) radian_right(35,0.6,0,4) radian_right(1,1.2,0,18) t.setheading(20) #画笔角度 t.forward(13) t.end_fill() #画身体 def InitBody(): #外形轮廓 t.begin_fill() t.pu() # 提笔 t.goto(112,0) # 笔头初始位置 t.pd() # 下笔 t.setheading(90) #画笔角度 t.circle(112,180) t.setheading(250) #画笔角度 radian_left(1.6,1.3,0,50) radian_left(0.8,1.5,0,25) t.setheading(255) #画笔角度 
radian_left(0.4,1.6,0.2,27) radian_left(2.8,1,0,45) radian_right(0.9,1.4,0,31) t.setheading(355) #画笔角度 radian_right(0.9,1.4,0,31) radian_left(2.8,1,0,45) radian_left(0.4,7.2,-0.2,27) t.setheading(10) #画笔角度 radian_left(0.8,1.5,0,25) radian_left(1.6,1.3,0,50) t.end_fill() def InitEyes(): #左眼睛 t.begin_fill() t.fillcolor("black") t.pu() # 提笔 t.goto(-46,10) # 笔头初始位置 t.pd() # 下笔 t.setheading(90) #画笔角度 t.circle(5,360) t.end_fill() #右眼睛 t.begin_fill() t.fillcolor("black") t.pu() # 提笔 t.goto(46,10) # 笔头初始位置 t.pd() # 下笔 t.setheading(-90) #画笔角度 t.circle(5,360) t.end_fill() #画脸 def InitFace(): #脸蛋 t.begin_fill() t.fillcolor("red") t.pu() # 提笔 t.goto(-63,-10) # 笔头初始位置 t.pd() # 下笔 t.setheading(90) #画笔角度 t.circle(10,360) t.end_fill() t.begin_fill() t.fillcolor("red") t.pu() # 提笔 t.goto(63,-10) # 笔头初始位置 t.pd() # 下笔 t.setheading(-90) #画笔角度 t.circle(10,360) t.end_fill() #嘴巴 t.pensize(2.2) t.pu() # 提笔 t.goto(0,0) # 笔头初始位置 t.pd() # 下笔 t.setheading(235) #画笔角度 radian_right(5,0.8,0,30) t.pu() # 提笔 t.goto(0,0) # 笔头初始位置 t.pd() # 下笔 t.setheading(305) #画笔角度 radian_left(5,0.8,0,30) #画手 def InitHands(): #左手 t.pensize(2) t.pu() # 提笔 t.goto(-46,-100) # 笔头初始位置 t.pd() # 下笔 t.setheading(285) #画笔角度 radian_right(0.4,1.2,0,26) radian_right(5,0.35,0,26) radian_right(0.3,1.2,0,15) #右手 t.pu() # 提笔 t.goto(46,-100) # 笔头初始位置 t.pd() # 下笔 t.setheading(255) #画笔角度 radian_left(0.4,1.2,0,26) radian_left(5,0.35,0,26) radian_left(0.3,1.2,0,15) def CloseEyes(): #左眼睛 t.pu() # 提笔 t.goto(-46,12) # 笔头初始位置 t.pd() # 下笔 t.setheading(180) #画笔角度 t.forward(10) #右眼睛 t.pu() # 提笔 t.goto(46,12) # 笔头初始位置 t.pd() # 下笔 t.setheading(0) #画笔角度 t.forward(10) #初始化 def Init(): InitEars() InitTail() InitFoots() InitBody() InitFace() InitHands() InitEyes() #眨眼睛 def Upgarde(): InitEars() InitTail() InitFoots() InitBody() InitFace() InitHands() CloseEyes() def Upgarde_Init(): InitEars() InitTail() InitFoots() InitBody() InitFace() InitHands() InitEyes() def main(): Init() t.tracer(False) #眨眼睛动画 for i in range(30): if i%2==0: t.reset() t.hideturtle() Upgarde() t.update() time.sleep(0.3) else: t.reset() t.hideturtle() Upgarde_Init() t.update() time.sleep(1) main() #结束画笔 t.done() import turtle as T import random import time # 画樱花的躯干(60,t) def Tree(branch, t): time.sleep(0.000005) if branch > 3: if 8 <= branch <= 12: if random.randint(0, 2) == 0: t.color('snow') # 白 else: t.color('lightcoral') # 淡珊瑚色 t.pensize(branch / 3) elif branch < 8: if random.randint(0, 1) == 0: t.color('snow') else: t.color('lightcoral') # 淡珊瑚色 t.pensize(branch / 2) else: t.color('sienna') # 赭(zhě)色 t.pensize(branch / 10) # 6 t.forward(branch) a = 1.5 * random.random() t.right(20 * a) b = 1.5 * random.random() Tree(branch - 10 * b, t) t.left(40 * a) Tree(branch - 10 * b, t) t.right(20 * a) t.up() t.backward(branch) t.down() # 掉落的花瓣 def Petal(m, t): for i in range(m): a = 200 - 400 * random.random() b = 10 - 20 * random.random() t.up() t.forward(b) t.left(90) t.forward(a) t.down() t.color('lightcoral') # 淡珊瑚色 t.circle(1) t.up() t.backward(a) t.right(90) t.backward(b) # 绘图区域 t = T.Turtle() # 画布大小 w = T.Screen() t.hideturtle() # 隐藏画笔 t.getscreen().tracer(5, 0) w.screensize(bg='wheat') # wheat小麦 t.left(90) t.up() t.backward(150) t.down() t.color('sienna') # 画樱花的躯干 Tree(79, t) # 掉落的花瓣 Petal(200, t) w.exitonclick() from turtle import * from random import * from math import * def tree(n,l): pd()#下笔 #阴影效果 t = cos(radians(heading()+45))/8+0.25 pencolor(t,t,t) pensize(n/3) forward(l)#画树枝 if n>0: b = random()*15+10 #右分支偏转角度 c = random()*15+10 #左分支偏转角度 d = l*(random()*0.25+0.7) #下一个分支的长度 #右转一定角度,画右分支 right(b) 
tree(n-1,d) #左转一定角度,画左分支 left(b+c) tree(n-1,d) #转回来 right(c) else: #画叶子 right(90) n=cos(radians(heading()-45))/4+0.5 pencolor(n,n*0.8,n*0.8) circle(3) left(90) #添加0.3倍的飘落叶子 if(random()>0.7): pu() #飘落 t = heading() an = -40 +random()*40 setheading(an) dis = int(800*random()*0.5 + 400*random()*0.3 + 200*random()*0.2) forward(dis) setheading(t) #画叶子 pd() right(90) n = cos(radians(heading()-45))/4+0.5 pencolor(n*0.5+0.5,0.4+n*0.4,0.4+n*0.4) circle(2) left(90) pu() #返回 t=heading() setheading(an) backward(dis) setheading(t) pu() backward(l)#退回 bgcolor(0.5,0.5,0.5)#背景色 ht()#隐藏turtle speed(2)#速度 1-10渐进,0 最快 tracer(0,0) pu()#抬笔 backward(100) left(90)#左转90度 pu()#抬笔 backward(300)#后退300 tree(12,100)#递归7层 done() from turtle import * from random import * from math import * def tree(n, l): pd() t = cos(radians(heading() + 45)) / 8 + 0.25 pencolor(t, t, t) pensize(n / 4) forward(l) if n > 0: b = random() * 15 + 10 c = random() * 15 + 10 d = l * (random() * 0.35 + 0.6) right(b) tree(n - 1, d) left(b + c) tree(n - 1, d) right(c) else: right(90) n = cos(radians(heading() - 45)) / 4 + 0.5 pencolor(n, n, n) circle(2) left(90) pu() backward(l) bgcolor(0.5, 0.5, 0.5) ht() speed(5) tracer(0, 0) left(90) pu() backward(300) tree(20, 100) done() from turtle import * import random import time n = 100.0 speed(10) screensize(bg='seashell') left(90) forward(3*n) color("orange", "yellow") begin_fill() left(126) for i in range(5): forward(n/5) right(144) forward(n/5) left(72) end_fill() right(126) color("dark green") backward(n*4.8) def tree(d, s): if d <= 0: return forward(s) tree(d-1, s*.8) right(120) tree(d-3, s*.5) right(120) tree(d-3, s*.5) right(120) backward(s) tree(15, n) backward(n/2) for i in range(200): a = 200 - 400 * random.random() b = 10 - 20 * random.random() up() forward(b) left(90) forward(a) down() if random.randint(0, 1) == 0: color('tomato') else: color('wheat') circle(2) up() backward(a) right(90) backward(b) # time.sleep(60) # 魔法少女 import turtle as te import time WriteStep = 5000 # 贝塞尔函数的取样次数 Speed = 5 Width = 1800 # 界面宽度 Height = 1800 # 界面高度 Xh = 0 # 记录前一个贝塞尔函数的手柄 Yh = 0 def Bezier(p1, p2, t): # 一阶贝塞尔函数 return p1 * (1 - t) + p2 * t def Bezier_2(x1, y1, x2, y2, x3, y3): # 二阶贝塞尔函数 te.goto(x1, y1) te.pendown() for t in range(0, WriteStep + 1): x = Bezier(Bezier(x1, x2, t / WriteStep), Bezier(x2, x3, t / WriteStep), t / WriteStep) y = Bezier(Bezier(y1, y2, t / WriteStep), Bezier(y2, y3, t / WriteStep), t / WriteStep) te.goto(x, y) te.penup() def Bezier_3(x1, y1, x2, y2, x3, y3, x4, y4): # 三阶贝塞尔函数 x1 = -Width / 2 + x1 y1 = Height / 2 - y1 x2 = -Width / 2 + x2 y2 = Height / 2 - y2 x3 = -Width / 2 + x3 y3 = Height / 2 - y3 x4 = -Width / 2 + x4 y4 = Height / 2 - y4 # 坐标变换 te.goto(x1, y1) te.pendown() for t in range(0, WriteStep + 1): x = Bezier(Bezier(Bezier(x1, x2, t / WriteStep), Bezier(x2, x3, t / WriteStep), t / WriteStep), Bezier(Bezier(x2, x3, t / WriteStep), Bezier(x3, x4, t / WriteStep), t / WriteStep), t / WriteStep) y = Bezier(Bezier(Bezier(y1, y2, t / WriteStep), Bezier(y2, y3, t / WriteStep), t / WriteStep), Bezier(Bezier(y2, y3, t / WriteStep), Bezier(y3, y4, t / WriteStep), t / WriteStep), t / WriteStep) te.goto(x, y) te.penup() def Moveto(x, y): # 移动到svg坐标下(x,y) te.penup() te.goto(-Width / 2 + x, Height / 2 - y) def line(x1, y1, x2, y2): # 连接svg坐标下两点 te.penup() te.goto(-Width / 2 + x1, Height / 2 - y1) te.pendown() te.goto(-Width / 2 + x2, Height / 2 - y2) te.penup() def lineto(dx, dy): # 连接当前点和相对坐标(dx,dy)的点 te.pendown() te.goto(te.xcor() + dx, te.ycor() - dy) te.penup() def Lineto(x, y): # 
连接当前点和svg坐标下(x,y) te.pendown() te.goto(-Width / 2 + x, Height / 2 - y) te.penup() def Horizontal(x): # 做到svg坐标下横坐标为x的水平线 te.pendown() te.setx(x - Width / 2) te.penup() def horizontal(dx): # 做到相对横坐标为dx的水平线 te.seth(0) te.pendown() te.fd(dx) te.penup() def vertical(dy): # 做到相对纵坐标为dy的垂直线 te.seth(-90) te.pendown() te.fd(dy) te.penup() te.seth(0) def polyline(x1, y1, x2, y2, x3, y3): # 做svg坐标下的折线 te.penup() te.goto(-Width / 2 + x1, Height / 2 - y1) te.pendown() te.goto(-Width / 2 + x2, Height / 2 - y2) te.goto(-Width / 2 + x3, Height / 2 - y3) te.penup() def Curveto(x1, y1, x2, y2, x, y): # 三阶贝塞尔曲线到(x,y) te.penup() X_now = te.xcor() + Width / 2 Y_now = Height / 2 - te.ycor() Bezier_3(X_now, Y_now, x1, y1, x2, y2, x, y) global Xh global Yh Xh = x - x2 Yh = y - y2 def curveto_r(x1, y1, x2, y2, x, y): # 三阶贝塞尔曲线到相对坐标(x,y) te.penup() X_now = te.xcor() + Width / 2 Y_now = Height / 2 - te.ycor() Bezier_3(X_now, Y_now, X_now + x1, Y_now + y1, X_now + x2, Y_now + y2, X_now + x, Y_now + y) global Xh global Yh Xh = x - x2 Yh = y - y2 def Smooth(x2, y2, x, y): # 平滑三阶贝塞尔曲线到(x,y) global Xh global Yh te.penup() X_now = te.xcor() + Width / 2 Y_now = Height / 2 - te.ycor() Bezier_3(X_now, Y_now, X_now + Xh, Y_now + Yh, x2, y2, x, y) Xh = x - x2 Yh = y - y2 def smooth_r(x2, y2, x, y): # 平滑三阶贝塞尔曲线到相对坐标(x,y) global Xh global Yh te.penup() X_now = te.xcor() + Width / 2 Y_now = Height / 2 - te.ycor() Bezier_3(X_now, Y_now, X_now + Xh, Y_now + Yh, X_now + x2, Y_now + y2, X_now + x, Y_now + y) Xh = x - x2 Yh = y - y2 te.tracer(10) te.setup(Width, Height, 0, 0) te.pensize(1) te.speed(Speed) te.penup() # 图层_2 # time.sleep(20) te.color("black", "#F2F2F2") # 外套 Moveto(61, 462) te.begin_fill() smooth_r(12, -41, 27, -58) curveto_r(-6, -36, 6, -118, 9, -132) curveto_r(-15, -27, -23, -51, -26, -74) curveto_r(4, -66, 38, -105, 65, -149) Horizontal(486) curveto_r(12, 24, 40, 99, 33, 114) curveto_r(39, 82, 55, 129, 39, 144) smooth_r(-31, 23, -39, 28) smooth_r(-12, 37, -12, 37) lineto(50, 92) Horizontal(445) smooth_r(-29, -38, -31, -46) smooth_r(78, -107, 72, -119) Smooth(355, 178, 340, 176) Smooth(272, 63, 264, 64) smooth_r(-29, 67, -27, 73) Curveto(99, 292, 174, 428, 173, 439) smooth_r(-8, 23, -8, 23) Lineto(61, 462) te.end_fill() Moveto(60.5, 461.5) # 阴影 te.color("black", "#D3DFF0") te.begin_fill() curveto_r(0, 0, 17, -42, 27, -59) curveto_r(-6, -33, 6, -128, 10, -133) curveto_r(-15, -10, -27, -66, -27.285, -75) te.pencolor("#D3DFF0") curveto_r(12.285, 11, 82.963, 156, 82.963, 156) te.pencolor("black") smooth_r(12.322, 75, 19.322, 86) curveto_r(-1, 11, -8, 25, -8, 25) Horizontal(60.5) te.end_fill() Moveto(444.5, 464) te.begin_fill() curveto_r(0, 0, -29, -36, -31, -46) smooth_r(53.59, -82.337, 53.59, -82.337) te.pencolor("#D3DFF0") smooth_r(86.41, -47.663, 96.072, -54.85) Curveto(563.5, 297.5, 570.5, 299.5, 518.5, 334) te.pencolor("black") curveto_r(-2, 16, -12, 33, -12, 37) smooth_r(50, 92, 50, 93) Horizontal(444.5) te.end_fill() Moveto(195, 49) te.begin_fill() te.pencolor("#D3DFF0") polyline(195, 49, 175.5, 106.5, 202.522, 49) te.pencolor("black") Horizontal(195) te.pencolor("#D3DFF0") te.end_fill() Moveto(327.997, 49) te.begin_fill() te.pencolor("#D3DFF0") curveto_r(0, 0, 11.503, 121.087, 13.503, 128.087) curveto_r(11, 2, 54, 37, 54, 37) lineto(-40, -165.087) te.pencolor("black") Horizontal(327.997) te.pencolor("#D3DFF0") te.end_fill() te.pencolor("black") line(94.5, 397.5, 107.5, 373.5) # 皱纹 line(122.5, 317.5, 95.875, 274.699) line(122.5, 341.5, 141.5, 402.5) line(141.5, 409.5, 153.5, 431.5) # line(328,47.712,344,175.977) 
line(340.023, 49, 360.5, 144) # line(353.5,47.5,395.5,208.5) line(478.5, 95.5, 518.5, 161.5) line(518.5, 332.5, 460.5, 359.5) polyline(506.5, 369.5, 493.5, 402.5, 502.5, 443.5) Moveto(530, 429) curveto_r(4, 16, -5, 33, -5, 33) # 图层_3 te.color("black", "#2b1d2a") # 外套内侧 Moveto(225, 462) te.begin_fill() Horizontal(165) smooth_r(9, -15, 8, -25) curveto_r(-47, -126, 6, -212, 12, -225) Curveto(185, 305, 202, 428, 225, 462) Lineto(225, 462) te.end_fill() Moveto(390, 462) te.begin_fill() curveto_r(10, -23, 34, -180, 35, -222) # !!!227 curveto_r(7, 4, 54, 45, 61, 61) # 61 smooth_r(-73, 101, -72, 118) curveto_r(5, 15, 31, 46, 31, 45) Lineto(390, 462) te.end_fill() # 图层_4 te.color("black", "#2b1d29") # 外套内侧 Moveto(225, 462) te.begin_fill() curveto_r(-28, -50, -40, -166, -40, -250) curveto_r(6, 51, -6, 87, 45, 106) smooth_r(64, 27, 89, 24) smooth_r(49, -18, 56, -20) smooth_r(50, -10, 51, -85) curveto_r(0, 29, -25, 201, -36, 225) Lineto(225, 462) te.end_fill() # 图层_5 te.color("black", "#3D3D3D") # 衣服 Moveto(225, 462) te.begin_fill() curveto_r(-5, -5, -22, -53, -23, -70) lineto(32, -13) curveto_r(3, -25, 6, -28, 12, -36) smooth_r(13, -12, 16, -12) vertical(-2) curveto_r(45, 20, 64, 14, 94, 1) vertical(2) curveto_r(8, -2, 15, 2, 17, 4) smooth_r(0, 6, -2, 9) curveto_r(10, 10, 10, 29, 11, 33) smooth_r(23, 4, 25, 6) smooth_r(-17, 83, -17, 78) Lineto(225, 462) te.end_fill() # 图层_6 te.color("black", "#968281") # 脖子 Moveto(262, 329) te.begin_fill() vertical(17) curveto_r(1, 2, 44, 14, 45, 15) smooth_r(3, 12, 3, 12) horizontal(3) vertical(-5) curveto_r(1, -3, 4, -6, 5, -7) lineto(36, -14) curveto_r(1, -1, 3, -16, 2, -17) Curveto(318, 348, 296, 344, 262, 329) te.end_fill() # 图层_8 te.color("black", "#E7F1FF") # 白色褶皱 Moveto(225, 462) te.begin_fill() lineto(-3, -5) # -3,-3,-3,-5 curveto_r(0, -2, 4, -4, 5, -6) smooth_r(16, 3, 19, -8) smooth_r(0, -7, 0, -11) smooth_r(5, -8, 9, -5) smooth_r(19, -8, 19, -11) smooth_r(6, -7, 6, -7) smooth_r(7, -2, 9, -4) lineto(41, -2) lineto(12, 9) smooth_r(3, 15, 7, 18) smooth_r(15, 4, 17, 4) smooth_r(4, -4, 6, -4) smooth_r(6, 4, 5, 9) smooth_r(0, 9, 0, 9) smooth_r(1, 7, 7, 6) smooth_r(8, 0, 8, 0) lineto(-2, 8) Lineto(225, 462) te.end_fill() te.pensize(2) Moveto(240, 450) smooth_r(0, 9, 3, 12) Moveto(372, 462) curveto_r(-2, -4, -5, -29, -7, -28) te.pensize(1) # 图层_7 te.color("black", "#A2B8D6") # 衣领 Moveto(262, 331) te.begin_fill() curveto_r(0, 8, -1, 13, 0, 15) smooth_r(43, 14, 45, 15) lineto(3, 12) horizontal(3) smooth_r(-1, -3, 0, -5) lineto(5, -7) lineto(36, -14) curveto_r(1, -1, 2, -12, 2, -15) smooth_r(25, -2, 15, 13) curveto_r(-2, 4, -7, 29, -7, 32) smooth_r(-35, 19, -41, 22) smooth_r(-9, 14, -12, 14) smooth_r(-7, -12, -14, -15) curveto_r(-19, -2, -41, -25, -41, -25) smooth_r(-10, -26, -10, -30) Smooth(255, 332, 262, 331) te.end_fill() Moveto(262, 346) lineto(-12, -6) Moveto(369, 333) curveto_r(2, 4, -6, 10, -15, 14) # 图层_9 te.color("black", "#151515") # 领结 Moveto(247, 358) te.begin_fill() curveto_r(-5, 3, -8, 20, -6, 23) curveto_r(25, 21, 50, 17, 50, 17) lineto(-23, 64) horizontal(22) smooth_r(1, -13, 2, -16) lineto(13, -50) curveto_r(2, 2, 7, 3, 10, 1) smooth_r(18, 65, 18, 65) horizontal(19) lineto(-24, -65) curveto_r(21, 5, 39, -10, 44, -13) curveto_r(5, -20, 1, -21, 0, -24) curveto_r(-18, -2, -49, 15, -52, 17) smooth_r(-11, -3, -15, -1) Smooth(252, 356, 247, 358) te.end_fill() # 图层_10 te.color("black", "#A2B8D6") # 衣领(透过领结) Moveto(297, 387) te.begin_fill() lineto(-11, 6) curveto_r(-1, 0, -20, -7, -30, -19) Curveto(259, 373, 297, 385, 297, 387) te.end_fill() Moveto(323, 
384) te.begin_fill() lineto(8, 7) lineto(30, -14) curveto_r(1, -1, 5, -6, 4, -7) Smooth(329, 379, 323, 384) te.end_fill() # 图层_11 te.color("black", "#F3EEEB") # 脸 Moveto(185, 212) te.begin_fill() curveto_r(4, -9, 46, -77, 52, -75) curveto_r(-2, -17, 19, -68, 27, -73) curveto_r(16, 15, 71, 108, 76, 112) smooth_r(76, 53, 86, 60) curveto_r(0, 65, -27, 75, -31, 76) curveto_r(-50, 28, -70, 30, -85, 30) smooth_r(-77, -22, -86, -26) Curveto(180, 302, 186, 228, 185, 212) te.end_fill() # 图层_12 te.color("black", "#2B1D29") # 头发 Moveto(189, 202) te.begin_fill() curveto_r(-1, 22, 19, 51, 19, 51) smooth_r(-10, -42, 7, -92) Curveto(212, 168, 196, 189, 189, 202) te.end_fill() Moveto(221, 155) te.begin_fill() curveto_r(-2, 6, 5, 48, 5, 48) smooth_r(18, -28, 20, -48) curveto_r(-5, 24, 4, 43, 7, 50) curveto_r(-10, -49, 3, -72, 13, -106) curveto_r(-2, -7, -3, -32, -3, -35) curveto_r(-17, 18, -27, 71, -27, 71) Lineto(221, 155) te.end_fill() Moveto(264, 64) te.begin_fill() curveto_r(-4, 5, 14, 100, 14, 100) smooth_r(-6, -79, -5, -85) curveto_r(0, 98, 49, 139, 49, 139) smooth_r(8, -50, 3, -65) Smooth(272, 64, 264, 64) te.end_fill() Moveto(342, 176) te.begin_fill() curveto_r(-1, 27, -10, 57, -10, 57) smooth_r(20, -33, 17, -54) Lineto(342, 176) te.end_fill() te.penup() te.begin_fill() polyline(349, 180, 353, 203, 361, 203) polyline(361, 203, 362, 188, 349, 180) te.end_fill() # 图层_13 te.pensize(2) Moveto(210, 180) # 眉毛 curveto_r(5, -4, 63, 9, 63, 14) Moveto(338, 193) curveto_r(0, -3, 18, -6, 18, -6) te.pensize(1) # 图层_14 te.color("black", "#D1D1D1") # 眼睛1 te.pensize(2) Moveto(206, 212) te.begin_fill() lineto(15, -7) curveto_r(4, -1, 26, -2, 30, 0) smooth_r(10, 3, 12, 7) te.pencolor("#D1D1D1") te.pensize(1) smooth_r(2, 27, -1, 30) smooth_r(-39, 5, -44, 1) Smooth(206, 212, 206, 212) te.end_fill() Moveto(384, 204) te.begin_fill() te.pencolor("black") te.pensize(2) curveto_r(-3, -1, -18, -1, -28, 1) smooth_r(-9, 6, -10, 9) te.pencolor("#D1D1D1") te.pensize(1) smooth_r(3, 18, 6, 23) smooth_r(38, 6, 40, 4) smooth_r(10, -9, 13, -22) te.pencolor("black") te.pensize(2) Lineto(384, 204) te.end_fill() # 图层_15 te.color("#0C1631", "#0C1631") # 眼睛2 te.pensize(1) Moveto(216, 206) te.begin_fill() curveto_r(-1, 5, 0, 26, 7, 35) smooth_r(30, 2, 33, 0) smooth_r(5, -31, 2, -34) Smooth(219, 203, 216, 206) te.end_fill() Moveto(354, 207) te.begin_fill() curveto_r(-2, 1, 2, 29, 4, 31) smooth_r(30, 3, 33, 1) smooth_r(6, -24, 4, -27) lineto(-11, -8) Curveto(382, 204, 357, 206, 354, 207) te.end_fill() # 图层_17 te.color("#F5F5F5", "#F5F5F5") # 眼睛3 Moveto(253, 211) te.begin_fill() curveto_r(-3, 0, -8, 8, 1, 10) Smooth(258, 210, 253, 211) te.end_fill() Moveto(392, 209) te.begin_fill() lineto(4, 3) vertical(4) lineto(-4, 2) Curveto(386, 214, 392, 209, 392, 209) te.end_fill() # 图层_18 te.color("#352F53", "#352F53") # 眼睛4 Moveto(219, 229) te.begin_fill() smooth_r(2, -5, 6, -4) smooth_r(18, 13, 27, 1) curveto_r(3, 0, 5, 3, 5, 3) vertical(13) Horizontal(224) Lineto(219, 229) te.end_fill() Moveto(357, 227) te.begin_fill() smooth_r(4, -6, 10, -2) smooth_r(10, 13, 19, 1) curveto_r(6, 0, 8, 6, 8, 6) lineto(-2, 9) curveto_r(-12, 3, -29, 0, -32, -2) Smooth(357, 227, 357, 227) te.end_fill() # 图层_19 te.color("#9A90CB", "#9A90CB") # 眼睛5 Moveto(227, 231) te.begin_fill() curveto_r(-6, 0, -5, 5, -3, 8) smooth_r(24, 2, 27, 0) smooth_r(0, -8, -1, -8) Smooth(234, 231, 227, 231) te.end_fill() Moveto(361, 227) te.begin_fill() curveto_r(2, 18, 26, 14, 30, 6) smooth_r(-1, -3, -2, -4) smooth_r(-15, 9, -24, -4) Curveto(363, 224, 361, 225, 361, 227) te.end_fill() # 图层_16 
te.pencolor("black") # 眼睛(线条) te.pensize(3) # Moveto(206,213) # lineto(14,-8) # curveto_r(3,-1,30,0,33,1) # lineto(10,6) Moveto(225, 215) curveto_r(10, 28, 22, 16, 24, 6) Moveto(365, 219) curveto_r(4, 14, 18, 24, 22, -3) te.pensize(2) line(240.5, 207.5, 227.5, 211.5) line(245.5, 209.5, 227.5, 214.5) line(247.5, 211.5, 227.5, 217.5) line(247.5, 214.5, 229.5, 220.5) line(247.5, 218.5, 230.5, 223.5) line(246.5, 222.5, 232.5, 226.5) line(244.5, 225.5, 234.5, 228.5) line(377.5, 207.5, 367.5, 210.5) line(384.5, 207.5, 366.5, 212.5) line(385.5, 210.5, 366.5, 215.5) line(384.5, 213.5, 366.5, 218.5) line(384.5, 215.5, 367.5, 220.5) line(384.5, 218.5, 368.5, 223.5) # line(383.5,220.5,368.5,225.5) line(382.5, 223.5, 370.5, 227.5) # line(381.5,226.5,373.5,229.5) # 图层_20 te.pencolor("black") Moveto(309, 270) # 鼻子、嘴 curveto_r(0, 0, 4, 7, 1, 9) line(296.5, 307.5, 303.5, 307.5) Moveto(315, 307) smooth_r(10, -1, 10, 2) te.penup() te.hideturtle() te.update() te.done() import turtle as T import random import time # 画樱花的躯干(60,t) def Tree(branch, t): t.hideturtle() time.sleep(0.00009) if branch > 3: if 8 <= branch <= 12: if random.randint(0, 2) == 0: t.color('snow') # 白 else: t.color('lightcoral') # 淡珊瑚色 t.pensize(branch / 3) elif branch < 8: if random.randint(0, 1) == 0: t.color('snow') else: t.color('lightcoral') # 淡珊瑚色 t.pensize(branch / 2) else: t.color('sienna') # 赭(zhě)色 t.pensize(branch / 10) # 6 t.forward(branch) a = 1.5 * random.random() t.right(20 * a) b = 1.5 * random.random() Tree(branch - 10 * b, t) t.left(40 * a) Tree(branch - 10 * b, t) t.right(20 * a) t.up() t.backward(branch) t.down() T # 掉落的花瓣 def Petal(m, t): T.speed(0) for i in range(m): a = 200 - 400 * random.random() b = 10 - 20 * random.random() t.up() t.forward(b) t.left(90) t.forward(a) t.down() t.color('lightcoral') # 淡珊瑚色 t.circle(1) t.up() t.backward(a) t.right(90) t.backward(b) # 绘图区域 t = T.Turtle() # 画布大小 w = T.Screen() t.hideturtle() # 隐藏画笔 T.showturtle() t.getscreen().tracer(5, 0) w.screensize(3000,3000,bg='wheat') # wheat小麦 t.left(90) t.up() t.backward(150) t.down() t.color('sienna') t.hideturtle() T.speed(0) # 画樱花的躯干 Tree(79, t) # 掉落的花瓣 Petal(200, t) w.exitonclick() from turtle import * colors = ['red', 'purple', 'blue', 'green', 'yellow', 'orange'] for x in range(360): pencolor(colors[x % 6]) width(x / 100 + 1) forward(x) left(59) a,*b = 'spam' a,b spam = ham = 'lunch' spam += str(42) spam nudge = 1 wink = 2 A, B = nudge, wink print(A, B) [C, D] = [nudge, wink] print(C,D) ###Output 1 2 1 2 ###Markdown Advanced sequence assignment patterns ###Code string = 'SPAM' a, b, c, d = string a, d a, b, c, d a, b, c = string[0], string[1], string[2:] #Index and slice a, b, c a, b, c = list(string[:2]) + [string[2:]] a, b, c (a, b), c = string[:2], string[2:] a, b, c ((a, b), c) = ('SP',"AM") # Paired by shape and position a, b, c red, green, blue = range(3) red, blue L = [1,2,3,4] while L: front, L = L[0], L[1:] # See next section for 3.0 alternative print(front, L) L =[1,2,3,4] while L: front = L.pop(0) print(front, L) seq = [1,2,3,4] a, b, c, d = seq print(a, b, c, d) a, *b = seq a, b *a, b = seq a, b a, *b, c = seq a, b, c a, b, *c = seq a, *b = 'spam' print(a,b) a, *b, c = 'spam' a, b, c ###Output s ['p', 'a', 'm'] ###Markdown Boundary cases ###Code seq a, b, c, *d = seq print(a, b, c, d) a, b, c, d, *e = seq print(a,b,c,d,e) a, b, *e, c, d = seq print(a,b,c,d,e) *a, = seq a T = (seq[1]) # A one-item tuple (not an expression) type(T), type((seq[1],)) seq a, *b = seq a, b a, b = seq[0], seq[1:] # First, rest : traditional a, b for 
(a, *b, c) in [(1,2,3,4),(5,6,7,8),(9,10,11,12)]: print(a,b,c) for allnum in [(1,2,3,4),(5,6,7,8)]: a, b, c = allnum[0], allnum[1:3], allnum[3] ###Output _____no_output_____ ###Markdown Multiple-Target Assignments ###Code a = b = c = 'spam' a, b, c c = 'spam' b = c a = b T= [] T.append([123, 'xyz', 'zara', 'abc']) T ###Output _____no_output_____ ###Markdown Multiple-target assignment and shared references ###Code a = b = 0 b = b + 1 a, b ###Output _____no_output_____ ###Markdown Here, changing b only changes b because numbers do not support in-place changes. Aslong as the object assigned is immutable, it’s irrelevant if more than one name referencesit. ###Code a = b = [] b.append(42) print(a,b) ###Output [42] [42] ###Markdown As usual, though, we have to be more cautious when initializing variables to an empty mutable object such as a list or dictionary: This time, because a and b reference the same object, appending to it in-place throughb will impact what we see through a as well. This is really just another example of theshared reference phenomenon we first met in Chapter 6. To avoid the issue, initializemutable objects in separate statements instead, so that each creates a distinct emptyobject by running a distinct literal expression: ###Code a = [] b = [] b.append(42) print(a,b) ###Output [] [42] ###Markdown Augmented Assignments ###Code a = a + b print(a) a += b print(a) ###Output [42] [42, 42] ###Markdown Table 11-2. Augmented assignment statementsX += Y X &= Y X -= Y X |= YX *= Y X ^= Y X /= Y X >>= YX %= Y X <<= Y X **= Y X //= Y ###Code # a &= b unsupported operand type(s) for &=: 'list' and 'list' a, b = 36, 42 a &= b bin(36),bin(42) bin(32) 36 & 42, 1 & 1, 1 & 2, 1 & 3 for i in range(40): print(1 & i, end = ',') for i in range(50): print(36 & i, end = ', ') print(bin(36), bin(i), bin(36 & i), sep=', ') a, b = 36, 42 a -= b print(a) a, b = 36, 42 a |= b print(a) bin(36), bin(42), bin(46) import pandas as pd target_url = "http://aima.cs.berkeley.edu/data/iris.csv" data = pd.read_csv(target_url, header=None) data.describe() a, b = 36, 42 a *= b print(a) print(36*42) a = 36; b = 42; a^=b; print(a) 36 ^ 42; print(bin(36), bin(42),bin(14)) import pandas as pd data = pd.read_csv('F:\house-votes-84.data',header= None) data.head(434) 36 ^ 42; print(bin(36), bin(42),bin(14)) L1 = [] for i in range(len(str(bin(36)))): L1.append(str(bin(36))[i]) L2 = [] for j in range(len(str(bin(42)))): L2.append(str(bin(42))[j]) print(L1); print(L2) T = [] print(type(T)) for k in range(len(L1)): T.append((L1[k],L2[k])) print(T) S1 = '' for m in range(2,len(L1)): if T[m][0] == T[m][1]: S1 +='0' else: S1 +='1' S2 = '' for m in range(2,len(L1)): S2 += str(eval(T[m][0]) & eval(T[m][1])) print(S1,S2); S1 = '0b'+ S1; S2 = '0b'+ S2; print(eval(S1)); print(eval(S2)) a = 36; b = 42; a/=b print(a) print(36/42); a == 36/42 a = 36; b =42; a>>=b; print(a); print(bin(36),bin(42)) print(int(36/(2**42)) == 0); print(36<<42 == 36*2**42) print(8>>3 == 8/(2**3)) print(8%3); print(42%36); a = 42%36; print(a) a = 42; b = 36; a**=b; print(a, a==42**36); a = 42; b = 36; a //=b; print(a,a==42//36) import requests from lxml import etree from bs4 import BeautifulSoup headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3970.5 Safari/537.36"} url='https://www.cnblogs.com/JYNNO1/p/10525649.html' #输入我们的url get = requests.get(url) # get(url) 得到我们的网页, text将源网页转化为字符串 soup = BeautifulSoup(get.content, 'html.parser') text = soup.find_all(text=True) set([t.parent.name for t in 
text]) output = '' blacklist = [ '[document]', 'a', 'body', 'div', 'h1', 'h2', 'head', 'html', 'li', 'p', 'pre', 'script', 'span', 'title', 'ul' ] for t in text: if t.parent.name not in blacklist: output += '{} '.format(t) print(output) import requests from bs4 import BeautifulSoup url = 'https://www.cnblogs.com/JYNNO1/p/10525649.html' res = requests.get(url) html_page = res.content soup = BeautifulSoup(html_page, 'html.parser') text = soup.find_all(text=True) output = '' blacklist = [ '[document]', 'noscript', 'header', 'html', 'meta', 'head', 'input', 'script', # there may be more elements you don't want, such as "style", etc. ] for t in text: if t.parent.name not in blacklist: output += '{} '.format(t) print(output,end='; ') import requests from bs4 import BeautifulSoup headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3970.5 Safari/537.36"} url = 'https://blog.csdn.net/Betty_betty_betty/article/details/80898798' res = requests.get(url) html_page = res.content soup = BeautifulSoup(html_page, 'html.parser') text = soup.find_all(text=True) output = '' blacklist = [ '[document]', 'noscript', 'header', 'html', 'meta', 'head', 'input', 'script', # there may be more elements you don't want, such as "style", etc. ] for t in text: if t.parent.name not in blacklist: output += '{} '.format(t) print(output) import numpy as np import matplotlib.pyplot as plt t = np.arange(0.0, 12*np.pi, 0.01) x = np.sin(t)*(np.e**np.cos(t) - 2*np.cos(4*t)-np.sin(t/12)**5) y = np.cos(t)*(np.e**np.cos(t) - 2*np.cos(4*t)-np.sin(t/12)**5) plt.figure(figsize=(8,6)) plt.axis('off') plt.plot(x,y,color='blue',linewidth = '2') plt.show() # plt.savefig("butter.jpg",dpi=400) from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator import matplotlib.pyplot as plt import numpy as np fig = plt.figure() ax = fig.gca(projection='3d') [x, t] = np.meshgrid(np.array(range(25)) / 24.0, np.arange(0, 575.5, 0.5) / 575 * 17 * np.pi - 2 * np.pi) p = (np.pi / 2) * np.exp(-t / (8 * np.pi)) u = 1 - (1 - np.mod(3.6 * t, 2 * np.pi) / np.pi) ** 4 / 2 y = 2 * (x ** 2 - x) ** 2 * np.sin(p) r = u * (x * np.sin(p) + y * np.cos(p)) h = u * (x * np.cos(p) - y * np.sin(p)) surf = ax.plot_surface(r * np.cos(t), r * np.sin(t), h, rstride=1, cstride=1, cmap=cm.gist_rainbow_r, linewidth=0, antialiased=True) plt.show() from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator import matplotlib.pyplot as plt import numpy as np # 省略了头文件,可以在之前的博客里看到 fig = plt.figure() ax = fig.gca(projection='3d') # 将相位向后移动了6*pi [x, t] = np.meshgrid(np.array(range(25)) / 24.0, np.arange(0, 575.5, 0.5) / 575 * 20 * np.pi + 4*np.pi) p = (np.pi / 2) * np.exp(-t / (8 * np.pi)) # 添加边缘扰动 change = np.sin(15*t)/150 # 将t的参数减少,使花瓣的角度变大 u = 1 - (1 - np.mod(3.3 * t, 2 * np.pi) / np.pi) ** 4 / 2 + change y = 2 * (x ** 2 - x) ** 2 * np.sin(p) r = u * (x * np.sin(p) + y * np.cos(p)) h = u * (x * np.cos(p) - y * np.sin(p)) c= cm.get_cmap('Reds') surf = ax.plot_surface(r * np.cos(t), r * np.sin(t), h, rstride=1, cstride=1, cmap= c, linewidth=0, antialiased=True) plt.show() from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator import matplotlib.pyplot as plt import numpy as np fig = plt.figure() ax = fig.gca(projection='3d') [x, t] = np.meshgrid(np.array(range(25)) / 24.0, np.arange(0, 575.5, 0.5) / 575 * 30 * np.pi - 4*np.pi) p = (np.pi / 2) * np.exp(-t / (8 * np.pi)) 
change = np.sin(20*t)/50 u = 1 - (1 - np.mod(3.3 * t, 2 * np.pi) / np.pi) ** 4 / 2 + change y = 2 * (x ** 2 - x) ** 2 * np.sin(p) r = u * (x * np.sin(p) + y * np.cos(p)) * 1.5 h = u * (x * np.cos(p) - y * np.sin(p)) c= cm.get_cmap('magma') surf = ax.plot_surface(r * np.cos(t), r * np.sin(t), h, rstride=1, cstride=1, cmap= c, linewidth=0, antialiased=True) plt.show() # 我采用requests库 import requests import time # 用来获取 时间戳 def gettime(): return int(round(time.time() * 1000)) if __name__ == '__main__': # 用来自定义头部的 headers = {} # 用来传递参数的 keyvalue = {} # 目标网址(问号前面的东西) url = 'http://data.stats.gov.cn/easyquery.htm' # 头部的填充 headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) ' \ 'AppleWebKit/605.1.15 (KHTML, like Gecko) ' \ 'Version/12.0 Safari/605.1.15' # 下面是参数的填充,参考图10 keyvalue['m'] = 'QueryData' keyvalue['dbcode'] = 'hgnd' keyvalue['rowcode'] = 'zb' keyvalue['colcode'] = 'sj' keyvalue['wds'] = '[]' keyvalue['dfwds'] = '[{"wdcode":"zb","valuecode":"A0301"}]' keyvalue['k1'] = str(gettime()) # 发出请求,使用get方法,这里使用我们自定义的头部和参数 # r = requests.get(url, headers=headers, params=keyvalue) # 建立一个Session s = requests.session() # 在Session基础上进行一次请求 r = s.get(url, params=keyvalue, headers=headers) # 打印返回过来的状态码 print(r.status_code) # 修改dfwds字段内容 keyvalue['dfwds'] = '[{"wdcode":"sj","valuecode":"2000"}]' # 再次进行请求 r = s.get(url, params=keyvalue, headers=headers) # 此时我们就能获取到我们搜索到的数据了 print(r.text) import pandas as pd data = pd.read_excel("F:\Course\Ad Design Project\Adanced design Project\Layout phase\VeryIMMaterials\Dist_between_2_ASVs_Reservior_No_shutdown\Dist_between_2_ASVs_2km_Reservior_No_Shutdown.xlsx",sheet_name='Rupture Data') data.head(554) data.describe() import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from mpl_toolkits.mplot3d import Axes3D import seaborn as sns color = sns.color_palette() from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor, export_graphviz from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import GridSearchCV, train_test_split, KFold from sklearn.preprocessing import StandardScaler from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble.partial_dependence import partial_dependence from scipy.stats.stats import pearsonr from IPython.display import Image import pydotplus sns.set_style("dark") def plot_corr(predictors): predictors = predictors[:] mcorr = data[predictors].corr() mask = np.zeros_like(mcorr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True cmap = sns.diverging_palette(220, 10, as_cmap=True) g = sns.heatmap(mcorr, mask=mask, cmap=cmap, square=True, annot=True, fmt='0.2f') g.set_xticklabels(predictors, rotation=90) g.set_yticklabels(reversed(predictors)) plt.show() plot_corr(["Discharge Rate (kg/s)","Cumulative Mass (kg)","Pexit 1 (Bara)","Pexit 2 (Bara)","Temp 1 (K)","Temp 2 (K)","Density 1 (kg/m3)","Velocity 1 (m/s)","Velocity 2 (m/s)"]) ###Output _____no_output_____ ###Markdown Augmented assignments have three advantages:*• There’s less for you to type. Need I say more?• The left side only has to be evaluated once. In X += Y, X may be a complicated objectexpression. In the augmented form, it only has to be evaluated once. However, inthe long form, X = X + Y, X appears twice and must be run twice. 
Because of this,augmented assignments usually run faster.• The optimal technique is automatically chosen. That is, for objects that supportin-place changes, the augmented forms automatically perform in-place change operations instead of slower copies. ###Code import time start =time.time() #中间写上代码块 L = [1,2] L = L + [3] # Concatenation print(L) end = time.time() print('Running time: %s Seconds'%(end-start)) import time start =time.time() #中间写上代码块 L = [1,2] L = L.append(3) # Concatenation print(L) end = time.time() print('Running time: %s Seconds'%(end-start)) ###Output None Running time: 0.0 Seconds ###Markdown C/C++ programmers take note: although Python now supports statements like X += Y, it still does not haveC’s auto-increment/decrement operators (e.g., X++, −−X). These don’t quite map to the Python object modelbecause Python has no notion of in-place changes to immutable objects like numbers. ###Code import timeit start=timeit.default_timer() #中间写代码块 L = [1,2] L = L.append(3) # Concatenation end=timeit.default_timer() print('Running time: %s Seconds'%(end-start)) import timeit start=timeit.default_timer() #中间写代码块 L = [1,2] L = L + [3] # Concatenation end=timeit.default_timer() print('Running time: %s Seconds'%(end-start)) ###Output Running time: 7.509999886678997e-05 Seconds ###Markdown And to add a set of items to the end, we can either concatenate again or call the list extend method: ###Code import timeit start = timeit.default_timer() L= [1,2,3,4] L = L + [5,6] print(L) end = timeit.default_timer() print("Running time: %s Seconds"%(end-start)) import timeit start = timeit.default_timer() L= [1,2,3,4] L += [5,6] print(L) end = timeit.default_timer() print("Running time: %s Seconds"%(end-start)) import timeit start = timeit.default_timer() L = [1,2,3,4] L.extend([5,6]) print(L) end = timeit.default_timer() print("Running time: %s Seconds"%(end-start)) ###Output [1, 2, 3, 4, 5, 6] Running time: 0.00042180000127700623 Seconds ###Markdown ? 
When we use augmented assignment to extend a list, we can forget these details—forexample, Python automatically calls the quicker extend method instead of using the slower concatenation operation implied by ###Code import pandas as pd df = pd.DataFrame([["""def all_unique(lst): return len(lst) == len(set(lst)) x = [1,1,2,2,3,2,3,4,5,6] y = [1,2,3,4,5] print(all_unique(x)) # False print(all_unique(y)) # True print(x,y)"""],["""from collections import Counter def anagram(first, second): return Counter(first) == Counter(second) print(anagram("abcd3", "3acdb")) # True"""],["""import sys variable = 30 print(sys.getsizeof(variable)) # 24"""],["""def byte_size(string): return(len(string.encode( 'utf-8' ))) print(byte_size( 'KDA' )) # 4 print(byte_size( 'Hello World' )) # 11 """],["""n = 2; s ="Programming"; print(s * n); # ProgrammingProgramming """],["""s = "programming is awesome" print(s.title()) # Programming Is Awesome"""],[""" from math import ceil def chunk(lst, size): return list( map(lambda x: lst[x * size:x * size + size], list(range(0, ceil(len(lst) / size))))) print(chunk([1,2,3,4,5],2)) # [[1,2],[3,4],5]"""],["""def compact(lst): return list(filter(bool, lst)) print(compact([0, 1, False, 2, 3, 5 , s , 34])) # [ 1, 2, 3, a , s , 34 ]"""],["""array = [[ 'a' , 'b' ], [ 'c' , 'd' ], [ 'e' , 'f' ]] transposed = zip(*array) print(transposed) # [( a , c , e ), ( b , d , f )]"""],["""a = 3 print( 2 < a < 8) # True print(1 == a < 2) # False"""],["""hobbies = ["basketball", "football", "swimming"] print("My hobbies are: " + ", ".join(hobbies)) # My hobbies are: basketball, football, swimming"""],["""import re def count_vowels(str): return len(re.findall('[aeiou]', str, re.IGNORECASE)) print(count_vowels( 'foobar' )) # 3 print(count_vowels( 'gym' )) # 0"""],["""def decapitalize(string): return string[:1].lower() + string[1:] print(decapitalize( 'FooBar' )) # fooBar print(decapitalize( 'FooBar' )) # fooBar"""],["""def spread(arg): ret = [] for i in arg: if isinstance(i, list): ret.extend(i) else: ret.append(i) return ret def deep_flatten(lst): result = [] result.extend( spread(list(map(lambda x: deep_flatten(x) if type(x) == list else x, lst)))) return result print(deep_flatten([1, [2], [[3], 4], 5])) # [1,2,3,4,5]"""],["""def difference(a, b): set_a = set(a) set_b = set(b) comparison = set_a.difference(set_b) return list(comparison) print(difference([1,2,3], [1,2,4])) # [3]"""],["""def difference_by(a, b, fn): b = set(map(fn, b)) return [item for item in a if fn(item) not in b] from math import floor print(difference_by([2.1, 1.2], [2.3, 3.4],floor)) # [1.2] print(difference_by([{ 'x' : 2 }, { 'x' : 1 }], [{ 'x' : 1 }], lambda v : v[ 'x' ])) # [ { x: 2 } ] """],["""def add(a, b): return a + b def subtract(a, b): return a - b a, b = 4, 5 print((subtract if a > b else add)(a, b)) # 9 """],["""def has_duplicates(lst): return len(lst) != len(set(lst)) x = [1,2,3,4,5,5] y = [1,2,3,4,5] print(has_duplicates(x)) # True print(has_duplicates(y)) # False"""],["""def merge_two_dicts(a, b): c = a.copy() # make a copy of a c.update(b) # modify keys and values of a with the ones from b return c a = { 'x' : 1, 'y' : 2} b = { 'y' : 3, 'z' : 4} print(merge_two_dicts(a, b)) # { y : 3, x : 1, z : 4} """],["""def to_dictionary(keys, values): return dict(zip(keys, values)) keys = ["a", "b", "c"] values = [2, 3, 4] print(to_dictionary(keys, values)) # { a : 2, c : 4, b : 3}"""],["""lst = ["a", "b", "c", "d"] for index, element in enumerate(lst): print("Value", element, "Index ", index, ) # ( Value , a , Index , 0) # 
( Value , b , Index , 1) #( Value , c , Index , 2) # ( Value , d , Index , 3)"""],["""import time start_time = time.time() a = 1 b = 2 c = a + b print(c) #3 end_time = time.time() total_time = end_time - start_time print("Time: ", total_time) # ( Time: , 1.1205673217773438e-05)"""],["""try: 2*3 except TypeError: print("An exception was raised") else: print("Thank God, no exceptions were raised.") #Thank God, no exceptions were raised."""],["""def most_frequent(list): return max(set(list), key = list.count) list = [1,2,1,2,3,2,1,4,2] print(most_frequent(list)) del list"""],["""# def palindrome(string): # from re import sub # s = sub('\w',[W_], string.lower()) # return s == s[::-1] #print(palindrome( 'taco cat' )) # True"""],["""import operator action = { "+": operator.add, "-": operator.sub, "/": operator.truediv, "*": operator.mul, "**": pow } print(action["-"](50, 25)) # 25"""],["""from copy import deepcopy from random import randint def shuffle(lst): temp_lst = deepcopy(lst) m = len(temp_lst) while (m): m -= 1 i = randint(0, m) temp_lst[m], temp_lst[i] = temp_lst[i], temp_lst[m] return temp_lst foo = [1,2,3] print(shuffle(foo)) # [2,3,1] , foo = [1,2,3]"""],["""def spread(arg): ret = [] for i in arg: if isinstance(i, list): ret.extend(i) else: ret.append(i) return ret print(spread([1,2,3,[4,5,6],[7],8,9])) # [1,2,3,4,5,6,7,8,9]"""],["""def swap(a, b): return b, a a, b = -1, 14 print(swap(a, b)) # (14, -1) print(spread([1,2,3,[4,5,6],[7],8,9])) # [1,2,3,4,5,6,7,8,9]"""],["""d = { a : 1, b : 2} print(d.get( c , 3)) # 3"""]],index=['重复元素判定','字符元素组成判定','内存占用','字节占用','打印 N 次字符串','大写第一个字母','分块','压缩','解包','链式对比','逗号连接','元音统计', '首字母小写','展开列表', '列表的差','通过函数取差','链式函数调用','检查重复项','合并两个字典','将两个列表转化为字典','使用枚举','执行时间', 'Try else','元素频率','回文序列','不使用 if-else 的计算子','Shuffle','展开列表','交换值','字典默认值'],columns=['Python codes']) df.head(31) for i in range(30): exec(str((df.iloc[i].values)[0])) help(re.sub) def difference_by(a, b, fn): b = set(map(fn, b)) return [item for item in a if fn(item) not in b] from math import floor difference_by([2.1, 1.2], [2.3, 3.4],floor) # [1.2] difference_by([{ 'x' : 2 }, { 'x' : 1 }], [{ 'x' : 1 }], lambda v : v[ 'x' ]) # [ { x: 2 } ] lines = df.iloc[0].values print(lines) print(str(lines[0])) x,y exec_code = compile(str(lines),'<string>', 'exec') print(exec_code) print(df.iloc[1].values) exec('from collections import Counter\n\ndef anagram(first, second):\n return Counter(first) == Counter(second)\n\n\nanagram("abcd3", "3acdb") # True') anagram("abcd3","333333333333333333acdb") exec(str(lines[0])) str((df.iloc[1].values)[0]) exec(str((df.iloc[1].values)[0])) print(dir(str)) print(dir(str.__str__)) print(dir((str.__class__).__call__)) import inspect help((str.__class__).__call__) ###Output Help on wrapper_descriptor: __call__(self, /, *args, **kwargs) Call self as a function. ###Markdown Naming conventions Besides these rules, there is also a set of naming conventions—rules that are not requiredbut are followed in normal practice. For instance, because names with two leading andtrailing underscores (e.g., __name__) generally have special meaning to the Python interpreter, you should avoid this pattern for your own names. 
Here is a list of the conventions Python follows:• Names that begin with a single underscore (_X) are not imported by a from moduleimport * statement (described in Chapter 22).• Names that have two leading and trailing underscores (__X__) are system-definednames that have special meaning to the interpreter.• Names that begin with two underscores and do not end with two more (__X) arelocalized (“mangled”) to enclosing classes (see the discussion of pseudoprivateattributes in Chapter 30).• The name that is just a single underscore (_) retains the result of the last expressionwhen working interactively. The Python 3.0 print Function ###Code x = 'spam' y = 99 z = ['eggs'] print(x,y,z) L = print(x,y,z,sep='',end='') print(L) print(x,y,z,sep='') print(x,y,z,sep='');print(x,y,z) # Two prints, same output line print(x,y,z,end='');print(x,y,z) import pyprind import time import pandas as pd bar = pyprind.ProgBar(10, monitor=True) for i in range(10): time.sleep(0.01) # your computation here data = pd.read_excel("F:\Course\Ad Design Project\Adanced design Project\Layout phase\VeryIMMaterials\Dist_between_2_ASVs_Reservior_No_shutdown\Dist_between_2_ASVs_2km_Reservior_No_Shutdown.xlsx", sheet_name='Rupture Data') bar.update() print(bar) print('x','y','z',end='...\n') print('x','y','z',sep='...',end='!\n') # Multiple keywords print('x','y','z',end='\n', sep='...') ###Output x...y...z! x...y...z ###Markdown Here is how the file keyword argument is used—it directs the printed text to an openoutput file or other compatible object for the duration of the single print (this is reallya form of stream redirection, a topic we will revisit later in this section): ###Code import requests from bs4 import BeautifulSoup url = 'https://www.cnblogs.com/JYNNO1/p/10525649.html' res = requests.get(url) html_page = res.content soup = BeautifulSoup(html_page, 'html.parser') text = soup.find_all(text=True) output = '' blacklist = [ '[document]', 'noscript', 'header', 'html', 'meta', 'head', 'input', 'script', # there may be more elements you don't want, such as "style", etc. ] for t in text: if t.parent.name not in blacklist: output += '{} '.format(t) print(output, file=open('data.txt','w',encoding='utf-8')) print('x','y','z',sep='...',file=open('data1.txt','w')) print('x','y','z',sep='...'); print(open('data1.txt').read()) text = '%s: %-.4f, %05d' % ('Result',3.14159,42) print(text) print('%s: %-.4f,%05d' % ('Result',3.14159,42)) import sys sys.stdout.write('hello world\n') ###Output hello world ###Markdown This code explicitly calls the write method of sys.stdout—an attribute preset whenPython starts up to an open file object connected to the output stream. The printoperation hides most of those details, providing a simple tool for simple printing tasks. ###Code S = """Here, we reset sys.stdout to a manually opened file named log.txt, located in the script’s working directory and opened in append mode (so we add to its current content). After the reset, every print operation anywhere in the program will write its text to the end of the file log.txt instead of to the original output stream. The print operations are happy to keep calling sys.stdout’s write method, no matter what sys.stdout happens to refer to. 
Because there is just one sys module in your process, assigning sys.stdout this way will redirect every print anywhere in your program.""" L = S.split() print(L, end = ' ') ###Output ['Here,', 'we', 'reset', 'sys.stdout', 'to', 'a', 'manually', 'opened', 'file', 'named', 'log.txt,', 'located', 'in', 'the', 'script’s', 'working', 'directory', 'and', 'opened', 'in', 'append', 'mode', '(so', 'we', 'add', 'to', 'its', 'current', 'content).', 'After', 'the', 'reset,', 'every', 'print', 'operation', 'anywhere', 'in', 'the', 'program', 'will', 'write', 'its', 'text', 'to', 'the', 'end', 'of', 'the', 'file', 'log.txt', 'instead', 'of', 'to', 'the', 'original', 'output', 'stream.', 'The', 'print', 'operations', 'are', 'happy', 'to', 'keep', 'calling', 'sys.stdout’s', 'write', 'method,', 'no', 'matter', 'what', 'sys.stdout', 'happens', 'to', 'refer', 'to.', 'Because', 'there', 'is', 'just', 'one', 'sys', 'module', 'in', 'your', 'process,', 'assigning', 'sys.stdout', 'this', 'way', 'will', 'redirect', 'every', 'print', 'anywhere', 'in', 'your', 'program.'] ###Markdown Manual stream redirection ###Code print('x','y') # Or, in 2.6: print X, Y import sys sys.stdout.write(str('x')+' '+ str('y') + '\n') import sys temp = sys.stdout sys.stdout = open('log.txt','a') S1 = """In fact, as this chapter’s upcoming sidebar about print and stdout will explain, you can even reset sys.stdout to an object that isn’t a file at all, as long as it has the expected interface: a method named write to receive the printed text string argument. When that object is a class, printed text can be routed and processed arbitrarily per a write method you code yourself""" print(S1.split()) print(open('log.txt').read()) df.iloc[1].values import sys temp = sys.stdin sys.stdin = open('myfile.txt','a') print('hello world') myfile = open('log.txt','w+') myfile.write("""Let’s work through a simple example that demonstrates file-processing basics. The following code begins by opening a new text file for output, writing two lines (strings terminated with a newline marker, \n), and closing the file. Later, the example opens the same file again in input mode and reads the lines back one at a time with readline. Notice that the third readline call returns an empty string; this is how Python file methods tell you that you’ve reached the end of the file (empty lines in the file come back as strings containing just a newline character, not as empty strings). """) myfile.close() open('log.txt').read() myfile = open('log.txt','a+') myfile.write("""When coded this way, the temporary file object created by open will automatically read and return one line on each loop iteration. This form is usually easiest to code, good on memory use, and may be faster than some other options (depending on many variables, of course). Since we haven’t reached statements or iterators yet, though, you’ll have to wait until Chapter 14 for a more complete explanation of this code.""") myfile.flush() myfile.readlines() open('log.txt').read() import pubchempy as pcp c = pcp.Compound.from_cid(5090) print(c.molecular_formula) print(c.molecular_weight) print(c.isomeric_smiles) import sys def test(x): if x==0: print(r'Error--> x: can\'t be zero', file=sys.stderr) else: print(x) test(0), test(1) test(0); test(1) ###Output 1 ###Markdown Now that you know all about print redirections, the equivalence between printing andfile write methods should be fairly obvious. 
The following interaction prints both waysin 3.0, then redirects the output to an external file to verify that the same text is printed: ###Code X = 1; Y = 2 print(X, Y) import sys sys.stdout.write(str(X)+' ' + str(Y) + '\n') print f = open('alice.txt', encoding = 'utf-8') while f.readline()!='': print(f.readline()) import os f.close() os.remove('alice.txt') ###Output _____no_output_____ ###Markdown Version-Neutral Printing Finally, if you cannot restrict your work to Python 3.0 but still want your prints to becompatible with 3.0, you have some options. For one, you can code 2.6 print statements and let 3.0’s 2to3 conversion script translate them to 3.0 function calls automatically. See the Python 3.0 documentation for more details about this script; itattempts to translate 2.X code to run under 3.0. ###Code from __future__ import print_function ###Output _____no_output_____ ###Markdown C:\misc> c:\python30\python< print('spam') 3.0 print function call syntaxspam< print('spam', 'ham', 'eggs') These are mutiple argmentsspam ham eggsThe first of these works the same in 2.6, but the second generates a tuple in the output:C:\misc> c:\python26\python< print('spam') 2.6 print statement, enclosing parensspam< print('spam', 'ham', 'eggs') This is really a tuple object!('spam', 'ham', 'eggs') ###Code print('%s %s %s ' %('spam','ham','eggs')) print('{0} {1} {2}'.format('spam','ham','eggs')) import numpy as np import matplotlib.pyplot as plt t = np.arange(0.0, 12*np.pi, 0.01) x = np.sin(t)*(np.e**np.cos(t) - 2*np.cos(4*t)-np.sin(t/12)**5) y = np.cos(t)*(np.e**np.cos(t) - 2*np.cos(4*t)-np.sin(t/12)**5) plt.figure(figsize=(8,6)) plt.axis('off') plt.plot(x,y,color='blue',linewidth = '2') plt.show() # plt.savefig("butter.jpg",dpi=400) ###Output _____no_output_____
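###Markdown A side note on the manual sys.stdout swapping shown above: on Python 3.4+ the standard library's contextlib.redirect_stdout does the same job and restores the original stream automatically when the with block exits. A minimal sketch (log.txt is reused from the earlier examples):
###Code
# Minimal sketch: temporary redirection without reassigning sys.stdout by hand.
from contextlib import redirect_stdout

with open('log.txt', 'a') as logfile:
    with redirect_stdout(logfile):
        print('this line is appended to log.txt')
print('this line goes to the normal output stream again')
###Output _____no_output_____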
Python Additional Challenges/String Compression.ipynb
###Markdown String Compression*Write a function compress(word) that, given a word word, returns the string with its letters and how many times they occur consecutively.*For example:**Input**>compress("abbs")**Output**>"a1b2s1"Explanation: 1 occurrence of a, followed by 2 occurrences of b, followed by 1 occurrence of s________________________________________________________________________________________**Input**>compress("xxcccdex")**Output**>"x2c3d1e1x1"Explanation: 2 occurrences of x, followed by 3 occurrences of c, followed by 1 occurrence of d, followed by 1 occurrence of e, followed by 1 occurrence of x ###Code # IMPORTANT : Change the cell type to 'code' to execute the program # Function definition starts here def compress(word): # use .lower() to change all characters in 'word' to lowercase word_list = list(word.lower()) char_count = [] chars = [] chars.append(word_list[0]) count = 1 result_str = '' for i in range(1,len(word_list)): if(word_list[i] == word_list[i-1]): count +=1 else: chars.append(word_list[i]) char_count.append(count) count = 1 char_count.append(count) for i in range(len(chars)): result_str = result_str + (chars[i]+str(char_count[i])) return(result_str) word = 'xxcccdex' result = compress(word) print(result, end='') ###Output _____no_output_____
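###Markdown Not part of the original exercise, but as a cross-check the same run-length encoding can be written with itertools.groupby, which groups consecutive equal characters. The sketch below assumes the same lowercasing convention as compress above; compress_groupby is a new, illustrative name:
###Code
# Cross-check for compress(), assuming the same lowercase convention.
from itertools import groupby

def compress_groupby(word):
    # groupby yields (character, iterator over its consecutive run)
    return ''.join(char + str(len(list(run))) for char, run in groupby(word.lower()))

print(compress_groupby('xxcccdex'))  # expected to match compress: x2c3d1e1x1
###Output _____no_output_____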
3-object-tracking-and-localization/activities/4-kalman-filters/3. Predict Function.ipynb
###Markdown Predict FunctionAfter performing a parameter update, which is done after some new measurement is collected, the next step is to incorporate motion into our Gaussian calculations. Recall that, as we estimate the location of a robot or self-driving car:* the measurement update *increases* our estimation certainty* the motion update/prediction *decreases* our certaintyThat is because every motion has some chance of under or overshooting its goal, and since motion is not exact, we end up losing some certainty about our exact location after each motion.Let's take the formulas from the example below and use them to write a program that takes in a mean and a motion and squared variances for both of those quantities, and returns a *new*, updated mean and variance for a new gaussian. This step is called the **motion update** or the predict step. Below is our usual Gaussian equation and imports. ###Code # import math functions from math import * import matplotlib.pyplot as plt import numpy as np # gaussian function def f(mu, sigma2, x): ''' f takes in a mean and squared variance, and an input x and returns the gaussian value.''' coefficient = 1.0 / sqrt(2.0 * pi *sigma2) exponential = exp(-0.5 * (x-mu) ** 2 / sigma2) return coefficient * exponential ###Output _____no_output_____ ###Markdown For convenience, you've also been given the complete `update` code that performs a parameter update when an initial belief and new measurement information are merged. ###Code # the update function def update(mean1, var1, mean2, var2): ''' This function takes in two means and two squared variance terms, and returns updated gaussian parameters.''' # Calculate the new parameters new_mean = (var2*mean1 + var1*mean2)/(var2+var1) new_var = 1/(1/var2 + 1/var1) return [new_mean, new_var] ###Output _____no_output_____ ###Markdown QUIZ: Write a `predict` function that returns new values for the mean and squared variance of a Gaussian after a motion.This function should take in parameters for an initial belief and motion and perform the measurement update as seen in the image at the top of this notebook. ###Code # the motion update/predict function def predict(mean1, var1, mean2, var2): ''' This function takes in two means and two squared variance terms, and returns updated gaussian parameters, after motion.''' ## TODO: Calculate the new parameters new_mean = mean1 + mean2 new_var = var1 + var2 return [new_mean, new_var] # test your implementation new_params = predict(10, 4, 12, 4) print(new_params) ###Output [22, 8] ###Markdown Plot a GaussianPlot a Gaussian by looping through a range of x values and creating a resulting list of Gaussian values, `g`, as shown below. You're encouraged to see what happens if you change the values of `mu` and `sigma2`. ###Code # display a gaussian over a range of x values # define the parameters mu = new_params[0] sigma2 = new_params[1] # define a range of x values x_axis = np.arange(0, 40, 0.1) # create a corresponding list of gaussian values g = [] for x in x_axis: g.append(f(mu, sigma2, x)) # plot the result plt.plot(x_axis, g) ###Output _____no_output_____
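###Markdown To see how the two steps interact, the update and predict functions defined above can be alternated over a short sequence. The measurement, motion, and uncertainty values below are purely illustrative and are not taken from this notebook:
###Code
# Illustrative only: alternate a measurement update with a motion prediction.
# Only update() and predict() come from the cells above; the numbers are hypothetical.
measurements = [5., 6., 7., 9., 10.]   # hypothetical measurements
motions = [1., 1., 2., 1., 1.]         # hypothetical motions
measurement_sig = 4.                   # assumed measurement variance
motion_sig = 2.                        # assumed motion variance
mu, sig = 0., 10000.                   # start from a very uncertain belief

for measurement, motion in zip(measurements, motions):
    mu, sig = update(mu, sig, measurement, measurement_sig)
    print('after update:  mean = {:.4f}, variance = {:.4f}'.format(mu, sig))
    mu, sig = predict(mu, sig, motion, motion_sig)
    print('after predict: mean = {:.4f}, variance = {:.4f}'.format(mu, sig))
###Output _____no_output_____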
Analysis/MSD_Analyzer/notebook.ipynb
###Markdown How to analyze particle motion with MSD (Mean Square Displacement)Note that this notebook is largely inspired from the excellent tutorials of Jean-Yves Tinevez available at https://tinevez.github.io/msdanalyzer/.The goal of this notebook is mainly to help others (the author included) to analyze particle motion through MSD. I also would like to create a Python module that help dealing with all that kind of stuff.TODO: I am still not sure the way I compute the MSD mean and also SEM and STD... I need to double check this.TODO: I also need to find a way to improve MSD calculation : https://stackoverflow.com/questions/32988269/speedup-msd-calculation-in-python ###Code # Some classic Python modules import %matplotlib inline import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (15, 10) import pandas as pd import numpy as np from scipy import optimize ###Output _____no_output_____ ###Markdown Brownian motion Simulate particle motion ###Code # Parameters SPACE_UNITS = '\mu m' TIME_UNITS = 's' N_PARTICLES = 10 N_TIME_STEPS = 100 COORDS = ['x', 'y'] N_DIM = len(COORDS) cm = plt.get_cmap('gist_rainbow') COLORS = [cm(i/N_PARTICLES) for i in range(N_PARTICLES)] # Typical values taken from studies of proteins diffusing in membranes: # Diffusion coefficient D = 1e-3 # µm^2/s # Time step between acquisition; fast acquisition! dt = 0.05 # s # Area size, just used to disperse particles in 2D. Has no impact on # analysis SIZE = 2 # µm ###Output _____no_output_____ ###Markdown The Einstein equation tells us that displacements follow a Gaussian PDF with standard deviation given by : ###Code k = np.sqrt(2 * D * dt) print(k) # Generate trajectories trajs = [] for i in range(N_PARTICLES): # Generate time vector time = np.arange(0, N_TIME_STEPS) * dt # Get random displacement dp = k * np.random.randn(N_TIME_STEPS, N_DIM) # Setup custom initial position initial_position = SIZE * np.random.rand(1, N_DIM) dp[0] = initial_position # Get position p = np.cumsum(dp, axis=0) # Convert to Dataframe p = pd.DataFrame({c: p[:, i] for i, c in enumerate(COORDS)}) p['t'] = time trajs.append(p) # Plot trajectories fig, ax = plt.subplots() for color, traj in zip(COLORS, trajs): traj.plot(x='x', y='y', color=color, ax=ax, legend=False) ax.set_xlabel(COORDS[0]) ax.set_ylabel(COORDS[1]) ###Output _____no_output_____ ###Markdown MSD analysis ###Code def compute_msd(trajectory, dt, coords=['x', 'y']): tau = trajectory['t'].copy() shifts = np.floor(tau / dt).astype(np.int) msds = pd.DataFrame() msds = np.zeros(shifts.size) msds_std = np.zeros(shifts.size) msds_sem = np.zeros(shifts.size) weights = np.zeros(shifts.size) for i, shift in enumerate(shifts): diffs = trajectory[coords] - trajectory[coords].shift(-shift) sqdist = np.square(diffs).sum(axis=1) msds[i] = sqdist.mean() msds_std[i] = sqdist.std() msds_sem[i] = sqdist.sem() weights[i] = len(sqdist.dropna()) msds = pd.DataFrame({'msds': msds, 'tau': tau, 'msds_std': msds_std, 'msds_sem': msds_sem, 'weights': weights}) return msds def compute_msd_mean(trajs, dt, n_steps, coords=['x', 'y']): msd_mean = pd.DataFrame() msd_mean['tau'] = np.arange(0, n_steps) * dt msd_mean['msds'] = np.zeros(n_steps) msd_mean['msds_std'] = np.zeros(n_steps) msd_mean['msds_sem'] = np.zeros(n_steps) msd_mean['weights'] = np.zeros(n_steps) all_msd = [] for i, traj in zip(range(len(trajs)), trajs): msds = compute_msd(traj, dt=dt, coords=coords) all_msd.append(msds) msd_mean['msds'] += msds['msds'] * msds['weights'] msd_mean['msds_std'] += msds['msds_std'] * msds['weights'] 
msd_mean['msds_sem'] += msds['msds_sem'] * msds['weights'] msd_mean['weights'] += msds['weights'] msd_mean['msds'] /= msd_mean['weights'] msd_mean['msds_std'] /= msd_mean['weights'] msd_mean['msds_sem'] /= msd_mean['weights'] msd_mean.dropna(inplace=True) return msd_mean, all_msd # Compute MSD msd_mean, all_msd = compute_msd_mean(trajs, dt, N_TIME_STEPS, coords=COORDS) # Fit model def model(tau, D): return 2*D*N_DIM*tau clip_factor = 0.25# Compute MSD msd_mean, all_msd = compute_msd_mean(trajs, dt, N_TIME_STEPS, coords=COORDS) t_stamp = np.round(len(msd_mean) * clip_factor, 0) (D,), pcov = optimize.curve_fit(model, msd_mean.loc[:t_stamp, 'tau'], msd_mean.loc[:t_stamp, 'msds']) print(D) # Plot all MSD fig, ax = plt.subplots() for color, msd in zip(COLORS, all_msd): msd.plot(x='tau', y='msds', color=color, ax=ax, legend=False) ax.set_xlabel("Delay (${}$)".format(TIME_UNITS)) ax.set_ylabel("MSD (${}^2$)".format(SPACE_UNITS)) # Plot MSD mean fig, ax = plt.subplots() msd_mean.plot(x='tau', y='msds', color=color, ax=ax, legend=False) ax.fill_between(msd_mean['tau'], msd_mean['msds'] - msd_mean['msds_sem'], msd_mean['msds'] + msd_mean['msds_sem'], alpha=0.2) ax.plot(msd_mean['tau'], model(msd_mean['tau'], D), color='red') ax.set_xlabel("Delay (${}$)".format(TIME_UNITS)) ax.set_ylabel("MSD (${}^2$)".format(SPACE_UNITS)) ###Output _____no_output_____ ###Markdown Directed motion Simulate particle motion ###Code # Parameters SPACE_UNITS = '\mu m' TIME_UNITS = 's' N_PARTICLES = 10 N_TIME_STEPS = 100 COORDS = ['x', 'y'] N_DIM = len(COORDS) cm = plt.get_cmap('gist_rainbow') COLORS = [cm(i/N_PARTICLES) for i in range(N_PARTICLES)] # Typical values taken from studies of proteins diffusing in membranes: # Diffusion coefficient D = 1e-3 # µm^2/s # Time step between acquisition; fast acquisition! dt = 0.05 # s # Mean velocity vm = 0.05 # µm/s # Area size, just used to disperse particles in 2D. 
Has no impact on # analysis SIZE = 2 # µm # Generate trajectories trajs = [] for i in range(N_PARTICLES): # Generate time vector time = np.arange(0, N_TIME_STEPS) * dt # Velocity orientation theta = 2 * np.pi * np.random.rand() # Mean velocity v = vm * (1 + 1/4 * np.random.randn()) # Get random displacement dp = k * np.random.randn(N_TIME_STEPS, N_DIM) dp_brownian = k * np.random.randn(N_TIME_STEPS, N_DIM) dp_directed = v * dt * (np.cos(theta) * np.ones((N_TIME_STEPS, 1)) + np.sin(theta) * np.ones((N_TIME_STEPS, 1))) dp = dp_brownian + dp_directed # Setup custom initial position initial_position = SIZE * np.random.rand(1, N_DIM) dp[0] = initial_position # Get position p = np.cumsum(dp, axis=0) # Convert to Dataframe p = pd.DataFrame({c: p[:, i] for i, c in enumerate(COORDS)}) p['t'] = time trajs.append(p) # Plot trajectories fig, ax = plt.subplots() for color, traj in zip(COLORS, trajs): traj.plot(x='x', y='y', color=color, ax=ax, legend=False) ax.set_xlabel(COORDS[0]) ax.set_ylabel(COORDS[1]) ###Output _____no_output_____ ###Markdown MSD analysis ###Code # Compute MSD msd_mean, all_msd = compute_msd_mean(trajs, dt, N_TIME_STEPS, coords=COORDS) # Fit model def model(tau, D, v): return 2*D*N_DIM*tau + v*tau**2 clip_factor = 1 t_stamp = np.round(len(msd_mean) * clip_factor, 0) (D, v), pcov = optimize.curve_fit(model, msd_mean.loc[:t_stamp, 'tau'], msd_mean.loc[:t_stamp, 'msds']) print(D) print(v) # Plot all MSD fig, ax = plt.subplots() for color, msd in zip(COLORS, all_msd): msd.plot(x='tau', y='msds', color=color, ax=ax, legend=False) ax.set_xlabel("Delay (${}$)".format(TIME_UNITS)) ax.set_ylabel("MSD (${}^2$)".format(SPACE_UNITS)) # Plot MSD mean fig, ax = plt.subplots() msd_mean.plot(x='tau', y='msds', color=color, ax=ax, legend=False) ax.fill_between(msd_mean['tau'], msd_mean['msds'] - msd_mean['msds_sem'], msd_mean['msds'] + msd_mean['msds_sem'], alpha=0.2) ax.plot(msd_mean['tau'], model(msd_mean['tau'], D, v), color='red') ax.set_xlabel("Delay (${}$)".format(TIME_UNITS)) ax.set_ylabel("MSD (${}^2$)em".format(SPACE_UNITS)) ###Output _____no_output_____ ###Markdown Confined motion (more work is needed here) Simulate particle motion ###Code # Parameters SPACE_UNITS = '\mu m' TIME_UNITS = 's' N_PARTICLES = 10 N_TIME_STEPS = 200 COORDS = ['x', 'y'] N_DIM = len(COORDS) cm = plt.get_cmap('gist_rainbow') COLORS = [cm(i/N_PARTICLES) for i in range(N_PARTICLES)] # Typical values taken from studies of proteins diffusing in membranes: # Diffusion coefficient D = 1e-3 # µm^2/s # Time step between acquisition; fast acquisition! dt = 0.05 # s # Boltzman constant kt = 4.2821e-21 # kBoltzman x T @ 37ºC # Area size, just used to disperse particles in 2D. 
Has no impact on # analysis SIZE = 5 # µm k = np.sqrt(2 * D * dt) # Confined motion parameters # Particle in a potential: settings the 'stiffness' of the energy potential # Typical diameter of the trap (still in micron) ltrap = 0.05 # µm ktrap = kt / ltrap**2 # = thermal energy / trap size ^ 2 # Generate trajectories def Fx(x, initial_position): return ktrap * (x - initial_position) trajs = [] for i in range(N_PARTICLES): # Generate time vector time = np.arange(0, N_TIME_STEPS) * dt # Energy potential: #V = @(x) 0.5 * ktrap * sum (x .^ 2) # Unused, just to show p = np.zeros((N_TIME_STEPS, N_DIM)) # Setup custom initial position initial_position = SIZE * np.random.rand(1, N_DIM) p[0] = initial_position for j in range(1, N_TIME_STEPS): dxtrap = D / kt * Fx(p[j-1], initial_position) * dt # ad hoc displacement dxbrownian = k * np.random.randn(1, N_DIM); p[j] = p[j-1] + dxtrap + dxbrownian # Convert to Dataframe p = pd.DataFrame({c: p[:, i] for i, c in enumerate(COORDS)}) p['t'] = time trajs.append(p) # Plot trajectories fig, ax = plt.subplots() for color, traj in zip(COLORS, trajs): traj.plot(x='x', y='y', color=color, ax=ax, legend=False) ax.set_xlabel(COORDS[0]) ax.set_ylabel(COORDS[1]) ###Output _____no_output_____ ###Markdown MSD analysis ###Code # Compute MSD msd_mean, all_msd = compute_msd_mean(trajs, dt, N_TIME_STEPS, coords=COORDS) # Plot all MSD fig, ax = plt.subplots() for color, msd in zip(COLORS, all_msd): msd.plot(x='tau', y='msds', color=color, ax=ax, legend=False) ax.set_xlabel("Delay (${}$)".format(TIME_UNITS)) ax.set_ylabel("MSD (${}^2$)".format(SPACE_UNITS)) ###Output _____no_output_____
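###Markdown The introduction mentions a TODO about speeding up the MSD calculation (see the Stack Overflow link there). One common approach is the FFT/autocorrelation decomposition of the MSD. The sketch below has not been benchmarked against the pandas implementation above and assumes the trajectory is passed as a plain (N, n_dim) NumPy array sampled at a constant dt; msd_fft and _autocorr_fft are new, illustrative names:
###Code
# Possible speed-up for the MSD computation (sketch, not benchmarked here).
# Assumes r is an (N, n_dim) NumPy array of positions at a constant time step.
def _autocorr_fft(x):
    N = len(x)
    F = np.fft.fft(x, n=2 * N)          # zero-padded FFT
    psd = F * F.conjugate()
    res = np.fft.ifft(psd)[:N].real
    n = N - np.arange(0, N)             # number of overlapping points per lag
    return res / n

def msd_fft(r):
    N = len(r)
    D = np.square(r).sum(axis=1)
    D = np.append(D, 0)                 # so that D[-1] and D[N] read as 0
    S2 = sum(_autocorr_fft(r[:, i]) for i in range(r.shape[1]))
    Q = 2 * D.sum()
    S1 = np.zeros(N)
    for m in range(N):
        Q = Q - D[m - 1] - D[N - m]
        S1[m] = Q / (N - m)
    return S1 - 2 * S2

# Example usage on one simulated trajectory (position columns only):
# msd_fast = msd_fft(trajs[0][COORDS].values)
###Output _____no_output_____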
Query Optimization/algorithms/ikkbz_hist/dpccp.ipynb
###Markdown ! Add Relations in BFS order! DPccpWe first define a query graph for our examples: ###Code qg = QueryGraph() # add relations in BFS order! r1= qg.add_relation('R1', 10) r2 = qg.add_relation('R2', 20) r3 = qg.add_relation('R3', 50) r4 = qg.add_relation('R4', 20) r5 = qg.add_relation('R5', 10) qg.add_join(r1, r2, 0.1) qg.add_join(r1, r3, 0.1) qg.add_join(r2, r3, 0.1) qg.add_join(r2, r4, 0.1) qg.add_join(r3, r4, 0.1) qg.add_join(r3, r5, 0.1) qg.add_join(r4, r5, 0.1) Source(querygraph_to_graphviz(qg, cardinalities=False, selectivities=False)) ###Output _____no_output_____ ###Markdown Example 1 - Enumeration of all CSG-CMP pairsThis example comes from the 8th tutorial. The solution can be found in moodle. ###Code dpccp = Dpccp(qg) table = [] # we're not using dpccp.get_csg_cmp_pairs here so we can also illustrate empty cmp sets. for csg in dpccp.enumerate_csg(): cmps = list(dpccp.enumerate_cmp(csg)) if len(cmps) == 0: table.append({ 'csg': str(list(map(str, csg))), 'cmp': '[]' }) else: for cmp in cmps: table.append({ 'csg': str(list(map(str, csg))), 'cmp': str(list(map(str,cmp))) }) pd.DataFrame.from_dict(table) ###Output _____no_output_____ ###Markdown Example 2 - Get the optimal join tree based on c_out ###Code t = dpccp.run() Source(join_tree_to_graphviz(t)) ###Output _____no_output_____ ###Markdown All dpccp combinations ###Code table = [] for key, tree_lst in dpccp.dpverbose.items(): for tree in tree_lst: table.append({ 'l': len(tree.relations), 'relations' : key, 'tree': str(tree), 'cost': tree.cost, 'cheapest': 'Yes' if tree == dpccp.dptable[key] else 'No' }) pd.DataFrame.from_dict(table).sort_values(by=['l', 'relations'], ascending=True) ###Output _____no_output_____ ###Markdown ex13 ###Code qg = QueryGraph() # add relations in BFS order! r1= qg.add_relation('R1', 10) r2 = qg.add_relation('R2', 10) r3 = qg.add_relation('R3', 10) r4 = qg.add_relation('R4', 10) r5 = qg.add_relation('R5', 10) qg.add_join(r1, r2, 0.1) qg.add_join(r1, r3, 0.1) qg.add_join(r1, r4, 0.1) qg.add_join(r3, r5, 0.1) Source(querygraph_to_graphviz(qg, cardinalities=False, selectivities=False)) dpccp = Dpccp(qg) table = [] # we're not using dpccp.get_csg_cmp_pairs here so we can also illustrate empty cmp sets. for csg in dpccp.enumerate_csg(): cmps = list(dpccp.enumerate_cmp(csg)) if len(cmps) == 0: table.append({ 'csg': str(list(map(str, csg))), 'cmp': '[]' }) else: for cmp in cmps: table.append({ 'csg': str(list(map(str, csg))), 'cmp': str(list(map(str,cmp))) }) pd.DataFrame.from_dict(table) ###Output _____no_output_____ ###Markdown DPccpWe first define a query graph for our examples: ###Code qg = QueryGraph() r0 = qg.add_relation('R0', 10) r1 = qg.add_relation('R1', 10) r2 = qg.add_relation('R2', 20) r3 = qg.add_relation('R3', 50) r4 = qg.add_relation('R4', 20) r5 = qg.add_relation('R5', 10) qg.add_join(r0, r1, 0.1) qg.add_join(r0, r2, 0.1) qg.add_join(r1, r3, 0.1) qg.add_join(r1, r4, 0.1) qg.add_join(r1, r5, 0.1) qg.add_join(r2, r5, 0.1) qg.add_join(r3, r4, 0.1) Source(querygraph_to_graphviz(qg, cardinalities=False, selectivities=False)) ###Output _____no_output_____ ###Markdown Example 1 - Enumeration of all CSG-CMP pairsThis example comes from the 8th tutorial. The solution can be found in moodle. ###Code dpccp = Dpccp(qg) table = [] # we're not using dpccp.get_csg_cmp_pairs here so we can also illustrate empty cmp sets. 
for csg in dpccp.enumerate_csg(): cmps = list(dpccp.enumerate_cmp(csg)) if len(cmps) == 0: table.append({ 'csg': str(list(map(str, csg))), 'cmp': '[]' }) else: for cmp in cmps: table.append({ 'csg': str(list(map(str, csg))), 'cmp': str(list(map(str,cmp))) }) pd.DataFrame.from_dict(table) ###Output _____no_output_____ ###Markdown Example 2 - Get the optimal join tree based on c_out ###Code t = dpccp.run() Source(join_tree_to_graphviz(t)) ###Output _____no_output_____ ###Markdown All dpccp combinations ###Code table = [] for key, tree_lst in dpccp.dpverbose.items(): for tree in tree_lst: table.append({ 'l': len(tree.relations), 'relations' : key, 'tree': str(tree), 'cost': tree.cost, 'cheapest': 'Yes' if tree == dpccp.dptable[key] else 'No' }) pd.DataFrame.from_dict(table).sort_values(by=['l', 'relations'], ascending=True) ###Output _____no_output_____ ###Markdown DPccpWe first define a query graph for our examples: ###Code qg = QueryGraph() r1= qg.add_relation('R1', 10) r2 = qg.add_relation('R2', 20) r3 = qg.add_relation('R3', 50) r4 = qg.add_relation('R4', 20) r5 = qg.add_relation('R5', 10) qg.add_join(r1, r2, 0.1) qg.add_join(r1, r3, 0.1) qg.add_join(r2, r3, 0.1) qg.add_join(r2, r4, 0.1) qg.add_join(r3, r4, 0.1) qg.add_join(r3, r5, 0.1) qg.add_join(r4, r5, 0.1) Source(querygraph_to_graphviz(qg, cardinalities=False, selectivities=False)) ###Output _____no_output_____ ###Markdown Example 1 - Enumeration of all CSG-CMP pairsThis example comes from the 8th tutorial. The solution can be found in moodle. ###Code dpccp = Dpccp(qg) table = [] # we're not using dpccp.get_csg_cmp_pairs here so we can also illustrate empty cmp sets. for csg in dpccp.enumerate_csg(): cmps = list(dpccp.enumerate_cmp(csg)) if len(cmps) == 0: table.append({ 'csg': str(list(map(str, csg))), 'cmp': '[]' }) else: for cmp in cmps: table.append({ 'csg': str(list(map(str, csg))), 'cmp': str(list(map(str,cmp))) }) pd.DataFrame.from_dict(table) ###Output _____no_output_____ ###Markdown Example 2 - Get the optimal join tree based on c_out ###Code t = dpccp.run() Source(join_tree_to_graphviz(t)) ###Output _____no_output_____ ###Markdown All dpccp combinations ###Code table = [] for key, tree_lst in dpccp.dpverbose.items(): for tree in tree_lst: table.append({ 'l': len(tree.relations), 'relations' : key, 'tree': str(tree), 'cost': tree.cost, 'cheapest': 'Yes' if tree == dpccp.dptable[key] else 'No' }) pd.DataFrame.from_dict(table).sort_values(by=['l', 'relations'], ascending=True) ###Output _____no_output_____
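###Markdown For comparison, the same API can be exercised on a simple chain query. This sketch reuses only the calls demonstrated above (add_relation, add_join, Dpccp.run, join_tree_to_graphviz); the cardinalities and selectivities are made up for illustration:
###Code
# Chain query A - B - C; cardinalities and selectivities are illustrative only.
qg_chain = QueryGraph()
# add relations in BFS order!
a = qg_chain.add_relation('A', 100)
b = qg_chain.add_relation('B', 10)
c = qg_chain.add_relation('C', 1000)
qg_chain.add_join(a, b, 0.01)
qg_chain.add_join(b, c, 0.05)

t_chain = Dpccp(qg_chain).run()
Source(join_tree_to_graphviz(t_chain))
###Output _____no_output_____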
Process Data.ipynb
###Markdown Processing data to format usable by KerasGiven a folder `raw_data` containing several subfolders each representing a class of images, process the folder to another folder with `train, test, validation` as subfolders and split the images according to some percentages into these separate folders ###Code raw_data = './data/' base_dir = './pdata/' train = os.path.join(base_dir, 'train') test = os.path.join(base_dir, 'test') validation = os.path.join(base_dir, 'validation') #Create directories dirs = [base_dir, train, test, validation] for directory in dirs: if not os.path.exists(directory): os.makedirs(directory) classes = [cls for cls in os.listdir(raw_data) if not cls.startswith('.')] #Make class directories for cls in classes: for directory in [train, test, validation]: cls_path = os.path.join(directory, cls) if not os.path.exists(cls_path): os.makedirs(cls_path) training, testing = .5, .2 num_training_examples = 0 num_test_examples = 0 num_validation_examples = 0 for cls in classes: cls_path = os.path.join(raw_data, cls) imgs = os.listdir(cls_path) np.random.shuffle(imgs) trainidx = np.int(len(imgs) * training) testidx = trainidx + np.int(len(imgs) * testing) train_set = imgs[:trainidx] test_set = imgs[trainidx:testidx] validation_set = imgs[testidx:] for i,img in enumerate(train_set): imgpath = os.path.join(cls_path, img) topath = os.path.join(train, cls, cls+'.'+str(i)+img[-4:]) shutil.copyfile(imgpath, topath) num_training_examples += len(train_set) print("processed %s training images for class %s"%(len(train_set), cls)) #Process test data for i,img in enumerate(test_set): imgpath = os.path.join(cls_path, img) topath = os.path.join(test, cls, cls+'.'+str(i)+img[-4:]) shutil.copyfile(imgpath, topath) num_test_examples += len(test_set) print("processed %s test images for class %s"%(len(test_set), cls)) #Process validation data for i,img in enumerate(validation_set): imgpath = os.path.join(cls_path, img).strip() topath = os.path.join(validation, cls, cls+'.'+str(i)+img[-4:]) shutil.copyfile(imgpath, topath) num_validation_examples += len(validation_set) print("processed %s validation images for class %s"%(len(validation_set), cls)) print('Training examples = %s, Testing examples = %s, Validation examples = %s'%(num_training_examples, num_test_examples, num_validation_examples)) ###Output processed 49 training images for class cgm processed 19 test images for class cgm processed 30 validation images for class cgm processed 65 training images for class cmd processed 26 test images for class cmd processed 40 validation images for class cmd processed 58 training images for class healthy processed 23 test images for class healthy processed 35 validation images for class healthy processed 36 training images for class cbb processed 14 test images for class cbb processed 22 validation images for class cbb processed 60 training images for class cbsd processed 24 test images for class cbsd processed 36 validation images for class cbsd Training examples = 268, Testing examples = 106, Validation examples = 163 ###Markdown Process Data Load Libraries ###Code import sys import pandas as pd from sqlalchemy import create_engine ###Output _____no_output_____ ###Markdown Load Data ###Code def load_data(messages_filepath, categories_filepath): """ Input: 1. messages_filepath: path of messages datasets 2. categories_filepath: path of categories datasets Output: 1. df: merged dataframe, which contains data from messages, categories files Process: 1. 
Load the required datasets, messages, categories 2. Merge the two datasets """ # Load messages dataset messages = pd.read_csv(messages_filepath) # Load categories dataset categories = pd.read_csv(categories_filepath) # Merge datasets df = pd.merge(messages, categories, on='id', how='inner') # Return dataframe return df ###Output _____no_output_____ ###Markdown Clean Data ###Code def clean_data(df): """ Input: 1. df: the merged dataframe created by the load_data function Output: 1. df: cleaned dataframe after being processed through the steps below Process: 1. Create Y columns 2. Rename Y columns 3. Clean Y columns values 4. Replace original categories columns with the newly generated ones 5. Check for and remove duplicated rows if any exist """ # 1. Create a dataframe of the 36 individual category columns categories = df.categories.str.split(';', expand=True) # 2. Generate Y columns name and rename columns category_colnames = [i[:-2] for i in categories.iloc[0, :]] categories.columns = category_colnames # 3. Clean columns values for column in categories: # set each value to be the last character of the string categories[column] = categories[column].str[-1] # convert column from string to numeric categories[column] = pd.to_numeric(categories[column], errors='coerce') # 4. Use new categories to replace the original column from `df` df.drop(['categories'], axis=1, inplace=True) df = pd.concat([df, categories], axis=1) # 5. Check number of duplicates if df.duplicated().sum() != 0: df = df.drop_duplicates() return df ###Output _____no_output_____ ###Markdown Save Data ###Code def save_data(df, database_filename): """ Input: 1. df: the cleaned dataframe, which is generated by the clean_data function 2. database_filename: name of the database file Output: Saved database file Process: 1. Create engine for saving dataframe 2. Use to_sql method to dump dataframe to file """ engine = create_engine('sqlite:///{}.db'.format(database_filename)) df.to_sql(database_filename, engine, index=False) ###Output _____no_output_____ ###Markdown Main ###Code def main(): if len(sys.argv) == 4: messages_filepath, categories_filepath, database_filepath = sys.argv[1:] print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}' .format(messages_filepath, categories_filepath)) df = load_data(messages_filepath, categories_filepath) print('Cleaning data...') df = clean_data(df) print('Saving data...\n DATABASE: {}'.format(database_filepath)) save_data(df, database_filepath) print('Cleaned data saved to database!') else: print('Please provide the filepaths of the messages and categories '\ 'datasets as the first and second argument respectively, as '\ 'well as the filepath of the database to save the cleaned data '\ 'to as the third argument. \n\nExample: python process_data.py '\ 'disaster_messages.csv disaster_categories.csv '\ 'DisasterResponse.db') ###Output _____no_output_____ ###Markdown Call Main ###Code if __name__ == '__main__': main() ###Output Please provide the filepaths of the messages and categories datasets as the first and second argument respectively, as well as the filepath of the database to save the cleaned data to as the third argument. Example: python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db
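###Markdown As an optional sanity check (not part of the original script), the saved table can be read back with pandas. The names below are illustrative and assume save_data was called with database_filename='DisasterResponse', which with the code above creates the file DisasterResponse.db containing a table named DisasterResponse:
###Code
# Optional sanity check; the database and table names are assumptions (see above).
engine = create_engine('sqlite:///DisasterResponse.db')
pd.read_sql_table('DisasterResponse', engine).head()
###Output _____no_output_____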
docs/tutorials/9_c51_tutorial.ipynb
###Markdown Copyright 2021 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get update !sudo apt-get install -y xvfb ffmpeg freeglut3-dev !pip install 'imageio==2.4.0' !pip install pyvirtualdisplay !pip install tf-agents !pip install pyglet from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. 
###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). 
The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. ###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. 
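# Note (added): `agent.policy` used in the evaluation call below is the agent's
# greedy policy, while `agent.collect_policy` used inside the training loop adds
# exploration for data collection; evaluating once before training also provides
# the step-0 baseline point for the plot generated later.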
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. ###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2021 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get update !sudo apt-get install -y xvfb ffmpeg !pip install 'imageio==2.4.0' !pip install pyvirtualdisplay !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. 
Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). 
The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. ###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. 
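# Note (added): `agent.train(experience)` in the loop below returns a LossInfo
# tuple; the scalar `train_loss.loss` is what gets printed every `log_interval`
# steps.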
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. ###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. Get Started Run in Google Colab View source on GitHub ###Code # Note: If you haven't installed the following dependencies, run: !apt-get install xvfb !pip install 'gym==0.10.11' !pip install 'imageio==2.4.0' !pip install PILLOW !pip install 'pyglet==1.3.2' !pip install pyvirtualdisplay !pip install tf-agents try: %tensorflow_version 2.x except: pass ###Output _____no_output_____ ###Markdown Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. 
Setup ###Code from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. 
(The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. 
###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. 
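As a small added aside (not part of the original tutorial), the cell below smooths the recorded returns with a simple moving average, which can make the learning trend easier to read when individual evaluations are noisy; the window size of 3 is an arbitrary illustrative choice, and the raw returns are plotted in the cell that follows.
###Code
# Added sketch: moving-average smoothing of the recorded evaluation returns.
import numpy as np

window = 3
returns_array = np.array(returns, dtype=np.float64)
if len(returns_array) >= window:
  smoothed = np.convolve(returns_array, np.ones(window) / window, mode='valid')
  print('Last smoothed average return: {:.2f}'.format(smoothed[-1]))
###Output
_____no_output_____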
###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2021 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. 
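Before the setup, here is a small, self-contained numeric sketch (added; not part of the original text) of the core C51 idea: the Q-value of a state-action pair is represented as a categorical distribution over a fixed set of support points ("atoms"), and the scalar Q-value is the expectation of that distribution. The atom count and range below mirror the `num_atoms`, `min_q_value`, and `max_q_value` hyperparameters used later in this tutorial; the probabilities themselves are made up for illustration.
###Code
# Added sketch: recover a scalar Q-value from a categorical distribution over
# 51 atoms spaced evenly between -20 and 20 (the values used later on).
import numpy as np

num_atoms_demo = 51
support = np.linspace(-20.0, 20.0, num_atoms_demo)  # atom locations z_i

# A made-up distribution over the atoms: softmax of random logits.
rng = np.random.default_rng(0)
logits = rng.normal(size=num_atoms_demo)
probs = np.exp(logits) / np.sum(np.exp(logits))

expected_q = np.sum(probs * support)  # Q(s, a) = sum_i p_i * z_i
print('Expected Q-value implied by this distribution: {:.3f}'.format(expected_q))
###Output
_____no_output_____
###Markdown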
Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get install -y xvfb ffmpeg !pip install 'imageio==2.4.0' !pip install pyvirtualdisplay !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. 
The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. 
###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. 
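As a small added aside (not part of the original tutorial), the next cell prints a quick numeric summary of the recorded evaluation returns; the plotting cell then follows.
###Code
# Added: quick numeric summary of the recorded evaluation returns.
print('Best average return: {:.2f}'.format(max(returns)))
print('Final average return: {:.2f}'.format(returns[-1]))
###Output
_____no_output_____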
###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. 
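One of the additions this tutorial makes on top of DQN is the use of n-step updates, which are discussed in more detail further down. As a quick worked illustration (added here; not part of the original text): with $\gamma = 0.99$, per-step rewards of $1$, and a bootstrapped value estimate $V(s_{t + 2}) = 10$, the 2-step return is $G_t^2 = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 V(s_{t + 2}) = 1 + 0.99 + 0.9801 \times 10 = 11.791$, whereas the 1-step return would be $G_t = R_{t + 1} + \gamma V(s_{t + 1}) = 1 + 0.99 \times 10 = 10.9$ if $V(s_{t + 1})$ were also estimated at $10$.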
Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get install -y xvfb ffmpeg !pip install 'gym==0.10.11' !pip install 'imageio==2.4.0' !pip install PILLOW !pip install 'pyglet==1.3.2' !pip install pyvirtualdisplay !pip install --upgrade tensorflow-probability !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. 
The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. 
###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. 
###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. 
Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get install -y xvfb ffmpeg !pip install gym !pip install 'imageio==2.4.0' !pip install PILLOW !pip install 'pyglet==1.3.2' !pip install pyvirtualdisplay !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. 
The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. 
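###Markdown (Aside, purely illustrative — the average-return code that the previous cell refers to follows immediately after this.) The n-step return $G_t^n$ above is only given as a formula, so here is a tiny hand-rolled NumPy sketch of it. The reward values and bootstrap value are made up for illustration and are not part of the tutorial's pipeline. ###Code
import numpy as np

def n_step_return(rewards, bootstrap_value, gamma=0.99):
    """G_t^n = R_{t+1} + gamma*R_{t+2} + ... + gamma**(n-1)*R_{t+n} + gamma**n * V(s_{t+n})."""
    rewards = np.asarray(rewards, dtype=np.float64)
    n = len(rewards)
    discounts = gamma ** np.arange(n)          # 1, gamma, gamma**2, ...
    return float(np.sum(discounts * rewards) + gamma ** n * bootstrap_value)

# Example with n = 2, per-step rewards of +1 and an assumed bootstrap value of 10:
# 1 + 0.99*1 + 0.99**2 * 10 = 11.791
print(n_step_return([1.0, 1.0], bootstrap_value=10.0, gamma=0.99))
###Output _____no_output_____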
###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. 
###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. 
Setup ###Code try: %tensorflow_version 2.x except: pass ###Output _____no_output_____ ###Markdown If you haven't installed tf-agents yet, run: ###Code !sudo apt-get install -y xvfb ffmpeg !pip install 'gym==0.10.11' !pip install 'imageio==2.4.0' !pip install PILLOW !pip install 'pyglet==1.3.2' !pip install pyvirtualdisplay !pip install --upgrade tensorflow-probability !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. 
But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. 
###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. 
###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. 
Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get install -y xvfb ffmpeg !pip install 'gym==0.10.11' !pip install 'imageio==2.4.0' !pip install PILLOW !pip install 'pyglet==1.3.2' !pip install pyvirtualdisplay !pip install --pre tf-agents[reverb] from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. 
The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. 
###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. 
###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. Get Started Run in Google Colab View source on GitHub ###Code # Note: If you haven't installed the following dependencies, run: !sudo apt-get install -y xvfb ffmpeg !pip install 'gym==0.10.11' !pip install 'imageio==2.4.0' !pip install PILLOW !pip install 'pyglet==1.3.2' !pip install pyvirtualdisplay !pip install --upgrade tensorflow-probability !pip install tf-agents try: %tensorflow_version 2.x except: pass ###Output _____no_output_____ ###Markdown Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. Setup ###Code from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. 
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. ###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. 
###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. ###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. 
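# (Illustrative aside, not executed as part of this cell: the collection loop above
#  could equivalently be expressed with the drivers module that is already imported,
#  for example
#
#      dynamic_step_driver.DynamicStepDriver(
#          train_env,
#          random_policy,
#          observers=[replay_buffer.add_batch],
#          num_steps=initial_collect_steps).run()
#
#  This is only a sketch of the driver-based form mentioned in the comment above;
#  check the drivers documentation before relying on the exact arguments.)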
# Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. ###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____ ###Markdown Copyright 2018 The TF-Agents Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown DQN C51/Rainbow View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook Introduction This example shows how to train a [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) agent on the Cartpole environment using the TF-Agents library.![Cartpole environment](https://github.com/tensorflow/agents/blob/master/docs/tutorials/images/cartpole.png?raw=1)Make sure you take a look through the [DQN tutorial](https://github.com/tensorflow/agents/blob/master/docs/tutorials/1_dqn_tutorial.ipynb) as a prerequisite. This tutorial will assume familiarity with the DQN tutorial; it will mainly focus on the differences between DQN and C51. Setup If you haven't installed tf-agents yet, run: ###Code !sudo apt-get install -y xvfb ffmpeg !pip install gym !pip install 'imageio==2.4.0' !pip install PILLOW !pip install pyvirtualdisplay !pip install tf-agents from __future__ import absolute_import from __future__ import division from __future__ import print_function import base64 import imageio import IPython import matplotlib import matplotlib.pyplot as plt import PIL.Image import pyvirtualdisplay import tensorflow as tf from tf_agents.agents.categorical_dqn import categorical_dqn_agent from tf_agents.drivers import dynamic_step_driver from tf_agents.environments import suite_gym from tf_agents.environments import tf_py_environment from tf_agents.eval import metric_utils from tf_agents.metrics import tf_metrics from tf_agents.networks import categorical_q_network from tf_agents.policies import random_tf_policy from tf_agents.replay_buffers import tf_uniform_replay_buffer from tf_agents.trajectories import trajectory from tf_agents.utils import common tf.compat.v1.enable_v2_behavior() # Set up a virtual display for rendering OpenAI gym environments. display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start() ###Output _____no_output_____ ###Markdown Hyperparameters ###Code env_name = "CartPole-v1" # @param {type:"string"} num_iterations = 15000 # @param {type:"integer"} initial_collect_steps = 1000 # @param {type:"integer"} collect_steps_per_iteration = 1 # @param {type:"integer"} replay_buffer_capacity = 100000 # @param {type:"integer"} fc_layer_params = (100,) batch_size = 64 # @param {type:"integer"} learning_rate = 1e-3 # @param {type:"number"} gamma = 0.99 log_interval = 200 # @param {type:"integer"} num_atoms = 51 # @param {type:"integer"} min_q_value = -20 # @param {type:"integer"} max_q_value = 20 # @param {type:"integer"} n_step_update = 2 # @param {type:"integer"} num_eval_episodes = 10 # @param {type:"integer"} eval_interval = 1000 # @param {type:"integer"} ###Output _____no_output_____ ###Markdown EnvironmentLoad the environment as before, with one for training and one for evaluation. Here we use CartPole-v1 (vs. CartPole-v0 in the DQN tutorial), which has a larger max reward of 500 rather than 200. 
###Code train_py_env = suite_gym.load(env_name) eval_py_env = suite_gym.load(env_name) train_env = tf_py_environment.TFPyEnvironment(train_py_env) eval_env = tf_py_environment.TFPyEnvironment(eval_py_env) ###Output _____no_output_____ ###Markdown AgentC51 is a Q-learning algorithm based on DQN. Like DQN, it can be used on any environment with a discrete action space.The main difference between C51 and DQN is that rather than simply predicting the Q-value for each state-action pair, C51 predicts a histogram model for the probability distribution of the Q-value:![Example C51 Distribution](images/c51_distribution.png)By learning the distribution rather than simply the expected value, the algorithm is able to stay more stable during training, leading to improved final performance. This is particularly true in situations with bimodal or even multimodal value distributions, where a single average does not provide an accurate picture.In order to train on probability distributions rather than on values, C51 must perform some complex distributional computations in order to calculate its loss function. But don't worry, all of this is taken care of for you in TF-Agents!To create a C51 Agent, we first need to create a `CategoricalQNetwork`. The API of the `CategoricalQNetwork` is the same as that of the `QNetwork`, except that there is an additional argument `num_atoms`. This represents the number of support points in our probability distribution estimates. (The above image includes 10 support points, each represented by a vertical blue bar.) As you can tell from the name, the default number of atoms is 51. ###Code categorical_q_net = categorical_q_network.CategoricalQNetwork( train_env.observation_spec(), train_env.action_spec(), num_atoms=num_atoms, fc_layer_params=fc_layer_params) ###Output _____no_output_____ ###Markdown We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.Note that one other significant difference from vanilla `DqnAgent` is that we now need to specify `min_q_value` and `max_q_value` as arguments. These specify the most extreme values of the support (in other words, the most extreme of the 51 atoms on either side). Make sure to choose these appropriately for your particular environment. Here we use -20 and 20. ###Code optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate) train_step_counter = tf.compat.v2.Variable(0) agent = categorical_dqn_agent.CategoricalDqnAgent( train_env.time_step_spec(), train_env.action_spec(), categorical_q_network=categorical_q_net, optimizer=optimizer, min_q_value=min_q_value, max_q_value=max_q_value, n_step_update=n_step_update, td_errors_loss_fn=common.element_wise_squared_loss, gamma=gamma, train_step_counter=train_step_counter) agent.initialize() ###Output _____no_output_____ ###Markdown One last thing to note is that we also added an argument to use n-step updates with $n$ = 2. In single-step Q-learning ($n$ = 1), we only compute the error between the Q-values at the current time step and the next time step using the single-step return (based on the Bellman optimality equation). 
The single-step return is defined as:$G_t = R_{t + 1} + \gamma V(s_{t + 1})$where we define $V(s) = \max_a{Q(s, a)}$.N-step updates involve expanding the standard single-step return function $n$ times:$G_t^n = R_{t + 1} + \gamma R_{t + 2} + \gamma^2 R_{t + 3} + \dots + \gamma^n V(s_{t + n})$N-step updates enable the agent to bootstrap from further in the future, and with the right value of $n$, this often leads to faster learning.Although C51 and n-step updates are often combined with prioritized replay to form the core of the [Rainbow agent](https://arxiv.org/pdf/1710.02298.pdf), we saw no measurable improvement from implementing prioritized replay. Moreover, we find that when combining our C51 agent with n-step updates alone, our agent performs as well as other Rainbow agents on the sample of Atari environments we've tested. Metrics and EvaluationThe most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows. ###Code #@test {"skip": true} def compute_avg_return(environment, policy, num_episodes=10): total_return = 0.0 for _ in range(num_episodes): time_step = environment.reset() episode_return = 0.0 while not time_step.is_last(): action_step = policy.action(time_step) time_step = environment.step(action_step.action) episode_return += time_step.reward total_return += episode_return avg_return = total_return / num_episodes return avg_return.numpy()[0] random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(), train_env.action_spec()) compute_avg_return(eval_env, random_policy, num_eval_episodes) # Please also see the metrics module for standard implementations of different # metrics. ###Output _____no_output_____ ###Markdown Data CollectionAs in the DQN tutorial, set up the replay buffer and the initial data collection with the random policy. ###Code #@test {"skip": true} replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=agent.collect_data_spec, batch_size=train_env.batch_size, max_length=replay_buffer_capacity) def collect_step(environment, policy): time_step = environment.current_time_step() action_step = policy.action(time_step) next_time_step = environment.step(action_step.action) traj = trajectory.from_transition(time_step, action_step, next_time_step) # Add trajectory to the replay buffer replay_buffer.add_batch(traj) for _ in range(initial_collect_steps): collect_step(train_env, random_policy) # This loop is so common in RL, that we provide standard implementations of # these. For more details see the drivers module. # Dataset generates trajectories with shape [BxTx...] where # T = n_step_update + 1. dataset = replay_buffer.as_dataset( num_parallel_calls=3, sample_batch_size=batch_size, num_steps=n_step_update + 1).prefetch(3) iterator = iter(dataset) ###Output _____no_output_____ ###Markdown Training the agentThe training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.The following will take ~7 minutes to run. ###Code #@test {"skip": true} try: %%time except: pass # (Optional) Optimize by wrapping some of the code in a graph using TF function. agent.train = common.function(agent.train) # Reset the train step agent.train_step_counter.assign(0) # Evaluate the agent's policy once before training. 
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) returns = [avg_return] for _ in range(num_iterations): # Collect a few steps using collect_policy and save to the replay buffer. for _ in range(collect_steps_per_iteration): collect_step(train_env, agent.collect_policy) # Sample a batch of data from the buffer and update the agent's network. experience, unused_info = next(iterator) train_loss = agent.train(experience) step = agent.train_step_counter.numpy() if step % log_interval == 0: print('step = {0}: loss = {1}'.format(step, train_loss.loss)) if step % eval_interval == 0: avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes) print('step = {0}: Average Return = {1:.2f}'.format(step, avg_return)) returns.append(avg_return) ###Output _____no_output_____ ###Markdown Visualization PlotsWe can plot return vs global steps to see the performance of our agent. In `Cartpole-v1`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 500, the maximum possible return is also 500. ###Code #@test {"skip": true} steps = range(0, num_iterations + 1, eval_interval) plt.plot(steps, returns) plt.ylabel('Average Return') plt.xlabel('Step') plt.ylim(top=550) ###Output _____no_output_____ ###Markdown Videos It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab. ###Code def embed_mp4(filename): """Embeds an mp4 file in the notebook.""" video = open(filename,'rb').read() b64 = base64.b64encode(video) tag = ''' <video width="640" height="480" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"> Your browser does not support the video tag. </video>'''.format(b64.decode()) return IPython.display.HTML(tag) ###Output _____no_output_____ ###Markdown The following code visualizes the agent's policy for a few episodes: ###Code num_episodes = 3 video_filename = 'imageio.mp4' with imageio.get_writer(video_filename, fps=60) as video: for _ in range(num_episodes): time_step = eval_env.reset() video.append_data(eval_py_env.render()) while not time_step.is_last(): action_step = agent.policy.action(time_step) time_step = eval_env.step(action_step.action) video.append_data(eval_py_env.render()) embed_mp4(video_filename) ###Output _____no_output_____
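###Markdown
It can also be handy to sanity-check the rendering pipeline without encoding a whole video. The following minimal sketch displays a single rendered frame inline; it assumes, as the video loop above does, that `eval_py_env.render()` returns an RGB array for this environment.
###Code
# Display one rendered frame inline instead of encoding a video.
# Assumes eval_py_env.render() returns an RGB array, as in the loop above.
import matplotlib.pyplot as plt

eval_env.reset()
frame = eval_py_env.render()

plt.imshow(frame)
plt.axis('off')
###Output
_____no_output_____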
examples/reference/templates/Material.ipynb
###Markdown For a large variety of use cases we do not need complete control over the exact layout of each individual component on the page, as could be achieved with a [custom template](../../user_guide/Templates.ipynb), we just want to achieve a more polished look and feel. For these cases Panel ships with a number of default templates, which are defined by declaring three main content areas on the page, which can be populated as desired:* **`header`**: The header area of the HTML page* **`sidebar`**: A collapsible sidebar* **`main`**: The main area of the application* **`modal`**: A modal area which can be opened and closed from PythonThese three areas behave very similarly to other Panel layout components and have list-like semantics. This means we can easily append new components into these areas. Unlike other layout components however, the contents of the areas is fixed once rendered. If you need a dynamic layout you should therefore insert a regular Panel layout component (e.g. a `Column` or `Row`) and modify it in place once added to one of the content areas. Templates can allow for us to quickly and easily create web apps for displaying our data. Panel comes with a default Template, and includes multiple Templates that extend the default which add some customization for a better display. Parameters:In addition to the four different areas we can populate the default templates also provide a few additional parameters:* **`busy_indicator`** (BooleanIndicator): Visual indicator of application busy state.* **`header_background`** (str): Optional header background color override.* **`header_color`** (str): Optional header text color override.* **`logo`** (str): URI of logo to add to the header (if local file, logo is base64 encoded as URI).* **`site`** (str): Name of the site. Will be shown in the header. Default is '', i.e. not shown.* **`site_url`** (str): Url of the site and logo. Default is "/".* **`title`** (str): A title to show in the header.* **`theme`** (Theme): A Theme class (available in `panel.template.theme`)* **`sidebar_width`** (int): The width of the sidebar in pixels. Default is 370.________ In this case we are using the `MaterialTemplate`, built on [Material Components for the web](https://material.io/develop/web/), which is a CSS framework that provides a lot of built in stylings to create a smooth layout. Here is an example of how you can set up a display using this template: ###Code material = pn.template.MaterialTemplate(title='Material Template') xs = np.linspace(0, np.pi) freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2) phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi) @pn.depends(freq=freq, phase=phase) def sine(freq, phase): return hv.Curve((xs, np.sin(xs*freq+phase))).opts( responsive=True, min_height=400) @pn.depends(freq=freq, phase=phase) def cosine(freq, phase): return hv.Curve((xs, np.cos(xs*freq+phase))).opts( responsive=True, min_height=400) material.sidebar.append(freq) material.sidebar.append(phase) material.main.append( pn.Row( pn.Card(hv.DynamicMap(sine), title='Sine'), pn.Card(hv.DynamicMap(cosine), title='Cosine') ) ) material.servable(); ###Output _____no_output_____ ###Markdown For a large variety of use cases we do not need complete control over the exact layout of each individual component on the page, as could be achieved with a [custom template](../../user_guide/Templates.ipynb), we just want to achieve a more polished look and feel. 
For these cases Panel ships with a number of default templates, which are defined by declaring three main content areas on the page, which can be populated as desired:* **`header`**: The header area of the HTML page* **`sidebar`**: A collapsible sidebar* **`main`**: The main area of the application* **`modal`**: A modal area which can be opened and closed from PythonThese three areas behave very similarly to other Panel layout components and have list-like semantics. This means we can easily append new components into these areas. Unlike other layout components however, the contents of the areas is fixed once rendered. If you need a dynamic layout you should therefore insert a regular Panel layout component (e.g. a `Column` or `Row`) and modify it in place once added to one of the content areas. Templates can allow for us to quickly and easily create web apps for displaying our data. Panel comes with a default Template, and includes multiple Templates that extend the default which add some customization for a better display. Parameters:In addition to the four different areas we can populate the default templates also provide a few additional parameters:* **`busy_indicator`** (BooleanIndicator): Visual indicator of application busy state.* **`header_background`** (str): Optional header background color override.* **`header_color`** (str): Optional header text color override.* **`logo`** (str): URI of logo to add to the header (if local file, logo is base64 encoded as URI).* **`site`** (str): Name of the site. Will be shown in the header. Default is '', i.e. not shown.* **`site_url`** (str): Url of the site and logo. Default is "/".* **`title`** (str): A title to show in the header.* **`theme`** (Theme): A Theme class (available in `panel.template.theme`)________ In this case we are using the `MaterialTemplate`, built on [Material Components for the web](https://material.io/develop/web/), which is a CSS framework that provides a lot of built in stylings to create a smooth layout. Here is an example of how you can set up a display using this template: ###Code material = pn.template.MaterialTemplate(title='Material Template') pn.config.sizing_mode = 'stretch_width' xs = np.linspace(0, np.pi) freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2) phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi) @pn.depends(freq=freq, phase=phase) def sine(freq, phase): return hv.Curve((xs, np.sin(xs*freq+phase))).opts( responsive=True, min_height=400) @pn.depends(freq=freq, phase=phase) def cosine(freq, phase): return hv.Curve((xs, np.cos(xs*freq+phase))).opts( responsive=True, min_height=400) material.sidebar.append(freq) material.sidebar.append(phase) material.main.append( pn.Row( pn.Card(hv.DynamicMap(sine), title='Sine'), pn.Card(hv.DynamicMap(cosine), title='Cosine') ) ) material.servable(); ###Output _____no_output_____ ###Markdown For a large variety of use cases we do not need complete control over the exact layout of each individual component on the page, as could be achieved with a [custom template](../../user_guide/Templates.ipynb), we just want to achieve a more polished look and feel. 
For these cases Panel ships with a number of default templates, which are defined by declaring four main content areas on the page, which can be populated as desired:* **`header`**: The header area of the HTML page* **`sidebar`**: A collapsible sidebar* **`main`**: The main area of the application* **`modal`**: A modal area which can be opened and closed from PythonThese four areas behave very similarly to other Panel layout components and have list-like semantics. This means we can easily append new components into these areas. Unlike other layout components however, the contents of the areas is fixed once rendered. If you need a dynamic layout you should therefore insert a regular Panel layout component (e.g. a `Column` or `Row`) and modify it in place once added to one of the content areas. Templates can allow for us to quickly and easily create web apps for displaying our data. Panel comes with a default Template, and includes multiple Templates that extend the default which add some customization for a better display. Parameters:In addition to the four different areas we can populate the default templates also provide a few additional parameters:* **`busy_indicator`** (BooleanIndicator): Visual indicator of application busy state.* **`header_background`** (str): Optional header background color override.* **`header_color`** (str): Optional header text color override.* **`logo`** (str): URI of logo to add to the header (if local file, logo is base64 encoded as URI).* **`site`** (str): Name of the site. Will be shown in the header. Default is '', i.e. not shown.* **`site_url`** (str): Url of the site and logo. Default is "/".* **`title`** (str): A title to show in the header.* **`theme`** (Theme): A Theme class (available in `panel.template.theme`)* **`sidebar_width`** (int): The width of the sidebar in pixels. Default is 370.________ In this case we are using the `MaterialTemplate`, built on [Material Components for the web](https://material.io/develop/web/), which is a CSS framework that provides a lot of built in stylings to create a smooth layout. Here is an example of how you can set up a display using this template: ###Code material = pn.template.MaterialTemplate(title='Material Template') xs = np.linspace(0, np.pi) freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2) phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi) @pn.depends(freq=freq, phase=phase) def sine(freq, phase): return hv.Curve((xs, np.sin(xs*freq+phase))).opts( responsive=True, min_height=400) @pn.depends(freq=freq, phase=phase) def cosine(freq, phase): return hv.Curve((xs, np.cos(xs*freq+phase))).opts( responsive=True, min_height=400) material.sidebar.append(freq) material.sidebar.append(phase) material.main.append( pn.Row( pn.Card(hv.DynamicMap(sine), title='Sine'), pn.Card(hv.DynamicMap(cosine), title='Cosine') ) ) material.servable(); ###Output _____no_output_____ ###Markdown For a large variety of use cases we do not need complete control over the exact layout of each individual component on the page, as could be achieved with a [custom template](../../user_guide/Templates.ipynb), we just want to achieve a more polished look and feel. 
For these cases Panel ships with a number of default templates, which are defined by declaring three main content areas on the page, which can be populated as desired:* **`header`**: The header area of the HTML page* **`sidebar`**: A collapsible sidebar* **`main`**: The main area of the application* **`modal`**: A modal area which can be opened and closed from PythonThese three areas behave very similarly to other Panel layout components and have list-like semantics. This means we can easily append new components into these areas. Unlike other layout components however, the contents of the areas is fixed once rendered. If you need a dynamic layout you should therefore insert a regular Panel layout component (e.g. a `Column` or `Row`) and modify it in place once added to one of the content areas. Templates can allow for us to quickly and easily create web apps for displaying our data. Panel comes with a default Template, and includes multiple Templates that extend the default which add some customization for a better display. Parameters:In addition to the four different areas we can populate the default templates also provide a few additional parameters:* **`busy_indicator`** (BooleanIndicator): Visual indicator of application busy state.* **`header_background`** (str): Optional header background color override.* **`header_color`** (str): Optional header text color override.* **`logo`** (str): URI of logo to add to the header (if local file, logo is base64 encoded as URI).* **`theme`** (Theme): A Theme class (available in `panel.template.theme`)* **`title`** (str): A title to show in the header.________ In this case we are using the `MaterialTemplate`, built on [Material Components for the web](https://material.io/develop/web/), which is a CSS framework that provides a lot of built in stylings to create a smooth layout. Here is an example of how you can set up a display using this template: ###Code material = pn.template.MaterialTemplate(title='Material Template') pn.config.sizing_mode = 'stretch_width' xs = np.linspace(0, np.pi) freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2) phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi) @pn.depends(freq=freq, phase=phase) def sine(freq, phase): return hv.Curve((xs, np.sin(xs*freq+phase))).opts( responsive=True, min_height=400) @pn.depends(freq=freq, phase=phase) def cosine(freq, phase): return hv.Curve((xs, np.cos(xs*freq+phase))).opts( responsive=True, min_height=400) material.sidebar.append(freq) material.sidebar.append(phase) material.main.append( pn.Row( pn.Card(hv.DynamicMap(sine), title='Sine'), pn.Card(hv.DynamicMap(cosine), title='Cosine') ) ) material.servable(); ###Output _____no_output_____
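###Markdown
The examples above only populate the `sidebar` and `main` areas. As a brief illustration of the other pieces described earlier, the sketch below sets a few of the header parameters and puts some content in the `modal` area. The constructor arguments come from the parameter list above; the `open_modal()` call is how recent Panel releases let you open the modal from Python, so check the Templates user guide for the exact API in your installed version.
###Code
# A minimal sketch of the header parameters and the modal area.
# open_modal() is assumed to be available, as in recent Panel releases.
custom = pn.template.MaterialTemplate(
    title='Material Template',
    site='Example Site',              # shown in the header next to the title
    header_background='#1b5e20',      # header background color override
)

custom.sidebar.append(pn.pane.Markdown('## Controls'))
custom.main.append(pn.pane.Markdown('# Main area content'))

# Content for the modal area, plus a button that opens it from Python.
custom.modal.append(pn.pane.Markdown('## About\n\nThis content lives in the modal area.'))

open_button = pn.widgets.Button(name='Open modal')
open_button.on_click(lambda event: custom.open_modal())
custom.sidebar.append(open_button)

# Mark this template servable instead of `material` when serving this variant.
# custom.servable()
###Output
_____no_output_____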
soln/chap08.ipynb
###Markdown Poisson Processes Think Bayes, Second EditionCopyright 2020 Allen B. DowneyLicense: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) ###Code # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist # Get utils.py import os if not os.path.exists('utils.py'): !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py from utils import set_pyplot_params set_pyplot_params() ###Output _____no_output_____ ###Markdown This chapter introduces the [Poisson process](https://en.wikipedia.org/wiki/Poisson_point_process), which is a model used to describe events that occur at random intervals.As an example of a Poisson process, we'll model goal-scoring in soccer, which is American English for the game everyone else calls "football".We'll use goals scored in a game to estimate the parameter of a Poisson process; then we'll use the posterior distribution to make predictions.And we'll solve The World Cup Problem. The World Cup ProblemIn the 2018 FIFA World Cup final, France defeated Croatia 4 goals to 2. Based on this outcome:1. How confident should we be that France is the better team?2. If the same teams played again, what is the chance France would win again?To answer these questions, we have to make some modeling decisions.* First, I'll assume that for any team against another team there is some unknown goal-scoring rate, measured in goals per game, which I'll denote with the Python variable `lam` or the Greek letter $\lambda$, pronounced "lambda".* Second, I'll assume that a goal is equally likely during any minute of a game. So, in a 90 minute game, the probability of scoring during any minute is $\lambda/90$.* Third, I'll assume that a team never scores twice during the same minute.Of course, none of these assumptions is completely true in the real world, but I think they are reasonable simplifications.As George Box said, "All models are wrong; some are useful."(https://en.wikipedia.org/wiki/All_models_are_wrong).In this case, the model is useful because if these assumptions are true, at least roughly, the number of goals scored in a game follows a Poisson distribution, at least roughly. The Poisson DistributionIf the number of goals scored in a game follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with a goal-scoring rate, $\lambda$, the probability of scoring $k$ goals is$$\lambda^k \exp(-\lambda) ~/~ k!$$for any non-negative value of $k$.SciPy provides a `poisson` object that represents a Poisson distribution.We can create one with $\lambda=1.4$ like this: ###Code from scipy.stats import poisson lam = 1.4 dist = poisson(lam) type(dist) ###Output _____no_output_____ ###Markdown The result is an object that represents a "frozen" random variable and provides `pmf`, which evaluates the probability mass function of the Poisson distribution. ###Code k = 4 dist.pmf(k) ###Output _____no_output_____ ###Markdown This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.We'll use the following function to make a `Pmf` that represents a Poisson distribution. 
###Code from empiricaldist import Pmf def make_poisson_pmf(lam, qs): """Make a Pmf of a Poisson distribution.""" ps = poisson(lam).pmf(qs) pmf = Pmf(ps, qs) pmf.normalize() return pmf ###Output _____no_output_____ ###Markdown `make_poisson_pmf` takes as parameters the goal-scoring rate, `lam`, and an array of quantities, `qs`, where it should evaluate the Poisson PMF. It returns a `Pmf` object.For example, here's the distribution of goals scored for `lam=1.4`, computed for values of `k` from 0 to 9. ###Code import numpy as np lam = 1.4 goals = np.arange(10) pmf_goals = make_poisson_pmf(lam, goals) ###Output _____no_output_____ ###Markdown And here's what it looks like. ###Code from utils import decorate def decorate_goals(title=''): decorate(xlabel='Number of goals', ylabel='PMF', title=title) pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$') decorate_goals('Distribution of goals scored') ###Output _____no_output_____ ###Markdown The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.Values above 7 are negligible.This distribution shows that if we know the goal scoring rate, we can predict the number of goals.Now let's turn it around: given a number of goals, what can we say about the goal-scoring rate?To answer that, we need to think about the prior distribution of `lam`, which represents the range of possible values and their probabilities before we see the score. The Gamma DistributionIf you have ever seen a soccer game, you have some information about `lam`. In most games, teams score a few goals each. In rare cases, a team might score more than 5 goals, but they almost never score more than 10.Using [data from previous World Cups](https://www.statista.com/statistics/269031/goals-scored-per-game-at-the-fifa-world-cup-since-1930/), I estimate that each team scores about 1.4 goals per game, on average. So I'll set the mean of `lam` to be 1.4.For a good team against a bad one, we expect `lam` to be higher; for a bad team against a good one, we expect it to be lower. To model the distribution of goal-scoring rates, I'll use a [gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which I chose because:1. The goal scoring rate is continuous and non-negative, and the gamma distribution is appropriate for this kind of quantity.2. The gamma distribution has only one parameter, `alpha`, which is the mean. So it's easy to construct a gamma distribution with the mean we want.3. As we'll see, the shape of the gamma distribution is a reasonable choice, given what we know about soccer.And there's one more reason, which I will reveal in >.SciPy provides `gamma`, which creates an object that represents a gamma distribution.And the `gamma` object provides provides `pdf`, which evaluates the **probability density function** (PDF) of the gamma distribution.Here's how we use it. ###Code from scipy.stats import gamma alpha = 1.4 qs = np.linspace(0, 10, 101) ps = gamma(alpha).pdf(qs) ###Output _____no_output_____ ###Markdown The parameter, `alpha`, is the mean of the distribution.The `qs` are possible values of `lam` between 0 and 10.The `ps` are **probability densities**, which we can think of as unnormalized probabilities.To normalize them, we can put them in a `Pmf` and call `normalize`: ###Code from empiricaldist import Pmf prior = Pmf(ps, qs) prior.normalize() ###Output _____no_output_____ ###Markdown The result is a discrete approximation of a gamma distribution.Here's what it looks like. 
###Code def decorate_rate(title=''): decorate(xlabel='Goal scoring rate (lam)', ylabel='PMF', title=title) prior.plot(label='prior', color='C5') decorate_rate(r'Prior distribution of $\lambda$') ###Output _____no_output_____ ###Markdown This distribution represents our prior knowledge about goal scoring: `lam` is usually less than 2, occasionally as high as 6, and seldom higher than that. And we can confirm that the mean is about 1.4. ###Code prior.mean() ###Output _____no_output_____ ###Markdown As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update. The UpdateSuppose you are given the goal-scoring rate, $\lambda$, and asked to compute the probability of scoring a number of goals, $k$. That is precisely the question we answered by computing the Poisson PMF.For example, if $\lambda$ is 1.4, the probability of scoring 4 goals in a game is: ###Code lam = 1.4 k = 4 poisson(lam).pmf(4) ###Output _____no_output_____ ###Markdown Now suppose we are have an array of possible values for $\lambda$; we can compute the likelihood of the data for each hypothetical value of lam, like this: ###Code lams = prior.qs k = 4 likelihood = poisson(lams).pmf(k) ###Output _____no_output_____ ###Markdown And that's all we need to do the update.To get the posterior distribution, we multiply the prior by the likelihoods we just computed and normalize the result.The following function encapsulates these steps. ###Code def update_poisson(pmf, data): """Update Pmf with a Poisson likelihood.""" k = data lams = pmf.qs likelihood = poisson(lams).pmf(k) pmf *= likelihood pmf.normalize() ###Output _____no_output_____ ###Markdown The first parameter is the prior; the second is the number of goals.In the example, France scored 4 goals, so I'll make a copy of the prior and update it with the data. ###Code france = prior.copy() update_poisson(france, 4) ###Output _____no_output_____ ###Markdown Here's what the posterior distribution looks like, along with the prior. ###Code prior.plot(label='prior', color='C5') france.plot(label='France posterior', color='C3') decorate_rate('Posterior distribution for France') ###Output _____no_output_____ ###Markdown The data, `k=4`, makes us think higher values of `lam` are more likely and lower values are less likely. So the posterior distribution is shifted to the right.Let's do the same for Croatia: ###Code croatia = prior.copy() update_poisson(croatia, 2) ###Output _____no_output_____ ###Markdown And here are the results. ###Code prior.plot(label='prior', color='C5') croatia.plot(label='Croatia posterior', color='C0') decorate_rate('Posterior distribution for Croatia') ###Output _____no_output_____ ###Markdown Here are the posterior means for these distributions. ###Code print(croatia.mean(), france.mean()) ###Output 1.6999765866755225 2.699772393342308 ###Markdown The mean of the prior distribution is about 1.4.After Croatia scores 2 goals, their posterior mean is 1.7, which is near the midpoint of the prior and the data.Likewise after France scores 4 goals, their posterior mean is 2.7.These results are typical of a Bayesian update: the location of the posterior distribution is a compromise between the prior and the data. Probability of SuperiorityNow that we have a posterior distribution for each team, we can answer the first question: How confident should we be that France is the better team?In the model, "better" means having a higher goal-scoring rate against the opponent. 
We can use the posterior distributions to compute the probability that a random value drawn from France's distribution exceeds a value drawn from Croatia's.One way to do that is to enumerate all pairs of values from the two distributions, adding up the total probability that one value exceeds the other. ###Code def prob_gt(pmf1, pmf2): """Compute the probability of superiority.""" total = 0 for q1, p1 in pmf1.items(): for q2, p2 in pmf2.items(): if q1 > q2: total += p1 * p2 return total ###Output _____no_output_____ ###Markdown This is similar to the method we use in > to compute the distribution of a sum.Here's how we use it: ###Code prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown `Pmf` provides a function that does the same thing. ###Code Pmf.prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown The results are slightly different because `Pmf.prob_gt` uses array operators rather than `for` loops.Either way, the result is close to 75%. So, on the basis of one game, we have moderate confidence that France is actually the better team.Of course, we should remember that this result is based on the assumption that the goal-scoring rate is constant.In reality, if a team is down by one goal, they might play more aggressively toward the end of the game, making them more likely to score, but also more likely to give up an additional goal.As always, the results are only as good as the model. Predicting the RematchNow we can take on the second question: If the same teams played again, what is the chance Croatia would win?To answer this question, we'll generate the "posterior predictive distribution", which is the number of goals we expect a team to score.If we knew the goal scoring rate, `lam`, the distribution of goals would be a Poisson distribution with parameter `lam`.Since we don't know `lam`, the distribution of goals is a mixture of a Poisson distributions with different values of `lam`.First I'll generate a sequence of `Pmf` objects, one for each value of `lam`. ###Code pmf_seq = [make_poisson_pmf(lam, goals) for lam in prior.qs] ###Output _____no_output_____ ###Markdown The following figure shows what these distributions look like for a few values of `lam`. ###Code import matplotlib.pyplot as plt for i, index in enumerate([10, 20, 30, 40]): plt.subplot(2, 2, i+1) lam = prior.qs[index] pmf = pmf_seq[index] pmf.bar(label=f'$\lambda$ = {lam}', color='C3') decorate_goals() ###Output _____no_output_____ ###Markdown The predictive distribution is a mixture of these `Pmf` objects, weighted with the posterior probabilities.We can use `make_mixture` from > to compute this mixture. ###Code from utils import make_mixture pred_france = make_mixture(france, pmf_seq) ###Output _____no_output_____ ###Markdown Here's the predictive distribution for the number of goals France would score in a rematch. ###Code pred_france.bar(color='C3', label='France') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown This distribution represents two sources of uncertainty: we don't know the actual value of `lam`, and even if we did, we would not know the number of goals in the next game.Here's the predictive distribution for Croatia. ###Code pred_croatia = make_mixture(croatia, pmf_seq) pred_croatia.bar(color='C0', label='Croatia') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown We can use these distributions to compute the probability that France wins, loses, or ties the rematch. 
###Code win = Pmf.prob_gt(pred_france, pred_croatia) win lose = Pmf.prob_lt(pred_france, pred_croatia) lose tie = Pmf.prob_eq(pred_france, pred_croatia) tie ###Output _____no_output_____ ###Markdown Assuming that France wins half of the ties, their chance of winning the rematch is about 65%. ###Code win + tie/2 ###Output _____no_output_____ ###Markdown This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.Even if France is the better team, they might lose the game. The Exponential DistributionAs an exercise at the end of this notebook, you'll have a chance to work on the following variation on the World Cup Problem:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?In this version, notice that the data is not the number of goals in a fixed period of time, but the time between goals.To compute the likelihood of data like this, we can take advantage of the theory of Poisson processes again. If each team has a constant goal-scoring rate, we expect the time between goals to follow an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).If the goal-scoring rate is $\lambda$, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$$\lambda \exp(-\lambda t)$$Because $t$ is a continuous quantity, the value of this expression is not a probability; it is a probability density. However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.SciPy provides `expon`, which creates an object that represents an exponential distribution.However, it does not take `lam` as a parameter in the way you might expect, which makes it awkward to work with.Since the PDF of the exponential distribution is so easy to evaluate, I'll use my own function. ###Code def expo_pdf(t, lam): """Compute the PDF of the exponential distribution.""" return lam * np.exp(-lam * t) ###Output _____no_output_____ ###Markdown To see what the exponential distribution looks like, let's assume again that `lam` is 1.4; we can compute the distribution of $t$ like this: ###Code lam = 1.4 qs = np.linspace(0, 4, 101) ps = expo_pdf(qs, lam) pmf_time = Pmf(ps, qs) pmf_time.normalize() ###Output _____no_output_____ ###Markdown And here's what it looks like: ###Code def decorate_time(title=''): decorate(xlabel='Time between goals (games)', ylabel='PMF', title=title) pmf_time.plot(label='exponential with $\lambda$ = 1.4') decorate_time('Distribution of time between goals') ###Output _____no_output_____ ###Markdown It is counterintuitive, but true, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.With a goal-scoring rate of 1.4, it is possible that a team will take more than one game to score a goal, but it is unlikely that they will take more than two games. 
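To put numbers on that claim, we can use the survival function of the exponential distribution, which gives the probability that the interval exceeds $t$:

$$P(T > t) = \exp(-\lambda t)$$

Here's a quick check with `lam` = 1.4, using the closed form directly rather than the `Pmf` approximation.
###Code
# Survival function of the exponential distribution: P(T > t) = exp(-lam * t)
lam = 1.4

for t in [1, 2]:
    print(f'P(T > {t} games) = {np.exp(-lam * t):.3f}')
###Output
_____no_output_____
###Markdown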
SummaryThis chapter introduces three new distributions, so it can be hard to keep them straight.Let's review:* If a system satisfies the assumptions of a Poisson model, the number of events in a period of time follows a Poisson distribution, which is a discrete distribution with integer quantities from 0 to infinity. In practice, we can usually ignore low-probability quantities above a finite limit.* Also under the Poisson model, the interval between events follows an exponential distribution, which is a continuous distribution with quantities from 0 to infinity. Because it is continuous, it is described by a probability density function (PDF) rather than a probability mass function (PMF). But when we use an exponential distribution to compute the likelihood of the data, we can treat densities as unnormalized probabilities.* The Poisson and exponential distributions are parameterized by an event rate, denoted $\lambda$ or `lam`.* For the prior distribution of $\lambda$, I used a gamma distribution, which is a continuous distribution with quantities from 0 to infinity, but I approximated it with a discrete, bounded PMF. The gamma distribution has one parameter, denoted $\alpha$ or `alpha`, which is also its mean.I chose the gamma distribution because the shape is consistent with our background knowledge about goal-scoring rates.There are other distributions we could have used; however, we will see in > that the gamma distribution can be a particularly good choice.But we have a few things to do before we get there, starting with these exercises. Exercises **Exercise:** Let's finish the exercise we started:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)? Here are the steps I recommend:1. Starting with the same gamma prior we used in the previous problem, compute the likelihood of scoring a goal after 11 minutes for each possible value of `lam`. Don't forget to convert all times into games rather than minutes.2. Compute the posterior distribution of `lam` for Germany after the first goal.3. Compute the likelihood of scoring another goal after 12 more minutes and do another update. Plot the prior, posterior after one goal, and posterior after two goals.4. Compute the posterior predictive distribution of goals Germany might score during the remaining time in the game, `90-23` minutes. Note: you will have to think about how to generate predicted goals for a fraction of a game.5. Compute the probability of scoring 5 or more goals during the remaining time. 
###Code # Solution # Here's a function that updates the distribution of lam # with the given time between goals def update_expo(pmf, data): """Update based on an observed interval pmf: prior PMF data: time between goals in minutes """ t = data / 90 lams = pmf.qs likelihood = expo_pdf(t, lams) pmf *= likelihood pmf.normalize() # Solution # Here are the updates for the first and second goals germany = prior.copy() update_expo(germany, 11) germany2 = germany.copy() update_expo(germany2, 12) # Solution # Here are the mean values of `lam` after each update germany.mean(), germany2.mean() # Solution # Here's what the posterior distributions look like prior.plot(color='C5', label='Prior') germany.plot(color='C3', label='Posterior after 1 goal') germany2.plot(color='C16', label='Posterior after 2 goals') decorate_rate('Prior and posterior distributions') # Solution # Here's the predictive distribution for each possible value of `lam` t = (90-23) / 90 pmf_seq = [make_poisson_pmf(lam*t, goals) for lam in germany2.qs] # Solution # And here's the mixture of predictive distributions, # weighted by the probabilities in the posterior distribution. pred_germany2 = make_mixture(germany2, pmf_seq) # Solution # Here's what the predictive distribution looks like pred_germany2.bar(color='C1', label='germany') decorate_goals('Posterior predictive distribution') # Solution # Here's the probability of scoring exactly 5 more goals pred_germany2[5] # Solution # And the probability of 5 or more pred_germany2.prob_ge(5) ###Output _____no_output_____ ###Markdown **Exercise:** Returning to the first version of the World Cup Problem. Suppose France and Croatia play a rematch. What is the probability that France scores first? Hint: Compute the posterior predictive distribution for the time until the first goal by making a mixture of exponential distributions. You can use the following function to make a PMF that approximates an exponential distribution. ###Code def make_expo_pmf(lam, high): """Make a PMF of an exponential distribution. lam: event rate high: upper bound on the interval `t` returns: Pmf of the interval between events """ qs = np.linspace(0, high, 101) ps = expo_pdf(qs, lam) pmf = Pmf(ps, qs) pmf.normalize() return pmf # Solution # Here are the predictive distributions for the # time until the first goal pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs] # Solution # And here are the mixtures based on the two posterior distributions pred_france = make_mixture(france, pmf_seq) pred_croatia = make_mixture(croatia, pmf_seq) # Solution # Here's what the posterior predictive distributions look like pred_france.plot(label='France', color='C3') pred_croatia.plot(label='Croatia', color='C0') decorate_time('Posterior predictive distribution') # Solution # And here's the probability France scores first Pmf.prob_lt(pred_france, pred_croatia) ###Output _____no_output_____ ###Markdown **Exercise:** In the 2010-11 National Hockey League (NHL) Finals, my beloved BostonBruins played a best-of-seven championship series against the despisedVancouver Canucks. Boston lost the first two games 0-1 and 2-3, thenwon the next two games 8-1 and 4-0. At this point in the series, whatis the probability that Boston will win the next game, and what istheir probability of winning the championship?To choose a prior distribution, I got some statistics fromhttp://www.nhl.com, specifically the average goals per gamefor each team in the 2010-11 season. 
The distribution is well modeled by a gamma distribution with mean 2.8.In what ways do you think the outcome of these games might violate the assumptions of the Poisson model? How would these violations affect your predictions? ###Code # Solution # When a team is winning or losing by an insurmountable margin, # they might remove their best players from the game, which # would affect their goal-scoring rate, violating the assumption # that the goal scoring rate is constant. # In this example, Boston won the third game 8-1, but scoring # eight goals in a game might not reflect their true long-term # goal-scoring rate. # As a result, the analysis below might overestimate the chance # that Boston wins. # As it turned out, they did not. # Solution from scipy.stats import gamma alpha = 2.8 qs = np.linspace(0, 15, 101) ps = gamma.pdf(qs, alpha) prior_hockey = Pmf(ps, qs) prior_hockey.normalize() # Solution prior_hockey.plot(color='C5') decorate_rate('Prior distribution for hockey') prior_hockey.mean() # Solution bruins = prior_hockey.copy() for data in [0, 2, 8, 4]: update_poisson(bruins, data) bruins.mean() # Solution canucks = prior_hockey.copy() for data in [1, 3, 1, 0]: update_poisson(canucks, data) canucks.mean() # Solution canucks.plot(label='Canucks') bruins.plot(label='Bruins') decorate_rate('Posterior distributions') # Solution goals = np.arange(15) pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs] # Solution pred_bruins = make_mixture(bruins, pmf_seq) pred_bruins.bar(label='Bruins', color='C1') decorate_goals('Posterior predictive distribution') # Solution pred_canucks = make_mixture(canucks, pmf_seq) pred_canucks.bar(label='Canucks') decorate_goals('Posterior predictive distribution') # Solution win = Pmf.prob_gt(pred_bruins, pred_canucks) lose = Pmf.prob_lt(pred_bruins, pred_canucks) tie = Pmf.prob_eq(pred_bruins, pred_canucks) win, lose, tie # Solution # Assuming the Bruins win half of the ties, # their chance of winning the next game is... p = win + tie/2 p # Solution # Their chance of winning the series is their # chance of winning k=2 or k=3 of the remaining # n=3 games. from scipy.stats import binom n = 3 a = binom.pmf([2,3], n, p) a.sum() ###Output _____no_output_____ ###Markdown Poisson Processes Think Bayes, Second EditionCopyright 2020 Allen B. DowneyLicense: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) ###Code # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist # Get utils.py import os if not os.path.exists('utils.py'): !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py from utils import set_pyplot_params set_pyplot_params() ###Output _____no_output_____ ###Markdown This chapter introduces the [Poisson process](https://en.wikipedia.org/wiki/Poisson_point_process), which is a model used to describe events that occur at random intervals.In this context, "process" has a mathematical definition that is almost unrelated to its usual meaning, and (I think) not worth explaining here.As an example of a Poisson process, we'll model goal-scoring in soccer, which is American English for the game everyone else calls "football".We'll use goals scored in a game to estimate the parameter of a Poisson process; then we'll use the posterior distribution to make predictions.And we'll solve The World Cup Problem. 
The World Cup ProblemIn the 2018 FIFA World Cup final, France defeated Croatia 4 goals to 2. Based on this outcome:1. How confident should we be that France is the better team?2. If the same teams played again, what is the chance France would win again?To answer these questions, we have to make some modeling decisions.* First, I'll assume that for any team against another team there is some unknown goal-scoring rate, measured in goals per game, which I'll denote with the Greek letter $\lambda$, pronounced "lambda", and the Python variable `lam` (we can't use `lambda` because it's a Python keyword).* Second, I'll assume that a goal is equally likely during any minute of a game. So, in a 90 minute game, the probability of scoring during any minute is $\lambda/90$.* Third, I'll assume that a team never scores twice during the same minute.Of course, none of these assumptions is completely true in the real world, but I think they are reasonable simplifications.As George Box said, "[All models are wrong; some are useful.](https://en.wikipedia.org/wiki/All_models_are_wrong)"In this case, the model is useful because if these assumption are true, we expect the number of goals scored in a game to follow a Poisson distribution. The Poisson distributionIf the number of goals scored in a game follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with a goal-scoring rate, $\lambda$, the probability of scoring $k$ goals is$$\lambda^k \exp(-\lambda) ~/~ k!$$for any non-negative value of $k$.SciPy provides a `poisson` object that represents a Poisson distribution.We can create one with $\lambda=1.4$ like this: ###Code from scipy.stats import poisson lam = 1.4 dist = poisson(lam) dist ###Output _____no_output_____ ###Markdown `poisson` provides `pmf`, which evaluates the probability mass function of the Poisson distribution. ###Code k = 4 dist.pmf(k) ###Output _____no_output_____ ###Markdown This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.The function `make_poisson_pmf`, which we saw in Section xxx, computes the PMF of the Poisson distribution for given values of `k` and returns the results in a `Pmf` object.For example, here's the distribution of goals scored for `lam=1.4`, computed for values of `k` from 0 to 9. ###Code import numpy as np from utils import make_poisson_pmf lam = 1.4 goals = np.arange(10) pmf_goals = make_poisson_pmf(lam, goals) ###Output _____no_output_____ ###Markdown And here's what it looks like. ###Code from utils import decorate def decorate_goals(title=''): decorate(xlabel='Number of goals', ylabel='PMF', title=title) pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$') decorate_goals('Distribution of goals scored') ###Output _____no_output_____ ###Markdown The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.Values above 7 are negligible.This distribution shows that if we know the goal scoring rate, we can predict the number of goals.Now let's turn it around: given a number of goals, what can we say about the goal-scoring rate?To answer that, we need to think about the prior distribution of `lam`, which represents the range of possible values and their probabilities before we see the score. The priorIf you have ever seen a soccer game, you have some information about `lam`. In most games, teams score a few goals each. 
In rare cases, a team might score more than 5 goals, but they almost never score more than 10.Using [data from previous World Cups](https://www.statista.com/statistics/269031/goals-scored-per-game-at-the-fifa-world-cup-since-1930/), I estimate that each team scores about 1.4 goals per game, on average. So I'll set the mean of `lam` to be 1.4.For a good team against a bad one, we expect `lam` to be higher; for a bad team against a good one, we expect it to be lower. To model the distribution of goal-scoring rates, I'll use a [gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which I chose because:1. The goal scoring rate is continuous and non-negative, and the gamma distribution is appropriate for this kind of quantity.2. The gamma distribution has only one parameter, `alpha`, which is the mean. So it's easy to construct a gamma distribution with the mean we want.3. As we'll see, the shape of the gamma distribution is a reasonable choice, given what we know about soccer.And there's one more reason, which I will reveal in Chapter xxx.SciPy provides `gamma`, which creates an object that represents a gamma distribution.And the `gamma` object provides provides `pdf`, which evaluates the **probability density function** (PDF) of the gamma distribution.Here's how we use it. ###Code from scipy.stats import gamma alpha = 1.4 qs = np.linspace(0, 10, 101) ps = gamma(alpha).pdf(qs) ###Output _____no_output_____ ###Markdown The parameter, `alpha`, is the mean of the distribution.The `qs` are possible values of `lam` from 0 to 10.The `ps` are **probability densities**, which we can think of as unnormalized probabilities.To normalize them, we can put them in a `Pmf` and call `normalize`: ###Code from empiricaldist import Pmf prior = Pmf(ps, qs) prior.normalize() ###Output _____no_output_____ ###Markdown The result is a discrete approximation of a gamma distribution.Here's what it looks like. ###Code def decorate_rate(title=''): decorate(xlabel='Goal scoring rate (lam)', ylabel='PMF', title=title) prior.plot(label='prior', color='C5') decorate_rate(r'Prior distribution of $\lambda$') ###Output _____no_output_____ ###Markdown This distribution represents our prior knowledge about goal scoring: `lam` is usually less than 2, occasionally as high as 6, and seldom higher than that. And we can confirm that the mean is about 1.4. ###Code prior.mean() ###Output _____no_output_____ ###Markdown As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update. The updateSuppose you are given the goal-scoring rate, $\lambda$, and asked to compute the probability of scoring a number of goals, $k$. That is precisely the question we answered by computing the Poisson distribution:$ f(k; \lambda) = \lambda^k \exp(-\lambda) ~/~ k! $For example, if $\lambda$ is 1.4, the probability of scoring 4 goals in a game is: ###Code lam = 1.4 k = 4 poisson(lam).pmf(4) ###Output _____no_output_____ ###Markdown Now suppose we are have an array of possible values for $\lambda$, like this: ###Code lams = prior.qs ###Output _____no_output_____ ###Markdown We can compute the likelihood of the data for each hypothetical value of lam, like this: ###Code k = 4 likelihood = poisson(lams).pmf(k) ###Output _____no_output_____ ###Markdown And that's all we need to do the update.To get the posterior distribution, we multiply the prior by the likelihoods we just computed and normalize the result.The following function encapsulates these steps. 
###Code def update_poisson(pmf, data): """Update the PMF with a Poisson likelihood pmf: Series that represents the prior data: integer number of goals returns: posterior """ k = data lams = pmf.qs likelihood = poisson(lams).pmf(k) pmf *= likelihood pmf.normalize() ###Output _____no_output_____ ###Markdown The first parameter is the prior; the second is the number of goals.In the example, France scored 4 goals, so I'll make a copy of the prior and update it with the data. ###Code france = prior.copy() update_poisson(france, 4) ###Output _____no_output_____ ###Markdown Here's what the posterior distribution looks like, along with the prior. ###Code prior.plot(label='prior', color='C5') france.plot(label='France posterior', color='C3') decorate_rate('Posterior distribution for France') ###Output _____no_output_____ ###Markdown The data, `k=4`, makes us think higher values of `lam` are more likely and lower values are less likely. So the posterior distribution is shifted to the right. Let's do the same for Croatia: ###Code croatia = prior.copy() update_poisson(croatia, 2) ###Output _____no_output_____ ###Markdown And here are the results. ###Code prior.plot(label='prior', color='C5') croatia.plot(label='Croatia posterior', color='C0') decorate_rate('Posterior distribution for Croatia') ###Output _____no_output_____ ###Markdown Here are the posterior means for these distributions. ###Code prior.mean(), croatia.mean(), france.mean() ###Output _____no_output_____ ###Markdown The mean of the prior distribution is about 1.4.After Croatia scores 2 goals, their posterior mean is 1.7, which is near the midpoint of the prior and the data.Likewise after France scores 4 goals, their posterior mean is 2.7.These results are typical of a Bayesian update: the location of the posterior distribution is a compromise between the prior and the data. Probability of superiorityNow that we have a posterior distribution for each team, we can answer the first question: How confident should we be that France is the better team?In the model, "better" means having a higher goal-scoring rate against the opponent. We can use the posterior distributions to compute the probability that a random value drawn from France's distribution exceeds a value drawn from Croatia's.One way to do that is to enumerate all pairs of values from the two distributions, adding up the total probability that one value exceeds the other. ###Code def prob_gt(pmf1, pmf2): """Compute the probability of superiority. pmf1: Pmf object pmf2: Pmf object returns: float probability """ total = 0 for q1, p1 in pmf1.items(): for q2, p2 in pmf2.items(): if q1 > q2: total += p1 * p2 return total ###Output _____no_output_____ ###Markdown This is similar to the method we use in Chapter xxx to compute the distribution of a sum.Here's how we use it: ###Code prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown `Pmf` provides a function that does the same thing. ###Code Pmf.prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown The results are slightly different because `Pmf.prob_gt` uses array operators rather than `for` loops.Either way, the result is close to 75%. 
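If you are curious what an array-based version might look like, here is one way to do the same computation with outer products. This is a sketch of the idea, not necessarily the code `Pmf.prob_gt` actually uses.
###Code
# Probability of superiority with array operations instead of nested loops
# (a sketch of the idea, not necessarily Pmf.prob_gt's implementation).
def prob_gt_outer(pmf1, pmf2):
    greater = np.subtract.outer(pmf1.qs, pmf2.qs) > 0   # True where q1 > q2
    joint = np.multiply.outer(pmf1.ps, pmf2.ps)         # p1 * p2 for each pair
    return joint[greater].sum()

prob_gt_outer(france, croatia)
###Output
_____no_output_____
###Markdown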
So, on the basis of one game, we have moderate confidence that France is actually the better team.Of course, we should remember that this result is based on the assumption that the goal-scoring rate is constant.In reality, if a team is down by one goal, they might play more aggressively toward the end of the game, making them more likely to score, but also more likely to give up an additional goal.As always, the results are only as good as the model. Predicting the rematchNow we can take on the second question: If the same teams played again, what is the chance Croatia would win?To answer this question, we'll generate the "posterior predictive distribution", which is the number of goals we expect a team to score.If we knew the goal scoring rate, `lam`, the distribution of goals would be a Poisson distribution with parameter `lam`.Since we don't know `lam`, the distribution of goals is a mixture of a Poisson distributions with different values of `lam`.First I'll generate a sequence of `Pmf` objects, one for each value of `lam`. ###Code pmf_seq = [make_poisson_pmf(lam, goals) for lam in prior.qs] ###Output _____no_output_____ ###Markdown The following figure shows what these distributions look like for a few values of `lam`. ###Code import matplotlib.pyplot as plt for i, index in enumerate([10, 20, 30, 40]): plt.subplot(2, 2, i+1) lam = prior.qs[index] pmf = pmf_seq[index] pmf.bar(label=f'$\lambda$ = {lam}', color='C3') decorate_goals() ###Output _____no_output_____ ###Markdown The predictive distribution is a mixture of these `Pmf` objects, weighted with the posterior probabilities.We can use `make_mixture` from Chapter xxx to compute this mixture. ###Code from utils import make_mixture pred_france = make_mixture(france, pmf_seq) ###Output _____no_output_____ ###Markdown Here's the predictive distribution for the number of goals France would score in a rematch. ###Code pred_france.bar(color='C3', label='France') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown This distribution represents two sources of uncertainty: we don't know the actual value of `lam`, and even if we did, we would not know the number of goals in the next game.Here's the predictive distribution for Croatia. ###Code pred_croatia = make_mixture(croatia, pmf_seq) pred_croatia.bar(color='C0', label='Croatia') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown We can use these distributions to compute the probability that France wins, loses, or ties the rematch. ###Code win = Pmf.prob_gt(pred_france, pred_croatia) win lose = Pmf.prob_lt(pred_france, pred_croatia) lose tie = Pmf.prob_eq(pred_france, pred_croatia) tie ###Output _____no_output_____ ###Markdown Assuming that France wins half of the ties, their chance of winning the rematch is about 65%. ###Code win + tie/2 ###Output _____no_output_____ ###Markdown This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.Even if France is the better team, they might lose the game. The Exponential DistributionAs an exercise at the end of this notebook, you'll have a chance to work on the following variation on the World Cup Problem:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. 
At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?In this version, notice that the data is not the number of goals in a fixed period of time, but the time between goals.To compute the likelihood of data like this, we can take advantage of the theory of Poisson processes again. If each team has a constant goal-scoring rate, we expect the time between goals to follow an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).If the goal-scoring rate is $\lambda$, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$$\lambda~\exp(-\lambda t)$$Because $t$ is a continuous quantity, the value of this expression is not a probability; it is a probability density. However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.SciPy provides `expon`, which creates an object that represents an exponential distribution.However, it does not take `lam` as a parameter in the way you might expect, which makes it awkward to work with.Since the PDF of the exponential distribution is so easy to evaluate, I'll use my own function. ###Code def expo_pdf(t, lam): """Compute the PDF of the exponential distribution. lam: time λ: rate returns: probability density """ return lam * np.exp(-lam * t) ###Output _____no_output_____ ###Markdown To see what the exponential distribution looks like, let's assume again that `lam` is 1.4; we can compute the distribution of $t$ like this: ###Code lam = 1.4 qs = np.linspace(0, 4, 101) ps = expo_pdf(qs, lam) pmf_time = Pmf(ps, qs) pmf_time.normalize() ###Output _____no_output_____ ###Markdown And here's what it looks like: ###Code def decorate_time(title=''): decorate(xlabel='Time between goals (games)', ylabel='PMF', title=title) pmf_time.plot(label='exponential with $\lambda$ = 1.4') decorate_time('Distribution of time between goals') ###Output _____no_output_____ ###Markdown It is counterintuitive, but true if the distribution is exponential, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.With a goal-scoring rate of 1.4, it is possible that a team will take more than one game to score a goal, but it is unlikely that they will take more than two games. SummaryThis chapter introduces three new distributions, so it can be hard to keep them straight.Let's review:* If a system satisfies the assumptions of a Poisson model, the number of events in a period of time follows a Poisson distribution, which is a discrete distribution with integer quantities from 0 to infinity. In practice, we can usually ignore low-probability quantities above a finite limit.* Also under the Poisson model, the interval between events follows an exponential distribution, which is a continuous distribution with quantities from 0 to infinity. Because it is continuous, it is described by a probability density function (PDF) rather than a probability mass function (PMF). 
But when we use an exponential distribution to compute the likelihood of the data, we can treat densities as unnormalized probabilities.* The Poisson and exponential distributions are parameterized by an event rate, denoted $\lambda$ or `lam`.* For the prior distribution of $\lambda$, I used a gamma distribution, which is a continuous distribution with quantities from 0 to infinity, but I approximated it with a discrete, bounded PMF. The gamma distribution has one parameter, denoted $\alpha$ or `alpha`, which is also its mean.I chose the gamma distribution because the shape is consistent with our background knowledge about goal-scoring rates.There are other distributions we could have used; however, we will see in Chapter XX that the gamma distribution can be a particularly good choice.But we have a few things to do before we get there, starting with these exercises. Exercises **Exercise:** Let's finish off the exercise we started:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?Here are the steps I recommend:1. Starting with the same gamma prior we used in the previous problem, compute the likelihood of scoring a goal after 11 minutes for each possible value of `lam`. Don't forget to convert all times into games rather than minutes.2. Compute the posterior distribution of `lam` for Germany after the first goal.3. Compute the likelihood of scoring another goal after 12 more minutes and do another update. Plot the prior, posterior after one goal, and posterior after two goals.4. Compute the posterior predictive distribution of goals Germany might score during the remaining time in the game, `90-23` minutes. Note: you will have to think about how to generate predicted goals for a fraction of a game.5. Compute the probability of scoring 5 or more goals during the remaining time. ###Code # Solution # Here's a function that updates the distribution of lam # with the given time between goals def update_expo(pmf, data): """Update based on an observed interval pmf: prior PMF data: time between goals in minutes """ t = data / 90 lams = pmf.qs likelihood = expo_pdf(t, lams) pmf *= likelihood pmf.normalize() # Solution # Here are the updates for the first and second goals germany = prior.copy() update_expo(germany, 11) germany2 = germany.copy() update_expo(germany2, 12) # Solution # Here are the mean values of `lam` after each update germany.mean(), germany2.mean() # Solution # Here's what the posterior distributions look like prior.plot(color='C5', label='Prior') germany.plot(color='C3', label='Posterior after 1 goal') germany2.plot(color='C16', label='Posterior after 2 goals') decorate_rate('Prior and posterior distributions') # Solution # Here's the predictive distribution for each possible value of `lam` t = (90-23) / 90 pmf_seq = [make_poisson_pmf(lam*t, goals) for lam in germany.qs] # Solution # And here's the mixture of predictive distributions, # weighted by the probabilities in the posterior distribution. 
pmf_germany = make_mixture(germany2, pmf_seq)

# Solution

# Here's what the predictive distribution looks like

pmf_germany.bar(color='C1', label='germany')
decorate_goals('Posterior predictive distribution')

# Solution

# Here's the probability of scoring exactly 5 more goals

pmf_germany[5]

# Solution

# And the probability of 5 or more

pmf_germany.prob_ge(5)

###Output
_____no_output_____
###Markdown
**Exercise:** Returning to the first version of the World Cup Problem, suppose France and Croatia play a rematch. What is the probability that France scores first?

Hint: Compute the posterior predictive distribution for the time until the first goal by making a mixture of exponential distributions. You can use the following function to make a PMF that approximates an exponential distribution.

###Code
def make_expo_pmf(lam, high):
    """Make a PMF of an exponential distribution.

    lam: event rate
    high: upper bound on the interval `t`

    returns: Pmf of the interval between events
    """
    qs = np.linspace(0, high, 101)
    ps = expo_pdf(qs, lam)
    pmf = Pmf(ps, qs)
    pmf.normalize()
    return pmf

# Solution

# Here are the predictive distributions for the
# time until the first goal

pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs]

# Solution

# And here are the mixtures based on the two posterior distributions

pred_france = make_mixture(france, pmf_seq)
pred_croatia = make_mixture(croatia, pmf_seq)

# Solution

# Here's what the posterior predictive distributions look like

pred_france.plot(label='France', color='C3')
pred_croatia.plot(label='Croatia', color='C0')

decorate_time('Posterior predictive distribution')

# Solution

# And here's the probability France scores first

Pmf.prob_lt(pred_france, pred_croatia)

###Output
_____no_output_____
###Markdown
**Exercise:** In the 2010-11 National Hockey League (NHL) Finals, my beloved Boston Bruins played a best-of-seven championship series against the despised Vancouver Canucks. Boston lost the first two games 0-1 and 2-3, then won the next two games 8-1 and 4-0. At this point in the series, what is the probability that Boston will win the next game, and what is their probability of winning the championship?

To choose a prior distribution, I got some statistics from http://www.nhl.com, specifically the average goals per game for each team in the 2010-11 season. The distribution is well modeled by a gamma distribution with mean 2.8.

In what ways do you think the outcome of these games might violate the assumptions of the Poisson model? How would these violations affect your predictions?

###Code
# Solution

# When a team is winning or losing by an insurmountable margin,
# they might remove their best players from the game, which
# would affect their goal-scoring rate, violating the assumption
# that the goal scoring rate is constant.

# In this example, Boston won the third game 8-1, but scoring
# eight goals in a game might not reflect their true long-term
# goal-scoring rate.

# As a result, the analysis below might overestimate the chance
# that Boston wins.

# As it turned out, they did not. 
# Solution

from scipy.stats import gamma

alpha = 2.8
qs = np.linspace(0, 15, 101)
ps = gamma.pdf(qs, alpha)

prior_hockey = Pmf(ps, qs)
prior_hockey.normalize()

# Solution

prior_hockey.plot(color='C5')
decorate_rate('Prior distribution for hockey')
prior_hockey.mean()

# Solution

bruins = prior_hockey.copy()
for data in [0, 2, 8, 4]:
    update_poisson(bruins, data)

bruins.mean()

# Solution

canucks = prior_hockey.copy()
for data in [1, 3, 1, 0]:
    update_poisson(canucks, data)

canucks.mean()

# Solution

canucks.plot(label='Canucks')
bruins.plot(label='Bruins')
decorate_rate('Posterior distributions')

# Solution

goals = np.arange(15)
pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs]

# Solution

pred_bruins = make_mixture(bruins, pmf_seq)
pred_bruins.bar(label='Bruins', color='C1')
decorate_goals('Posterior predictive distribution')

# Solution

pred_canucks = make_mixture(canucks, pmf_seq)
pred_canucks.bar(label='Canucks')
decorate_goals('Posterior predictive distribution')

# Solution

win = Pmf.prob_gt(pred_bruins, pred_canucks)
lose = Pmf.prob_lt(pred_bruins, pred_canucks)
tie = Pmf.prob_eq(pred_bruins, pred_canucks)
win, lose, tie

# Solution

# Assuming the Bruins win half of the ties,
# their chance of winning the next game is...

p = win + tie/2
p

# Solution

# Their chance of winning the series is their
# chance of winning k=2 or k=3 of the remaining
# n=3 games.

from scipy.stats import binom

n = 3
a = binom.pmf([2,3], n, p)
a.sum()

###Output
_____no_output_____
###Markdown
Poisson Processes

Think Bayes, Second Edition

Copyright 2020 Allen B. Downey

License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)

###Code
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/

import sys
IN_COLAB = 'google.colab' in sys.modules

if IN_COLAB:
    !pip install empiricaldist

# Get utils.py

import os

if not os.path.exists('utils.py'):
    !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py

from utils import set_pyplot_params
set_pyplot_params()

###Output
_____no_output_____
###Markdown
This chapter introduces the [Poisson process](https://en.wikipedia.org/wiki/Poisson_point_process), which is a model used to describe events that occur at random intervals.
As an example of a Poisson process, we'll model goal-scoring in soccer, which is American English for the game everyone else calls "football".
We'll use goals scored in a game to estimate the parameter of a Poisson process; then we'll use the posterior distribution to make predictions.
And we'll solve The World Cup Problem.

The World Cup Problem

In the 2018 FIFA World Cup final, France defeated Croatia 4 goals to 2. Based on this outcome:

1. How confident should we be that France is the better team?

2. If the same teams played again, what is the chance France would win again?

To answer these questions, we have to make some modeling decisions.

* First, I'll assume that for any team against another team there is some unknown goal-scoring rate, measured in goals per game, which I'll denote with the Python variable `lam` or the Greek letter $\lambda$, pronounced "lambda".

* Second, I'll assume that a goal is equally likely during any minute of a game. 
So, in a 90 minute game, the probability of scoring during any minute is $\lambda/90$.* Third, I'll assume that a team never scores twice during the same minute.Of course, none of these assumptions is completely true in the real world, but I think they are reasonable simplifications.As George Box said, "All models are wrong; some are useful."(https://en.wikipedia.org/wiki/All_models_are_wrong).In this case, the model is useful because if these assumptions are true, at least roughly, the number of goals scored in a game follows a Poisson distribution, at least roughly. The Poisson DistributionIf the number of goals scored in a game follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with a goal-scoring rate, $\lambda$, the probability of scoring $k$ goals is$$\lambda^k \exp(-\lambda) ~/~ k!$$for any non-negative value of $k$.SciPy provides a `poisson` object that represents a Poisson distribution.We can create one with $\lambda=1.4$ like this: ###Code from scipy.stats import poisson lam = 1.4 dist = poisson(lam) type(dist) ###Output _____no_output_____ ###Markdown The result is an object that represents a "frozen" random variable and provides `pmf`, which evaluates the probability mass function of the Poisson distribution. ###Code k = 4 dist.pmf(k) ###Output _____no_output_____ ###Markdown This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.We'll use the following function to make a `Pmf` that represents a Poisson distribution. ###Code from empiricaldist import Pmf def make_poisson_pmf(lam, qs): """Make a Pmf of a Poisson distribution.""" ps = poisson(lam).pmf(qs) pmf = Pmf(ps, qs) pmf.normalize() return pmf ###Output _____no_output_____ ###Markdown `make_poisson_pmf` takes as parameters the goal-scoring rate, `lam`, and an array of quantities, `qs`, where it should evaluate the Poisson PMF. It returns a `Pmf` object.For example, here's the distribution of goals scored for `lam=1.4`, computed for values of `k` from 0 to 9. ###Code import numpy as np lam = 1.4 goals = np.arange(10) pmf_goals = make_poisson_pmf(lam, goals) ###Output _____no_output_____ ###Markdown And here's what it looks like. ###Code from utils import decorate def decorate_goals(title=''): decorate(xlabel='Number of goals', ylabel='PMF', title=title) pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$') decorate_goals('Distribution of goals scored') ###Output _____no_output_____ ###Markdown The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.Values above 7 are negligible.This distribution shows that if we know the goal scoring rate, we can predict the number of goals.Now let's turn it around: given a number of goals, what can we say about the goal-scoring rate?To answer that, we need to think about the prior distribution of `lam`, which represents the range of possible values and their probabilities before we see the score. The Gamma DistributionIf you have ever seen a soccer game, you have some information about `lam`. In most games, teams score a few goals each. In rare cases, a team might score more than 5 goals, but they almost never score more than 10.Using [data from previous World Cups](https://www.statista.com/statistics/269031/goals-scored-per-game-at-the-fifa-world-cup-since-1930/), I estimate that each team scores about 1.4 goals per game, on average. 
So I'll set the mean of `lam` to be 1.4.For a good team against a bad one, we expect `lam` to be higher; for a bad team against a good one, we expect it to be lower. To model the distribution of goal-scoring rates, I'll use a [gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which I chose because:1. The goal scoring rate is continuous and non-negative, and the gamma distribution is appropriate for this kind of quantity.2. The gamma distribution has only one parameter, `alpha`, which is the mean. So it's easy to construct a gamma distribution with the mean we want.3. As we'll see, the shape of the gamma distribution is a reasonable choice, given what we know about soccer.And there's one more reason, which I will reveal in >.SciPy provides `gamma`, which creates an object that represents a gamma distribution.And the `gamma` object provides provides `pdf`, which evaluates the **probability density function** (PDF) of the gamma distribution.Here's how we use it. ###Code from scipy.stats import gamma alpha = 1.4 qs = np.linspace(0, 10, 101) ps = gamma(alpha).pdf(qs) ###Output _____no_output_____ ###Markdown The parameter, `alpha`, is the mean of the distribution.The `qs` are possible values of `lam` between 0 and 10.The `ps` are **probability densities**, which we can think of as unnormalized probabilities.To normalize them, we can put them in a `Pmf` and call `normalize`: ###Code from empiricaldist import Pmf prior = Pmf(ps, qs) prior.normalize() ###Output _____no_output_____ ###Markdown The result is a discrete approximation of a gamma distribution.Here's what it looks like. ###Code def decorate_rate(title=''): decorate(xlabel='Goal scoring rate (lam)', ylabel='PMF', title=title) prior.plot(label='prior', color='C5') decorate_rate(r'Prior distribution of $\lambda$') ###Output _____no_output_____ ###Markdown This distribution represents our prior knowledge about goal scoring: `lam` is usually less than 2, occasionally as high as 6, and seldom higher than that. And we can confirm that the mean is about 1.4. ###Code prior.mean() ###Output _____no_output_____ ###Markdown As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update. The UpdateSuppose you are given the goal-scoring rate, $\lambda$, and asked to compute the probability of scoring a number of goals, $k$. That is precisely the question we answered by computing the Poisson PMF.For example, if $\lambda$ is 1.4, the probability of scoring 4 goals in a game is: ###Code lam = 1.4 k = 4 poisson(lam).pmf(4) ###Output _____no_output_____ ###Markdown Now suppose we are have an array of possible values for $\lambda$; we can compute the likelihood of the data for each hypothetical value of lam, like this: ###Code lams = prior.qs k = 4 likelihood = poisson(lams).pmf(k) ###Output _____no_output_____ ###Markdown And that's all we need to do the update.To get the posterior distribution, we multiply the prior by the likelihoods we just computed and normalize the result.The following function encapsulates these steps. ###Code def update_poisson(pmf, data): """Update Pmf with a Poisson likelihood.""" k = data lams = pmf.qs likelihood = poisson(lams).pmf(k) pmf *= likelihood pmf.normalize() ###Output _____no_output_____ ###Markdown The first parameter is the prior; the second is the number of goals.In the example, France scored 4 goals, so I'll make a copy of the prior and update it with the data. 
###Code france = prior.copy() update_poisson(france, 4) ###Output _____no_output_____ ###Markdown Here's what the posterior distribution looks like, along with the prior. ###Code prior.plot(label='prior', color='C5') france.plot(label='France posterior', color='C3') decorate_rate('Posterior distribution for France') ###Output _____no_output_____ ###Markdown The data, `k=4`, makes us think higher values of `lam` are more likely and lower values are less likely. So the posterior distribution is shifted to the right.Let's do the same for Croatia: ###Code croatia = prior.copy() update_poisson(croatia, 2) ###Output _____no_output_____ ###Markdown And here are the results. ###Code prior.plot(label='prior', color='C5') croatia.plot(label='Croatia posterior', color='C0') decorate_rate('Posterior distribution for Croatia') ###Output _____no_output_____ ###Markdown Here are the posterior means for these distributions. ###Code print(croatia.mean(), france.mean()) ###Output 1.6999765866755225 2.699772393342308 ###Markdown The mean of the prior distribution is about 1.4.After Croatia scores 2 goals, their posterior mean is 1.7, which is near the midpoint of the prior and the data.Likewise after France scores 4 goals, their posterior mean is 2.7.These results are typical of a Bayesian update: the location of the posterior distribution is a compromise between the prior and the data. Probability of SuperiorityNow that we have a posterior distribution for each team, we can answer the first question: How confident should we be that France is the better team?In the model, "better" means having a higher goal-scoring rate against the opponent. We can use the posterior distributions to compute the probability that a random value drawn from France's distribution exceeds a value drawn from Croatia's.One way to do that is to enumerate all pairs of values from the two distributions, adding up the total probability that one value exceeds the other. ###Code def prob_gt(pmf1, pmf2): """Compute the probability of superiority.""" total = 0 for q1, p1 in pmf1.items(): for q2, p2 in pmf2.items(): if q1 > q2: total += p1 * p2 return total ###Output _____no_output_____ ###Markdown This is similar to the method we use in > to compute the distribution of a sum.Here's how we use it: ###Code prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown `Pmf` provides a function that does the same thing. ###Code Pmf.prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown The results are slightly different because `Pmf.prob_gt` uses array operators rather than `for` loops.Either way, the result is close to 75%. So, on the basis of one game, we have moderate confidence that France is actually the better team.Of course, we should remember that this result is based on the assumption that the goal-scoring rate is constant.In reality, if a team is down by one goal, they might play more aggressively toward the end of the game, making them more likely to score, but also more likely to give up an additional goal.As always, the results are only as good as the model. 
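As a sanity check, we could also estimate this probability by drawing random samples of `lam` from the two posterior distributions and counting how often France's rate comes out higher. Here is a minimal sketch of that idea; it assumes that `Pmf` exposes its probabilities as `ps`, to match the `qs` we have been using.

###Code
# Minimal sketch: estimate the probability of superiority by simulation.
# Assumes `Pmf` exposes its quantities as `qs` and probabilities as `ps`.
n = 100_000
sample_france = np.random.choice(france.qs, size=n, p=france.ps)
sample_croatia = np.random.choice(croatia.qs, size=n, p=croatia.ps)

# Fraction of samples where France's goal-scoring rate is higher;
# it should be close to the exact result, about 75%.
np.mean(sample_france > sample_croatia)
###Output
_____no_output_____
###Markdown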
Predicting the RematchNow we can take on the second question: If the same teams played again, what is the chance Croatia would win?To answer this question, we'll generate the "posterior predictive distribution", which is the number of goals we expect a team to score.If we knew the goal scoring rate, `lam`, the distribution of goals would be a Poisson distribution with parameter `lam`.Since we don't know `lam`, the distribution of goals is a mixture of a Poisson distributions with different values of `lam`.First I'll generate a sequence of `Pmf` objects, one for each value of `lam`. ###Code pmf_seq = [make_poisson_pmf(lam, goals) for lam in prior.qs] ###Output _____no_output_____ ###Markdown The following figure shows what these distributions look like for a few values of `lam`. ###Code import matplotlib.pyplot as plt for i, index in enumerate([10, 20, 30, 40]): plt.subplot(2, 2, i+1) lam = prior.qs[index] pmf = pmf_seq[index] pmf.bar(label=f'$\lambda$ = {lam}', color='C3') decorate_goals() ###Output _____no_output_____ ###Markdown The predictive distribution is a mixture of these `Pmf` objects, weighted with the posterior probabilities.We can use `make_mixture` from > to compute this mixture. ###Code from utils import make_mixture pred_france = make_mixture(france, pmf_seq) ###Output _____no_output_____ ###Markdown Here's the predictive distribution for the number of goals France would score in a rematch. ###Code pred_france.bar(color='C3', label='France') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown This distribution represents two sources of uncertainty: we don't know the actual value of `lam`, and even if we did, we would not know the number of goals in the next game.Here's the predictive distribution for Croatia. ###Code pred_croatia = make_mixture(croatia, pmf_seq) pred_croatia.bar(color='C0', label='Croatia') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown We can use these distributions to compute the probability that France wins, loses, or ties the rematch. ###Code win = Pmf.prob_gt(pred_france, pred_croatia) win lose = Pmf.prob_lt(pred_france, pred_croatia) lose tie = Pmf.prob_eq(pred_france, pred_croatia) tie ###Output _____no_output_____ ###Markdown Assuming that France wins half of the ties, their chance of winning the rematch is about 65%. ###Code win + tie/2 ###Output _____no_output_____ ###Markdown This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.Even if France is the better team, they might lose the game. The Exponential DistributionAs an exercise at the end of this notebook, you'll have a chance to work on the following variation on the World Cup Problem:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?In this version, notice that the data is not the number of goals in a fixed period of time, but the time between goals.To compute the likelihood of data like this, we can take advantage of the theory of Poisson processes again. 
If each team has a constant goal-scoring rate, we expect the time between goals to follow an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).If the goal-scoring rate is $\lambda$, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$$\lambda \exp(-\lambda t)$$Because $t$ is a continuous quantity, the value of this expression is not a probability; it is a probability density. However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.SciPy provides `expon`, which creates an object that represents an exponential distribution.However, it does not take `lam` as a parameter in the way you might expect, which makes it awkward to work with.Since the PDF of the exponential distribution is so easy to evaluate, I'll use my own function. ###Code def expo_pdf(t, lam): """Compute the PDF of the exponential distribution.""" return lam * np.exp(-lam * t) ###Output _____no_output_____ ###Markdown To see what the exponential distribution looks like, let's assume again that `lam` is 1.4; we can compute the distribution of $t$ like this: ###Code lam = 1.4 qs = np.linspace(0, 4, 101) ps = expo_pdf(qs, lam) pmf_time = Pmf(ps, qs) pmf_time.normalize() ###Output _____no_output_____ ###Markdown And here's what it looks like: ###Code def decorate_time(title=''): decorate(xlabel='Time between goals (games)', ylabel='PMF', title=title) pmf_time.plot(label='exponential with $\lambda$ = 1.4') decorate_time('Distribution of time between goals') ###Output _____no_output_____ ###Markdown It is counterintuitive, but true, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.With a goal-scoring rate of 1.4, it is possible that a team will take more than one game to score a goal, but it is unlikely that they will take more than two games. SummaryThis chapter introduces three new distributions, so it can be hard to keep them straight.Let's review:* If a system satisfies the assumptions of a Poisson model, the number of events in a period of time follows a Poisson distribution, which is a discrete distribution with integer quantities from 0 to infinity. In practice, we can usually ignore low-probability quantities above a finite limit.* Also under the Poisson model, the interval between events follows an exponential distribution, which is a continuous distribution with quantities from 0 to infinity. Because it is continuous, it is described by a probability density function (PDF) rather than a probability mass function (PMF). But when we use an exponential distribution to compute the likelihood of the data, we can treat densities as unnormalized probabilities.* The Poisson and exponential distributions are parameterized by an event rate, denoted $\lambda$ or `lam`.* For the prior distribution of $\lambda$, I used a gamma distribution, which is a continuous distribution with quantities from 0 to infinity, but I approximated it with a discrete, bounded PMF. The gamma distribution has one parameter, denoted $\alpha$ or `alpha`, which is also its mean.I chose the gamma distribution because the shape is consistent with our background knowledge about goal-scoring rates.There are other distributions we could have used; however, we will see in > that the gamma distribution can be a particularly good choice.But we have a few things to do before we get there, starting with these exercises. 
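As a way to see how these pieces fit together, here is a minimal simulation sketch that uses only NumPy: if the gaps between goals are exponential with rate `lam`, the number of goals that land in a single game follows a Poisson distribution with mean `lam`.

###Code
# Minimal sketch, assuming only NumPy: draw exponential gaps with
# rate `lam` and count how many goals fall inside one game.
rng = np.random.default_rng(17)
lam = 1.4

def goals_in_one_game(lam, rng):
    """Count events in one game of a Poisson process with rate lam."""
    t = 0.0
    count = 0
    while True:
        t += rng.exponential(scale=1/lam)  # gap between goals, in games
        if t > 1:
            return count
        count += 1

counts = [goals_in_one_game(lam, rng) for _ in range(10_000)]

# The sample mean should be close to lam, and the distribution of
# counts should match the Poisson PMF plotted earlier.
np.mean(counts)
###Output
_____no_output_____
###Markdown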
Exercises **Exercise:** Let's finish the exercise we started:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)? Here are the steps I recommend:1. Starting with the same gamma prior we used in the previous problem, compute the likelihood of scoring a goal after 11 minutes for each possible value of `lam`. Don't forget to convert all times into games rather than minutes.2. Compute the posterior distribution of `lam` for Germany after the first goal.3. Compute the likelihood of scoring another goal after 12 more minutes and do another update. Plot the prior, posterior after one goal, and posterior after two goals.4. Compute the posterior predictive distribution of goals Germany might score during the remaining time in the game, `90-23` minutes. Note: you will have to think about how to generate predicted goals for a fraction of a game.5. Compute the probability of scoring 5 or more goals during the remaining time. ###Code # Solution # Here's a function that updates the distribution of lam # with the given time between goals def update_expo(pmf, data): """Update based on an observed interval pmf: prior PMF data: time between goals in minutes """ t = data / 90 lams = pmf.qs likelihood = expo_pdf(t, lams) pmf *= likelihood pmf.normalize() # Solution # Here are the updates for the first and second goals germany = prior.copy() update_expo(germany, 11) germany2 = germany.copy() update_expo(germany2, 12) # Solution # Here are the mean values of `lam` after each update germany.mean(), germany2.mean() # Solution # Here's what the posterior distributions look like prior.plot(color='C5', label='Prior') germany.plot(color='C3', label='Posterior after 1 goal') germany2.plot(color='C16', label='Posterior after 2 goals') decorate_rate('Prior and posterior distributions') # Solution # Here's the predictive distribution for each possible value of `lam` t = (90-23) / 90 pmf_seq = [make_poisson_pmf(lam*t, goals) for lam in germany.qs] # Solution # And here's the mixture of predictive distributions, # weighted by the probabilities in the posterior distribution. pmf_germany = make_mixture(germany, pmf_seq) # Solution # Here's what the predictive distribution looks like pmf_germany.bar(color='C1', label='germany') decorate_goals('Posterior predictive distribution') # Solution # Here's the probability of scoring exactly 5 more goals pmf_germany[5] # Solution # And the probability of 5 or more pmf_germany.prob_ge(5) ###Output _____no_output_____ ###Markdown **Exercise:** Returning to the first version of the World Cup Problem. Suppose France and Croatia play a rematch. What is the probability that France scores first? Hint: Compute the posterior predictive distribution for the time until the first goal by making a mixture of exponential distributions. You can use the following function to make a PMF that approximates an exponential distribution. ###Code def make_expo_pmf(lam, high): """Make a PMF of an exponential distribution. 
lam: event rate high: upper bound on the interval `t` returns: Pmf of the interval between events """ qs = np.linspace(0, high, 101) ps = expo_pdf(qs, lam) pmf = Pmf(ps, qs) pmf.normalize() return pmf # Solution # Here are the predictive distributions for the # time until the first goal pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs] # Solution # And here are the mixtures based on the two posterior distributions pred_france = make_mixture(france, pmf_seq) pred_croatia = make_mixture(croatia, pmf_seq) # Solution # Here's what the posterior predictive distributions look like pred_france.plot(label='France', color='C3') pred_croatia.plot(label='Croatia', color='C0') decorate_time('Posterior predictive distribution') # Solution # And here's the probability France scores first Pmf.prob_lt(pred_france, pred_croatia) ###Output _____no_output_____ ###Markdown **Exercise:** In the 2010-11 National Hockey League (NHL) Finals, my beloved BostonBruins played a best-of-seven championship series against the despisedVancouver Canucks. Boston lost the first two games 0-1 and 2-3, thenwon the next two games 8-1 and 4-0. At this point in the series, whatis the probability that Boston will win the next game, and what istheir probability of winning the championship?To choose a prior distribution, I got some statistics fromhttp://www.nhl.com, specifically the average goals per gamefor each team in the 2010-11 season. The distribution is well modeled by a gamma distribution with mean 2.8.In what ways do you think the outcome of these games might violate the assumptions of the Poisson model? How would these violations affect your predictions? ###Code # Solution # When a team is winning or losing by an insurmountable margin, # they might remove their best players from the game, which # would affect their goal-scoring rate, violating the assumption # that the goal scoring rate is constant. # In this example, Boston won the third game 8-1, but scoring # eight goals in a game might not reflect their true long-term # goal-scoring rate. # As a result, the analysis below might overestimate the chance # that Boston wins. # As it turned out, they did not. # Solution from scipy.stats import gamma alpha = 2.8 qs = np.linspace(0, 15, 101) ps = gamma.pdf(qs, alpha) prior_hockey = Pmf(ps, qs) prior_hockey.normalize() # Solution prior_hockey.plot(color='C5') decorate_rate('Prior distribution for hockey') prior_hockey.mean() # Solution bruins = prior_hockey.copy() for data in [0, 2, 8, 4]: update_poisson(bruins, data) bruins.mean() # Solution canucks = prior_hockey.copy() for data in [1, 3, 1, 0]: update_poisson(canucks, data) canucks.mean() # Solution canucks.plot(label='Canucks') bruins.plot(label='Bruins') decorate_rate('Posterior distributions') # Solution goals = np.arange(15) pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs] # Solution pred_bruins = make_mixture(bruins, pmf_seq) pred_bruins.bar(label='Bruins', color='C1') decorate_goals('Posterior predictive distribution') # Solution pred_canucks = make_mixture(canucks, pmf_seq) pred_canucks.bar(label='Canucks') decorate_goals('Posterior predictive distribution') # Solution win = Pmf.prob_gt(pred_bruins, pred_canucks) lose = Pmf.prob_lt(pred_bruins, pred_canucks) tie = Pmf.prob_eq(pred_bruins, pred_canucks) win, lose, tie # Solution # Assuming the Bruins win half of the ties, # their chance of winning the next game is... 
p = win + lose/2 p # Solution # Their chance of winning the series is their # chance of winning k=2 or k=3 of the remaining # n=3 games. from scipy.stats import binom n = 3 a = binom.pmf([2,3], n, p) a.sum() ###Output _____no_output_____ ###Markdown Poisson Processes Think Bayes, Second EditionCopyright 2020 Allen B. DowneyLicense: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) ###Code # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist # Get utils.py from os.path import basename, exists def download(url): filename = basename(url) if not exists(filename): from urllib.request import urlretrieve local, _ = urlretrieve(url, filename) print('Downloaded ' + local) download('https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py') from utils import set_pyplot_params set_pyplot_params() ###Output _____no_output_____ ###Markdown This chapter introduces the [Poisson process](https://en.wikipedia.org/wiki/Poisson_point_process), which is a model used to describe events that occur at random intervals.As an example of a Poisson process, we'll model goal-scoring in soccer, which is American English for the game everyone else calls "football".We'll use goals scored in a game to estimate the parameter of a Poisson process; then we'll use the posterior distribution to make predictions.And we'll solve The World Cup Problem. The World Cup ProblemIn the 2018 FIFA World Cup final, France defeated Croatia 4 goals to 2. Based on this outcome:1. How confident should we be that France is the better team?2. If the same teams played again, what is the chance France would win again?To answer these questions, we have to make some modeling decisions.* First, I'll assume that for any team against another team there is some unknown goal-scoring rate, measured in goals per game, which I'll denote with the Python variable `lam` or the Greek letter $\lambda$, pronounced "lambda".* Second, I'll assume that a goal is equally likely during any minute of a game. So, in a 90 minute game, the probability of scoring during any minute is $\lambda/90$.* Third, I'll assume that a team never scores twice during the same minute.Of course, none of these assumptions is completely true in the real world, but I think they are reasonable simplifications.As George Box said, "All models are wrong; some are useful."(https://en.wikipedia.org/wiki/All_models_are_wrong).In this case, the model is useful because if these assumptions are true, at least roughly, the number of goals scored in a game follows a Poisson distribution, at least roughly. The Poisson DistributionIf the number of goals scored in a game follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with a goal-scoring rate, $\lambda$, the probability of scoring $k$ goals is$$\lambda^k \exp(-\lambda) ~/~ k!$$for any non-negative value of $k$.SciPy provides a `poisson` object that represents a Poisson distribution.We can create one with $\lambda=1.4$ like this: ###Code from scipy.stats import poisson lam = 1.4 dist = poisson(lam) type(dist) ###Output _____no_output_____ ###Markdown The result is an object that represents a "frozen" random variable and provides `pmf`, which evaluates the probability mass function of the Poisson distribution. 
###Code k = 4 dist.pmf(k) ###Output _____no_output_____ ###Markdown This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.We'll use the following function to make a `Pmf` that represents a Poisson distribution. ###Code from empiricaldist import Pmf def make_poisson_pmf(lam, qs): """Make a Pmf of a Poisson distribution.""" ps = poisson(lam).pmf(qs) pmf = Pmf(ps, qs) pmf.normalize() return pmf ###Output _____no_output_____ ###Markdown `make_poisson_pmf` takes as parameters the goal-scoring rate, `lam`, and an array of quantities, `qs`, where it should evaluate the Poisson PMF. It returns a `Pmf` object.For example, here's the distribution of goals scored for `lam=1.4`, computed for values of `k` from 0 to 9. ###Code import numpy as np lam = 1.4 goals = np.arange(10) pmf_goals = make_poisson_pmf(lam, goals) ###Output _____no_output_____ ###Markdown And here's what it looks like. ###Code from utils import decorate def decorate_goals(title=''): decorate(xlabel='Number of goals', ylabel='PMF', title=title) pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$') decorate_goals('Distribution of goals scored') ###Output _____no_output_____ ###Markdown The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.Values above 7 are negligible.This distribution shows that if we know the goal scoring rate, we can predict the number of goals.Now let's turn it around: given a number of goals, what can we say about the goal-scoring rate?To answer that, we need to think about the prior distribution of `lam`, which represents the range of possible values and their probabilities before we see the score. The Gamma DistributionIf you have ever seen a soccer game, you have some information about `lam`. In most games, teams score a few goals each. In rare cases, a team might score more than 5 goals, but they almost never score more than 10.Using [data from previous World Cups](https://www.statista.com/statistics/269031/goals-scored-per-game-at-the-fifa-world-cup-since-1930/), I estimate that each team scores about 1.4 goals per game, on average. So I'll set the mean of `lam` to be 1.4.For a good team against a bad one, we expect `lam` to be higher; for a bad team against a good one, we expect it to be lower. To model the distribution of goal-scoring rates, I'll use a [gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which I chose because:1. The goal scoring rate is continuous and non-negative, and the gamma distribution is appropriate for this kind of quantity.2. The gamma distribution has only one parameter, `alpha`, which is the mean. So it's easy to construct a gamma distribution with the mean we want.3. As we'll see, the shape of the gamma distribution is a reasonable choice, given what we know about soccer.And there's one more reason, which I will reveal in >.SciPy provides `gamma`, which creates an object that represents a gamma distribution.And the `gamma` object provides provides `pdf`, which evaluates the **probability density function** (PDF) of the gamma distribution.Here's how we use it. 
###Code from scipy.stats import gamma alpha = 1.4 qs = np.linspace(0, 10, 101) ps = gamma(alpha).pdf(qs) ###Output _____no_output_____ ###Markdown The parameter, `alpha`, is the mean of the distribution.The `qs` are possible values of `lam` between 0 and 10.The `ps` are **probability densities**, which we can think of as unnormalized probabilities.To normalize them, we can put them in a `Pmf` and call `normalize`: ###Code from empiricaldist import Pmf prior = Pmf(ps, qs) prior.normalize() ###Output _____no_output_____ ###Markdown The result is a discrete approximation of a gamma distribution.Here's what it looks like. ###Code def decorate_rate(title=''): decorate(xlabel='Goal scoring rate (lam)', ylabel='PMF', title=title) prior.plot(ls='--', label='prior', color='C5') decorate_rate(r'Prior distribution of $\lambda$') ###Output _____no_output_____ ###Markdown This distribution represents our prior knowledge about goal scoring: `lam` is usually less than 2, occasionally as high as 6, and seldom higher than that. And we can confirm that the mean is about 1.4. ###Code prior.mean() ###Output _____no_output_____ ###Markdown As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update. The UpdateSuppose you are given the goal-scoring rate, $\lambda$, and asked to compute the probability of scoring a number of goals, $k$. That is precisely the question we answered by computing the Poisson PMF.For example, if $\lambda$ is 1.4, the probability of scoring 4 goals in a game is: ###Code lam = 1.4 k = 4 poisson(lam).pmf(4) ###Output _____no_output_____ ###Markdown Now suppose we are have an array of possible values for $\lambda$; we can compute the likelihood of the data for each hypothetical value of `lam`, like this: ###Code lams = prior.qs k = 4 likelihood = poisson(lams).pmf(k) ###Output _____no_output_____ ###Markdown And that's all we need to do the update.To get the posterior distribution, we multiply the prior by the likelihoods we just computed and normalize the result.The following function encapsulates these steps. ###Code def update_poisson(pmf, data): """Update Pmf with a Poisson likelihood.""" k = data lams = pmf.qs likelihood = poisson(lams).pmf(k) pmf *= likelihood pmf.normalize() ###Output _____no_output_____ ###Markdown The first parameter is the prior; the second is the number of goals.In the example, France scored 4 goals, so I'll make a copy of the prior and update it with the data. ###Code france = prior.copy() update_poisson(france, 4) ###Output _____no_output_____ ###Markdown Here's what the posterior distribution looks like, along with the prior. ###Code prior.plot(ls='--', label='prior', color='C5') france.plot(label='France posterior', color='C3') decorate_rate('Posterior distribution for France') ###Output _____no_output_____ ###Markdown The data, `k=4`, makes us think higher values of `lam` are more likely and lower values are less likely. So the posterior distribution is shifted to the right.Let's do the same for Croatia: ###Code croatia = prior.copy() update_poisson(croatia, 2) ###Output _____no_output_____ ###Markdown And here are the results. ###Code prior.plot(ls='--', label='prior', color='C5') croatia.plot(label='Croatia posterior', color='C0') decorate_rate('Posterior distribution for Croatia') ###Output _____no_output_____ ###Markdown Here are the posterior means for these distributions. 
###Code print(croatia.mean(), france.mean()) ###Output 1.6999765866755225 2.699772393342308 ###Markdown The mean of the prior distribution is about 1.4.After Croatia scores 2 goals, their posterior mean is 1.7, which is near the midpoint of the prior and the data.Likewise after France scores 4 goals, their posterior mean is 2.7.These results are typical of a Bayesian update: the location of the posterior distribution is a compromise between the prior and the data. Probability of SuperiorityNow that we have a posterior distribution for each team, we can answer the first question: How confident should we be that France is the better team?In the model, "better" means having a higher goal-scoring rate against the opponent. We can use the posterior distributions to compute the probability that a random value drawn from France's distribution exceeds a value drawn from Croatia's.One way to do that is to enumerate all pairs of values from the two distributions, adding up the total probability that one value exceeds the other. ###Code def prob_gt(pmf1, pmf2): """Compute the probability of superiority.""" total = 0 for q1, p1 in pmf1.items(): for q2, p2 in pmf2.items(): if q1 > q2: total += p1 * p2 return total ###Output _____no_output_____ ###Markdown This is similar to the method we use in > to compute the distribution of a sum.Here's how we use it: ###Code prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown `Pmf` provides a function that does the same thing. ###Code Pmf.prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown The results are slightly different because `Pmf.prob_gt` uses array operators rather than `for` loops.Either way, the result is close to 75%. So, on the basis of one game, we have moderate confidence that France is actually the better team.Of course, we should remember that this result is based on the assumption that the goal-scoring rate is constant.In reality, if a team is down by one goal, they might play more aggressively toward the end of the game, making them more likely to score, but also more likely to give up an additional goal.As always, the results are only as good as the model. Predicting the RematchNow we can take on the second question: If the same teams played again, what is the chance Croatia would win?To answer this question, we'll generate the "posterior predictive distribution", which is the number of goals we expect a team to score.If we knew the goal scoring rate, `lam`, the distribution of goals would be a Poisson distribution with parameter `lam`.Since we don't know `lam`, the distribution of goals is a mixture of a Poisson distributions with different values of `lam`.First I'll generate a sequence of `Pmf` objects, one for each value of `lam`. ###Code pmf_seq = [make_poisson_pmf(lam, goals) for lam in prior.qs] ###Output _____no_output_____ ###Markdown The following figure shows what these distributions look like for a few values of `lam`. ###Code import matplotlib.pyplot as plt for i, index in enumerate([10, 20, 30, 40]): plt.subplot(2, 2, i+1) lam = prior.qs[index] pmf = pmf_seq[index] pmf.bar(label=f'$\lambda$ = {lam}', color='C3') decorate_goals() ###Output _____no_output_____ ###Markdown The predictive distribution is a mixture of these `Pmf` objects, weighted with the posterior probabilities.We can use `make_mixture` from > to compute this mixture. 
###Code from utils import make_mixture pred_france = make_mixture(france, pmf_seq) ###Output _____no_output_____ ###Markdown Here's the predictive distribution for the number of goals France would score in a rematch. ###Code pred_france.bar(color='C3', label='France') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown This distribution represents two sources of uncertainty: we don't know the actual value of `lam`, and even if we did, we would not know the number of goals in the next game.Here's the predictive distribution for Croatia. ###Code pred_croatia = make_mixture(croatia, pmf_seq) pred_croatia.bar(color='C0', label='Croatia') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown We can use these distributions to compute the probability that France wins, loses, or ties the rematch. ###Code win = Pmf.prob_gt(pred_france, pred_croatia) win lose = Pmf.prob_lt(pred_france, pred_croatia) lose tie = Pmf.prob_eq(pred_france, pred_croatia) tie ###Output _____no_output_____ ###Markdown Assuming that France wins half of the ties, their chance of winning the rematch is about 65%. ###Code win + tie/2 ###Output _____no_output_____ ###Markdown This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.Even if France is the better team, they might lose the game. The Exponential DistributionAs an exercise at the end of this notebook, you'll have a chance to work on the following variation on the World Cup Problem:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?In this version, notice that the data is not the number of goals in a fixed period of time, but the time between goals.To compute the likelihood of data like this, we can take advantage of the theory of Poisson processes again. If each team has a constant goal-scoring rate, we expect the time between goals to follow an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).If the goal-scoring rate is $\lambda$, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$$\lambda \exp(-\lambda t)$$Because $t$ is a continuous quantity, the value of this expression is not a probability; it is a probability density. However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.SciPy provides `expon`, which creates an object that represents an exponential distribution.However, it does not take `lam` as a parameter in the way you might expect, which makes it awkward to work with.Since the PDF of the exponential distribution is so easy to evaluate, I'll use my own function. 
###Code def expo_pdf(t, lam): """Compute the PDF of the exponential distribution.""" return lam * np.exp(-lam * t) ###Output _____no_output_____ ###Markdown To see what the exponential distribution looks like, let's assume again that `lam` is 1.4; we can compute the distribution of $t$ like this: ###Code lam = 1.4 qs = np.linspace(0, 4, 101) ps = expo_pdf(qs, lam) pmf_time = Pmf(ps, qs) pmf_time.normalize() ###Output _____no_output_____ ###Markdown And here's what it looks like: ###Code def decorate_time(title=''): decorate(xlabel='Time between goals (games)', ylabel='PMF', title=title) pmf_time.plot(label='exponential with $\lambda$ = 1.4') decorate_time('Distribution of time between goals') ###Output _____no_output_____ ###Markdown It is counterintuitive, but true, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.With a goal-scoring rate of 1.4, it is possible that a team will take more than one game to score a goal, but it is unlikely that they will take more than two games. SummaryThis chapter introduces three new distributions, so it can be hard to keep them straight.Let's review:* If a system satisfies the assumptions of a Poisson model, the number of events in a period of time follows a Poisson distribution, which is a discrete distribution with integer quantities from 0 to infinity. In practice, we can usually ignore low-probability quantities above a finite limit.* Also under the Poisson model, the interval between events follows an exponential distribution, which is a continuous distribution with quantities from 0 to infinity. Because it is continuous, it is described by a probability density function (PDF) rather than a probability mass function (PMF). But when we use an exponential distribution to compute the likelihood of the data, we can treat densities as unnormalized probabilities.* The Poisson and exponential distributions are parameterized by an event rate, denoted $\lambda$ or `lam`.* For the prior distribution of $\lambda$, I used a gamma distribution, which is a continuous distribution with quantities from 0 to infinity, but I approximated it with a discrete, bounded PMF. The gamma distribution has one parameter, denoted $\alpha$ or `alpha`, which is also its mean.I chose the gamma distribution because the shape is consistent with our background knowledge about goal-scoring rates.There are other distributions we could have used; however, we will see in > that the gamma distribution can be a particularly good choice.But we have a few things to do before we get there, starting with these exercises. Exercises **Exercise:** Let's finish the exercise we started:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)? Here are the steps I recommend:1. Starting with the same gamma prior we used in the previous problem, compute the likelihood of scoring a goal after 11 minutes for each possible value of `lam`. Don't forget to convert all times into games rather than minutes.2. Compute the posterior distribution of `lam` for Germany after the first goal.3. Compute the likelihood of scoring another goal after 12 more minutes and do another update. Plot the prior, posterior after one goal, and posterior after two goals.4. 
Compute the posterior predictive distribution of goals Germany might score during the remaining time in the game, `90-23` minutes. Note: You will have to think about how to generate predicted goals for a fraction of a game.5. Compute the probability of scoring 5 or more goals during the remaining time. ###Code # Solution # Here's a function that updates the distribution of lam # with the given time between goals def update_expo(pmf, data): """Update based on an observed interval pmf: prior PMF data: time between goals in minutes """ t = data / 90 lams = pmf.qs likelihood = expo_pdf(t, lams) pmf *= likelihood pmf.normalize() # Solution # Here are the updates for the first and second goals germany = prior.copy() update_expo(germany, 11) germany2 = germany.copy() update_expo(germany2, 12) # Solution # Here are the mean values of `lam` after each update germany.mean(), germany2.mean() # Solution # Here's what the posterior distributions look like prior.plot(ls='--', label='prior', color='C5') germany.plot(color='C3', label='Posterior after 1 goal') germany2.plot(color='C16', label='Posterior after 2 goals') decorate_rate('Prior and posterior distributions') # Solution # Here's the predictive distribution for each possible value of `lam` t = (90-23) / 90 pmf_seq = [make_poisson_pmf(lam*t, goals) for lam in germany2.qs] # Solution # And here's the mixture of predictive distributions, # weighted by the probabilities in the posterior distribution. pred_germany2 = make_mixture(germany2, pmf_seq) # Solution # Here's what the predictive distribution looks like pred_germany2.bar(color='C1', label='germany') decorate_goals('Posterior predictive distribution') # Solution # Here's the probability of scoring exactly 5 more goals pred_germany2[5] # Solution # And the probability of 5 or more pred_germany2.prob_ge(5) ###Output _____no_output_____ ###Markdown **Exercise:** Returning to the first version of the World Cup Problem. Suppose France and Croatia play a rematch. What is the probability that France scores first? Hint: Compute the posterior predictive distribution for the time until the first goal by making a mixture of exponential distributions. You can use the following function to make a PMF that approximates an exponential distribution. ###Code def make_expo_pmf(lam, high): """Make a PMF of an exponential distribution. lam: event rate high: upper bound on the interval `t` returns: Pmf of the interval between events """ qs = np.linspace(0, high, 101) ps = expo_pdf(qs, lam) pmf = Pmf(ps, qs) pmf.normalize() return pmf # Solution # Here are the predictive distributions for the # time until the first goal pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs] # Solution # And here are the mixtures based on the two posterior distributions pred_france = make_mixture(france, pmf_seq) pred_croatia = make_mixture(croatia, pmf_seq) # Solution # Here's what the posterior predictive distributions look like pred_france.plot(label='France', color='C3') pred_croatia.plot(label='Croatia', color='C0') decorate_time('Posterior predictive distribution') # Solution # And here's the probability France scores first Pmf.prob_lt(pred_france, pred_croatia) ###Output _____no_output_____ ###Markdown **Exercise:** In the 2010-11 National Hockey League (NHL) Finals, my beloved BostonBruins played a best-of-seven championship series against the despisedVancouver Canucks. Boston lost the first two games 0-1 and 2-3, thenwon the next two games 8-1 and 4-0. 
At this point in the series, whatis the probability that Boston will win the next game, and what istheir probability of winning the championship?To choose a prior distribution, I got some statistics fromhttp://www.nhl.com, specifically the average goals per gamefor each team in the 2010-11 season. The distribution is well modeled by a gamma distribution with mean 2.8.In what ways do you think the outcome of these games might violate the assumptions of the Poisson model? How would these violations affect your predictions? ###Code # Solution # When a team is winning or losing by an insurmountable margin, # they might remove their best players from the game, which # would affect their goal-scoring rate, violating the assumption # that the goal scoring rate is constant. # In this example, Boston won the third game 8-1, but scoring # eight goals in a game might not reflect their true long-term # goal-scoring rate. # As a result, the analysis below might overestimate the chance # that Boston wins. # As it turned out, they did not. # Solution from scipy.stats import gamma alpha = 2.8 qs = np.linspace(0, 15, 101) ps = gamma.pdf(qs, alpha) prior_hockey = Pmf(ps, qs) prior_hockey.normalize() # Solution prior_hockey.plot(ls='--', color='C5') decorate_rate('Prior distribution for hockey') prior_hockey.mean() # Solution bruins = prior_hockey.copy() for data in [0, 2, 8, 4]: update_poisson(bruins, data) bruins.mean() # Solution canucks = prior_hockey.copy() for data in [1, 3, 1, 0]: update_poisson(canucks, data) canucks.mean() # Solution canucks.plot(label='Canucks') bruins.plot(label='Bruins') decorate_rate('Posterior distributions') # Solution goals = np.arange(15) pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs] # Solution pred_bruins = make_mixture(bruins, pmf_seq) pred_bruins.bar(label='Bruins', color='C1') decorate_goals('Posterior predictive distribution') # Solution pred_canucks = make_mixture(canucks, pmf_seq) pred_canucks.bar(label='Canucks') decorate_goals('Posterior predictive distribution') # Solution win = Pmf.prob_gt(pred_bruins, pred_canucks) lose = Pmf.prob_lt(pred_bruins, pred_canucks) tie = Pmf.prob_eq(pred_bruins, pred_canucks) win, lose, tie # Solution # Assuming the Bruins win half of the ties, # their chance of winning the next game is... p = win + tie/2 p # Solution # Their chance of winning the series is their # chance of winning k=2 or k=3 of the remaining # n=3 games. from scipy.stats import binom n = 3 a = binom.pmf([2,3], n, p) a.sum() ###Output _____no_output_____ ###Markdown Poisson Processes Think Bayes, Second EditionCopyright 2020 Allen B. 
DowneyLicense: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) ###Code # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist # Get utils.py import os if not os.path.exists('utils.py'): !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py from utils import set_pyplot_params set_pyplot_params() ###Output _____no_output_____ ###Markdown This chapter introduces the [Poisson process](https://en.wikipedia.org/wiki/Poisson_point_process), which is a model used to describe events that occur at random intervals.As an example of a Poisson process, we'll model goal-scoring in soccer, which is American English for the game everyone else calls "football".We'll use goals scored in a game to estimate the parameter of a Poisson process; then we'll use the posterior distribution to make predictions.And we'll solve The World Cup Problem. The World Cup ProblemIn the 2018 FIFA World Cup final, France defeated Croatia 4 goals to 2. Based on this outcome:1. How confident should we be that France is the better team?2. If the same teams played again, what is the chance France would win again?To answer these questions, we have to make some modeling decisions.* First, I'll assume that for any team against another team there is some unknown goal-scoring rate, measured in goals per game, which I'll denote with the Python variable `lam` or the Greek letter $\lambda$, pronounced "lambda".* Second, I'll assume that a goal is equally likely during any minute of a game. So, in a 90 minute game, the probability of scoring during any minute is $\lambda/90$.* Third, I'll assume that a team never scores twice during the same minute.Of course, none of these assumptions is completely true in the real world, but I think they are reasonable simplifications.As George Box said, "All models are wrong; some are useful."(https://en.wikipedia.org/wiki/All_models_are_wrong).In this case, the model is useful because if these assumptions are true, at least roughly, the number of goals scored in a game follows a Poisson distribution, at least roughly. The Poisson DistributionIf the number of goals scored in a game follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with a goal-scoring rate, $\lambda$, the probability of scoring $k$ goals is$$\lambda^k \exp(-\lambda) ~/~ k!$$for any non-negative value of $k$.SciPy provides a `poisson` object that represents a Poisson distribution.We can create one with $\lambda=1.4$ like this: ###Code from scipy.stats import poisson lam = 1.4 dist = poisson(lam) type(dist) ###Output _____no_output_____ ###Markdown The result is an object that represents a "frozen" random variable and provides `pmf`, which evaluates the probability mass function of the Poisson distribution. ###Code k = 4 dist.pmf(k) ###Output _____no_output_____ ###Markdown This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.We'll use the following function to make a `Pmf` that represents a Poisson distribution. 
###Code from empiricaldist import Pmf def make_poisson_pmf(lam, qs): """Make a Pmf of a Poisson distribution.""" ps = poisson(lam).pmf(qs) pmf = Pmf(ps, qs) pmf.normalize() return pmf ###Output _____no_output_____ ###Markdown `make_poisson_pmf` takes as parameters the goal-scoring rate, `lam`, and an array of quantities, `qs`, where it should evaluate the Poisson PMF. It returns a `Pmf` object.For example, here's the distribution of goals scored for `lam=1.4`, computed for values of `k` from 0 to 9. ###Code import numpy as np lam = 1.4 goals = np.arange(10) pmf_goals = make_poisson_pmf(lam, goals) ###Output _____no_output_____ ###Markdown And here's what it looks like. ###Code from utils import decorate def decorate_goals(title=''): decorate(xlabel='Number of goals', ylabel='PMF', title=title) pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$') decorate_goals('Distribution of goals scored') ###Output _____no_output_____ ###Markdown The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.Values above 7 are negligible.This distribution shows that if we know the goal scoring rate, we can predict the number of goals.Now let's turn it around: given a number of goals, what can we say about the goal-scoring rate?To answer that, we need to think about the prior distribution of `lam`, which represents the range of possible values and their probabilities before we see the score. The Gamma DistributionIf you have ever seen a soccer game, you have some information about `lam`. In most games, teams score a few goals each. In rare cases, a team might score more than 5 goals, but they almost never score more than 10.Using [data from previous World Cups](https://www.statista.com/statistics/269031/goals-scored-per-game-at-the-fifa-world-cup-since-1930/), I estimate that each team scores about 1.4 goals per game, on average. So I'll set the mean of `lam` to be 1.4.For a good team against a bad one, we expect `lam` to be higher; for a bad team against a good one, we expect it to be lower. To model the distribution of goal-scoring rates, I'll use a [gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which I chose because:1. The goal scoring rate is continuous and non-negative, and the gamma distribution is appropriate for this kind of quantity.2. The gamma distribution has only one parameter, `alpha`, which is the mean. So it's easy to construct a gamma distribution with the mean we want.3. As we'll see, the shape of the gamma distribution is a reasonable choice, given what we know about soccer.And there's one more reason, which I will reveal in >.SciPy provides `gamma`, which creates an object that represents a gamma distribution.And the `gamma` object provides provides `pdf`, which evaluates the **probability density function** (PDF) of the gamma distribution.Here's how we use it. ###Code from scipy.stats import gamma alpha = 1.4 qs = np.linspace(0, 10, 101) ps = gamma(alpha).pdf(qs) ###Output _____no_output_____ ###Markdown The parameter, `alpha`, is the mean of the distribution.The `qs` are possible values of `lam` between 0 and 10.The `ps` are **probability densities**, which we can think of as unnormalized probabilities.To normalize them, we can put them in a `Pmf` and call `normalize`: ###Code from empiricaldist import Pmf prior = Pmf(ps, qs) prior.normalize() ###Output _____no_output_____ ###Markdown The result is a discrete approximation of a gamma distribution.Here's what it looks like. 
###Code def decorate_rate(title=''): decorate(xlabel='Goal scoring rate (lam)', ylabel='PMF', title=title) prior.plot(ls='--', label='prior', color='C5') decorate_rate(r'Prior distribution of $\lambda$') ###Output _____no_output_____ ###Markdown This distribution represents our prior knowledge about goal scoring: `lam` is usually less than 2, occasionally as high as 6, and seldom higher than that. And we can confirm that the mean is about 1.4. ###Code prior.mean() ###Output _____no_output_____ ###Markdown As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update. The UpdateSuppose you are given the goal-scoring rate, $\lambda$, and asked to compute the probability of scoring a number of goals, $k$. That is precisely the question we answered by computing the Poisson PMF.For example, if $\lambda$ is 1.4, the probability of scoring 4 goals in a game is: ###Code lam = 1.4 k = 4 poisson(lam).pmf(4) ###Output _____no_output_____ ###Markdown Now suppose we are have an array of possible values for $\lambda$; we can compute the likelihood of the data for each hypothetical value of `lam`, like this: ###Code lams = prior.qs k = 4 likelihood = poisson(lams).pmf(k) ###Output _____no_output_____ ###Markdown And that's all we need to do the update.To get the posterior distribution, we multiply the prior by the likelihoods we just computed and normalize the result.The following function encapsulates these steps. ###Code def update_poisson(pmf, data): """Update Pmf with a Poisson likelihood.""" k = data lams = pmf.qs likelihood = poisson(lams).pmf(k) pmf *= likelihood pmf.normalize() ###Output _____no_output_____ ###Markdown The first parameter is the prior; the second is the number of goals.In the example, France scored 4 goals, so I'll make a copy of the prior and update it with the data. ###Code france = prior.copy() update_poisson(france, 4) ###Output _____no_output_____ ###Markdown Here's what the posterior distribution looks like, along with the prior. ###Code prior.plot(ls='--', label='prior', color='C5') france.plot(label='France posterior', color='C3') decorate_rate('Posterior distribution for France') ###Output _____no_output_____ ###Markdown The data, `k=4`, makes us think higher values of `lam` are more likely and lower values are less likely. So the posterior distribution is shifted to the right.Let's do the same for Croatia: ###Code croatia = prior.copy() update_poisson(croatia, 2) ###Output _____no_output_____ ###Markdown And here are the results. ###Code prior.plot(ls='--', label='prior', color='C5') croatia.plot(label='Croatia posterior', color='C0') decorate_rate('Posterior distribution for Croatia') ###Output _____no_output_____ ###Markdown Here are the posterior means for these distributions. ###Code print(croatia.mean(), france.mean()) ###Output 1.6999765866755225 2.699772393342308 ###Markdown The mean of the prior distribution is about 1.4.After Croatia scores 2 goals, their posterior mean is 1.7, which is near the midpoint of the prior and the data.Likewise after France scores 4 goals, their posterior mean is 2.7.These results are typical of a Bayesian update: the location of the posterior distribution is a compromise between the prior and the data. Probability of SuperiorityNow that we have a posterior distribution for each team, we can answer the first question: How confident should we be that France is the better team?In the model, "better" means having a higher goal-scoring rate against the opponent. 
We can use the posterior distributions to compute the probability that a random value drawn from France's distribution exceeds a value drawn from Croatia's.One way to do that is to enumerate all pairs of values from the two distributions, adding up the total probability that one value exceeds the other. ###Code def prob_gt(pmf1, pmf2): """Compute the probability of superiority.""" total = 0 for q1, p1 in pmf1.items(): for q2, p2 in pmf2.items(): if q1 > q2: total += p1 * p2 return total ###Output _____no_output_____ ###Markdown This is similar to the method we use in > to compute the distribution of a sum.Here's how we use it: ###Code prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown `Pmf` provides a function that does the same thing. ###Code Pmf.prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown The results are slightly different because `Pmf.prob_gt` uses array operators rather than `for` loops.Either way, the result is close to 75%. So, on the basis of one game, we have moderate confidence that France is actually the better team.Of course, we should remember that this result is based on the assumption that the goal-scoring rate is constant.In reality, if a team is down by one goal, they might play more aggressively toward the end of the game, making them more likely to score, but also more likely to give up an additional goal.As always, the results are only as good as the model. Predicting the RematchNow we can take on the second question: If the same teams played again, what is the chance Croatia would win?To answer this question, we'll generate the "posterior predictive distribution", which is the number of goals we expect a team to score.If we knew the goal scoring rate, `lam`, the distribution of goals would be a Poisson distribution with parameter `lam`.Since we don't know `lam`, the distribution of goals is a mixture of a Poisson distributions with different values of `lam`.First I'll generate a sequence of `Pmf` objects, one for each value of `lam`. ###Code pmf_seq = [make_poisson_pmf(lam, goals) for lam in prior.qs] ###Output _____no_output_____ ###Markdown The following figure shows what these distributions look like for a few values of `lam`. ###Code import matplotlib.pyplot as plt for i, index in enumerate([10, 20, 30, 40]): plt.subplot(2, 2, i+1) lam = prior.qs[index] pmf = pmf_seq[index] pmf.bar(label=f'$\lambda$ = {lam}', color='C3') decorate_goals() ###Output _____no_output_____ ###Markdown The predictive distribution is a mixture of these `Pmf` objects, weighted with the posterior probabilities.We can use `make_mixture` from > to compute this mixture. ###Code from utils import make_mixture pred_france = make_mixture(france, pmf_seq) ###Output _____no_output_____ ###Markdown Here's the predictive distribution for the number of goals France would score in a rematch. ###Code pred_france.bar(color='C3', label='France') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown This distribution represents two sources of uncertainty: we don't know the actual value of `lam`, and even if we did, we would not know the number of goals in the next game.Here's the predictive distribution for Croatia. ###Code pred_croatia = make_mixture(croatia, pmf_seq) pred_croatia.bar(color='C0', label='Croatia') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown We can use these distributions to compute the probability that France wins, loses, or ties the rematch. 
###Code win = Pmf.prob_gt(pred_france, pred_croatia) win lose = Pmf.prob_lt(pred_france, pred_croatia) lose tie = Pmf.prob_eq(pred_france, pred_croatia) tie ###Output _____no_output_____ ###Markdown Assuming that France wins half of the ties, their chance of winning the rematch is about 65%. ###Code win + tie/2 ###Output _____no_output_____ ###Markdown This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.Even if France is the better team, they might lose the game. The Exponential DistributionAs an exercise at the end of this notebook, you'll have a chance to work on the following variation on the World Cup Problem:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?In this version, notice that the data is not the number of goals in a fixed period of time, but the time between goals.To compute the likelihood of data like this, we can take advantage of the theory of Poisson processes again. If each team has a constant goal-scoring rate, we expect the time between goals to follow an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).If the goal-scoring rate is $\lambda$, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$$\lambda \exp(-\lambda t)$$Because $t$ is a continuous quantity, the value of this expression is not a probability; it is a probability density. However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.SciPy provides `expon`, which creates an object that represents an exponential distribution.However, it does not take `lam` as a parameter in the way you might expect, which makes it awkward to work with.Since the PDF of the exponential distribution is so easy to evaluate, I'll use my own function. ###Code def expo_pdf(t, lam): """Compute the PDF of the exponential distribution.""" return lam * np.exp(-lam * t) ###Output _____no_output_____ ###Markdown To see what the exponential distribution looks like, let's assume again that `lam` is 1.4; we can compute the distribution of $t$ like this: ###Code lam = 1.4 qs = np.linspace(0, 4, 101) ps = expo_pdf(qs, lam) pmf_time = Pmf(ps, qs) pmf_time.normalize() ###Output _____no_output_____ ###Markdown And here's what it looks like: ###Code def decorate_time(title=''): decorate(xlabel='Time between goals (games)', ylabel='PMF', title=title) pmf_time.plot(label='exponential with $\lambda$ = 1.4') decorate_time('Distribution of time between goals') ###Output _____no_output_____ ###Markdown It is counterintuitive, but true, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.With a goal-scoring rate of 1.4, it is possible that a team will take more than one game to score a goal, but it is unlikely that they will take more than two games. 
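As a quick check (this cell is an addition, not part of the original analysis), the exponential survival function $\exp(-\lambda t)$ gives the probability that the first goal takes longer than $t$ games. With `lam=1.4`, the chance of waiting more than one game is about 25%, and more than two games only about 6%. ###Code # Added check: probability that the wait for the first goal
# exceeds one game and two games, using the survival function exp(-lam * t)

lam = 1.4
np.exp(-lam * 1), np.exp(-lam * 2) ###Output _____no_output_____ ###Markdown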
SummaryThis chapter introduces three new distributions, so it can be hard to keep them straight.Let's review:* If a system satisfies the assumptions of a Poisson model, the number of events in a period of time follows a Poisson distribution, which is a discrete distribution with integer quantities from 0 to infinity. In practice, we can usually ignore low-probability quantities above a finite limit.* Also under the Poisson model, the interval between events follows an exponential distribution, which is a continuous distribution with quantities from 0 to infinity. Because it is continuous, it is described by a probability density function (PDF) rather than a probability mass function (PMF). But when we use an exponential distribution to compute the likelihood of the data, we can treat densities as unnormalized probabilities.* The Poisson and exponential distributions are parameterized by an event rate, denoted $\lambda$ or `lam`.* For the prior distribution of $\lambda$, I used a gamma distribution, which is a continuous distribution with quantities from 0 to infinity, but I approximated it with a discrete, bounded PMF. The gamma distribution has one parameter, denoted $\alpha$ or `alpha`, which is also its mean.I chose the gamma distribution because the shape is consistent with our background knowledge about goal-scoring rates.There are other distributions we could have used; however, we will see in > that the gamma distribution can be a particularly good choice.But we have a few things to do before we get there, starting with these exercises. Exercises **Exercise:** Let's finish the exercise we started:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)? Here are the steps I recommend:1. Starting with the same gamma prior we used in the previous problem, compute the likelihood of scoring a goal after 11 minutes for each possible value of `lam`. Don't forget to convert all times into games rather than minutes.2. Compute the posterior distribution of `lam` for Germany after the first goal.3. Compute the likelihood of scoring another goal after 12 more minutes and do another update. Plot the prior, posterior after one goal, and posterior after two goals.4. Compute the posterior predictive distribution of goals Germany might score during the remaining time in the game, `90-23` minutes. Note: You will have to think about how to generate predicted goals for a fraction of a game.5. Compute the probability of scoring 5 or more goals during the remaining time. 
###Code # Solution # Here's a function that updates the distribution of lam # with the given time between goals def update_expo(pmf, data): """Update based on an observed interval pmf: prior PMF data: time between goals in minutes """ t = data / 90 lams = pmf.qs likelihood = expo_pdf(t, lams) pmf *= likelihood pmf.normalize() # Solution # Here are the updates for the first and second goals germany = prior.copy() update_expo(germany, 11) germany2 = germany.copy() update_expo(germany2, 12) # Solution # Here are the mean values of `lam` after each update germany.mean(), germany2.mean() # Solution # Here's what the posterior distributions look like prior.plot(ls='--', label='prior', color='C5') germany.plot(color='C3', label='Posterior after 1 goal') germany2.plot(color='C16', label='Posterior after 2 goals') decorate_rate('Prior and posterior distributions') # Solution # Here's the predictive distribution for each possible value of `lam` t = (90-23) / 90 pmf_seq = [make_poisson_pmf(lam*t, goals) for lam in germany2.qs] # Solution # And here's the mixture of predictive distributions, # weighted by the probabilities in the posterior distribution. pred_germany2 = make_mixture(germany2, pmf_seq) # Solution # Here's what the predictive distribution looks like pred_germany2.bar(color='C1', label='germany') decorate_goals('Posterior predictive distribution') # Solution # Here's the probability of scoring exactly 5 more goals pred_germany2[5] # Solution # And the probability of 5 or more pred_germany2.prob_ge(5) ###Output _____no_output_____ ###Markdown **Exercise:** Returning to the first version of the World Cup Problem. Suppose France and Croatia play a rematch. What is the probability that France scores first? Hint: Compute the posterior predictive distribution for the time until the first goal by making a mixture of exponential distributions. You can use the following function to make a PMF that approximates an exponential distribution. ###Code def make_expo_pmf(lam, high): """Make a PMF of an exponential distribution. lam: event rate high: upper bound on the interval `t` returns: Pmf of the interval between events """ qs = np.linspace(0, high, 101) ps = expo_pdf(qs, lam) pmf = Pmf(ps, qs) pmf.normalize() return pmf # Solution # Here are the predictive distributions for the # time until the first goal pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs] # Solution # And here are the mixtures based on the two posterior distributions pred_france = make_mixture(france, pmf_seq) pred_croatia = make_mixture(croatia, pmf_seq) # Solution # Here's what the posterior predictive distributions look like pred_france.plot(label='France', color='C3') pred_croatia.plot(label='Croatia', color='C0') decorate_time('Posterior predictive distribution') # Solution # And here's the probability France scores first Pmf.prob_lt(pred_france, pred_croatia) ###Output _____no_output_____ ###Markdown **Exercise:** In the 2010-11 National Hockey League (NHL) Finals, my beloved BostonBruins played a best-of-seven championship series against the despisedVancouver Canucks. Boston lost the first two games 0-1 and 2-3, thenwon the next two games 8-1 and 4-0. At this point in the series, whatis the probability that Boston will win the next game, and what istheir probability of winning the championship?To choose a prior distribution, I got some statistics fromhttp://www.nhl.com, specifically the average goals per gamefor each team in the 2010-11 season. 
The distribution is well modeled by a gamma distribution with mean 2.8.In what ways do you think the outcome of these games might violate the assumptions of the Poisson model? How would these violations affect your predictions? ###Code # Solution # When a team is winning or losing by an insurmountable margin, # they might remove their best players from the game, which # would affect their goal-scoring rate, violating the assumption # that the goal scoring rate is constant. # In this example, Boston won the third game 8-1, but scoring # eight goals in a game might not reflect their true long-term # goal-scoring rate. # As a result, the analysis below might overestimate the chance # that Boston wins. # As it turned out, they did not. # Solution from scipy.stats import gamma alpha = 2.8 qs = np.linspace(0, 15, 101) ps = gamma.pdf(qs, alpha) prior_hockey = Pmf(ps, qs) prior_hockey.normalize() # Solution prior_hockey.plot(ls='--', color='C5') decorate_rate('Prior distribution for hockey') prior_hockey.mean() # Solution bruins = prior_hockey.copy() for data in [0, 2, 8, 4]: update_poisson(bruins, data) bruins.mean() # Solution canucks = prior_hockey.copy() for data in [1, 3, 1, 0]: update_poisson(canucks, data) canucks.mean() # Solution canucks.plot(label='Canucks') bruins.plot(label='Bruins') decorate_rate('Posterior distributions') # Solution goals = np.arange(15) pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs] # Solution pred_bruins = make_mixture(bruins, pmf_seq) pred_bruins.bar(label='Bruins', color='C1') decorate_goals('Posterior predictive distribution') # Solution pred_canucks = make_mixture(canucks, pmf_seq) pred_canucks.bar(label='Canucks') decorate_goals('Posterior predictive distribution') # Solution win = Pmf.prob_gt(pred_bruins, pred_canucks) lose = Pmf.prob_lt(pred_bruins, pred_canucks) tie = Pmf.prob_eq(pred_bruins, pred_canucks) win, lose, tie # Solution # Assuming the Bruins win half of the ties, # their chance of winning the next game is... p = win + tie/2 p # Solution # Their chance of winning the series is their # chance of winning k=2 or k=3 of the remaining # n=3 games. from scipy.stats import binom n = 3 a = binom.pmf([2,3], n, p) a.sum() ###Output _____no_output_____ ###Markdown Poisson Processes Think Bayes, Second EditionCopyright 2020 Allen B. DowneyLicense: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) ###Code # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: !pip install empiricaldist # Get utils.py import os if not os.path.exists('utils.py'): !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py from utils import set_pyplot_params set_pyplot_params() ###Output _____no_output_____ ###Markdown This chapter introduces the [Poisson process](https://en.wikipedia.org/wiki/Poisson_point_process), which is a model used to describe events that occur at random intervals.In this context, "process" has a mathematical definition that is almost unrelated to its usual meaning and not worth explaining here.As an example of a Poisson process, we'll model goal-scoring in soccer, which is American English for the game everyone else calls "football".We'll use goals scored in a game to estimate the parameter of a Poisson process; then we'll use the posterior distribution to make predictions.And we'll solve The World Cup Problem. 
The World Cup ProblemIn the 2018 FIFA World Cup final, France defeated Croatia 4 goals to 2. Based on this outcome:1. How confident should we be that France is the better team?2. If the same teams played again, what is the chance France would win again?To answer these questions, we have to make some modeling decisions.* First, I'll assume that for any team against another team there is some unknown goal-scoring rate, measured in goals per game, which I'll denote with the Python variable `lam` or the Greek letter $\lambda$, pronounced "lambda".* Second, I'll assume that a goal is equally likely during any minute of a game. So, in a 90 minute game, the probability of scoring during any minute is $\lambda/90$.* Third, I'll assume that a team never scores twice during the same minute.Of course, none of these assumptions is completely true in the real world, but I think they are reasonable simplifications.As George Box said, "[All models are wrong; some are useful.](https://en.wikipedia.org/wiki/All_models_are_wrong)"In this case, the model is useful because if these assumption are true, we expect the number of goals scored in a game to follow a Poisson distribution. The Poisson distributionIf the number of goals scored in a game follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with a goal-scoring rate, $\lambda$, the probability of scoring $k$ goals is$$\lambda^k \exp(-\lambda) ~/~ k!$$for any non-negative value of $k$.SciPy provides a `poisson` object that represents a Poisson distribution.We can create one with $\lambda=1.4$ like this: ###Code from scipy.stats import poisson lam = 1.4 dist = poisson(lam) type(dist) ###Output _____no_output_____ ###Markdown The result is an object that represents a "frozen" random variable and provides `pmf`, which evaluates the probability mass function of the Poisson distribution. ###Code k = 4 dist.pmf(k) ###Output _____no_output_____ ###Markdown This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.We'll use the following function to make a `Pmf` that represents a Poisson distribution. ###Code def make_poisson_pmf(lam, qs): """Make a Pmf of a Poisson distribution.""" ps = poisson(lam).pmf(qs) pmf = Pmf(ps, qs) pmf.normalize() return pmf ###Output _____no_output_____ ###Markdown `make_poisson_pmf` takes as parameters the goal-scoring rate, `lam`, and an array of quantities, `qs`, where it should evaluate the Poisson PMF. It returns a `Pmf` object.For example, here's the distribution of goals scored for `lam=1.4`, computed for values of `k` from 0 to 9. ###Code import numpy as np lam = 1.4 goals = np.arange(10) pmf_goals = make_poisson_pmf(lam, goals) ###Output _____no_output_____ ###Markdown And here's what it looks like. 
###Code from utils import decorate def decorate_goals(title=''): decorate(xlabel='Number of goals', ylabel='PMF', title=title) pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$') decorate_goals('Distribution of goals scored') ###Output _____no_output_____ ###Markdown The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.Values above 7 are negligible.This distribution shows that if we know the goal scoring rate, we can predict the number of goals.Now let's turn it around: given a number of goals, what can we say about the goal-scoring rate?To answer that, we need to think about the prior distribution of `lam`, which represents the range of possible values and their probabilities before we see the score. The priorIf you have ever seen a soccer game, you have some information about `lam`. In most games, teams score a few goals each. In rare cases, a team might score more than 5 goals, but they almost never score more than 10.Using [data from previous World Cups](https://www.statista.com/statistics/269031/goals-scored-per-game-at-the-fifa-world-cup-since-1930/), I estimate that each team scores about 1.4 goals per game, on average. So I'll set the mean of `lam` to be 1.4.For a good team against a bad one, we expect `lam` to be higher; for a bad team against a good one, we expect it to be lower. To model the distribution of goal-scoring rates, I'll use a [gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution), which I chose because:1. The goal scoring rate is continuous and non-negative, and the gamma distribution is appropriate for this kind of quantity.2. The gamma distribution has only one parameter, `alpha`, which is the mean. So it's easy to construct a gamma distribution with the mean we want.3. As we'll see, the shape of the gamma distribution is a reasonable choice, given what we know about soccer.And there's one more reason, which I will reveal in Chapter xxx.SciPy provides `gamma`, which creates an object that represents a gamma distribution.And the `gamma` object provides provides `pdf`, which evaluates the **probability density function** (PDF) of the gamma distribution.Here's how we use it. ###Code from scipy.stats import gamma alpha = 1.4 qs = np.linspace(0, 10, 101) ps = gamma(alpha).pdf(qs) ###Output _____no_output_____ ###Markdown The parameter, `alpha`, is the mean of the distribution.The `qs` are possible values of `lam` between 0 and 10.The `ps` are **probability densities**, which we can think of as unnormalized probabilities.To normalize them, we can put them in a `Pmf` and call `normalize`: ###Code from empiricaldist import Pmf prior = Pmf(ps, qs) prior.normalize() ###Output _____no_output_____ ###Markdown The result is a discrete approximation of a gamma distribution.Here's what it looks like. ###Code def decorate_rate(title=''): decorate(xlabel='Goal scoring rate (lam)', ylabel='PMF', title=title) prior.plot(label='prior', color='C5') decorate_rate(r'Prior distribution of $\lambda$') ###Output _____no_output_____ ###Markdown This distribution represents our prior knowledge about goal scoring: `lam` is usually less than 2, occasionally as high as 6, and seldom higher than that. And we can confirm that the mean is about 1.4. ###Code prior.mean() ###Output _____no_output_____ ###Markdown As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update. 
The updateSuppose you are given the goal-scoring rate, $\lambda$, and asked to compute the probability of scoring a number of goals, $k$. That is precisely the question we answered by computing the Poisson PMF.For example, if $\lambda$ is 1.4, the probability of scoring 4 goals in a game is: ###Code lam = 1.4 k = 4 poisson(lam).pmf(4) ###Output _____no_output_____ ###Markdown Now suppose we are have an array of possible values for $\lambda$; we can compute the likelihood of the data for each hypothetical value of lam, like this: ###Code lams = prior.qs k = 4 likelihood = poisson(lams).pmf(k) ###Output _____no_output_____ ###Markdown And that's all we need to do the update.To get the posterior distribution, we multiply the prior by the likelihoods we just computed and normalize the result.The following function encapsulates these steps. ###Code def update_poisson(pmf, data): """Update Pmf with a Poisson likelihood.""" k = data lams = pmf.qs likelihood = poisson(lams).pmf(k) pmf *= likelihood pmf.normalize() ###Output _____no_output_____ ###Markdown The first parameter is the prior; the second is the number of goals.In the example, France scored 4 goals, so I'll make a copy of the prior and update it with the data. ###Code france = prior.copy() update_poisson(france, 4) ###Output _____no_output_____ ###Markdown Here's what the posterior distribution looks like, along with the prior. ###Code prior.plot(label='prior', color='C5') france.plot(label='France posterior', color='C3') decorate_rate('Posterior distribution for France') ###Output _____no_output_____ ###Markdown The data, `k=4`, makes us think higher values of `lam` are more likely and lower values are less likely. So the posterior distribution is shifted to the right.Let's do the same for Croatia: ###Code croatia = prior.copy() update_poisson(croatia, 2) ###Output _____no_output_____ ###Markdown And here are the results. ###Code prior.plot(label='prior', color='C5') croatia.plot(label='Croatia posterior', color='C0') decorate_rate('Posterior distribution for Croatia') ###Output _____no_output_____ ###Markdown Here are the posterior means for these distributions. ###Code print(croatia.mean(), france.mean()) ###Output _____no_output_____ ###Markdown The mean of the prior distribution is about 1.4.After Croatia scores 2 goals, their posterior mean is 1.7, which is near the midpoint of the prior and the data.Likewise after France scores 4 goals, their posterior mean is 2.7.These results are typical of a Bayesian update: the location of the posterior distribution is a compromise between the prior and the data. Probability of superiorityNow that we have a posterior distribution for each team, we can answer the first question: How confident should we be that France is the better team?In the model, "better" means having a higher goal-scoring rate against the opponent. We can use the posterior distributions to compute the probability that a random value drawn from France's distribution exceeds a value drawn from Croatia's.One way to do that is to enumerate all pairs of values from the two distributions, adding up the total probability that one value exceeds the other. 
###Code def prob_gt(pmf1, pmf2): """Compute the probability of superiority.""" total = 0 for q1, p1 in pmf1.items(): for q2, p2 in pmf2.items(): if q1 > q2: total += p1 * p2 return total ###Output _____no_output_____ ###Markdown This is similar to the method we use in Section xxx to compute the distribution of a sum.Here's how we use it: ###Code prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown `Pmf` provides a function that does the same thing. ###Code Pmf.prob_gt(france, croatia) ###Output _____no_output_____ ###Markdown The results are slightly different because `Pmf.prob_gt` uses array operators rather than `for` loops.Either way, the result is close to 75%. So, on the basis of one game, we have moderate confidence that France is actually the better team.Of course, we should remember that this result is based on the assumption that the goal-scoring rate is constant.In reality, if a team is down by one goal, they might play more aggressively toward the end of the game, making them more likely to score, but also more likely to give up an additional goal.As always, the results are only as good as the model. Predicting the rematchNow we can take on the second question: If the same teams played again, what is the chance Croatia would win?To answer this question, we'll generate the "posterior predictive distribution", which is the number of goals we expect a team to score.If we knew the goal scoring rate, `lam`, the distribution of goals would be a Poisson distribution with parameter `lam`.Since we don't know `lam`, the distribution of goals is a mixture of a Poisson distributions with different values of `lam`.First I'll generate a sequence of `Pmf` objects, one for each value of `lam`. ###Code pmf_seq = [make_poisson_pmf(lam, goals) for lam in prior.qs] ###Output _____no_output_____ ###Markdown The following figure shows what these distributions look like for a few values of `lam`. ###Code import matplotlib.pyplot as plt for i, index in enumerate([10, 20, 30, 40]): plt.subplot(2, 2, i+1) lam = prior.qs[index] pmf = pmf_seq[index] pmf.bar(label=f'$\lambda$ = {lam}', color='C3') decorate_goals() ###Output _____no_output_____ ###Markdown The predictive distribution is a mixture of these `Pmf` objects, weighted with the posterior probabilities.We can use `make_mixture` from Chapter xxx to compute this mixture. ###Code from utils import make_mixture pred_france = make_mixture(france, pmf_seq) ###Output _____no_output_____ ###Markdown Here's the predictive distribution for the number of goals France would score in a rematch. ###Code pred_france.bar(color='C3', label='France') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown This distribution represents two sources of uncertainty: we don't know the actual value of `lam`, and even if we did, we would not know the number of goals in the next game.Here's the predictive distribution for Croatia. ###Code pred_croatia = make_mixture(croatia, pmf_seq) pred_croatia.bar(color='C0', label='Croatia') decorate_goals('Posterior predictive distribution') ###Output _____no_output_____ ###Markdown We can use these distributions to compute the probability that France wins, loses, or ties the rematch. ###Code win = Pmf.prob_gt(pred_france, pred_croatia) win lose = Pmf.prob_lt(pred_france, pred_croatia) lose tie = Pmf.prob_eq(pred_france, pred_croatia) tie ###Output _____no_output_____ ###Markdown Assuming that France wins half of the ties, their chance of winning the rematch is about 65%. 
###Code win + tie/2 ###Output _____no_output_____ ###Markdown This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.Even if France is the better team, they might lose the game. The Exponential DistributionAs an exercise at the end of this notebook, you'll have a chance to work on the following variation on the World Cup Problem:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)?In this version, notice that the data is not the number of goals in a fixed period of time, but the time between goals.To compute the likelihood of data like this, we can take advantage of the theory of Poisson processes again. If each team has a constant goal-scoring rate, we expect the time between goals to follow an [exponential distribution](https://en.wikipedia.org/wiki/Exponential_distribution).If the goal-scoring rate is $\lambda$, the probability of seeing an interval between goals of $t$ is proportional to the PDF of the exponential distribution:$$\lambda \exp(-\lambda t)$$Because $t$ is a continuous quantity, the value of this expression is not a probability; it is a probability density. However, it is proportional to the probability of the data, so we can use it as a likelihood in a Bayesian update.SciPy provides `expon`, which creates an object that represents an exponential distribution.However, it does not take `lam` as a parameter in the way you might expect, which makes it awkward to work with.Since the PDF of the exponential distribution is so easy to evaluate, I'll use my own function. ###Code def expo_pdf(t, lam): """Compute the PDF of the exponential distribution.""" return lam * np.exp(-lam * t) ###Output _____no_output_____ ###Markdown To see what the exponential distribution looks like, let's assume again that `lam` is 1.4; we can compute the distribution of $t$ like this: ###Code lam = 1.4 qs = np.linspace(0, 4, 101) ps = expo_pdf(qs, lam) pmf_time = Pmf(ps, qs) pmf_time.normalize() ###Output _____no_output_____ ###Markdown And here's what it looks like: ###Code def decorate_time(title=''): decorate(xlabel='Time between goals (games)', ylabel='PMF', title=title) pmf_time.plot(label='exponential with $\lambda$ = 1.4') decorate_time('Distribution of time between goals') ###Output _____no_output_____ ###Markdown It is counterintuitive, but true, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.With a goal-scoring rate of 1.4, it is possible that a team will take more than one game to score a goal, but it is unlikely that they will take more than two games. SummaryThis chapter introduces three new distributions, so it can be hard to keep them straight.Let's review:* If a system satisfies the assumptions of a Poisson model, the number of events in a period of time follows a Poisson distribution, which is a discrete distribution with integer quantities from 0 to infinity. 
In practice, we can usually ignore low-probability quantities above a finite limit.* Also under the Poisson model, the interval between events follows an exponential distribution, which is a continuous distribution with quantities from 0 to infinity. Because it is continuous, it is described by a probability density function (PDF) rather than a probability mass function (PMF). But when we use an exponential distribution to compute the likelihood of the data, we can treat densities as unnormalized probabilities.* The Poisson and exponential distributions are parameterized by an event rate, denoted $\lambda$ or `lam`.* For the prior distribution of $\lambda$, I used a gamma distribution, which is a continuous distribution with quantities from 0 to infinity, but I approximated it with a discrete, bounded PMF. The gamma distribution has one parameter, denoted $\alpha$ or `alpha`, which is also its mean.I chose the gamma distribution because the shape is consistent with our background knowledge about goal-scoring rates.There are other distributions we could have used; however, we will see in Chapter XX that the gamma distribution can be a particularly good choice.But we have a few things to do before we get there, starting with these exercises. Exercises **Exercise:** Let's finish off the exercise we started:>In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)? Here are the steps I recommend:1. Starting with the same gamma prior we used in the previous problem, compute the likelihood of scoring a goal after 11 minutes for each possible value of `lam`. Don't forget to convert all times into games rather than minutes.2. Compute the posterior distribution of `lam` for Germany after the first goal.3. Compute the likelihood of scoring another goal after 12 more minutes and do another update. Plot the prior, posterior after one goal, and posterior after two goals.4. Compute the posterior predictive distribution of goals Germany might score during the remaining time in the game, `90-23` minutes. Note: you will have to think about how to generate predicted goals for a fraction of a game.5. Compute the probability of scoring 5 or more goals during the remaining time. 
###Code # Solution # Here's a function that updates the distribution of lam # with the given time between goals def update_expo(pmf, data): """Update based on an observed interval pmf: prior PMF data: time between goals in minutes """ t = data / 90 lams = pmf.qs likelihood = expo_pdf(t, lams) pmf *= likelihood pmf.normalize() # Solution # Here are the updates for the first and second goals germany = prior.copy() update_expo(germany, 11) germany2 = germany.copy() update_expo(germany2, 12) # Solution # Here are the mean values of `lam` after each update germany.mean(), germany2.mean() # Solution # Here's what the posterior distributions look like prior.plot(color='C5', label='Prior') germany.plot(color='C3', label='Posterior after 1 goal') germany2.plot(color='C16', label='Posterior after 2 goals') decorate_rate('Prior and posterior distributions') # Solution # Here's the predictive distribution for each possible value of `lam` t = (90-23) / 90 pmf_seq = [make_poisson_pmf(lam*t, goals) for lam in germany.qs] # Solution # And here's the mixture of predictive distributions, # weighted by the probabilities in the posterior distribution. pmf_germany = make_mixture(germany, pmf_seq) # Solution # Here's what the predictive distribution looks like pmf_germany.bar(color='C1', label='germany') decorate_goals('Posterior predictive distribution') # Solution # Here's the probability of scoring exactly 5 more goals pmf_germany[5] # Solution # And the probability of 5 or more pmf_germany.prob_ge(5) ###Output _____no_output_____ ###Markdown **Exercise:** Returning to the first version of the World Cup Problem. Suppose France and Croatia play a rematch. What is the probability that France scores first? Hint: Compute the posterior predictive distribution for the time until the first goal by making a mixture of exponential distributions. You can use the following function to make a PMF that approximates an exponential distribution. ###Code def make_expo_pmf(lam, high): """Make a PMF of an exponential distribution. lam: event rate high: upper bound on the interval `t` returns: Pmf of the interval between events """ qs = np.linspace(0, high, 101) ps = expo_pdf(qs, lam) pmf = Pmf(ps, qs) pmf.normalize() return pmf # Solution # Here are the predictive distributions for the # time until the first goal pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs] # Solution # And here are the mixtures based on the two posterior distributions pred_france = make_mixture(france, pmf_seq) pred_croatia = make_mixture(croatia, pmf_seq) # Solution # Here's what the posterior predictive distributions look like pred_france.plot(label='France', color='C3') pred_croatia.plot(label='Croatia', color='C0') decorate_time('Posterior predictive distribution') # Solution # And here's the probability France scores first Pmf.prob_lt(pred_france, pred_croatia) ###Output _____no_output_____ ###Markdown **Exercise:** In the 2010-11 National Hockey League (NHL) Finals, my beloved BostonBruins played a best-of-seven championship series against the despisedVancouver Canucks. Boston lost the first two games 0-1 and 2-3, thenwon the next two games 8-1 and 4-0. At this point in the series, whatis the probability that Boston will win the next game, and what istheir probability of winning the championship?To choose a prior distribution, I got some statistics fromhttp://www.nhl.com, specifically the average goals per gamefor each team in the 2010-11 season. 
The distribution is well modeled by a gamma distribution with mean 2.8. In what ways do you think the outcome of these games might violate the assumptions of the Poisson model? How would these violations affect your predictions? ###Code # Solution

# When a team is winning or losing by an insurmountable margin,
# they might remove their best players from the game, which
# would affect their goal-scoring rate, violating the assumption
# that the goal scoring rate is constant.

# In this example, Boston won the third game 8-1, but scoring
# eight goals in a game might not reflect their true long-term
# goal-scoring rate.

# As a result, the analysis below might overestimate the chance
# that Boston wins.

# As it turned out, they did not.

# Solution

from scipy.stats import gamma

alpha = 2.8
qs = np.linspace(0, 15, 101)
ps = gamma.pdf(qs, alpha)

prior_hockey = Pmf(ps, qs)
prior_hockey.normalize()

# Solution

prior_hockey.plot(color='C5')
decorate_rate('Prior distribution for hockey')
prior_hockey.mean()

# Solution

bruins = prior_hockey.copy()
for data in [0, 2, 8, 4]:
    update_poisson(bruins, data)

bruins.mean()

# Solution

canucks = prior_hockey.copy()
for data in [1, 3, 1, 0]:
    update_poisson(canucks, data)

canucks.mean()

# Solution

canucks.plot(label='Canucks')
bruins.plot(label='Bruins')

decorate_rate('Posterior distributions')

# Solution

goals = np.arange(15)
pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs]

# Solution

pred_bruins = make_mixture(bruins, pmf_seq)

pred_bruins.bar(label='Bruins', color='C1')
decorate_goals('Posterior predictive distribution')

# Solution

pred_canucks = make_mixture(canucks, pmf_seq)

pred_canucks.bar(label='Canucks')
decorate_goals('Posterior predictive distribution')

# Solution

win = Pmf.prob_gt(pred_bruins, pred_canucks)
lose = Pmf.prob_lt(pred_bruins, pred_canucks)
tie = Pmf.prob_eq(pred_bruins, pred_canucks)

win, lose, tie

# Solution

# Assuming the Bruins win half of the ties,
# their chance of winning the next game is...

p = win + tie/2
p

# Solution

# Their chance of winning the series is their
# chance of winning k=2 or k=3 of the remaining
# n=3 games.

from scipy.stats import binom

n = 3
a = binom.pmf([2,3], n, p)
a.sum() ###Output _____no_output_____
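###Markdown As a sanity check (this cell is an addition, not part of the original solution), the series-winning probability can also be computed with the binomial survival function; the result should match `a.sum()` above. ###Code # Added cross-check: P(winning at least 2 of the remaining 3 games),
# computed as 1 minus the binomial CDF at k=1

1 - binom.cdf(1, n, p) ###Output _____no_output_____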
nbs/simentities.data.dataset.ipynb
###Markdown Dataset ###Code %load_ext autoreload
%autoreload 2

#export
import pandas as pd
from pathlib import Path

#export
class Dataset:
    """Load a CSV file and extract the data required for this task:
    exploded Series of author names and affiliations.
    """

    def __init__(self, path: Path, compression: str = None):
        self.df = pd.read_csv(path, compression=compression)
        self.names = self._process_names(col=self.df["authors"])
        self.affiliations = self._process_affiliations(self.df["affiliations"])

    def _process_names(self, col: pd.Series) -> pd.Series:
        # apply eval so each stringified author list becomes a real list,
        # then explode to one author name per row
        name_col = col.dropna().apply(eval)
        name_col = name_col.explode(ignore_index=True)
        return name_col

    def _process_affiliations(self, col: pd.Series) -> pd.Series:
        # split the "."-separated affiliation strings, then explode
        # to one affiliation per row
        aff_col = col.dropna().str.split(".")
        aff_col = aff_col.explode(ignore_index=True)
        return aff_col

path = Path("../publications_min.csv.gz")
dataset = Dataset(path, compression="gzip")
dataset.names.tail(3) ###Output _____no_output_____
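###Markdown The affiliations Series is built the same way as the names Series, so it can be inspected directly. This usage example is an addition and assumes the same `publications_min.csv.gz` file loaded above. ###Code # Added usage example: peek at the exploded affiliations Series
dataset.affiliations.tail(3) ###Output _____no_output_____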
Assignment 2/NNFL_Q8.ipynb
###Markdown BITS F312 - Neural Network and Fuzzy Logic NNFL Assignment 2 ###Code from google.colab import drive
drive.mount('/content/drive')

# Changing directory to the directory containing the dataset
%cd drive/MyDrive/NNFL/Data_A2/

# listing datasets
%ls -l

# libraries required
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import random  # needed by SupportVec.initRandomize below
from pprint import pprint

# suppressing warnings
import warnings
warnings.filterwarnings('ignore') ###Output _____no_output_____ ###Markdown Q8
Implement support vector machine (SVM) classifier for the multi-class classification task. You can use one vs one and one vs all multiclass coding methods to create binary SVM models. Implement the SMO algorithm for the evaluation of the training parameters of SVM such as Lagrange multipliers. You can use holdout approach (70%, 10%, 20%) for evaluating the performance of the classifier. The dataset (data5.xlsx) contains 7 features and the last column is the output (class labels). Evaluate individual accuracy and overall accuracy. You can use RBF and polynomial kernels. Evaluate the classification performance of multiclass SVM for each kernel function. (Packages such as Scikitlearn, keras, tensorflow, pytorch etc. are not allowed) ###Code dataset = pd.read_excel('data5.xlsx', header = None)

row, col = dataset.shape
feats = col - 1

# normalization (z-score every feature column; the label column is left unchanged)
dataset.loc[:, dataset.columns != feats] = (dataset.loc[:, dataset.columns != feats]-dataset.loc[:, dataset.columns != feats].mean(axis=0))/dataset.loc[:, dataset.columns != feats].std(axis=0)

# splitting dataset into train (70%), validation (10%) and test (20%)
training_data, validation_data, testing_data = np.split(dataset.sample(frac=1),
                                                        [int(0.7*len(dataset)), int(0.8*len(dataset))])

training_data = np.array(training_data)
validation_data = np.array(validation_data)
testing_data = np.array(testing_data)

training_data_X = training_data[:, :feats]
training_data_y = training_data[:, feats]

validation_data_X = validation_data[:, :feats]
validation_data_y = validation_data[:, feats]

testing_data_X = testing_data[:, :feats]
testing_data_y = testing_data[:, feats]

train_row, train_col = training_data_X.shape

class SupportVec():
    def __init__(self, max_iter=10000, kernel_type='linear', C=1.0, epsilon=0.001):
        self.kernels = {
            'linear' : self.linearKernel
        }
        self.max_iter = max_iter
        self.kernel_type = kernel_type
        self.C = C
        self.epsilon = epsilon

    def fit(self, X, y):
        n, d = X.shape[0], X.shape[1]
        alpha = np.zeros((n))
        kernel = self.kernels[self.kernel_type]
        count = 0
        while(True):
            count += 1
            alpha_prev = np.copy(alpha)
            for j in range(0, n):
                i = self.initRandomize(0, n-1, j) # Get random int i~=j
                x_i, x_j, Yi, Yj = X[i,:], X[j,:], y[i], y[j]
                k_ij = kernel(x_i, x_i) + kernel(x_j, x_j) - 2 * kernel(x_i, x_j)
                if k_ij == 0:
                    continue
                jPrimeAlpha, iPrimeAlpha = alpha[j], alpha[i]
                (L, H) = self.computeLH(self.C, jPrimeAlpha, iPrimeAlpha, Yj, Yi)

                self.w = self.computeWeights(alpha, y, X)
                self.b = self.computeBias(X, y, self.w)

                E_i = self.E(x_i, Yi, self.w, self.b)
                E_j = self.E(x_j, Yj, self.w, self.b)

                # update alpha_j, clip it to [L, H], then update alpha_i
                alpha[j] = jPrimeAlpha + float(Yj * (E_i - E_j))/k_ij
                alpha[j] = max(alpha[j], L)
                alpha[j] = min(alpha[j], H)

                alpha[i] = iPrimeAlpha + Yi*Yj * (jPrimeAlpha - alpha[j])

            diff = np.linalg.norm(alpha - alpha_prev)
            if diff < self.epsilon:
                break

            if(count >= self.max_iter):
                print("Iteration number exceeded the max of %d iterations" % (self.max_iter))
                return

        self.b = self.computeBias(X, y, self.w)
        if self.kernel_type == 'linear':
            self.w = self.computeWeights(alpha, y, X)

        alpha_idx = np.where(alpha > 0)[0]
        support_vectors = X[alpha_idx, :]
        return support_vectors, count

    def predict(self, X):
        return self.h(X, self.w, self.b)

    def computeBias(self, X, y, w):
        biasVar = y - np.dot(w.T, X.T)
        return np.mean(biasVar)

    def computeWeights(self, alpha, y, X):
        return np.dot(X.T, np.multiply(alpha, y))

    def h(self, X, weight, bias):
        return np.sign(np.dot(weight.T, X.T) + bias).astype(int)

    def E(self, Xk, Yk, weight, bias):
        return self.h(Xk, weight, bias) - Yk

    def computeLH(self, C, jPrimeAlpha, iPrimeAlpha, Yj, Yi):
        if(Yi != Yj):
            return (max(0, jPrimeAlpha - iPrimeAlpha), min(C, C - iPrimeAlpha + jPrimeAlpha))
        else:
            return (max(0, iPrimeAlpha + jPrimeAlpha - C), min(C, iPrimeAlpha + jPrimeAlpha))

    def initRandomize(self, a, b, count):
        # draw a random index in [a, b] that differs from count
        iterations = count
        counter = 0
        while(iterations == count and counter<1000):
            iterations = random.randint(a,b)
            counter += 1
        return iterations

    def linearKernel(self, x1, x2):
        return np.dot(x1, x2.T)

model = SupportVec(max_iter=1000, epsilon=0.01)
model.fit(training_data_X, training_data_y)

test_pred = model.predict(testing_data_X)

print('Testing data')
metrics(testing_data_y, test_pred) ###Output Testing data
---------------------------------------------------------------------------
Sensitivity : 0.8653846153846154
Specificity : 0.8043478260869565
Accuracy ((TN+TP)/(TN+TP+FN+FP)) : 0.8367346938775511
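###Markdown The assignment also asks for RBF and polynomial kernels, and the test cell above calls a `metrics` helper that is not defined anywhere in this notebook. The sketch below is an addition: the kernel functions show one way they could be registered alongside `linearKernel` in `self.kernels`, and the `metrics` stand-in reports overall and individual (per-class) accuracy as the problem statement requires. The values `gamma=0.1`, `c=1.0`, and `degree=3` are assumed defaults, not tuned. Note that `predict` currently relies on an explicit weight vector, which only makes sense for the linear kernel, so using a non-linear kernel would also require a kernel-based decision function. ###Code # Added sketch (not from the original notebook).
# Possible RBF and polynomial kernels in the same style as linearKernel;
# gamma, c and degree are assumed values.

def rbfKernel(x1, x2, gamma=0.1):
    # RBF kernel: exp(-gamma * ||x1 - x2||^2)
    diff = np.asarray(x1) - np.asarray(x2)
    return np.exp(-gamma * np.dot(diff, diff))

def polyKernel(x1, x2, c=1.0, degree=3):
    # polynomial kernel: (x1 . x2 + c)^degree
    return (np.dot(x1, np.asarray(x2).T) + c) ** degree

# Stand-in for the undefined `metrics` call above: prints overall
# accuracy and the per-class accuracy of the predictions.
def metrics(y_true, y_pred):
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    print('Overall accuracy :', np.mean(y_true == y_pred))
    for label in np.unique(y_true):
        mask = (y_true == label)
        print('Class', label, 'accuracy :', np.mean(y_pred[mask] == label)) ###Output _____no_output_____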
_jupyter/.ipynb_checkpoints/blog-2-checkpoint.ipynb
###Markdown Web DevelopmentIn this blog post, I will show you how to create a webapp using Flask. The app will take in a user submitted message and their handler into a database. In addition, we will be able to view a sample of the messages by pulling from the database. Python FunctionsThere are five functions that I used in the creation of this web app. 1The overall function of get_message_db() is to make sure there is a table 'messages' in a database 'message_db' and a connection to this database. This will ensure that we can later modify the database in future functions. ###Code def get_message_db(): #Checks whether there is a database called message_db in the g attribute of the app if 'message_db' not in g: g.message_db = sqlite3.connect('message_db.sqlite') #Checks whether a table called messages exists in message_db, #and creates it if not. Gives the columns id, handle, and message g.message_db.execute( ''' CREATE TABLE IF NOT EXISTS messages (id INTEGER, handle TEXT, message TEXT); ''') #returns the connection to the database return g.message_db ###Output _____no_output_____ ###Markdown 2The insert_message(request) function will take in request as a parameter. Request is a built in object of the Flask library. Its built in attributes make it easy to extract user submitted data from a POST form. It will take this extracted data, which is the user's message and handle, and then submit it into the previously made database. ###Code def insert_message(request): #extracts the message and handle from the post request #ie. assigns the user's submission to variables mVal = request.form["mess"] hVal = request.form["user"] #assigns database connection to variable db = get_message_db() #will find length of table in database count = db.execute("SELECT COUNT(*) FROM messages;") #add one to length of table to ensure every submission has a unique id iVal = int(count.fetchall()[0][0]) + 1 #assign previous variables to each sequential column in database db.execute('INSERT INTO messages (id, handle, message) VALUES (?, ?, ?)', (iVal, hVal, mVal)) #saves your varibles into database db.commit() #close connection to database db.close() return ###Output _____no_output_____ ###Markdown 3This function will allow the url ".../submit/" to display the html template with the added variables that I will go through below. It will give tangible proof of the functions from above. ###Code #route this function to the url ".../submit/" w/ both POST and GET methods usable @app.route('/submit/', methods=['POST', 'GET']) def submit(): if request.method == 'GET': #this will render the base templete submit.html to the website when there has not been a post method submitted return render_template('submit.html') else: #try to extract the user submission and upload it into the database try: insert_message(request) return render_template('submit.html', thanks = True) #if fails, shows a message on the base template saying there was an error except: return render_template('submit.html', error = True) ###Output _____no_output_____ ###Markdown 4This function will take a parameter n, which is the max number of messgaes and their handles extracted from the database that it shall return. This will make it possible that there is a list of messages and handles that can be displayed on the website. 
###Code def random_messages(n): #connect to the database db = get_message_db() #extract a randomized list containing 5 random messages and their handles id = db.execute(''' SELECT message, handle FROM messages ORDER BY RANDOM() LIMIT (?); ''', (n,)) idFetch = id.fetchall() #close the database db.close() #return list of tuples of all the messages and their handles return idFetch ###Output _____no_output_____ ###Markdown 5This last function will allow the url ".../view/" to display the html template with the added variable of the randomized messages, the final product of all the above functions ###Code #route this function to the url ".../view/" w/ GET method only @app.route('/view/', methods=['GET']) def view(): try: #try to extract a random number of messages (1-5) from the database rNum = random.randint(1, 5) mssg = random_messages(rNum) #if succesful, post those messages to the template return render_template('view.html', mssg = mssg) except: #if fails, shows a message on the base template saying there was an error return render_template('view.html', error = True) ###Output _____no_output_____ ###Markdown HTML TemplateBelow I will show an example of the HTML templates I referenced to in the functions explination. I will comment line by line the importance of each code. ###Code #extends 'base.html' allows you to import another html file #this is useful since the template for base.html was used in multiple html files {% extends 'base.html' %} #block gives you a way to divide code into coherent groupings {% block header %} <h1>{% block title %}Some Cool Messages{% endblock %}</h1> {% endblock %} {% block content %} #the if jinja operator allows for variables to be passed in from the functions that are #beneath the @app.route as long as the function is under the path {% if error %} #this code says that if an error exists, then print this statement and end <br> Uhhh idk what happened but an error occured sorry idk why. {% endif %} {% if mssg %} #this code says if the variable mssg exists... <br> #loop through each element {% for m in mssg %} #this section has id 'quote' to make it easy to read in CSS <section id="quote"> #this will print the 0th element of the m element of mssg "{{m[0]}}" </section> #same as above <section id ="author"> - {{m[1]}}<br> </section> <br> {% endfor %} # I added this just if there was no mssg yet uploaded in the database # there will be a small message letting you know. {% else %} I'm sorry, it seems there have been no messages submitted. You can start by clicking submit a message. {% endif %} {% endblock %} ###Output _____no_output_____
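The view functions in this post also rely on a Flask application object and a few imports that the post does not show. A minimal sketch of that glue code is below; the module layout and the debug flag are assumptions rather than part of the original app. ###Code # Hedged sketch of the app setup assumed by the functions above; not shown in the original post.
from flask import Flask, g, render_template, request
import sqlite3
import random

app = Flask(__name__)   # assumes submit.html, view.html and base.html live in ./templates

# the get_message_db, insert_message, submit, random_messages and view functions above go here

if __name__ == '__main__':
    # run the development server (debug flag is an assumption)
    app.run(debug=True)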
demos/for_developers.ipynb
###Markdown Tutorial: Graphistry for Develpers**Start by generating interactive graphs in the [Analysis tutorial](for_analysis.ipynb)****Graphistry is a client/server system:** * Graphs are akin to live documents: they are created on the server (a `dataset`), and then users can interact with them * Uploads may provide some settings * Users may dynamically create settings, such as filters: these are `workbooks`. Multiple `workbooks` may reuse the same `dataset`**APIs:** - Backend APIs - [Python](https://github.com/graphistry/pygraphistry) - [REST](https://labs.graphistry.com/graphistry/docs/docs/docs_api.htmllink) - Frontend APIs - iframe - React - JavaScript ###Code import graphistry #graphistry.register(key='MY_API_KEY', server='labs.graphistry.com') ###Output _____no_output_____ ###Markdown 1. Backend APIsGraphistry provides a REST upload API, and you can reuse the Python client for more conveniently using it. Python* Use the PyGraphistry API as in the [Analysis tutorial](for_analysis.ipynb)* Instead of plotting, get the plot URL for embedding ###Code edges = [{'src': 0, 'dst': 1}, {'src': 1, 'dst': 0}] g = graphistry.edges(pd.DataFrame(edges)).bind(source='src', destination='dst').settings(url_params={'play': 1000}) url = g.plot(render=False) url ###Output _____no_output_____ ###Markdown REST* Sample CURL below* Get API key either from your profile page, or for admins, by [generating a new one]( https://github.com/graphistry/graphistry-cli) ###Code json_data = { "name": "myUniqueGraphName", "type": "edgelist", "bindings": { "sourceField": "src", "destinationField": "dst", "idField": "node" }, "graph": [ {"src": "myNode1", "dst": "myNode2", "myEdgeField1": "I'm an edge!", "myCount": 7}, {"src": "myNode2", "dst": "myNode3", "myEdgeField1": "I'm also an edge!", "myCount": 200} ], "labels": [ {"node": "myNode1", "myNodeField1": "I'm a node!", "pointColor": 5}, {"node": "myNode2", "myNodeField1": "I'm a node too!", "pointColor": 4}, {"node": "myNode3", "myNodeField1": "I'm a node three!", "pointColor": 4} ] } import json with open('./data/samplegraph.json', 'w') as outfile: json.dump(json_data, outfile) ! curl -H "Content-type: application/json" -X POST -d @./data/samplegraph.json https://labs.graphistry.com/etl?key=YOUR_API_KEY_HERE ###Output {"success":true,"dataset":"myUniqueGraphName"} ###Markdown 2. Frontend APIsGraphistry supports 3 frontend APIs: iframe, React, and JavaScript iframe ###Code from IPython.display import HTML, display #skip splash screen url = url.replace('splashAfter', 'zzz') display(HTML('<iframe src="' + url + '" style="width: 100%; height: 400px"></iframe>')) ###Output _____no_output_____ ###Markdown Tutorial: Graphistry for Develpers**Start by generating interactive graphs in the [Analysis tutorial](for_analysis.ipynb)****Graphistry is a client/server system:** * Graphs are akin to live documents: they are created on the server (a `dataset`), and then users can interact with them * Uploads may provide some settings * Users may dynamically create settings, such as filters: these are `workbooks`. 
Multiple `workbooks` may reuse the same `dataset`**APIs:** - Backend APIs - [Python](https://github.com/graphistry/pygraphistry) - [REST](https://labs.graphistry.com/graphistry/docs/docs/docs_api.htmllink) - Frontend APIs - iframe - React - JavaScript ###Code import graphistry # To specify Graphistry account & server, use: # graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com') # For more options, see https://github.com/graphistry/pygraphistry#configure ###Output _____no_output_____ ###Markdown 1. Backend APIsGraphistry provides a REST upload API, and you can reuse the Python client for more conveniently using it. Python* Use the PyGraphistry API as in the [Analysis tutorial](for_analysis.ipynb)* Instead of plotting, get the plot URL for embedding ###Code edges = [{'src': 0, 'dst': 1}, {'src': 1, 'dst': 0}] g = graphistry.edges(pd.DataFrame(edges)).bind(source='src', destination='dst').settings(url_params={'play': 1000}) url = g.plot(render=False) url ###Output _____no_output_____ ###Markdown REST* Sample CURL below* Get API key either from your profile page, or for admins, by [generating a new one]( https://github.com/graphistry/graphistry-cli) ###Code json_data = { "name": "myUniqueGraphName", "type": "edgelist", "bindings": { "sourceField": "src", "destinationField": "dst", "idField": "node" }, "graph": [ {"src": "myNode1", "dst": "myNode2", "myEdgeField1": "I'm an edge!", "myCount": 7}, {"src": "myNode2", "dst": "myNode3", "myEdgeField1": "I'm also an edge!", "myCount": 200} ], "labels": [ {"node": "myNode1", "myNodeField1": "I'm a node!", "pointColor": 5}, {"node": "myNode2", "myNodeField1": "I'm a node too!", "pointColor": 4}, {"node": "myNode3", "myNodeField1": "I'm a node three!", "pointColor": 4} ] } import json with open('./data/samplegraph.json', 'w') as outfile: json.dump(json_data, outfile) ! curl -H "Content-type: application/json" -X POST -d @./data/samplegraph.json https://labs.graphistry.com/etl?key=YOUR_API_KEY_HERE ###Output {"success":true,"dataset":"myUniqueGraphName"} ###Markdown 2. Frontend APIsGraphistry supports 3 frontend APIs: iframe, React, and JavaScript iframe ###Code from IPython.display import HTML, display #skip splash screen url = url.replace('splashAfter', 'zzz') display(HTML('<iframe src="' + url + '" style="width: 100%; height: 400px"></iframe>')) ###Output _____no_output_____
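The same ETL upload can also be driven from Python instead of curl; a short sketch using the `requests` library follows, where `json_data` is the payload built above and the key placeholder and timeout value are assumptions. ###Code # Hedged sketch: POST the payload above to the ETL endpoint with requests instead of curl.
import requests

resp = requests.post(
    "https://labs.graphistry.com/etl",
    params={"key": "YOUR_API_KEY_HERE"},   # same placeholder as the curl example
    json=json_data,
    timeout=60,                            # assumption
)
print(resp.status_code, resp.json())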
AAAI/Learnability/CIN/Linear/ds2/size_500/synthetic_type2_Linear_m_1000.ipynb
###Markdown Generate dataset ###Code np.random.seed(12) y = np.random.randint(0,10,5000) idx= [] for i in range(10): print(i,sum(y==i)) idx.append(y==i) x = np.zeros((5000,2)) np.random.seed(12) x[idx[0],:] = np.random.multivariate_normal(mean = [5,5],cov=[[0.1,0],[0,0.1]],size=sum(idx[0])) x[idx[1],:] = np.random.multivariate_normal(mean = [-6,7],cov=[[0.1,0],[0,0.1]],size=sum(idx[1])) x[idx[2],:] = np.random.multivariate_normal(mean = [-5,-4],cov=[[0.1,0],[0,0.1]],size=sum(idx[2])) x[idx[3],:] = np.random.multivariate_normal(mean = [-1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[3])) x[idx[4],:] = np.random.multivariate_normal(mean = [0,2],cov=[[0.1,0],[0,0.1]],size=sum(idx[4])) x[idx[5],:] = np.random.multivariate_normal(mean = [1,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[5])) x[idx[6],:] = np.random.multivariate_normal(mean = [0,-1],cov=[[0.1,0],[0,0.1]],size=sum(idx[6])) x[idx[7],:] = np.random.multivariate_normal(mean = [0,0],cov=[[0.1,0],[0,0.1]],size=sum(idx[7])) x[idx[8],:] = np.random.multivariate_normal(mean = [-0.5,-0.5],cov=[[0.1,0],[0,0.1]],size=sum(idx[8])) x[idx[9],:] = np.random.multivariate_normal(mean = [0.4,0.2],cov=[[0.1,0],[0,0.1]],size=sum(idx[9])) x[idx[0]][0], x[idx[5]][5] for i in range(10): plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i)) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) bg_idx = [ np.where(idx[3] == True)[0], np.where(idx[4] == True)[0], np.where(idx[5] == True)[0], np.where(idx[6] == True)[0], np.where(idx[7] == True)[0], np.where(idx[8] == True)[0], np.where(idx[9] == True)[0]] bg_idx = np.concatenate(bg_idx, axis = 0) bg_idx.shape np.unique(bg_idx).shape x = x - np.mean(x[bg_idx], axis = 0, keepdims = True) np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True) x = x/np.std(x[bg_idx], axis = 0, keepdims = True) np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True) for i in range(10): plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i)) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) foreground_classes = {'class_0','class_1', 'class_2'} background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'} fg_class = np.random.randint(0,3) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(3,10) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) print(a.shape) print(fg_class , fg_idx) np.reshape(a,(2*m,1)) mosaic_list_of_images =[] mosaic_label = [] fore_idx=[] for j in range(desired_num): np.random.seed(j) fg_class = np.random.randint(0,3) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(3,10) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list_of_images.append(np.reshape(a,(2*m,1))) mosaic_label.append(fg_class) fore_idx.append(fg_idx) mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T mosaic_list_of_images.shape mosaic_list_of_images.shape, mosaic_list_of_images[0] for j in range(m): 
print(mosaic_list_of_images[0][2*j:2*j+2]) def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m): """ mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point labels : mosaic_dataset labels foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average dataset_number : will help us to tell what ratio of foreground image to be taken. for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9 """ avg_image_dataset = [] cnt = 0 counter = np.zeros(m) #np.array([0,0,0,0,0,0,0,0,0]) for i in range(len(mosaic_dataset)): img = torch.zeros([2], dtype=torch.float64) np.random.seed(int(dataset_number*10000 + i)) give_pref = foreground_index[i] #np.random.randint(0,9) # print("outside", give_pref,foreground_index[i]) for j in range(m): if j == give_pref: img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m #2 is data dim else : img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m) if give_pref == foreground_index[i] : # print("equal are", give_pref,foreground_index[i]) cnt += 1 counter[give_pref] += 1 else : counter[give_pref] += 1 avg_image_dataset.append(img) print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt)) print("the averaging are done as ", counter) return avg_image_dataset , labels , foreground_index avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m) test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m) avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0) # avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0) # print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0)) # print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0)) print("=="*40) test_dataset = torch.stack(test_dataset, axis = 0) # test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0) # print(torch.mean(test_dataset, keepdims= True, axis = 0)) # print(torch.std(test_dataset, keepdims= True, axis = 0)) print("=="*40) x1 = (avg_image_dataset_1).numpy() y1 = np.array(labels_1) plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0') plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1') plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2') plt.legend() plt.title("dataset4 CIN with alpha = 1/"+str(m)) x1 = (test_dataset).numpy() / m y1 = np.array(labels) plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0') plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1') plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2') plt.legend() plt.title("test dataset4") test_dataset[0:10]/m test_dataset = test_dataset/m test_dataset[0:10] class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list_of_images, mosaic_label): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. 
""" self.mosaic = mosaic_list_of_images self.label = mosaic_label #self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx] avg_image_dataset_1[0].shape avg_image_dataset_1[0] batch = 200 traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 ) trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True) testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 ) testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False) testdata_11 = MosaicDataset(test_dataset, labels ) testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False) class Whatnet(nn.Module): def __init__(self): super(Whatnet,self).__init__() self.linear1 = nn.Linear(2,3) # self.linear2 = nn.Linear(50,10) # self.linear3 = nn.Linear(10,3) torch.nn.init.xavier_normal_(self.linear1.weight) torch.nn.init.zeros_(self.linear1.bias) def forward(self,x): # x = F.relu(self.linear1(x)) # x = F.relu(self.linear2(x)) x = (self.linear1(x)) return x def calculate_loss(dataloader,model,criter): model.eval() r_loss = 0 with torch.no_grad(): for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") outputs = model(inputs) loss = criter(outputs, labels) r_loss += loss.item() return r_loss/(i+1) def test_all(number, testloader,net): correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= net(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() pred = np.concatenate(pred, axis = 0) out = np.concatenate(out, axis = 0) print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) ) print("correct: ", correct, "total ", total) print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total)) def train_all(trainloader, ds_number, testloader_list): print("--"*40) print("training on data set ", ds_number) torch.manual_seed(12) net = Whatnet().double() net = net.to("cuda") criterion_net = nn.CrossEntropyLoss() optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9) acti = [] loss_curi = [] epochs = 1000 running_loss = calculate_loss(trainloader,net,criterion_net) loss_curi.append(running_loss) print('epoch: [%d ] loss: %.3f' %(0,running_loss)) for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 net.train() for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") # zero the parameter gradients optimizer_net.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion_net(outputs, labels) # print statistics running_loss += loss.item() loss.backward() optimizer_net.step() running_loss = calculate_loss(trainloader,net,criterion_net) if(epoch%200 == 0): print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) loss_curi.append(running_loss) #loss per epoch if running_loss<=0.05: print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) break print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for data in trainloader: images, labels = data images, labels = images.to("cuda"), labels.to("cuda") outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += 
labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total)) for i, j in enumerate(testloader_list): test_all(i+1, j,net) print("--"*40) return loss_curi train_loss_all=[] testloader_list= [ testloader_1, testloader_11] train_loss_all.append(train_all(trainloader_1, 1, testloader_list)) %matplotlib inline for i,j in enumerate(train_loss_all): plt.plot(j,label ="dataset "+str(i+1)) plt.xlabel("Epochs") plt.ylabel("Training_loss") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ###Output _____no_output_____
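Since the averaged data points are two-dimensional and the classifier is a single linear layer, the learned decision regions can be plotted directly. The sketch below assumes `train_all` is modified to also return the trained `net` (the version above only returns the loss curve); the grid resolution and plot ranges are assumptions. ###Code # Hedged sketch: visualise the decision regions of the trained linear model on the 2-D data.
import numpy as np
import matplotlib.pyplot as plt
import torch

def plot_decision_regions(net, data, labels, title=""):
    data = data.cpu().numpy() if torch.is_tensor(data) else np.asarray(data)
    labels = np.asarray(labels)
    x_min, x_max = data[:, 0].min() - 1.0, data[:, 0].max() + 1.0
    y_min, y_max = data[:, 1].min() - 1.0, data[:, 1].max() + 1.0
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 300), np.linspace(y_min, y_max, 300))
    grid = torch.tensor(np.c_[xx.ravel(), yy.ravel()], dtype=torch.float64).to("cuda")
    with torch.no_grad():
        zz = net(grid).argmax(dim=1).cpu().numpy().reshape(xx.shape)
    plt.contourf(xx, yy, zz, alpha=0.3)                      # predicted class per grid cell
    for c in range(3):
        plt.scatter(data[labels == c, 0], data[labels == c, 1], s=8, label="class "+str(c))
    plt.legend()
    plt.title(title)

# usage sketch (assumes train_all is changed to `return loss_curi, net`):
# loss_curve, net = train_all(trainloader_1, 1, testloader_list)
# plot_decision_regions(net, avg_image_dataset_1, labels_1, "dataset 1 decision regions")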
DA.ipynb
###Markdown DAPPERThanks to the [team](https://www.nersc.no/about)* A starting point point for Data Assimilation https://www.nersc.no/group/data-assimilation ###Code %%bash ECF_PORT=2500 ecflow_start.sh -p 2500 >/dev/null ###################### # from __future__ import print_function import os, sys # lib = "/usr/local/apps/ecflow/current/lib/python2.7/site-packages/ecflow" # lib = "/usr/local/apps/ecflow/current/lib/python3.5/site-packages/ecflow" lib = "/usr/local/lib/python3.5/site-packages/ecflow" sys.path.append(lib) import ecf; from ecf import (Client, Defs, Suite, Family, Task, Defstatus, Edit, Label, Trigger) try: x = Edit(test="value") # Edit is present in recent ecf.py module except: class Edit(Variables): pass home = os.getenv("HOME") + "/ecflow_server" user = os.getenv("USER") # SUITE node = Suite("DA").add( Defstatus("suspended"), Edit(ECF_HOME=home, ECF_INCLUDE=home + "/include", ECF_FILES=home + "/files", ECF_EXTN=".ecg", # current convention for generated task template extension ECF_JOB_CMD="%ECF_JOB% > %ECF_JOBOUT% 2>&1", # localhost run ECF_URL_CMD="firefox %URL%", URL="https://www.nersc.no/group/data-assimilation", ), Family("make").add( Family("get").add(Task("cmd").add( Edit(CMD="[ ! -d DAPPER ] && " + "git clone https://github.com/nansencenter/DAPPER.git", ARGS=""))), Family("compile").add( Trigger(["get"]), Task("cmd").add( Edit(CMD="cd DAPPER; xterm -T 'python3 example_1.py'")), ), ), Family("main").add(Task("cmd").add( Label("info", ""), Edit(CMD="ecflow_client --label info", ARGS="YOUR PART"), ), )) # print(node) # TASK TEMPLATE fname = home + "/files/cmd.ecg" if not os.path.isfile(fname): with open(fname, 'w') as task_template: print("""#!/bin/bash %include <head.h> %CMD:echo% %ARGS:% %include <tail.h>""", file=task_template) # DEFS defs = Defs() defs.add_suite(node) path = '/' + node.name() # print(defs) # CLIENT client = Client("localhost@%s" % os.getenv("ECF_PORT", 2500)) # PYTHON CLIENT if node.name() not in client.suites(): client.load(defs) # load/replace the top node (suite) client.begin_suite(node.name()) # BEGIN suite: UNKNOWN -> QUEUED else: client.replace(path, defs); # print("# REPLACE " + path, client) # load/replace the top node (suite) # client.resume(path) # RESUME suite: SUSPENDED -> create job and submit import os os.system("ecflow_ui &") ###Output _____no_output_____ ###Markdown Setup Import libraries ###Code import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import transforms from torchvision import datasets from torch.utils.data import DataLoader import random random.seed(123) import time import os from google.colab import drive drive.mount('/content/drive') ###Output Mounted at /content/drive ###Markdown Check CUDA version ###Code use_cuda = True if use_cuda and torch.cuda.is_available(): device = torch.device('cuda') else: device = torch.device('cpu') device ###Output _____no_output_____ ###Markdown Visualisation functions ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt # Function to show an image tensor def show(X): if X.dim() == 3 and X.size(2) == 3: plt.imshow(X.numpy()) #plt.show() elif X.dim() == 2: plt.imshow( X.numpy() , cmap='gray' ) #plt.show() else: print('WRONG TENSOR SIZE') def show_saliency(X): if X.dim() == 3 and X.size(2) == 3: plt.imshow(X.numpy()) plt.show() elif X.dim() == 2: plt.imshow( X.numpy() , cmap='viridis' ) plt.show() else: print('WRONG TENSOR SIZE') ###Output _____no_output_____ ###Markdown Download 
dataset ###Code transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.squeeze()), # Squeeze the data to remove the redundant channel dimension ]) trainset = torchvision.datasets.FashionMNIST(root='./data_FashionMNIST', train=True, download=True, transform=transform ) testset = torchvision.datasets.FashionMNIST(root='./data_FashionMNIST', train=False, download=True, transform=transform ) classes = ( 'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot', ) ###Output Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz to ./data_FashionMNIST/FashionMNIST/raw/train-images-idx3-ubyte.gz ###Markdown Data preprocessing Augment the data ###Code train_hflip = transforms.functional.hflip(trainset.data) train_brightness = [transforms.functional.adjust_brightness(x, brightness_factor=random.choice([0.5, 0.75, 1.25, 1.5])) for x in trainset.data] train_brightness = torch.stack(train_brightness) train_blur = transforms.functional.gaussian_blur(trainset.data, kernel_size=3) train_rotate = [transforms.functional.rotate(torch.unsqueeze(x, dim=0), angle=random.randrange(30,330,5)).squeeze() for x in trainset.data] train_rotate = torch.stack(train_rotate) ###Output _____no_output_____ ###Markdown Visualise the augmented data ###Code show(trainset.data[0]) show(train_hflip[0]) show(train_blur[0]) show(train_brightness[0]) show(train_rotate[0]) ###Output _____no_output_____ ###Markdown Split training data into train and validation data ###Code trainset.data = torch.cat((trainset.data, train_hflip, train_brightness, train_blur, train_rotate),dim=0) trainset.targets = torch.cat((trainset.targets, trainset.targets, trainset.targets, trainset.targets, trainset.targets)) trainset from sklearn.model_selection import train_test_split targets = trainset.targets train_idx, val_idx= train_test_split(np.arange(len(targets)),test_size=0.2,shuffle=True, stratify=targets, random_state=123) train_sampler = torch.utils.data.SubsetRandomSampler(train_idx) val_sampler = torch.utils.data.SubsetRandomSampler(val_idx) batch_size=128 trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler) valloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=val_sampler) testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, drop_last=True ) ###Output _____no_output_____ ###Markdown Model architecture Create the model ###Code class Net(nn.Module): def __init__(self, kernel_size, pool_function, nfilters_conv1, nfilters_conv2): super(Net, self).__init__() self.nfilters_conv2 = nfilters_conv2 # CL1: 1 x 28 x 28 (grayscale) --> nfilters_conv1 x 28 x 28 self.conv1 = nn.Conv2d(1, nfilters_conv1, kernel_size=kernel_size, padding=kernel_size//2) # MP1: nfilters_conv1 x 28 x 28 --> nfilters_conv1 x 14 x 14 self.pool1 = pool_function(2,2) # CL2: nfilters_conv1 x 14 x 14 --> nfilters_conv2 x 14 x 14 self.conv2 = nn.Conv2d(nfilters_conv1, nfilters_conv2, kernel_size=kernel_size, padding=kernel_size//2) # MP2: nfilters_conv2 x 14 x 14 --> nfilters_conv2 x 7 x 7 self.pool2 = pool_function(2,2) # LL1: nfilters_conv2 x 7 x 7 --> 100 self.linear1 = nn.Linear((nfilters_conv2*7*7), 100) # LL2: 100 --> 10 self.linear2 = nn.Linear(100,10) def forward(self, x): x = x.unsqueeze(1) # CL1: x = self.conv1(x) x = F.relu(x) # MP1: x = self.pool1(x) # CL2: x = self.conv2(x) x = F.relu(x) # MP2: x = self.pool2(x) # LL1: x = x.view(-1, 
self.nfilters_conv2*7*7) x = self.linear1(x) x = F.relu(x) # LL2: x = self.linear2(x) return x # best results from hyperparameter tuning kernel_size= 5 pool_function = nn.AvgPool2d nfilters_conv1 = 128 nfilters_conv2 = 128 model_aug = Net(kernel_size=kernel_size,pool_function=pool_function,nfilters_conv1=nfilters_conv1,nfilters_conv2=nfilters_conv2).to(device) criterion = nn.CrossEntropyLoss() my_lr=0.01 optimizer=torch.optim.Adam(model_aug.parameters(), lr=my_lr) # change here print(model_aug) ###Output LeNet( (conv1): Conv2d(1, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)) (pool1): AvgPool2d(kernel_size=2, stride=2, padding=0) (conv2): Conv2d(128, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)) (pool2): AvgPool2d(kernel_size=2, stride=2, padding=0) (linear1): Linear(in_features=6272, out_features=100, bias=True) (linear2): Linear(in_features=100, out_features=10, bias=True) ) ###Markdown Attack! Import libraries ###Code !pip install advertorch from advertorch.attacks import PGDAttack ###Output _____no_output_____ ###Markdown Create adversary ###Code # prepare your pytorch model as "model" # prepare a batch of data and label as "cln_data" and "true_label" # prepare attack instance adversary = PGDAttack( model_aug, loss_fn=nn.CrossEntropyLoss(), eps=0.3, nb_iter=10, eps_iter=0.01, rand_init=True, clip_min=0.0, clip_max=1.0, targeted=False) plot_valloss = [] ###Output _____no_output_____ ###Markdown Train the model ###Code start=time.time() min_loss = 20 #initial loss to be overwritten epochs_no_improve = 0 patience = 20 # high patience to overcome local minima for epoch in range(1,200): model_aug.train() for i, (x_batch, y_batch) in enumerate(trainloader): x_batch, y_batch = x_batch.to(device), y_batch.to(device) # Move the data to the device that is used optimizer.zero_grad() # Set all currenly stored gradients to zero y_pred = model_aug(x_batch) loss = criterion(y_pred, y_batch) loss.backward() optimizer.step() # Compute relevant metrics y_pred_max = torch.argmax(y_pred, dim=1) # Get the labels with highest output probability correct = torch.sum(torch.eq(y_pred_max, y_batch)).item() # Count how many are equal to the true labels elapsed = time.time() - start # Keep track of how much time has elapsed # Show progress every 50 batches if not i % 100: print(f'epoch: {epoch}, time: {elapsed:.3f}s, loss: {loss.item():.3f}, train accuracy: {correct / batch_size:.3f}') model_aug.eval() val_loss = 0 counter = 0 for i, (x_batch, y_batch) in enumerate(valloader): counter += 1 x_batch, y_batch = x_batch.to(device), y_batch.to(device) # Move the data to the device that is used y_pred = model_aug(x_batch) val_loss += criterion(y_pred, y_batch).item() val_loss = val_loss/counter print(f'epoch: {epoch}, validation loss: {val_loss}') plot_valloss.append([val_loss, epoch]) # save the model if val_loss < min_loss: torch.save(model_aug, "/content/drive/MyDrive/Deep Learning/Project/model_aug.pckl") epochs_no_improve = 0 min_loss = val_loss else: epochs_no_improve += 1 if epochs_no_improve == patience: print("Early Stopping!") break ###Output epoch: 1, time: 0.873s, loss: 2.294, train accuracy: 0.109 epoch: 1, time: 5.740s, loss: 0.844, train accuracy: 0.648 epoch: 1, time: 10.371s, loss: 0.639, train accuracy: 0.789 epoch: 1, time: 15.068s, loss: 0.832, train accuracy: 0.664 epoch: 1, time: 19.699s, loss: 0.569, train accuracy: 0.828 epoch: 1, time: 24.408s, loss: 0.554, train accuracy: 0.797 epoch: 1, time: 29.085s, loss: 0.359, train accuracy: 0.891 epoch: 1, time: 33.751s, loss: 0.709, 
train accuracy: 0.766 epoch: 1, time: 38.548s, loss: 0.441, train accuracy: 0.836 epoch: 1, time: 43.239s, loss: 0.474, train accuracy: 0.844 epoch: 1, time: 47.946s, loss: 0.398, train accuracy: 0.867 epoch: 1, time: 52.616s, loss: 0.400, train accuracy: 0.852 epoch: 1, time: 57.362s, loss: 0.589, train accuracy: 0.781 epoch: 1, time: 62.024s, loss: 0.415, train accuracy: 0.812 epoch: 1, time: 66.770s, loss: 0.428, train accuracy: 0.836 epoch: 1, time: 71.518s, loss: 0.566, train accuracy: 0.781 epoch: 1, time: 76.125s, loss: 0.359, train accuracy: 0.828 epoch: 1, time: 80.803s, loss: 0.496, train accuracy: 0.836 epoch: 1, time: 85.667s, loss: 0.426, train accuracy: 0.852 epoch: 1, validation loss: 0.41329245926983066 epoch: 2, time: 106.848s, loss: 0.493, train accuracy: 0.812 epoch: 2, time: 111.647s, loss: 0.317, train accuracy: 0.891 epoch: 2, time: 116.336s, loss: 0.412, train accuracy: 0.805 epoch: 2, time: 121.080s, loss: 0.324, train accuracy: 0.867 epoch: 2, time: 125.690s, loss: 0.364, train accuracy: 0.883 epoch: 2, time: 130.408s, loss: 0.377, train accuracy: 0.875 epoch: 2, time: 135.189s, loss: 0.360, train accuracy: 0.875 epoch: 2, time: 139.908s, loss: 0.304, train accuracy: 0.898 epoch: 2, time: 144.655s, loss: 0.445, train accuracy: 0.836 epoch: 2, time: 149.261s, loss: 0.410, train accuracy: 0.859 epoch: 2, time: 153.893s, loss: 0.321, train accuracy: 0.875 epoch: 2, time: 158.528s, loss: 0.331, train accuracy: 0.867 epoch: 2, time: 163.189s, loss: 0.397, train accuracy: 0.828 epoch: 2, time: 167.825s, loss: 0.497, train accuracy: 0.828 epoch: 2, time: 172.631s, loss: 0.343, train accuracy: 0.844 epoch: 2, time: 177.257s, loss: 0.339, train accuracy: 0.844 epoch: 2, time: 181.881s, loss: 0.390, train accuracy: 0.914 epoch: 2, time: 186.619s, loss: 0.289, train accuracy: 0.898 epoch: 2, time: 191.254s, loss: 0.342, train accuracy: 0.875 epoch: 2, validation loss: 0.4029934276014503 epoch: 3, time: 210.255s, loss: 0.263, train accuracy: 0.922 epoch: 3, time: 215.065s, loss: 0.316, train accuracy: 0.906 epoch: 3, time: 219.726s, loss: 0.345, train accuracy: 0.859 epoch: 3, time: 224.511s, loss: 0.359, train accuracy: 0.875 epoch: 3, time: 229.311s, loss: 0.421, train accuracy: 0.844 epoch: 3, time: 234.214s, loss: 0.289, train accuracy: 0.891 epoch: 3, time: 238.912s, loss: 0.379, train accuracy: 0.836 epoch: 3, time: 243.620s, loss: 0.334, train accuracy: 0.875 epoch: 3, time: 248.345s, loss: 0.439, train accuracy: 0.812 epoch: 3, time: 253.032s, loss: 0.423, train accuracy: 0.836 epoch: 3, time: 257.739s, loss: 0.266, train accuracy: 0.906 epoch: 3, time: 262.445s, loss: 0.427, train accuracy: 0.852 epoch: 3, time: 267.159s, loss: 0.447, train accuracy: 0.812 epoch: 3, time: 271.912s, loss: 0.392, train accuracy: 0.836 epoch: 3, time: 276.508s, loss: 0.300, train accuracy: 0.867 epoch: 3, time: 281.286s, loss: 0.395, train accuracy: 0.844 epoch: 3, time: 286.027s, loss: 0.366, train accuracy: 0.875 epoch: 3, time: 290.660s, loss: 0.366, train accuracy: 0.836 epoch: 3, time: 295.447s, loss: 0.300, train accuracy: 0.898 epoch: 3, validation loss: 0.3725654235653786 epoch: 4, time: 314.570s, loss: 0.356, train accuracy: 0.859 epoch: 4, time: 319.388s, loss: 0.290, train accuracy: 0.906 epoch: 4, time: 324.131s, loss: 0.360, train accuracy: 0.852 epoch: 4, time: 328.885s, loss: 0.316, train accuracy: 0.891 epoch: 4, time: 333.499s, loss: 0.240, train accuracy: 0.898 epoch: 4, time: 338.187s, loss: 0.460, train accuracy: 0.789 epoch: 4, time: 342.845s, loss: 0.299, train 
accuracy: 0.883 epoch: 4, time: 347.577s, loss: 0.313, train accuracy: 0.852 epoch: 4, time: 352.290s, loss: 0.445, train accuracy: 0.859 epoch: 4, time: 356.954s, loss: 0.371, train accuracy: 0.875 epoch: 4, time: 361.684s, loss: 0.307, train accuracy: 0.875 epoch: 4, time: 366.405s, loss: 0.314, train accuracy: 0.906 epoch: 4, time: 371.268s, loss: 0.295, train accuracy: 0.914 epoch: 4, time: 375.973s, loss: 0.281, train accuracy: 0.883 epoch: 4, time: 380.615s, loss: 0.365, train accuracy: 0.859 epoch: 4, time: 385.267s, loss: 0.249, train accuracy: 0.883 epoch: 4, time: 389.864s, loss: 0.462, train accuracy: 0.844 epoch: 4, time: 394.497s, loss: 0.249, train accuracy: 0.906 epoch: 4, time: 399.186s, loss: 0.368, train accuracy: 0.836 epoch: 4, validation loss: 0.34379442763735235 epoch: 5, time: 418.341s, loss: 0.321, train accuracy: 0.875 epoch: 5, time: 423.206s, loss: 0.312, train accuracy: 0.859 epoch: 5, time: 427.850s, loss: 0.260, train accuracy: 0.914 epoch: 5, time: 432.586s, loss: 0.347, train accuracy: 0.852 epoch: 5, time: 437.394s, loss: 0.265, train accuracy: 0.875 epoch: 5, time: 442.102s, loss: 0.319, train accuracy: 0.859 epoch: 5, time: 446.706s, loss: 0.338, train accuracy: 0.875 epoch: 5, time: 451.415s, loss: 0.342, train accuracy: 0.883 epoch: 5, time: 456.041s, loss: 0.297, train accuracy: 0.883 epoch: 5, time: 460.824s, loss: 0.385, train accuracy: 0.844 epoch: 5, time: 465.517s, loss: 0.489, train accuracy: 0.852 epoch: 5, time: 470.286s, loss: 0.285, train accuracy: 0.875 epoch: 5, time: 474.982s, loss: 0.275, train accuracy: 0.859 epoch: 5, time: 479.599s, loss: 0.437, train accuracy: 0.820 epoch: 5, time: 484.302s, loss: 0.340, train accuracy: 0.883 epoch: 5, time: 489.069s, loss: 0.371, train accuracy: 0.844 epoch: 5, time: 493.671s, loss: 0.339, train accuracy: 0.875 epoch: 5, time: 498.456s, loss: 0.293, train accuracy: 0.891 epoch: 5, time: 503.175s, loss: 0.378, train accuracy: 0.844 epoch: 5, validation loss: 0.36943064375853996 epoch: 6, time: 521.877s, loss: 0.335, train accuracy: 0.883 epoch: 6, time: 526.647s, loss: 0.316, train accuracy: 0.891 epoch: 6, time: 531.304s, loss: 0.228, train accuracy: 0.891 epoch: 6, time: 535.962s, loss: 0.381, train accuracy: 0.875 epoch: 6, time: 540.660s, loss: 0.267, train accuracy: 0.867 epoch: 6, time: 545.252s, loss: 0.228, train accuracy: 0.891 epoch: 6, time: 549.910s, loss: 0.405, train accuracy: 0.836 epoch: 6, time: 554.617s, loss: 0.316, train accuracy: 0.867 epoch: 6, time: 559.353s, loss: 0.302, train accuracy: 0.914 epoch: 6, time: 564.007s, loss: 0.302, train accuracy: 0.906 epoch: 6, time: 568.611s, loss: 0.376, train accuracy: 0.875 epoch: 6, time: 573.221s, loss: 0.296, train accuracy: 0.898 epoch: 6, time: 577.914s, loss: 0.286, train accuracy: 0.898 epoch: 6, time: 582.717s, loss: 0.288, train accuracy: 0.914 epoch: 6, time: 587.327s, loss: 0.297, train accuracy: 0.906 epoch: 6, time: 592.066s, loss: 0.318, train accuracy: 0.859 epoch: 6, time: 596.786s, loss: 0.225, train accuracy: 0.922 epoch: 6, time: 601.616s, loss: 0.397, train accuracy: 0.883 epoch: 6, time: 606.310s, loss: 0.355, train accuracy: 0.875 epoch: 6, validation loss: 0.3386965770838357 epoch: 7, time: 625.394s, loss: 0.265, train accuracy: 0.883 epoch: 7, time: 630.164s, loss: 0.253, train accuracy: 0.914 epoch: 7, time: 634.777s, loss: 0.346, train accuracy: 0.883 epoch: 7, time: 639.388s, loss: 0.297, train accuracy: 0.906 epoch: 7, time: 644.046s, loss: 0.251, train accuracy: 0.914 epoch: 7, time: 648.872s, loss: 0.308, 
train accuracy: 0.891 epoch: 7, time: 653.509s, loss: 0.259, train accuracy: 0.914 epoch: 7, time: 658.221s, loss: 0.339, train accuracy: 0.891 epoch: 7, time: 662.945s, loss: 0.463, train accuracy: 0.820 epoch: 7, time: 667.662s, loss: 0.341, train accuracy: 0.844 epoch: 7, time: 672.273s, loss: 0.317, train accuracy: 0.867 epoch: 7, time: 676.999s, loss: 0.368, train accuracy: 0.836 epoch: 7, time: 681.727s, loss: 0.235, train accuracy: 0.922 epoch: 7, time: 686.340s, loss: 0.260, train accuracy: 0.898 epoch: 7, time: 691.035s, loss: 0.385, train accuracy: 0.859 epoch: 7, time: 695.624s, loss: 0.282, train accuracy: 0.883 epoch: 7, time: 700.340s, loss: 0.279, train accuracy: 0.906 epoch: 7, time: 705.065s, loss: 0.333, train accuracy: 0.906 epoch: 7, time: 709.742s, loss: 0.277, train accuracy: 0.891 epoch: 7, validation loss: 0.351558247450064 epoch: 8, time: 728.711s, loss: 0.334, train accuracy: 0.906 epoch: 8, time: 733.374s, loss: 0.296, train accuracy: 0.883 epoch: 8, time: 738.036s, loss: 0.289, train accuracy: 0.867 epoch: 8, time: 742.694s, loss: 0.297, train accuracy: 0.898 epoch: 8, time: 747.385s, loss: 0.423, train accuracy: 0.867 epoch: 8, time: 752.042s, loss: 0.287, train accuracy: 0.898 epoch: 8, time: 756.685s, loss: 0.343, train accuracy: 0.867 epoch: 8, time: 761.515s, loss: 0.393, train accuracy: 0.898 epoch: 8, time: 766.151s, loss: 0.226, train accuracy: 0.922 epoch: 8, time: 770.823s, loss: 0.395, train accuracy: 0.852 epoch: 8, time: 775.523s, loss: 0.244, train accuracy: 0.930 epoch: 8, time: 780.164s, loss: 0.286, train accuracy: 0.914 epoch: 8, time: 784.775s, loss: 0.336, train accuracy: 0.875 epoch: 8, time: 789.451s, loss: 0.263, train accuracy: 0.906 epoch: 8, time: 794.065s, loss: 0.287, train accuracy: 0.914 epoch: 8, time: 798.864s, loss: 0.284, train accuracy: 0.891 epoch: 8, time: 803.498s, loss: 0.271, train accuracy: 0.891 epoch: 8, time: 808.134s, loss: 0.323, train accuracy: 0.891 epoch: 8, time: 812.771s, loss: 0.194, train accuracy: 0.945 epoch: 8, validation loss: 0.33390339756253434 epoch: 9, time: 831.367s, loss: 0.324, train accuracy: 0.883 epoch: 9, time: 836.192s, loss: 0.227, train accuracy: 0.898 epoch: 9, time: 840.814s, loss: 0.209, train accuracy: 0.898 epoch: 9, time: 845.405s, loss: 0.430, train accuracy: 0.820 epoch: 9, time: 850.037s, loss: 0.202, train accuracy: 0.906 epoch: 9, time: 854.687s, loss: 0.263, train accuracy: 0.891 epoch: 9, time: 859.262s, loss: 0.271, train accuracy: 0.891 epoch: 9, time: 863.948s, loss: 0.237, train accuracy: 0.922 epoch: 9, time: 868.565s, loss: 0.257, train accuracy: 0.891 epoch: 9, time: 873.242s, loss: 0.292, train accuracy: 0.891 epoch: 9, time: 877.806s, loss: 0.403, train accuracy: 0.852 epoch: 9, time: 882.445s, loss: 0.194, train accuracy: 0.930 epoch: 9, time: 887.071s, loss: 0.250, train accuracy: 0.898 epoch: 9, time: 891.675s, loss: 0.327, train accuracy: 0.891 epoch: 9, time: 896.378s, loss: 0.319, train accuracy: 0.867 epoch: 9, time: 901.116s, loss: 0.365, train accuracy: 0.844 epoch: 9, time: 905.736s, loss: 0.293, train accuracy: 0.906 epoch: 9, time: 910.359s, loss: 0.259, train accuracy: 0.883 epoch: 9, time: 915.131s, loss: 0.287, train accuracy: 0.898 epoch: 9, validation loss: 0.3674401198940745 epoch: 10, time: 933.734s, loss: 0.316, train accuracy: 0.875 epoch: 10, time: 938.471s, loss: 0.229, train accuracy: 0.938 epoch: 10, time: 943.189s, loss: 0.213, train accuracy: 0.953 epoch: 10, time: 947.895s, loss: 0.280, train accuracy: 0.891 epoch: 10, time: 952.570s, loss: 
0.342, train accuracy: 0.859 epoch: 10, time: 957.253s, loss: 0.219, train accuracy: 0.914 epoch: 10, time: 961.930s, loss: 0.218, train accuracy: 0.906 epoch: 10, time: 966.644s, loss: 0.212, train accuracy: 0.930 epoch: 10, time: 971.306s, loss: 0.271, train accuracy: 0.875 epoch: 10, time: 975.992s, loss: 0.273, train accuracy: 0.914 epoch: 10, time: 980.603s, loss: 0.249, train accuracy: 0.891 epoch: 10, time: 985.224s, loss: 0.350, train accuracy: 0.875 epoch: 10, time: 989.991s, loss: 0.316, train accuracy: 0.906 epoch: 10, time: 994.766s, loss: 0.177, train accuracy: 0.938 epoch: 10, time: 999.431s, loss: 0.266, train accuracy: 0.906 epoch: 10, time: 1004.176s, loss: 0.233, train accuracy: 0.898 epoch: 10, time: 1008.839s, loss: 0.274, train accuracy: 0.891 epoch: 10, time: 1013.480s, loss: 0.400, train accuracy: 0.828 epoch: 10, time: 1018.085s, loss: 0.258, train accuracy: 0.914 epoch: 10, validation loss: 0.3640514860021026 epoch: 11, time: 1036.607s, loss: 0.240, train accuracy: 0.891 epoch: 11, time: 1041.501s, loss: 0.348, train accuracy: 0.828 epoch: 11, time: 1046.057s, loss: 0.247, train accuracy: 0.891 epoch: 11, time: 1050.617s, loss: 0.352, train accuracy: 0.859 epoch: 11, time: 1055.307s, loss: 0.215, train accuracy: 0.914 epoch: 11, time: 1059.978s, loss: 0.245, train accuracy: 0.922 epoch: 11, time: 1064.638s, loss: 0.290, train accuracy: 0.875 epoch: 11, time: 1069.288s, loss: 0.274, train accuracy: 0.938 epoch: 11, time: 1073.889s, loss: 0.291, train accuracy: 0.891 epoch: 11, time: 1078.472s, loss: 0.283, train accuracy: 0.883 epoch: 11, time: 1083.101s, loss: 0.172, train accuracy: 0.930 epoch: 11, time: 1087.807s, loss: 0.296, train accuracy: 0.898 epoch: 11, time: 1092.395s, loss: 0.342, train accuracy: 0.891 epoch: 11, time: 1096.989s, loss: 0.213, train accuracy: 0.914 epoch: 11, time: 1101.663s, loss: 0.256, train accuracy: 0.898 epoch: 11, time: 1106.318s, loss: 0.256, train accuracy: 0.914 epoch: 11, time: 1111.011s, loss: 0.397, train accuracy: 0.852 epoch: 11, time: 1115.652s, loss: 0.413, train accuracy: 0.859 epoch: 11, time: 1120.310s, loss: 0.319, train accuracy: 0.891 epoch: 11, validation loss: 0.3498491298224626 epoch: 12, time: 1138.920s, loss: 0.269, train accuracy: 0.883 epoch: 12, time: 1143.723s, loss: 0.324, train accuracy: 0.883 epoch: 12, time: 1148.419s, loss: 0.219, train accuracy: 0.906 epoch: 12, time: 1153.032s, loss: 0.278, train accuracy: 0.867 epoch: 12, time: 1157.618s, loss: 0.210, train accuracy: 0.914 epoch: 12, time: 1162.282s, loss: 0.357, train accuracy: 0.867 epoch: 12, time: 1166.867s, loss: 0.278, train accuracy: 0.906 epoch: 12, time: 1171.520s, loss: 0.307, train accuracy: 0.883 epoch: 12, time: 1176.115s, loss: 0.230, train accuracy: 0.906 epoch: 12, time: 1180.753s, loss: 0.254, train accuracy: 0.898 epoch: 12, time: 1185.501s, loss: 0.213, train accuracy: 0.922 epoch: 12, time: 1190.137s, loss: 0.255, train accuracy: 0.914 epoch: 12, time: 1194.754s, loss: 0.160, train accuracy: 0.953 epoch: 12, time: 1199.546s, loss: 0.275, train accuracy: 0.906 epoch: 12, time: 1204.224s, loss: 0.257, train accuracy: 0.859 epoch: 12, time: 1208.867s, loss: 0.362, train accuracy: 0.883 epoch: 12, time: 1213.496s, loss: 0.274, train accuracy: 0.914 epoch: 12, time: 1218.129s, loss: 0.151, train accuracy: 0.945 epoch: 12, time: 1222.795s, loss: 0.262, train accuracy: 0.898 epoch: 12, validation loss: 0.35557099975057754 epoch: 13, time: 1241.660s, loss: 0.292, train accuracy: 0.875 epoch: 13, time: 1246.540s, loss: 0.290, train 
accuracy: 0.883 epoch: 13, time: 1251.291s, loss: 0.218, train accuracy: 0.945 epoch: 13, time: 1255.923s, loss: 0.309, train accuracy: 0.914 epoch: 13, time: 1260.826s, loss: 0.145, train accuracy: 0.922 epoch: 13, time: 1265.483s, loss: 0.350, train accuracy: 0.883 epoch: 13, time: 1270.138s, loss: 0.263, train accuracy: 0.914 epoch: 13, time: 1274.790s, loss: 0.189, train accuracy: 0.930 epoch: 13, time: 1279.329s, loss: 0.267, train accuracy: 0.883 epoch: 13, time: 1283.924s, loss: 0.383, train accuracy: 0.852 epoch: 13, time: 1288.534s, loss: 0.184, train accuracy: 0.914 epoch: 13, time: 1293.142s, loss: 0.339, train accuracy: 0.867 epoch: 13, time: 1297.732s, loss: 0.189, train accuracy: 0.922 epoch: 13, time: 1302.438s, loss: 0.277, train accuracy: 0.914 epoch: 13, time: 1307.056s, loss: 0.136, train accuracy: 0.953 epoch: 13, time: 1311.845s, loss: 0.191, train accuracy: 0.930 epoch: 13, time: 1316.507s, loss: 0.286, train accuracy: 0.898 epoch: 13, time: 1321.228s, loss: 0.240, train accuracy: 0.914 epoch: 13, time: 1325.912s, loss: 0.219, train accuracy: 0.914 epoch: 13, validation loss: 0.35088980137539316 epoch: 14, time: 1344.664s, loss: 0.266, train accuracy: 0.898 epoch: 14, time: 1349.504s, loss: 0.297, train accuracy: 0.883 epoch: 14, time: 1354.128s, loss: 0.362, train accuracy: 0.844 epoch: 14, time: 1358.890s, loss: 0.247, train accuracy: 0.914 epoch: 14, time: 1363.581s, loss: 0.186, train accuracy: 0.914 epoch: 14, time: 1368.237s, loss: 0.339, train accuracy: 0.883 epoch: 14, time: 1372.959s, loss: 0.266, train accuracy: 0.906 epoch: 14, time: 1377.563s, loss: 0.199, train accuracy: 0.914 epoch: 14, time: 1382.254s, loss: 0.201, train accuracy: 0.914 epoch: 14, time: 1386.944s, loss: 0.137, train accuracy: 0.938 epoch: 14, time: 1391.594s, loss: 0.245, train accuracy: 0.898 epoch: 14, time: 1396.221s, loss: 0.320, train accuracy: 0.875 epoch: 14, time: 1400.962s, loss: 0.220, train accuracy: 0.914 epoch: 14, time: 1405.614s, loss: 0.270, train accuracy: 0.914 epoch: 14, time: 1410.360s, loss: 0.218, train accuracy: 0.930 epoch: 14, time: 1414.977s, loss: 0.311, train accuracy: 0.930 epoch: 14, time: 1419.759s, loss: 0.305, train accuracy: 0.859 epoch: 14, time: 1424.462s, loss: 0.355, train accuracy: 0.867 epoch: 14, time: 1429.169s, loss: 0.285, train accuracy: 0.891 epoch: 14, validation loss: 0.35084098485360016 epoch: 15, time: 1448.316s, loss: 0.178, train accuracy: 0.930 epoch: 15, time: 1453.065s, loss: 0.324, train accuracy: 0.898 epoch: 15, time: 1457.820s, loss: 0.317, train accuracy: 0.914 epoch: 15, time: 1462.566s, loss: 0.302, train accuracy: 0.883 epoch: 15, time: 1467.141s, loss: 0.305, train accuracy: 0.891 epoch: 15, time: 1471.822s, loss: 0.182, train accuracy: 0.922 epoch: 15, time: 1476.543s, loss: 0.239, train accuracy: 0.906 epoch: 15, time: 1481.234s, loss: 0.185, train accuracy: 0.938 epoch: 15, time: 1485.888s, loss: 0.311, train accuracy: 0.859 epoch: 15, time: 1490.605s, loss: 0.332, train accuracy: 0.898 epoch: 15, time: 1495.237s, loss: 0.236, train accuracy: 0.922 epoch: 15, time: 1499.889s, loss: 0.241, train accuracy: 0.914 epoch: 15, time: 1504.548s, loss: 0.335, train accuracy: 0.914 epoch: 15, time: 1509.176s, loss: 0.304, train accuracy: 0.922 epoch: 15, time: 1513.761s, loss: 0.238, train accuracy: 0.906 epoch: 15, time: 1518.550s, loss: 0.251, train accuracy: 0.906 epoch: 15, time: 1523.178s, loss: 0.280, train accuracy: 0.906 epoch: 15, time: 1527.840s, loss: 0.221, train accuracy: 0.906 epoch: 15, time: 1532.524s, loss: 0.277, 
train accuracy: 0.883 epoch: 15, validation loss: 0.3633408567417405 epoch: 16, time: 1551.234s, loss: 0.191, train accuracy: 0.922 epoch: 16, time: 1556.047s, loss: 0.202, train accuracy: 0.938 epoch: 16, time: 1560.724s, loss: 0.177, train accuracy: 0.953 epoch: 16, time: 1565.372s, loss: 0.152, train accuracy: 0.938 epoch: 16, time: 1570.000s, loss: 0.235, train accuracy: 0.922 epoch: 16, time: 1574.690s, loss: 0.261, train accuracy: 0.875 epoch: 16, time: 1579.329s, loss: 0.298, train accuracy: 0.898 epoch: 16, time: 1584.051s, loss: 0.282, train accuracy: 0.898 epoch: 16, time: 1588.604s, loss: 0.186, train accuracy: 0.906 epoch: 16, time: 1593.248s, loss: 0.250, train accuracy: 0.914 epoch: 16, time: 1597.902s, loss: 0.305, train accuracy: 0.898 epoch: 16, time: 1602.550s, loss: 0.216, train accuracy: 0.914 epoch: 16, time: 1607.156s, loss: 0.235, train accuracy: 0.891 epoch: 16, time: 1611.777s, loss: 0.347, train accuracy: 0.883 epoch: 16, time: 1616.503s, loss: 0.256, train accuracy: 0.938 epoch: 16, time: 1621.141s, loss: 0.283, train accuracy: 0.898 epoch: 16, time: 1625.803s, loss: 0.285, train accuracy: 0.891 epoch: 16, time: 1630.434s, loss: 0.294, train accuracy: 0.898 epoch: 16, time: 1635.081s, loss: 0.239, train accuracy: 0.906 epoch: 16, validation loss: 0.35872740595579655 epoch: 17, time: 1654.056s, loss: 0.257, train accuracy: 0.883 epoch: 17, time: 1658.861s, loss: 0.309, train accuracy: 0.906 epoch: 17, time: 1663.572s, loss: 0.193, train accuracy: 0.914 epoch: 17, time: 1668.237s, loss: 0.197, train accuracy: 0.922 epoch: 17, time: 1672.921s, loss: 0.265, train accuracy: 0.891 epoch: 17, time: 1677.540s, loss: 0.213, train accuracy: 0.922 epoch: 17, time: 1682.200s, loss: 0.180, train accuracy: 0.930 epoch: 17, time: 1686.803s, loss: 0.265, train accuracy: 0.875 epoch: 17, time: 1691.433s, loss: 0.231, train accuracy: 0.922 epoch: 17, time: 1696.158s, loss: 0.256, train accuracy: 0.883 epoch: 17, time: 1700.817s, loss: 0.180, train accuracy: 0.922 epoch: 17, time: 1705.500s, loss: 0.151, train accuracy: 0.938 epoch: 17, time: 1710.270s, loss: 0.400, train accuracy: 0.875 epoch: 17, time: 1714.942s, loss: 0.207, train accuracy: 0.914 epoch: 17, time: 1719.674s, loss: 0.180, train accuracy: 0.930 epoch: 17, time: 1724.399s, loss: 0.239, train accuracy: 0.906 epoch: 17, time: 1729.048s, loss: 0.358, train accuracy: 0.906 epoch: 17, time: 1733.696s, loss: 0.282, train accuracy: 0.875 epoch: 17, time: 1738.490s, loss: 0.198, train accuracy: 0.914 epoch: 17, validation loss: 0.366751571009154 epoch: 18, time: 1757.617s, loss: 0.212, train accuracy: 0.898 epoch: 18, time: 1762.332s, loss: 0.238, train accuracy: 0.875 epoch: 18, time: 1767.006s, loss: 0.163, train accuracy: 0.961 epoch: 18, time: 1771.673s, loss: 0.287, train accuracy: 0.898 epoch: 18, time: 1776.241s, loss: 0.194, train accuracy: 0.930 epoch: 18, time: 1780.913s, loss: 0.171, train accuracy: 0.922 epoch: 18, time: 1785.595s, loss: 0.198, train accuracy: 0.922 epoch: 18, time: 1790.203s, loss: 0.256, train accuracy: 0.914 epoch: 18, time: 1795.018s, loss: 0.273, train accuracy: 0.898 epoch: 18, time: 1799.634s, loss: 0.305, train accuracy: 0.891 epoch: 18, time: 1804.288s, loss: 0.273, train accuracy: 0.922 epoch: 18, time: 1808.955s, loss: 0.166, train accuracy: 0.922 epoch: 18, time: 1813.596s, loss: 0.285, train accuracy: 0.945 epoch: 18, time: 1818.211s, loss: 0.227, train accuracy: 0.914 epoch: 18, time: 1822.918s, loss: 0.208, train accuracy: 0.938 epoch: 18, time: 1827.643s, loss: 0.180, train 
accuracy: 0.922 epoch: 18, time: 1832.348s, loss: 0.206, train accuracy: 0.914 epoch: 18, time: 1837.032s, loss: 0.281, train accuracy: 0.922 epoch: 18, time: 1841.613s, loss: 0.237, train accuracy: 0.891 epoch: 18, validation loss: 0.37612124575353634 epoch: 19, time: 1860.522s, loss: 0.174, train accuracy: 0.922 epoch: 19, time: 1865.168s, loss: 0.141, train accuracy: 0.938 epoch: 19, time: 1869.865s, loss: 0.224, train accuracy: 0.891 epoch: 19, time: 1874.622s, loss: 0.314, train accuracy: 0.930 epoch: 19, time: 1879.270s, loss: 0.214, train accuracy: 0.938 epoch: 19, time: 1883.900s, loss: 0.187, train accuracy: 0.922 epoch: 19, time: 1888.539s, loss: 0.151, train accuracy: 0.945 epoch: 19, time: 1893.138s, loss: 0.145, train accuracy: 0.945 epoch: 19, time: 1897.724s, loss: 0.273, train accuracy: 0.883 epoch: 19, time: 1902.471s, loss: 0.344, train accuracy: 0.891 epoch: 19, time: 1907.060s, loss: 0.184, train accuracy: 0.891 epoch: 19, time: 1911.720s, loss: 0.147, train accuracy: 0.938 epoch: 19, time: 1916.332s, loss: 0.199, train accuracy: 0.938 epoch: 19, time: 1920.958s, loss: 0.163, train accuracy: 0.945 epoch: 19, time: 1925.597s, loss: 0.236, train accuracy: 0.883 epoch: 19, time: 1930.314s, loss: 0.222, train accuracy: 0.898 epoch: 19, time: 1935.019s, loss: 0.337, train accuracy: 0.859 epoch: 19, time: 1939.611s, loss: 0.217, train accuracy: 0.906 epoch: 19, time: 1944.205s, loss: 0.177, train accuracy: 0.922 epoch: 19, validation loss: 0.3897295405806255 epoch: 20, time: 1962.930s, loss: 0.305, train accuracy: 0.875 epoch: 20, time: 1967.684s, loss: 0.197, train accuracy: 0.930 epoch: 20, time: 1972.338s, loss: 0.201, train accuracy: 0.922 epoch: 20, time: 1976.867s, loss: 0.194, train accuracy: 0.938 epoch: 20, time: 1981.507s, loss: 0.292, train accuracy: 0.914 epoch: 20, time: 1986.295s, loss: 0.247, train accuracy: 0.922 epoch: 20, time: 1990.981s, loss: 0.257, train accuracy: 0.945 epoch: 20, time: 1995.580s, loss: 0.328, train accuracy: 0.906 epoch: 20, time: 2000.240s, loss: 0.237, train accuracy: 0.914 epoch: 20, time: 2005.067s, loss: 0.162, train accuracy: 0.953 epoch: 20, time: 2009.813s, loss: 0.473, train accuracy: 0.844 epoch: 20, time: 2014.534s, loss: 0.216, train accuracy: 0.914 epoch: 20, time: 2019.251s, loss: 0.261, train accuracy: 0.906 epoch: 20, time: 2023.885s, loss: 0.273, train accuracy: 0.906 epoch: 20, time: 2028.498s, loss: 0.254, train accuracy: 0.906 epoch: 20, time: 2033.273s, loss: 0.320, train accuracy: 0.906 epoch: 20, time: 2037.838s, loss: 0.326, train accuracy: 0.906 epoch: 20, time: 2042.501s, loss: 0.155, train accuracy: 0.938 epoch: 20, time: 2047.099s, loss: 0.212, train accuracy: 0.938 epoch: 20, validation loss: 0.36104964082047886 epoch: 21, time: 2065.792s, loss: 0.155, train accuracy: 0.945 epoch: 21, time: 2070.545s, loss: 0.203, train accuracy: 0.914 epoch: 21, time: 2075.342s, loss: 0.367, train accuracy: 0.867 epoch: 21, time: 2080.006s, loss: 0.238, train accuracy: 0.914 epoch: 21, time: 2084.746s, loss: 0.370, train accuracy: 0.883 epoch: 21, time: 2089.461s, loss: 0.196, train accuracy: 0.930 epoch: 21, time: 2094.160s, loss: 0.152, train accuracy: 0.953 epoch: 21, time: 2098.921s, loss: 0.226, train accuracy: 0.898 epoch: 21, time: 2103.549s, loss: 0.234, train accuracy: 0.922 epoch: 21, time: 2108.237s, loss: 0.221, train accuracy: 0.914 epoch: 21, time: 2112.804s, loss: 0.422, train accuracy: 0.867 epoch: 21, time: 2117.448s, loss: 0.194, train accuracy: 0.945 epoch: 21, time: 2122.111s, loss: 0.197, train accuracy: 
0.930 epoch: 21, time: 2126.740s, loss: 0.234, train accuracy: 0.891 epoch: 21, time: 2131.366s, loss: 0.208, train accuracy: 0.906 epoch: 21, time: 2135.986s, loss: 0.239, train accuracy: 0.922 epoch: 21, time: 2140.532s, loss: 0.283, train accuracy: 0.914 epoch: 21, time: 2145.262s, loss: 0.339, train accuracy: 0.906 epoch: 21, time: 2149.997s, loss: 0.212, train accuracy: 0.922 epoch: 21, validation loss: 0.37477968998555183 epoch: 22, time: 2168.807s, loss: 0.192, train accuracy: 0.914 epoch: 22, time: 2173.513s, loss: 0.295, train accuracy: 0.883 epoch: 22, time: 2178.165s, loss: 0.201, train accuracy: 0.945 epoch: 22, time: 2182.802s, loss: 0.189, train accuracy: 0.945 epoch: 22, time: 2187.523s, loss: 0.236, train accuracy: 0.906 epoch: 22, time: 2192.140s, loss: 0.298, train accuracy: 0.906 epoch: 22, time: 2196.731s, loss: 0.229, train accuracy: 0.898 epoch: 22, time: 2201.433s, loss: 0.283, train accuracy: 0.898 epoch: 22, time: 2206.046s, loss: 0.362, train accuracy: 0.859 epoch: 22, time: 2210.672s, loss: 0.184, train accuracy: 0.945 epoch: 22, time: 2215.422s, loss: 0.248, train accuracy: 0.922 epoch: 22, time: 2219.992s, loss: 0.171, train accuracy: 0.938 epoch: 22, time: 2224.722s, loss: 0.149, train accuracy: 0.922 epoch: 22, time: 2229.298s, loss: 0.134, train accuracy: 0.938 epoch: 22, time: 2234.062s, loss: 0.208, train accuracy: 0.914 epoch: 22, time: 2238.855s, loss: 0.367, train accuracy: 0.891 epoch: 22, time: 2243.512s, loss: 0.225, train accuracy: 0.922 epoch: 22, time: 2248.315s, loss: 0.212, train accuracy: 0.930 epoch: 22, time: 2253.007s, loss: 0.170, train accuracy: 0.945 epoch: 22, validation loss: 0.3869190960804791 epoch: 23, time: 2271.483s, loss: 0.198, train accuracy: 0.930 epoch: 23, time: 2276.200s, loss: 0.193, train accuracy: 0.938 epoch: 23, time: 2280.866s, loss: 0.285, train accuracy: 0.898 epoch: 23, time: 2285.510s, loss: 0.230, train accuracy: 0.906 epoch: 23, time: 2290.117s, loss: 0.243, train accuracy: 0.914 epoch: 23, time: 2294.714s, loss: 0.135, train accuracy: 0.930 epoch: 23, time: 2299.368s, loss: 0.245, train accuracy: 0.898 epoch: 23, time: 2304.073s, loss: 0.247, train accuracy: 0.891 epoch: 23, time: 2308.646s, loss: 0.170, train accuracy: 0.938 epoch: 23, time: 2313.264s, loss: 0.231, train accuracy: 0.922 epoch: 23, time: 2317.961s, loss: 0.301, train accuracy: 0.891 epoch: 23, time: 2322.605s, loss: 0.219, train accuracy: 0.914 epoch: 23, time: 2327.288s, loss: 0.445, train accuracy: 0.867 epoch: 23, time: 2331.953s, loss: 0.157, train accuracy: 0.914 epoch: 23, time: 2336.669s, loss: 0.272, train accuracy: 0.898 epoch: 23, time: 2341.340s, loss: 0.187, train accuracy: 0.930 epoch: 23, time: 2345.885s, loss: 0.321, train accuracy: 0.859 epoch: 23, time: 2350.564s, loss: 0.190, train accuracy: 0.922 epoch: 23, time: 2355.208s, loss: 0.260, train accuracy: 0.914 epoch: 23, validation loss: 0.36951833326361583 epoch: 24, time: 2374.055s, loss: 0.170, train accuracy: 0.922 epoch: 24, time: 2378.935s, loss: 0.125, train accuracy: 0.961 epoch: 24, time: 2383.538s, loss: 0.413, train accuracy: 0.852 epoch: 24, time: 2388.133s, loss: 0.266, train accuracy: 0.922 epoch: 24, time: 2392.754s, loss: 0.114, train accuracy: 0.969 epoch: 24, time: 2397.450s, loss: 0.246, train accuracy: 0.898 epoch: 24, time: 2402.181s, loss: 0.105, train accuracy: 0.969 epoch: 24, time: 2406.827s, loss: 0.309, train accuracy: 0.875 epoch: 24, time: 2411.549s, loss: 0.138, train accuracy: 0.930 epoch: 24, time: 2416.261s, loss: 0.286, train accuracy: 0.914 
epoch: 24, time: 2420.884s, loss: 0.195, train accuracy: 0.953 epoch: 24, time: 2425.480s, loss: 0.319, train accuracy: 0.891 epoch: 24, time: 2430.179s, loss: 0.224, train accuracy: 0.906 epoch: 24, time: 2434.830s, loss: 0.218, train accuracy: 0.891 epoch: 24, time: 2439.546s, loss: 0.309, train accuracy: 0.867 epoch: 24, time: 2444.152s, loss: 0.203, train accuracy: 0.906 epoch: 24, time: 2448.778s, loss: 0.195, train accuracy: 0.930 epoch: 24, time: 2453.397s, loss: 0.257, train accuracy: 0.914 epoch: 24, time: 2458.077s, loss: 0.117, train accuracy: 0.953 epoch: 24, validation loss: 0.3921877811077053 epoch: 25, time: 2477.139s, loss: 0.196, train accuracy: 0.914 epoch: 25, time: 2481.886s, loss: 0.153, train accuracy: 0.945 epoch: 25, time: 2486.634s, loss: 0.174, train accuracy: 0.945 epoch: 25, time: 2491.327s, loss: 0.139, train accuracy: 0.938 epoch: 25, time: 2495.921s, loss: 0.185, train accuracy: 0.914 epoch: 25, time: 2500.571s, loss: 0.174, train accuracy: 0.938 epoch: 25, time: 2505.262s, loss: 0.187, train accuracy: 0.922 epoch: 25, time: 2509.925s, loss: 0.262, train accuracy: 0.898 epoch: 25, time: 2514.745s, loss: 0.236, train accuracy: 0.922 epoch: 25, time: 2519.389s, loss: 0.225, train accuracy: 0.922 epoch: 25, time: 2524.055s, loss: 0.210, train accuracy: 0.906 epoch: 25, time: 2528.734s, loss: 0.269, train accuracy: 0.914 epoch: 25, time: 2533.451s, loss: 0.176, train accuracy: 0.922 epoch: 25, time: 2538.184s, loss: 0.168, train accuracy: 0.945 epoch: 25, time: 2542.870s, loss: 0.175, train accuracy: 0.938 epoch: 25, time: 2547.531s, loss: 0.288, train accuracy: 0.867 epoch: 25, time: 2552.159s, loss: 0.196, train accuracy: 0.945 epoch: 25, time: 2556.859s, loss: 0.150, train accuracy: 0.922 epoch: 25, time: 2561.491s, loss: 0.210, train accuracy: 0.922 epoch: 25, validation loss: 0.3790416830320602 epoch: 26, time: 2580.352s, loss: 0.204, train accuracy: 0.930 epoch: 26, time: 2585.223s, loss: 0.281, train accuracy: 0.906 epoch: 26, time: 2589.950s, loss: 0.205, train accuracy: 0.938 epoch: 26, time: 2594.597s, loss: 0.210, train accuracy: 0.930 epoch: 26, time: 2599.251s, loss: 0.208, train accuracy: 0.938 epoch: 26, time: 2603.975s, loss: 0.314, train accuracy: 0.891 epoch: 26, time: 2608.725s, loss: 0.116, train accuracy: 0.930 epoch: 26, time: 2613.435s, loss: 0.231, train accuracy: 0.922 epoch: 26, time: 2618.105s, loss: 0.122, train accuracy: 0.961 epoch: 26, time: 2622.838s, loss: 0.222, train accuracy: 0.922 epoch: 26, time: 2627.461s, loss: 0.237, train accuracy: 0.914 epoch: 26, time: 2632.131s, loss: 0.163, train accuracy: 0.914 epoch: 26, time: 2636.830s, loss: 0.211, train accuracy: 0.914 epoch: 26, time: 2641.578s, loss: 0.310, train accuracy: 0.883 epoch: 26, time: 2646.267s, loss: 0.220, train accuracy: 0.930 epoch: 26, time: 2650.954s, loss: 0.327, train accuracy: 0.875 epoch: 26, time: 2655.667s, loss: 0.239, train accuracy: 0.930 epoch: 26, time: 2660.294s, loss: 0.273, train accuracy: 0.898 epoch: 26, time: 2664.977s, loss: 0.232, train accuracy: 0.922 epoch: 26, validation loss: 0.41837670674710387 epoch: 27, time: 2684.062s, loss: 0.181, train accuracy: 0.930 epoch: 27, time: 2688.878s, loss: 0.196, train accuracy: 0.930 epoch: 27, time: 2693.520s, loss: 0.164, train accuracy: 0.930 epoch: 27, time: 2698.231s, loss: 0.226, train accuracy: 0.930 epoch: 27, time: 2702.886s, loss: 0.187, train accuracy: 0.953 epoch: 27, time: 2707.540s, loss: 0.339, train accuracy: 0.883 epoch: 27, time: 2712.200s, loss: 0.271, train accuracy: 0.930 epoch: 27, 
time: 2716.848s, loss: 0.314, train accuracy: 0.898 epoch: 27, time: 2721.535s, loss: 0.122, train accuracy: 0.945 epoch: 27, time: 2726.104s, loss: 0.231, train accuracy: 0.906 epoch: 27, time: 2730.661s, loss: 0.211, train accuracy: 0.938 epoch: 27, time: 2735.323s, loss: 0.247, train accuracy: 0.891 epoch: 27, time: 2740.073s, loss: 0.236, train accuracy: 0.914 epoch: 27, time: 2744.808s, loss: 0.178, train accuracy: 0.914 epoch: 27, time: 2749.443s, loss: 0.175, train accuracy: 0.930 epoch: 27, time: 2754.051s, loss: 0.232, train accuracy: 0.930 epoch: 27, time: 2758.739s, loss: 0.187, train accuracy: 0.930 epoch: 27, time: 2763.377s, loss: 0.338, train accuracy: 0.852 epoch: 27, time: 2768.072s, loss: 0.160, train accuracy: 0.938 epoch: 27, validation loss: 0.4109298016216709 epoch: 28, time: 2787.178s, loss: 0.266, train accuracy: 0.898 epoch: 28, time: 2792.070s, loss: 0.132, train accuracy: 0.938 epoch: 28, time: 2796.641s, loss: 0.156, train accuracy: 0.930 epoch: 28, time: 2801.401s, loss: 0.113, train accuracy: 0.953 epoch: 28, time: 2806.160s, loss: 0.198, train accuracy: 0.930 epoch: 28, time: 2810.814s, loss: 0.230, train accuracy: 0.906 epoch: 28, time: 2815.425s, loss: 0.152, train accuracy: 0.922 epoch: 28, time: 2820.030s, loss: 0.195, train accuracy: 0.930 epoch: 28, time: 2824.754s, loss: 0.238, train accuracy: 0.922 epoch: 28, time: 2829.394s, loss: 0.212, train accuracy: 0.891 epoch: 28, time: 2834.162s, loss: 0.242, train accuracy: 0.945 epoch: 28, time: 2838.839s, loss: 0.249, train accuracy: 0.930 epoch: 28, time: 2843.514s, loss: 0.242, train accuracy: 0.898 epoch: 28, time: 2848.185s, loss: 0.164, train accuracy: 0.930 epoch: 28, time: 2852.857s, loss: 0.321, train accuracy: 0.906 epoch: 28, time: 2857.632s, loss: 0.256, train accuracy: 0.891 epoch: 28, time: 2862.291s, loss: 0.199, train accuracy: 0.945 epoch: 28, time: 2866.958s, loss: 0.261, train accuracy: 0.914 epoch: 28, time: 2871.632s, loss: 0.286, train accuracy: 0.906 epoch: 28, validation loss: 0.4235882515401474 Early Stopping! 
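###Markdown
The log above ends with "Early Stopping!" once the validation loss stops improving, but the stopping logic itself is defined earlier in the notebook and is not shown in this excerpt. The cell below is only a minimal, self-contained sketch of the usual pattern (track the best validation loss, stop after a fixed patience); the patience value is a hypothetical choice for illustration, not the project's setting.
###Code
# Illustrative early-stopping helper -- not the notebook's own implementation.
class EarlyStopping:
    def __init__(self, patience=3):  # patience is a hypothetical value
        self.patience = patience
        self.best_loss = float("inf")
        self.bad_epochs = 0

    def step(self, val_loss):
        """Return True when training should stop."""
        if val_loss < self.best_loss:
            self.best_loss = val_loss
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience

# Usage inside a training loop (sketch):
# stopper = EarlyStopping(patience=3)
# for epoch in range(max_epochs):
#     ...train, compute validation_loss...
#     if stopper.step(validation_loss):
#         print("Early Stopping!")
#         break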
###Markdown
Show validation loss
###Code
import pandas as pd

df_plot = pd.DataFrame(data=plot_valloss, columns=['Validation Loss', 'Epoch'])
df_plot.head()

import plotly.express as px

fig = px.line(df_plot, x="Epoch", y="Validation Loss", title='Validation Loss DA')
fig.show()

df_plot.to_csv("/content/drive/MyDrive/Deep Learning/Project/DA_valloss.csv", index=False)
###Output
_____no_output_____
###Markdown
Testing
Test model on clean data
###Code
model_aug_inf = torch.load("/content/drive/MyDrive/Deep Learning/Project/model_aug.pckl")

correct_total = 0
for i, (x_batch, y_batch) in enumerate(testloader):
    x_batch, y_batch = x_batch.to(device), y_batch.to(device)  # Move the data to the device that is used
    y_pred = model_aug_inf(x_batch)
    y_pred_max = torch.argmax(y_pred, dim=1)
    correct_total += torch.sum(torch.eq(y_pred_max, y_batch)).item()

print(f'Accuracy on the test set: {correct_total / len(testset):.3f}')

accuracy = correct_total / len(testset)
z = 1.96  # for 95% CI
n = len(testset)
interval = z * np.sqrt((accuracy * (1 - accuracy)) / n)
interval
###Output
_____no_output_____
###Markdown
Test model on perturbed data
###Code
import pandas as pd
import seaborn as sn
from advertorch.utils import predict_from_logits

correct_total = 0
all_preds = []
y_true = []
for i, (x_batch, y_batch) in enumerate(testloader):
    x_batch, y_batch = x_batch.to(device), y_batch.to(device)  # Move the data to the device that is used
    y_true.extend(y_batch)
    adv = adversary.perturb(x_batch, y_batch)
    y_adv_pred = predict_from_logits(model_aug_inf(adv))
    all_preds.extend(y_adv_pred)
    correct_total += torch.sum(torch.eq(y_adv_pred, y_batch)).item()

print(f'Accuracy on the test set: {correct_total / len(testset):.3f}')

accuracy = correct_total / len(testset)
z = 1.96  # for 95% CI
n = len(all_preds)
interval = z * np.sqrt((accuracy * (1 - accuracy)) / n)
interval
###Output
_____no_output_____
###Markdown
Visualise results
###Code
y_true_int = [int(x.cpu()) for x in y_true]
y_pred_int = [int(x.cpu()) for x in all_preds]

data = {'y_Actual': y_true_int, 'y_Predicted': y_pred_int}
cm_df = pd.DataFrame(data, columns=['y_Actual', 'y_Predicted'])
cm_df.head()

confusion_matrix = pd.crosstab(cm_df['y_Actual'], cm_df['y_Predicted'], rownames=['Actual'], colnames=['Predicted'])
print(confusion_matrix)

sn.heatmap(confusion_matrix, annot=False)
plt.show()
###Output
_____no_output_____
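###Markdown
The `adversary` used in the perturbed-data evaluation above is constructed earlier in the notebook and is not shown in this excerpt. For reference, a typical construction with advertorch looks like the sketch below; the attack type (L-inf PGD) and all hyperparameters here are illustrative assumptions, not the values used in the project.
###Code
# Sketch only: the project's actual attack and budget are defined elsewhere in the notebook.
import torch.nn as nn
from advertorch.attacks import LinfPGDAttack

adversary = LinfPGDAttack(
    model_aug_inf,                                   # model under attack (loaded above)
    loss_fn=nn.CrossEntropyLoss(reduction="sum"),
    eps=8 / 255,                                     # hypothetical L-inf perturbation budget
    nb_iter=40, eps_iter=2 / 255,                    # hypothetical PGD step schedule
    rand_init=True, clip_min=0.0, clip_max=1.0,
    targeted=False,
)
# adversary.perturb(x_batch, y_batch) then yields the perturbed inputs evaluated above.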
Milestone4.ipynb
###Markdown
Milestone 4
Set up the questions and the paragraphs
###Code
import pandas as pd

df = pd.read_csv("paragraphs.csv")

questions = [
    ["Does a company need to keep track of the carbon intensity of the electricity?"],
    ["What metric is used for evaluating emission?"],
    ["How does one get to net-zero emissions economy?"],
    ["What is net-zero emissions economy?"],
    ["How can carbon emission of the processes of cement clinker be reduced?"],
    ["How is the Weighted Cogeneration Threshold calculated?"],
    ["What is carbon capture and sequestration?"],
    ["What stages does CCS consist of?"],
    ["What should be the average energy consumption of a water supply system?"],
    ["What are sludge treatments?"],
    ["How is the process of anaerobic digestion?"],
    ["What is considered Zero direct emission vehicles?"]
]
###Output
_____no_output_____
###Markdown
Set up the vectorizer and a function that returns the paragraph most similar to the question.
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

vectorizer = TfidfVectorizer()
vector_corpus = vectorizer.fit_transform(df["paragraph"])

def get_context(question):
    q_v = vectorizer.transform(question)
    lk_rank = linear_kernel(q_v, vector_corpus).flatten()
    return df["paragraph"][lk_rank.argsort()[-1]]
###Output
_____no_output_____
###Markdown
Initiate the QA pipeline and a function which returns the answer
###Code
from transformers import pipeline

MODEL = "distilbert-base-uncased-distilled-squad"
qamodel = pipeline("question-answering", model=MODEL, tokenizer=MODEL, device=-1)

def get_answer_pipeline(question, context):
    answer = qamodel(question=question, context=context)
    return answer["answer"].rstrip(".").rstrip(",").lstrip("(").rstrip(")").rstrip(".").strip("'").strip(":")
###Output
Downloading: 100%|██████████| 230/230 [00:00<00:00, 55.5kB/s]
###Markdown
Go through the different questions and print the answers and the contexts
###Code
for question in questions:
    context = get_context(question)
    answer = get_answer_pipeline(question, context)
    print(f"{question[0]}\n\n{answer}\n\n{context}")
    print("-"*100)
###Output
Does a company need to keep track of the carbon intensity of the electricity? 
on the basis of a prevailing PPA Power Purchase Agreement the availability of low carbon electricity may be a limiting factor Therefore aa manufacturer could either directly produce its own low carbon electricity or purchase low carbon electricity or renewable energy certificates It should be possible to verify the average carbon intensity of the electricity on the basis of a prevailing PPA Power Purchase Agreement It is acknowledged that aluminium production facilities may play an important role in stabilizing electricity grids by active management of electricity demand This may result in substantial mitigation contributions eg by limiting the need for electricity storage facilities However given the lack of available metrics to quantify these impacts these benefits are not taken into account at this stage It is acknowledged that aluminium will play a role in a low carbon economy in particular enabling light weight products and electrification including transmission wires Such applications could also be considered eligible under the activity Manufacture of other low carbon technologies provided they can demonstrate substantial emissions reductions according to the criteria for that activity ---------------------------------------------------------------------------------------------------- What metric is used for evaluating emission? gCO2e The threshold metric is gCO2e and not an intensity metric such as gCO2e unit of production as this enables the Taxonomy to be applied by both those reducing emission intensity eg through efficiency while also requiring them to reduce emissions overall the overall goal ---------------------------------------------------------------------------------------------------- How does one get to net-zero emissions economy? to enable the screening of economic activities Opportunities for substantial mitigation and contributions to a net zero carbon economy An overarching goal of the Taxonomy is to enable the screening of economic activities to determine whether or when they do or do not deliver substantial mitigation consistent with the underlying goal of a net zero carbon economy by 2050 ---------------------------------------------------------------------------------------------------- What is net-zero emissions economy? to enable the screening of economic activities Opportunities for substantial mitigation and contributions to a net zero carbon economy An overarching goal of the Taxonomy is to enable the screening of economic activities to determine whether or when they do or do not deliver substantial mitigation consistent with the underlying goal of a net zero carbon economy by 2050 ---------------------------------------------------------------------------------------------------- How can carbon emission of the processes of cement clinker be reduced? 
use of biomass and waste materials Improving energy efficiency Thermal energy intensity of clinker and the electric intensity of cement can be reduced by deploying existing stateoftheart technologies in new cement plants and retrofitting existing facilities to improve energy performance levels when economically viable Switching to alternative fuels The carbon intensity of cement clinker can be reduced significantly by the use of biomass and waste materials as fuels in cement kilns The clinker burning process offers good conditions for using different types of waste materials replacing parts of the consumption of carbonintensive fossil fuels A wide range of different types of wastes can be used as fuels but as these can replace primary fuel in cement kilns a consistent waste quality is essential eg adequate calorific value metal halogen and ash content ---------------------------------------------------------------------------------------------------- How is the Weighted Cogeneration Threshold calculated? from the relative production of heat and power Any combined heat and power generation technology is eligible if it can be demonstrated using an ISO 14044compliant Life Cycle of Emissions LCE assessment that the facility is operating at less than the weighted cogeneration threshold The Weighted Cogeneration Threshold is calculated from the relative production of heat and power and based on the declining power generation threshold of 100 gCO2ekWhe and a notional heat threshold of 30 gCO2ekWhth ---------------------------------------------------------------------------------------------------- What is carbon capture and sequestration? a key technology for the decarbonisation of Europe Why carbon capture and sequestration CCS is included in the Taxonomy Carbon capture and sequestration CCS is a key technology for the decarbonisation of Europe It is included in all pathways presented by the European Commission in its LongTerm Strategic Vision document and is relied upon heavily in threeoutoffour scenarios outlined by the IPCC in the Special Report on 15 Degrees ---------------------------------------------------------------------------------------------------- What stages does CCS consist of? capture transport and storage CO2 transport and storage A typical CCS chain consists of three main stages capture transport and storage CO2 transport and storage are established and proven processes with decades of operation and wellestablished regulation here in Europe ---------------------------------------------------------------------------------------------------- What should be the average energy consumption of a water supply system? 05 kwh per cubic meter length of the network etc For the purpose of the Taxonomy ILI and kwhm3 supplied were chosen as parameters in order to measure the efficiency of a water supply system An average energy consumption of a water supply system of 05 kwh per cubic meter billedunbilled authorized water supply indicates a high performing system in terms of energy consumption Several energy efficiency measures can reduce directly the energy consumption in a water supply system enabling significant reductions of GHG emissions these are inter alia ---------------------------------------------------------------------------------------------------- What are sludge treatments? 
aerobic digestion Threshold Rationale Sewage sludge is a byproduct of wastewater treatment with organic and inorganic content The organic content of the sludge is subject of decomposition which might occur under controlled circumstances in sludge treatment installations or under uncontrolled circumstances in the final disposal with significant GHG emissions mainly methane Anaerobic Digestion AD and in some cases aerobic digestion are examples of sludge treatments In AD microorganisms decompose the organic matter of the sludge in the absence of oxygen and produce methanerich biogas The climate mitigation effect is dual i The biogas is a source of energy which is transformed into heat electricity or fuel replacing fossil fuels in electricity heat generation and consequently avoiding GHG emissions to air CO2 N2O etc320 ii the sludge is turned into a recyclable product eg as fertilizer substituting synthetic fertilizers Sludge treatment is in many cases centralized in wastewater treatment plants WWTP which treat the sludge and produce energy from sludge produced in the WWTP or outside the plant Methane leakage may offset the climate mitigation benefits and therefore needs to be avoided A monitoring system allows the detection of leakages it is in the interest of the operator to fix detected leakages in order to minimize economic losses ---------------------------------------------------------------------------------------------------- How is the process of anaerobic digestion? production and energetic utilization of biogas and production of digestate E Water supply sewerage waste management and remediation activities 4 E3821 Anaerobic digestion of biowaste322 Treatment of separately collected biowaste through anaerobic digestion with the resulting production and energetic utilization of biogas and production of digestate for use as fertilizersoil improver323 possibly after composting or any other treatment ----------------------------------------------------------------------------------------------------
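###Markdown
A note on the retrieval step: because TfidfVectorizer applies L2 normalisation by default, the linear_kernel scores used in get_context above are effectively cosine similarities between the question and each paragraph, and argsort()[-1] picks the best-matching paragraph. The cell below is a small self-contained sketch of that ranking step using made-up toy paragraphs, not the project's paragraphs.csv.
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

# Toy corpus, purely for illustration.
paragraphs = [
    "Carbon capture and sequestration stores CO2 underground.",
    "Anaerobic digestion of sludge produces methane-rich biogas.",
    "Zero direct emission vehicles have no tailpipe CO2 emissions.",
]
vec = TfidfVectorizer()
corpus_vectors = vec.fit_transform(paragraphs)

question = ["What is carbon capture and sequestration?"]
scores = linear_kernel(vec.transform(question), corpus_vectors).flatten()
print(paragraphs[scores.argsort()[-1]])  # highest-scoring paragraph, as in get_context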
data_preprocessing/1_data_generation/5_create_trainTestData/.ipynb_checkpoints/oneXone_grid_train-checkpoint.ipynb
###Markdown 5x5 grid needed ###Code import pandas as pd import numpy as np from datetime import datetime,timedelta import pytz import pygeohash as gh from haversine import haversine import time import pickle as cPickle import glob import json geohash_prec = 5 def geohash(lat,long): geo=gh.encode(float(lat), float(long), precision=geohash_prec) return geo data_train_acc['geohash_five'] = data_train_acc.apply(lambda row: geohash(row['acc_lat'],row['acc_long']), axis=1) data_train_acc data_train_nonac['geohash_five'] = data_train_nonac.apply(lambda row: geohash(row['non_acclat'],row['non_acclong']), axis=1) data_train_nonac data_train_acc['count']=1 data_train_acc data_train_acc=data_train_acc[['geohash_five','acc_lat','acc_long','UJAHR','UMONAT','UWOCHENTAG','USTUNDE','count']] data_train_acc.columns=['geohash','lat','long','UJAHR','UMONAT','UWOCHENTAG','hour','count'] data_train_acc #data_train_nonac['geohash'] = data_train_nonac.apply(lambda row: geohash(row['non_acclat'],row['non_acclong']), axis=1) data_train_nonac['count']=0 data_train_nonac data_train_nonac=data_train_nonac[['geohash_five','non_acclat','non_acclong','UJAHR','UMONAT','UWOCHENTAG','hour','count']] data_train_nonac.columns=['geohash','lat','long','UJAHR','UMONAT','UWOCHENTAG','hour','count'] data_train_nonac import geohash as gh def geohash(lat,long): geo=gh.encode(float(lat), float(long), precision=6) return geo data_train_nonac['geohash6'] = data_train_nonac.apply(lambda row: geohash(row['lat'],row['long']), axis=1) data_train_nonac data_train_nonac=data_train_nonac[['geohash','lat','long','UJAHR','UMONAT','UWOCHENTAG','hour','count']] data_train_nonac.columns=['geohash','lat','long','UJAHR','UMONAT','UWOCHENTAG','hour','count'] data_train_nonac data_train_nonac.geohash.nunique() frame=[data_train_acc,data_train_nonac] merge_train=pd.concat(frame) merge_train=merge_train.reset_index(drop=True) merge_train import geohash as gh def geohash(lat,long): geo=gh.encode(float(lat), float(long), precision=6) return geo #city='new_method/Nurmberg/5x5_Grid' geohash7=merge_train[['geohash','lat','long']] geohash7['cluster_id']=geohash7.apply(lambda row: geohash(row['lat'],row['long']), axis=1) geohash7=geohash7[['cluster_id','geohash']] geohash7.columns=['geohash','cluster_id'] geohash7=geohash7.drop_duplicates() geohash7.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+"geohash_cluster5to6.csv",index=False) geohash7 # # Helper functions for the parallel computing import pandas as pd import numpy as np import random import os import sys import psutil import matplotlib import matplotlib.pyplot as plt import math from multiprocessing import cpu_count,Pool import multiprocessing from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OneHotEncoder import pygeohash as gh import pickle from datetime import date import pysolar from pysolar.solar import * from datetime import date import datetime as dt import pygeohash as gh from pvlib import atmosphere import pvlib.solarposition as pv from pvlib.tools import datetime_to_djd, djd_to_datetime # X=pd.read_csv('/home/optim7/Dropbox/Smart_Analytics_work/WeatherData_WOOPA/PV_data/Testing123.csv',index_col='Datetime') # X.index = pd.to_datetime(X.index,unit='s') # X.head() import pytz import calendar import numpy as np class WithExtraArgs(object): def __init__(self, func, **args): self.func = func 
self.args = args def __call__(self, df): return self.func(df, **self.args) def applyParallel(data, func,pool,partition,kwargs): data_split = [data[i:i + partition] for i in range(0, len(data), partition)] #data_split = np.array_split(data, min(partitions,data.shape[0])) data =pool.map(WithExtraArgs(func, **kwargs), data_split) #data = pd.concat(pool.map(WithExtraArgs(func, **kwargs), data_split)) return data def caculatemean(ob): mid=sum(ob) / len(ob) return mid join_hann_forlatcenter=merge_train[['lat','long','geohash']] latlist=join_hann_forlatcenter.groupby('geohash')['lat'].apply(list).reset_index(name='latlist') latlist['middlelat']= latlist.apply(lambda row: caculatemean(row['latlist']), axis=1) latlist longlist=merge_train.groupby('geohash')['long'].apply(list).reset_index(name='longlist') longlist['middlelong']= longlist.apply(lambda row: caculatemean(row['longlist']), axis=1) longlist all_merge=pd.merge(latlist,longlist,on='geohash') all_merge=all_merge[['geohash','middlelat','middlelong']] all_merge.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+"oneCrossOneGrid_centroid_cluster.csv",index=False) data_train_acc join_hann_centroid=pd.merge(merge_train,all_merge,on='geohash') join_hann_centroid=join_hann_centroid.reset_index(drop=True) join_hann_centroid city='new_method/LS/1x1Grid' print(city) # train_generted=join_hann_centroid.loc[~((join_hann_centroid['UJAHR']==2017) & (join_hann_centroid['UMONAT']==1) # & (join_hann_centroid['UWOCHENTAG']==1) & (join_hann_centroid['hour']<8))] # train_generted=join_hann_centroid.loc[~((join_hann_centroid['UJAHR']==2019) & (join_hann_centroid['UMONAT']==2) # & (join_hann_centroid['UWOCHENTAG']==1) & (join_hann_centroid['hour']<8))] train_generted=join_hann_centroid.copy() train_generted def getsolarpos(df,geomap): #chdf=df.copy avgliEl=[] avgliZe=[] avgliAz=[] for index, row in df.iterrows(): liZe=[] liAz=[] liEl=[] year=row['UJAHR'] month=row['UMONAT'] day=row['UWOCHENTAG'] #hour=row['sessiondivided'] hour=int(row['USTUNDE']) #onekmhash=row['cluster_id'] l=() if day==1: day=6 else: day=day-2 #hour=hour.split('-') #print(hour[0],hour[1]) #hour=int((int(hour[0])+int(hour[1]))/2) A=calendar.TextCalendar(day) #decode=dict1.get(onekmhash) lat=row['middlelat'] long=row['middlelong'] for k in A.itermonthdays(year,month): #print(year,month,k) if k!=0: day1=date(year,month,k) #print(day) if day1.weekday()==day: #print("%s,%d-%d-%d" % (calendar.day_name[6] ,k,month,year)) #l=(k,month,year) dtime=str(year)+"-"+str(month).zfill(2)+"-"+str(k).zfill(2)+" "+str(hour).zfill(2)+":"+"00"+":"+"00" date_time_obj = dt.datetime.strptime(dtime, '%Y-%m-%d %H:%M:%S') # timezone = pytz.timezone('Europe/Berlin') # timezone_date_time_obj = timezone.localize(date_time_obj) # if month<3 or month >10: # date_time_obj=(str(date_time_obj)+"+1:00") # else: # date_time_obj=(str(date_time_obj)+"+2:00") timezone = pytz.timezone('Europe/Berlin') timezone_date_time_obj = timezone.localize(date_time_obj) #date_time_obj = dt.datetime.strptime(date_time_obj, '%Y-%m-%d %H:%M:%S%z') #print(timezone_date_time_obj) #print(lat,long,timezone_date_time_obj) az=get_azimuth(lat,long,timezone_date_time_obj) sol_pos=pv.get_solarposition(timezone_date_time_obj,lat,long) #display((sol_pos)) #print(sol_pos.dtypes) getsolarpos=sol_pos[['apparent_zenith', 'zenith', 'apparent_elevation', 'elevation','azimuth', 'equation_of_time']].values ele=getsolarpos[0][3] ze=getsolarpos[0][1] az=getsolarpos[0][4] #print(spl) liEl.append(ele) liZe.append(ze) liAz.append(az) 
npEl=np.array(liEl) npZe=np.array(liZe) npAz=np.array(liAz) avgEl=np.mean(liEl) avgZe=np.mean(liZe) avgAz=np.mean(liAz) avgliEl.append(avgEl) avgliZe.append(avgZe) avgliAz.append(avgAz) return avgliEl,avgliZe,avgliAz from multiprocessing import Pool import numpy as np def create_sequences(df): cores = 35 #Number of CPU cores on your system partitions = cores pool = Pool(cores) partition = int(np.ceil(float(len(df)/partitions))) temp = applyParallel (df,getsolarpos,pool,partition,{'geomap':df.copy()}) #todf=pd.DataFrame(temp) retres=temp print(retres) pool.close() pool.join() elevation = [] zenith = [] azimuth=[] for set_ in retres: elevation.extend(set_[0]) zenith.extend(set_[1]) azimuth.extend(set_[2]) elevation, zenith,azimuth = np.array(elevation), np.array(zenith),np.array(azimuth) return elevation,zenith,azimuth #return retres #df_accCount['concate_year'] = df_accCount.apply(lambda row: mergedata(row['cluster_id'],row['UJAHR'],row['UMONAT']), axis=1) df_accCount=train_generted.copy() # size = 200 # # sample size # train_generted_1=train_generted.loc[train_generted['counts']==0] # replace = True # with replacement # fn = lambda obj: obj.loc[np.random.choice(obj.index, size, replace),:] # train_generted_2=train_generted_1.groupby('geohash', as_index=False).apply(fn) # train_generted_3=train_generted_2.reset_index() # train_generted_4=train_generted_3.drop_duplicates() # train_generted_4.drop(['level_0','level_1'],axis=1,inplace=True) # train_generted_4 #train_generted_1=train_generted.loc[train_generted['counts']==0] # train_generted_acc=train_generted.loc[train_generted['counts']>0] # frames=[train_generted_acc,train_generted_4] # train_generted_final=pd.concat(frames) # train_generted_final # if there is a balance between acc and non acc data, otherwise run above cell train_generted_final=train_generted.copy() #train_generted_final['index1'] = train_generted_final.index train_generted_final['is_present']=1 train_generted_final=train_generted_final.sort_values(['geohash','UJAHR','UMONAT','UWOCHENTAG','hour']) train_generted_final=train_generted_final.reset_index(drop=True) train_generted_final #df_final=df_final.loc[(df_final['azimuth_solar']!=0.0)& (df_final['elevaion']!=0.0)#.iloc[[1472]]#.drop_duplicates() # train_generted_final1=train_generted_final.copy() # train_generted_final1 # for row,index train_generted_final.iterrows(): # for i in range(1,9): # if row['hour'] # if ((train_generted_final1['UJAHR'] == row['UJAHR']) and (train_generted_final1['UMONAT'] == row['UMONAT']) and ( # train_generted_final1['geohash'] == row['geohash']) # and (train_generted_final1['hour'] == row['hour']-i) and (train_generted_final1['UWOCHENTAG'] == row['UWOCHENTAG'])): #solving 4gib problem # train_generted_final.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/train_generted_final.csv",index=False) train_generted_final.columns # train_generted_final['sessiondivided'] = pd.Categorical(train_generted_final['sessiondivided'], ["0-2", "2-4", "4-6","6-8","8-10","10-12","12-14","14-16","16-18","18-20","20-22","22-24"]) # train_generted_final df_midlle_latlong=df_accCount[['geohash','middlelat','middlelong']] df_midlle_latlong=df_midlle_latlong.drop_duplicates().reset_index().drop(['index'],axis=1) df_midlle_latlong.columns=['geohash','lat_mid','long_mid'] df_midlle_latlong def ispresent(middlelat,middlelong): is_present=-1 if middlelat==0 and middlelong==0: is_present=0 else: is_present=1 return is_present # def dattmake(train_generted_final): # df_final = 
pd.pivot_table(train_generted_final, # index=['geohash','UJAHR','UMONAT','UWOCHENTAG','hour'], # values=['middlelat', 'middlelong', 'count'], # fill_value = 0, # dropna=False, # aggfunc=np.mean).reset_index() # return df_final def parallelize_dataframe(df, func, n_cores=30): df_split = np.array_split(df, n_cores) pool = Pool(n_cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df import numpy as np #def add_features(train_generted_final): df_final = pd.pivot_table(train_generted_final, index=['geohash','UJAHR','UMONAT','UWOCHENTAG','hour'], values=['middlelat', 'middlelong', 'count'], fill_value = 0, dropna=False, aggfunc=np.mean).reset_index() #display(df_finalhour df_final=df_final.sort_values(['geohash','UJAHR','UMONAT','UWOCHENTAG','hour']) #df_final=df_final.loc[(df_final['azimuth_solar']!=0.0)& (df_final['elevaion']!=0.0) & (df_final['zenith']!=0.0)] df_final['is_present']=df_final.apply(lambda row: ispresent(row['middlelat'],row['middlelong']), axis=1) df_final_train=df_final.loc[((df_final['UJAHR']==2017) & (df_final['UMONAT']<=12)|(df_final['UJAHR']==2018) & (df_final['UMONAT']<=12) | ((df_final['UJAHR']==2019) & (df_final['UMONAT']<=5)))] df_final_train=df_final_train.reset_index(drop=True) df_final_train #train = parallelize_dataframe(train_generted_final, add_features) df_final_train.head() df_final_train.memory_usage() #train_generted_final=train.copy() #del train train.head(100000).to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/trai_inte1L.csv",index=False) train.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/trai_inte.csv",index=False) #del train_generted_final def findpast(row): df_filter=pd.DataFrame(columns=['geohash', 'UJAHR', 'UMONAT', 'UWOCHENTAG', 'hour', 'middlelat', 'middlelong', 'count', 'is_present']) df_index = df_final_train.loc[ (df_final_train['UJAHR'] == row['UJAHR']) & (df_final_train['UMONAT'] == row['UMONAT']) & ( df_final_train['geohash'] == row['geohash']) & (df_final_train['hour'] == row['hour']) & (df_final_train['UWOCHENTAG'] == row['UWOCHENTAG']) &(df_final_train['is_present'] == row['is_present'])].index[0] #print('df_index=',df_index) i=int(df_index)-8 #print('i=',i) for i in range(i,df_index): # print(type(df_final_train.iloc[[i]])) #temp=df_final_train.iloc[[i]] df_filter=df_filter.append((df_final_train.iloc[[i]]),ignore_index=True) #df_filter['is_p2']=0 #print('i inside=',i) #print(df_filter) #df_filter=df_filter.append((df_final_train.iloc[[df_index]]),ignore_index=True) #df_filter['is_p2']=2 #display(df_filter) return df_filter def getdata(df_1,geomap): df=pd.DataFrame() for index, row in df_1.iterrows(): #print(index) temp=findpast(row) temp['is_p2']=0 df=df.append(temp) temp1=row temp1['is_p2']=2 df=df.append(temp1) return df from multiprocessing import Pool import numpy as np def create_sequences_1(df): cores = 30#Number of CPU cores on your system partitions = cores pool = Pool(cores) partition = int(np.ceil(float(len(df)/partitions))) temp = applyParallel (df,getdata,pool,partition,{'geomap':df.copy()}) #todf=pd.DataFrame(temp) print(type(temp)) retres=temp print(retres) pool.close() pool.join() elevation = [] # zenith = [] # azimuth=[] elevation.extend(retres) # zenith.extend(set_[1]) # azimuth.extend(set_[2]) #print(retres) # elevation, zenith,azimuth = np.array(elevation), np.array(zenith),np.array(azimuth) return elevation #return retres # from multiprocessing.reduction import ForkingPickler, AbstractReducer # class 
ForkingPickler4(ForkingPickler): # def __init__(self, *args): # if len(args) > 1: # args[1] = 2 # else: # args.append(2) # super().__init__(*args) # @classmethod # def dumps(cls, obj, protocol=4): # return ForkingPickler.dumps(obj, protocol) # def dump(obj, file, protocol=4): # ForkingPickler4(file, protocol).dump(obj) # class Pickle4Reducer(AbstractReducer): # ForkingPickler = ForkingPickler4 # register = ForkingPickler4.register # dump = dump # import pickle4reducer # import multiprocessing as mp # ctx = mp.get_context() # ctx.reducer = pickle4reducer # with mp.Pool(30) as p: # #res = {} # #output = {} # #for id, big_df in some_dict_of_big_dfs: # res = p.apply_async(create_sequences_1,(train_generted_final ,)) # #output = {u : res[id].get() for id in id_list} # # do something # res # d = {'col1': [1, 2], 'col2': [3, 4]} # df = pd.DataFrame(data=d) # df df=create_sequences_1(train_generted_final) df del train_generted_final del train print(city) df_final=pd.DataFrame() for df1 in df: df_final=df_final.append(df1) # df_final=df_final.sort_values(['cluster_id','UJAHR','UMONAT','UWOCHENTAG','hour']) df_final=df_final.reset_index(drop=True) df_final df_final.columns join_middle_latlong=pd.merge(df_final,df_midlle_latlong,on='geohash',how='left') join_middle_latlong.drop(['middlelat','middlelong'],axis=1,inplace=True) join_middle_latlong=join_middle_latlong[['geohash', 'UJAHR', 'UMONAT', 'UWOCHENTAG', 'hour', 'count', 'is_present', 'is_p2' ,'lat_mid','long_mid']] join_middle_latlong.columns=['geohash', 'UJAHR', 'UMONAT', 'UWOCHENTAG', 'USTUNDE', 'counts', 'is_present', 'is_p2','middlelat', 'middlelong'] join_middle_latlong join_middle_latlong.head(100) join_middle_latlong.geohash.nunique() elevation,zenith,azimuth=create_sequences(join_middle_latlong) print(city) elevation join_middle_latlong['elevaion']=elevation join_middle_latlong['zenith']=zenith join_middle_latlong['azimuth_solar']=azimuth #join_middle_latlong.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/train_final_withSolar.csv",index=False) join_middle_latlong.geohash.nunique() join_middle_latlong join_middle_latlong['concate_year']='20172018' join_middle_latlong.columns join_middle_latlong #city='olden' # testing=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/train_final_withSolar.csv") # test1=testing.loc[testing['counts']==0] # test2=test1.loc[testing['is_present']==1] # test2 # #city='gott' # testing=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/train_final_withSolar.csv") # test1=testing.loc[testing['counts']>0] # test2=test1.loc[testing['is_present']==1] # test2 join_middle_latlong.geohash.nunique() join_middle_latlong.columns join_middle_latlong.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/train_final_withSolarConvareyear_geohash.csv",index=False) l=[] year=[] i=0 parts1=[] #with open('/home/rajat/DAP/ls_accident/ls_accident/data/accident_data/groupebygeohashintofourwithoutyearfiveKM.csv'.format(c), 'r') as file: with open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/train_final_withSolarConvareyear_geohash.csv", 'r') as file: header = True for line in file: if not header: header = True continue parts = line.replace('\r', '').replace('\n', '').split(',') #parts1=parts #print(parts[29]) if [parts[0],parts[13]] not in l: #if parts[1] not in year m=[parts[0],parts[13]] l.append(m) #year.append(parts[1]) i=0 parts.append(i) else: i=i+1 parts.append(i) parts1.append(parts) 
print(parts1) import csv def writeCsvFile(fname, data, *args, **kwargs): """ @param fname: string, name of file to write @param data: list of list of items Write data to file """ mycsv = csv.writer(open(fname, 'w'), *args, **kwargs) for row in data: mycsv.writerow(row) writeCsvFile(r"/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//datawithazimuthUTPYE3OhannwithIndextrainGeohash.csv", parts1) print(city) import pandas as pd #city='hann' hann_indexed=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//datawithazimuthUTPYE3OhannwithIndextrainGeohash.csv") hann_indexed.columns=['geohash', 'year', 'month', 'day', 'twohoursession', 'acc_count', 'is_present', 'is_p2','middlelat', 'middlelong', 'elevaion', 'zenith', 'azimuth_solar', 'concate_year', 'timestep'] hann_indexed.to_csv('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'//datawithazimuthUTPYE3OhannwithIndextrainGeohash.csv',index=False) hann_indexed.geohash.nunique() hann_indexed import pandas as pd import numpy as np import random import os import sys import psutil import matplotlib import matplotlib.pyplot as plt import math from multiprocessing import cpu_count,Pool import multiprocessing from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import MinMaxScaler import pickle # 'def daynight(month,hour): # if hour=='22-24'or hour=='0-2' or hour=='2-4': # return 0 # elif hour=='8-10' or hour=='10-12' or hour=='12-14' or hour=='14-16': # return 1 # if month==1: # if hour=='4-6' or hour=='6-8' or hour=='16-18' or hour=='18-20' or hour=='20-22': # return 0 # if month==2: # if hour=='4-6' or hour=='6-8' or hour=='18-20' or hour=='20-22': # return 0 # if hour=='16-18': # return 1 # if month==3: # if hour=='4-6' or hour== '18-20' or hour=='20-22': # return 0 # if hour== '6-8' or hour== '16-18': # return 1 # if month==4: # if hour=='4-6' or hour=='20-22': # return 0 # if hour== '6-8' or hour== '16-18' or hour== '18-20': # return 1 # if month==5 or month==6 or month==7 or month==8: # if hour=='4-6' or hour=='20-22': # return 0 # if hour== '6-8' or hour== '16-18' or hour== '18-20' : # return 1 # if month==9 or month==10: # if hour=='4-6' or hour=='20-22' or hour== '6-8': # return 0 # if hour== '16-18' or hour== '18-20' : # return 1 # if month==11 or month==12: # if hour=='4-6' or hour=='6-8' or hour=='20-22' or hour== '16-18' or hour== '18-20': # return 0 # ' def daynight(month,hour): if hour>=22 or (hour>=0 and hour<4): return 0 elif hour>=8 and hour<=15: return 1 if month==1: if (hour>=4 and hour<=8) or (hour>=16 and hour<=22): return 0 else: return 1 if month==2: if (hour>=4 and hour<=8) or (hour>=18 and hour<=22): return 0 if hour>=16 and hour <=18: return 1 if month==3: if (hour>=4 and hour<=6) or (hour>=19 and hour<=22): return 0 if (hour>= 7 or hour <=8) or (hour>=16 and hour <=18): return 1 if month==4: if (hour>=4 and hour<=5) or (hour>=20 and hour<=22): return 0 if (hour>= 6 and hour <=8) or (hour>= 16 and hour <=20): return 1 if month==5 or month==6 or month==7 or month==8: if (hour>=4 and hour<=5) or (hour==22): return 0 if (hour>=6 and hour<=8) or (hour>=16 and hour<=21): return 1 if month==9 or month==10: if (hour>=4 and hour<=7 )or (hour>=20 and hour<=22): return 0 if (hour>=16 and hour<=19) : return 1 if month==11 or month==12: if(hour>=4 and hour<=8) or (hour>=16 and hour<=22): return 0 # def time_interval_two(HOD): # if HOD=='6-8' or HOD=='8-10' : # return 0 # if HOD=='10-12' or HOD=='12-14' 
or HOD=='14-16': # return 1 # if HOD=='16-18': # return 2; # if HOD=='18-20' or HOD=='20-22': # return 3 # if HOD=='22-24' or HOD=='0-2' or HOD=='2-4' or HOD=='4-6': # return 4 # else: # return 5; def time_interval_two(HOD): if HOD>=6 and HOD<=10 : return 0 if HOD>=11 and HOD<=15 : return 1 if HOD>=16 and HOD<=17: return 2; if HOD>=18 and HOD<=22: return 3 if HOD>22 or (HOD>=0 and HOD<6): return 4 else: return 5; #Officially spring is during the months of March, April and May in Germany. Summer is from June through to August. #Autumn is during the months of September, October and November and winter is from December to February. def season(month): if month>=3 and month<=5: return 0 if month>=6 and month<=8: return 1 if month>=9 and month<=11: return 2 if month==12 or month==1 or month== 2: return 3 # import pandas as pd # #city='hann' # df_final=pd.read_csv('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/datawithazimuthUTPYE3OhannwithIndextrainGeohash.csv',header=0) # df_final_train=df_final.loc[((df_final['year']==2017) & (df_final['month']<=12)|(df_final['year']==2018) & (df_final['month']<=12) | ((df_final['year']==2019) & (df_final['month']<=1)))] # df_final_valisdation=df_final.loc[((df_final['year']==2019) & (df_final['month']>1)& (df_final['month']<=5))] # df_final_train=df_final_train.reset_index(drop=True) # df_final_valisdation=df_final_valisdation.reset_index(drop=True) # df_final_train.to_csv('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/df_finalTrain.csv',index=False) # df_final_valisdation.to_csv('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/df_finalVald.csv',index=False) import pandas as pd #city='munich_grid5x5' df_final=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/datawithazimuthUTPYE3OhannwithIndextrainGeohash.csv",header=0) df_final_train=df_final.loc[((df_final['year']==2017) & (df_final['month']<=12)|(df_final['year']==2018) & (df_final['month']<=12) | ((df_final['year']==2019) & (df_final['month']<=1)))] df_final_valisdation=df_final.loc[((df_final['year']==2019) & (df_final['month']>1)& (df_final['month']<=5))] # df_final_train=df_final_train.reset_index(drop=True) # df_final_valisdation=df_final_valisdation.reset_index(drop=True) # df_final_train.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/df_finalTrain.csv",index=False) # df_final_valisdation.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/df_finalVald.csv",index=False) df_final_train.columns=['geohash','year', 'month', 'day', 'twohoursession', 'acc_count', 'is_present','is_p2', 'middlelat', 'middlelong', 'elevaion', 'zenith', 'azimuth_solar', 'concate_year', 'timestep'] df_final_train.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/df_finalTrain.csv",index=False) df_final_valisdation.columns=['geohash','year', 'month', 'day', 'twohoursession', 'acc_count', 'is_present','is_p2', 'middlelat', 'middlelong', 'elevaion', 'zenith', 'azimuth_solar', 'concate_year', 'timestep'] df_final_valisdation.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/df_finalVald.csv",index=False) print(city) geo_dict = dict(zip((hann_indexed.geohash.unique()), range(len(hann_indexed.geohash.unique())))) print(geo_dict) f = open('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/geo_dict.pkl',"wb") pickle.dump(geo_dict,f) f.close() print(city) def clean_data_city(filepath,city): df = 
pd.read_csv(filepath) display(df.head()) list_ = df.columns #print(list_) # temp_df = df[ # ['geohash', 'year', 'month', 'day', 'twohoursession', 'acc_count', # 'is_present', 'elevaion', 'zenith', # 'azimuth_solar', 'concate_year', 'timestep']] # temp_df.to_hdf("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+city+"train.h5",key='set1') #display(temp_df.head()) print("zero accident =", float(df[df['acc_count'] == 0].shape[0]) / df.shape[0]) #f = open("geo_dict_niedersacehsen_fivekm.pkl","rb") #f = open( f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/geo_dict.pkl", "rb") geo_dict = pickle.load(f) f.close() def fun_hash(geohash): return geo_dict[geohash] df['geohash_code'] = df.apply(lambda row: fun_hash(row['geohash']), axis=1) # temp_df = df[['geohash', 'geohash_code','year', 'month', 'day', 'twohoursession', 'acc_count', # 'is_present', 'elevaion', 'zenith', # 'azimuth_solar', 'concate_year', 'timestep']] # df = pd.read_hdf( # 'data/hann_raw_data/hann.h5', key='set2') #display(df.head()) def week_day(DOW): if DOW > 1 and DOW <= 6: return 1 elif DOW == 1 or DOW == 7: return 0 def shift(group): df_list = [] for idx, df in group: df['predicted_accident'] = df['acc_count'].shift(-1) df.drop(df.tail(1).index, inplace=True) df_list.append(df) return pd.concat(df_list) def time_interval(HOD): if HOD == '0-4': return 0 if HOD == '4-8': return 1 if HOD == '8-12': return 2; if HOD == '12-16': return 3 if HOD == '16-20': return 4 else: return 5; def make_binary(d): if d > 0: return 1 else: return 0 def year_binary(d): if d == 2017: return 0 if d==2018: return 1 else: return 2 df['DOW_cat'] = df.apply(lambda row: week_day(row['day']), axis=1) df['HOD_cat'] = df.apply(lambda row: time_interval_two(row['twohoursession']), axis=1) # df['T-Accident'] = df.apply(lambda row: make_binary(row['acc_count']), axis=1) df['DayLight'] = df.apply(lambda row: daynight(row['month'], row['twohoursession']), axis=1) df['season'] = df.apply(lambda row: season(row['month']), axis=1) df['year'] = df.apply(lambda row: year_binary(row['year']), axis=1) # group = df.groupby('geohash') # df = shift(group) #df['predicted_accident'] = df['acc_count'] df['predicted_accident']=df.apply(lambda row: make_binary(row['acc_count']), axis=1) #speed=pd.read_csv("/data/dadwal/data/DAP_data/speed_hour_day.csv",header=0) #mergespeed_tempdf=pd.merge(df,speed,on=['day','twohoursession']) #display(mergespeed_tempdf) #display(mergespeed_tempdf.columns) # 'cluster_id', 'UJAHR', 'UMONAT', 'UWOCHENTAG', 'sessiondivided', # 'middlelat', 'middlelong', 'counts', 'elevaion', 'zenith','azimuth_solar', 'concate_year', 'timestep' # 'year', 'month', 'day', 'twohoursession' mergespeed_tempdf = df[[u'timestep', u'predicted_accident',u'is_p2',u'is_present', u'geohash', u'geohash_code',u'year', u'month', u'day',u'twohoursession', u'HOD_cat', u'DOW_cat', u'DayLight', u'season', u'elevaion', u'zenith', u'azimuth_solar', 'acc_count' ]] mergespeed_tempdf.to_hdf("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+"ng_train.h5", key='set3') display(mergespeed_tempdf.head()) print(city) #city='hann_clusterDBSCAN_0.003_2pts' #clean_data_city('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/df_finalTrain.csv', city) clean_data_city('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/df_finalTrain.csv', city) print(city) df=pd.read_hdf("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+city+"train.h5",key='set3') 
df.loc[df['is_p2']==2] df1 = df[df.isna().any(axis=1)] df1 ###Output _____no_output_____ ###Markdown train and test ###Code import pandas as pd import numpy as np import random import os import sys import psutil import matplotlib import matplotlib.pyplot as plt import math from multiprocessing import cpu_count,Pool import multiprocessing from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OneHotEncoder from collections import Counter import pickle #city='olden' # previous 5x5 grid on munich on path below # /data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/Alldata_baveria/geodata_5x5/geo_vect_dict_bv_on method='dbscan' #city='new_method/hann/1x1Grid' #city='new_method/braun' #city='munich_grid5x5' #method='5x5' print(city) ###Output _____no_output_____ ###Markdown munich new method folder ###Code f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"//geodata//clustered_data_poi_acc_merged.pkl","rb") geohash_dict = pickle.load(f) f.close() print(geohash_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geo_dict.pkl","rb") geo_dict = pickle.load(f) f.close() #print(geo_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"//geodata//uartTrainOnly.pkl","rb") NLP_dict_uart = pickle.load(f) f.close() #print(geo_dict) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"//geodata//zustandTrainOnly.pkl","rb") NLP_dict_zustand = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"//geodata//utypeTrainOnly.pkl","rb") NLP_dict_utype1 = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"///geodata//acc_countTrainOnly.pkl","rb") NLP_dict_accCount = pickle.load(f) f.close() #print((NLP_dict_accCount)) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"//geodata//htype.pkl","rb") NLP_dict_htype = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"//geodata//maxspeed.pkl","rb") NLP_dict_avg_max_speed = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"///munich_centerCord.pkl","rb") NLP_dict_centerCord = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata/clusterToGeohashCountTrainOnly.pkl","rb") NLP_dict_clustertoGeohash = pickle.load(f) f.close() f=open("data/regional_features1cros1grid/pklfiles/geo_poi_dicr_niedersachsen_onekm.pkl","rb") geohash_dict = pickle.load(f) f.close() #print(geohash_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/geo_dict.pkl","rb") geo_dict = pickle.load(f) f.close() print(geo_dict) f=open("data/regional_features1cros1grid/pklfiles/uart.pkl","rb") NLP_dict_uart = pickle.load(f) f.close() #print(geo_dict) f = open("data/regional_features1cros1grid/pklfiles/zustand.pkl","rb") NLP_dict_zustand = pickle.load(f) f.close() f = open("data/regional_features1cros1grid/pklfiles/utyp1.pkl","rb") NLP_dict_utype1 = pickle.load(f) f.close() f = open("data/regional_features1cros1grid/pklfiles/acc_count.pkl","rb") NLP_dict_accCount = pickle.load(f) f.close() #print((NLP_dict_accCount)) f = open("data/regional_features1cros1grid/NLP_htypeonecross1.pkl","rb") NLP_dict_htype 
= pickle.load(f) f.close() f = open("data/regional_features1cros1grid/pklfiles/maxspeed.pkl","rb") NLP_dict_avg_max_speed = pickle.load(f) f.close() ###Output _____no_output_____ ###Markdown baun in new_method folder ###Code f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//geo_poi_dicr_niedersachsen_onekm.pkl","rb") geohash_dict = pickle.load(f) f.close() print(geohash_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geo_dict.pkl","rb") geo_dict = pickle.load(f) f.close() #print(geo_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//uartTrainOnly.pkl","rb") NLP_dict_uart = pickle.load(f) f.close() #print(geo_dict) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//zustandTrainOnly.pkl","rb") NLP_dict_zustand = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//utypeTrainOnly.pkl","rb") NLP_dict_utype1 = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"///geodata//acc_countTrainOnly.pkl","rb") NLP_dict_accCount = pickle.load(f) f.close() #print((NLP_dict_accCount)) f = open("data/regional_features1cros1grid/NLP_htypeonecross1.pkl","rb") NLP_dict_htype = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//maxspeed.pkl","rb") NLP_dict_avg_max_speed = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/braun_centerCord.pkl","rb") NLP_dict_centerCord = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//clusterToGeohashCountTrainOnly.pkl","rb") NLP_dict_clustertoGeohash = pickle.load(f) f.close() ###Output _____no_output_____ ###Markdown munich 5x5 new method folder ###Code f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//clustered_data_poi_acc_merged.pkl","rb") geohash_dict = pickle.load(f) f.close() print(geohash_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geo_dict.pkl","rb") geo_dict = pickle.load(f) f.close() #print(geo_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//uartTrainOnly.pkl","rb") NLP_dict_uart = pickle.load(f) f.close() #print(geo_dict) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//zustandTrainOnly.pkl","rb") NLP_dict_zustand = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//utypeTrainOnly.pkl","rb") NLP_dict_utype1 = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/geodata//acc_countTrainOnly.pkl","rb") NLP_dict_accCount = pickle.load(f) f.close() #print((NLP_dict_accCount)) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//htype.pkl","rb") NLP_dict_htype = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//maxspeed.pkl","rb") NLP_dict_avg_max_speed = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"///munich_centerCord.pkl","rb") NLP_dict_centerCord = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata/clusterToGeohashCountTrainOnly.pkl","rb") 
NLP_dict_clustertoGeohash = pickle.load(f) f.close() ###Output _____no_output_____ ###Markdown hannover in new method folder (1x1,5x5,clustering) ###Code f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//clustered_data_poi_acc_merged.pkl","rb") geohash_dict = pickle.load(f) f.close() print(geohash_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geo_dict.pkl","rb") geo_dict = pickle.load(f) f.close() #print(geo_dict) f=open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//uartTrainOnly.pkl","rb") NLP_dict_uart = pickle.load(f) f.close() #print(geo_dict) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//zustandTrainOnly.pkl","rb") NLP_dict_zustand = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//utypeTrainOnly.pkl","rb") NLP_dict_utype1 = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/geodata//acc_countTrainOnly.pkl","rb") NLP_dict_accCount = pickle.load(f) f.close() #print((NLP_dict_accCount)) f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//htype.pkl","rb") NLP_dict_htype = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata//maxspeed.pkl","rb") NLP_dict_avg_max_speed = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"///hann_centerCord.pkl","rb") NLP_dict_centerCord = pickle.load(f) f.close() f = open("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"//geodata/clusterToGeohashCountTrainOnly.pkl","rb") NLP_dict_clustertoGeohash = pickle.load(f) f.close() print(NLP_dict_htype.get('u1r2vx')) # # Helper functions for the parallel computing cores = cpu_count() #Number of CPU cores on your system partitions = 25 class WithExtraArgs(object): def __init__(self, func, **args): self.func = func self.args = args def __call__(self, df): return self.func(df, **self.args) def applyParallel(data, func,pool,partition, kwargs): data_split = [data[i:i + partition] for i in range(0, len(data), partition)] #data_split = np.array_split(data, min(partitions,data.shape[0])) data =pool.map(WithExtraArgs(func, **kwargs), data_split) #data = pd.concat(pool.map(WithExtraArgs(func, **kwargs), data_split)) return data def onhot_enoceder_month(train): myEncoder = OneHotEncoder(sparse=False) myEncoder.fit(train['month'].values.reshape(-1, 1)) onehot_encode = pd.concat([train.reset_index().drop('month',1), pd.DataFrame(myEncoder.transform(train['month'].values.reshape(-1, 1)), #columns=['m_en0','m_en1','m_en2','m_en3','m_en4'])], axis=1).reindex() columns=['m_en0','m_en1','m_en2','m_en3','m_en4','m_en5','m_en6','m_en7','m_en8','m_en9','m_en10','m_en11'])], axis=1).reindex() return onehot_encode.drop('index',1) def onhot_enoceder_day(train): myEncoder = OneHotEncoder(sparse=False) myEncoder.fit(train['day'].values.reshape(-1, 1)) onehot_encode = pd.concat([train.reset_index().drop('day',1), pd.DataFrame(myEncoder.transform(train['day'].values.reshape(-1, 1)), #columns=['m_en0','m_en1','m_en2','m_en3','m_en4'])], axis=1).reindex() columns=['d_en0','d_en1','d_en2','d_en3','d_en4','d_en5','d_en6'])], axis=1).reindex() return onehot_encode.drop('index',1) def onhot_enoceder(train): myEncoder = OneHotEncoder(sparse=False) myEncoder.fit(train['HOD_cat'].values.reshape(-1, 1)) onehot_encode = 
pd.concat([train.reset_index().drop('HOD_cat',1), pd.DataFrame(myEncoder.transform(train['HOD_cat'].values.reshape(-1, 1)), columns=['HOD_en0','HOD_en1','HOD_en2','HOD_en3','HOD_en4'])], axis=1).reindex() return onehot_encode.drop('index',1) def scalarval(df): scaler = MinMaxScaler(feature_range=(0, 1)) scaler.fit(df.values.reshape(-1,1)) scaled_values = scaler.transform(df.values.reshape(-1,1)) df = scaled_values return df latlong=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+"oneCrossOneGrid_centroid_cluster.csv") latlong['middlelong']=scalarval(latlong['middlelong']) latlong['middlelat']=scalarval(latlong['middlelat']) latlong col = latlong.iloc[:, 1:].values NLP_dict_mid = {} for index, row in latlong.iterrows(): NLP_dict_mid[row.geohash] = np.array(col[index]) f = open('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/hann_centerCord.pkl', "wb") pickle.dump(NLP_dict_mid, f) def onhot_enoceder_season(train): myEncoder = OneHotEncoder(sparse=False) myEncoder.fit(train['season'].values.reshape(-1, 1)) onehot_encode = pd.concat([train.reset_index().drop('season',1), pd.DataFrame(myEncoder.transform(train['season'].values.reshape(-1, 1)), #columns=['m_en0','m_en1','m_en2','m_en3','m_en4'])], axis=1).reindex() columns=['season_0','season_1','season_2','season_3'])], axis=1).reindex() return onehot_encode.drop('index',1) def onhot_enoceder_year(train): myEncoder = OneHotEncoder(sparse=False) myEncoder.fit(train['year'].values.reshape(-1, 1)) onehot_encode = pd.concat([train.reset_index().drop('year',1), pd.DataFrame(myEncoder.transform(train['year'].values.reshape(-1, 1)), #columns=['m_en0','m_en1','m_en2','m_en3','m_en4'])], axis=1).reindex() columns=['year_0','year_1','year_2'])], axis=1).reindex() return onehot_encode.drop('index',1) # def create_train_set_aug_geo(frame_list,geomap): # process_name = str(multiprocessing.current_process()) # id = int(process_name.split(',')[0].split('-')[1]) # print("process ",id," started") # X_train = [] # y_train = [] # print ("process list with length of ",len(frame_list)) # for frame in frame_list: # training_set = frame.values # geo_vec = geomap[frame.geohash.iloc[0]] # geo_code = geo_dict[frame.geohash.iloc[0]] # NLP_code_uart = NLP_dict_uart[frame.geohash.iloc[0]] # NLP_code_uytpe1 = NLP_dict_utype1[frame.geohash.iloc[0]] # NLP_code_zustand = NLP_dict_zustand[frame.geohash.iloc[0]] # htype_count = 0 # for key, values in NLP_dict_htype.items(): # htype_count = len(values) # break # try: # NLP_htype = NLP_dict_htype[frame.geohash.iloc[0]] # except: # NLP_htype = np.zeros(htype_count) # 27 if hannover data is choosen # try: # NLP_code_max_speed = NLP_dict_avg_max_speed[frame.geohash.iloc[0]] # except: # NLP_code_max_speed=[0.24] # try: # NLP_code_acc_count = NLP_dict_accCount[frame.geohash.iloc[0]] # except: # #print('raj') # NLP_code_acc_count = np.zeros(1) # 27 if hannover data is choosen # for i in range(8, training_set.shape[0]): # if training_set[i, 1] > 0 : # a = np.concatenate((training_set[i-8:i,5:].flatten(),geo_vec),axis=0) # a = np.concatenate((a,NLP_code_uart),axis=0) # a= np.concatenate((a,NLP_code_uytpe1),axis=0) # a=np.concatenate((a,NLP_code_zustand),axis=0) # a=np.concatenate((a,NLP_code_acc_count),axis=0) # a=np.concatenate((a,NLP_code_max_speed),axis=0) # a=np.concatenate((a,NLP_htype),axis=0) # a = np.append(a, geo_code) # a=np.append(a,1) # X_train.append(a) # y_train.append(1) # elif random.uniform(0, 1) > 0.964: # a = 
np.concatenate((training_set[i-8:i,5:].flatten(),geo_vec),axis=0) # a = np.concatenate((a,NLP_code_uart),axis=0) # a= np.concatenate((a,NLP_code_uytpe1),axis=0) # a=np.concatenate((a,NLP_code_zustand),axis=0) # a=np.concatenate((a,NLP_code_acc_count),axis=0) # a=np.concatenate((a,NLP_code_max_speed),axis=0) # a=np.concatenate((a,NLP_htype),axis=0) # a = np.append(a, geo_code) # a=np.append(a,0) # X_train.append(a) # y_train.append(0) # return X_train, y_train def create_train_set_aug_geo(frame_list,geomap): process_name = str(multiprocessing.current_process()) id = int(process_name.split(',')[0].split('-')[1]) print("process ",id," started") X_train = [] y_train = [] print ("process list with length of ",len(frame_list)) for frame in frame_list: training_set = frame.values #geo_vec = geomap[frame.geohash.iloc[0]] geo_code = geo_dict[frame.geohash.iloc[0]] #NLP_code_uart = NLP_dict_uart[frame.geohash.iloc[0]] # NLP_code_uytpe1 = NLP_dict_utype1[frame.geohash.iloc[0]] # NLP_code_zustand = NLP_dict_zustand[frame.geohash.iloc[0]] htype_count = 0 for key, values in NLP_dict_htype.items(): htype_count = len(values) #print(htype_count) break try: NLP_center_cord=NLP_dict_centerCord[frame.geohash.iloc[0]] except: NLP_center_cord = np.zeros(2) # 27 if hannover data is choosen #try: # NLP_cluster_geohash=NLP_dict_clustertoGeohash[frame.geohash.iloc[0]] #except: NLP_cluster_geohash = np.zeros(1) # 27 if hannover data is choosen try: NLP_htype = NLP_dict_htype[frame.geohash.iloc[0]] except: NLP_htype = np.zeros(htype_count) # 27 if hannover data is choosen try: geo_vec = geomap[frame.geohash.iloc[0]] #print(len(geo_vec)) except: geo_vec = np.zeros(10) # 27 if hannover data is choosen try: NLP_code_uart = NLP_dict_uart[frame.geohash.iloc[0]] except: NLP_code_uart = np.zeros(10) # 27 if hannover data is choosen try: NLP_code_uytpe1 = NLP_dict_utype1[frame.geohash.iloc[0]] except: NLP_code_uytpe1 = np.zeros(7) # 27 if hannover data is choosen try: NLP_code_zustand = NLP_dict_zustand[frame.geohash.iloc[0]] except: NLP_code_zustand = np.zeros(3) # 27 if hannover data is choosen # try: # NLP_htype = NLP_dict_htype[frame.geohash.iloc[0]] # #print(len(NLP_htype)) # except: # NLP_htype = np.zeros(htype_count) # 27 if hannover data is choosen try: NLP_code_max_speed = NLP_dict_avg_max_speed[frame.geohash.iloc[0]] except: NLP_code_max_speed=[0.24] try: NLP_code_acc_count = NLP_dict_accCount[frame.geohash.iloc[0]] except: #print('raj') NLP_code_acc_count = np.zeros(1) # 27 if hannover data is choosen for i in range(8, training_set.shape[0]): if training_set[i, 2] > 1 : if training_set[i, 1] > 0: a = np.concatenate((training_set[i-8:i,7:].flatten(),geo_vec),axis=0) #print(a.shape) a = np.concatenate((a,NLP_code_uart),axis=0) #print(a.shape) a= np.concatenate((a,NLP_code_uytpe1),axis=0) #print(a.shape) a=np.concatenate((a,NLP_code_zustand),axis=0) #print(a.shape) a=np.concatenate((a,NLP_code_acc_count),axis=0) #print(a.shape) a=np.concatenate((a,NLP_code_max_speed),axis=0) #print('max speed',a.shape) a=np.concatenate((a,NLP_htype),axis=0) #print('NLP',a.shape) a=np.concatenate((a,NLP_center_cord),axis=0) a=np.concatenate((a,NLP_cluster_geohash),axis=0) #print(a.shape) #add center of each cluster i.e. 
long lat as regional feature a = np.append(a, geo_code) #a=np.append(a,1) X_train.append(a) y_train.append(1) else: a = np.concatenate((training_set[i-8:i,7:].flatten(),geo_vec),axis=0) a = np.concatenate((a,NLP_code_uart),axis=0) a= np.concatenate((a,NLP_code_uytpe1),axis=0) a=np.concatenate((a,NLP_code_zustand),axis=0) a=np.concatenate((a,NLP_code_acc_count),axis=0) a=np.concatenate((a,NLP_code_max_speed),axis=0) a=np.concatenate((a,NLP_htype),axis=0) a=np.concatenate((a,NLP_center_cord),axis=0) a=np.concatenate((a,NLP_cluster_geohash),axis=0) a = np.append(a, geo_code) #a=np.append(a,0) X_train.append(a) y_train.append(0) return X_train, y_train def create_sequences(df,geohash_dict): frame_list=[] for idx, frame in df.groupby(df.geohash): #print('frame=',frame) frame_list.append(frame) print('------------------------') #print(frame_list[0]) print('#########################') pool = Pool(cores) partition = int(np.ceil(float(len(frame_list))/partitions)) train_set = applyParallel (frame_list,create_train_set_aug_geo,pool,partition,{'geomap':geohash_dict.copy()}) pool.close() pool.join() X_train = [] y_train = [] for set_ in train_set: X_train.extend(set_[0]) y_train.extend(set_[1]) X_train, y_train = np.array(X_train), np.array(y_train) return X_train,y_train #city1='new_method/braun/shifted_combinetrain_560m' print(city) def train_data(filename): # df=pd.read_hdf('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/'+city+'/'+city+'val.h5', # key='set3') df=pd.read_hdf("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/"+"ng_train.h5", key='set3') print('whole h5 file') display(df.columns) # df=df[[u'timestep', u'predicted_accident',u'is_present', u'geohash', u'geohash_code', u'month',u'day', u'twohoursession',u'year', # u'HOD_cat', u'DOW_cat', u'DayLight', u'season', u'elevaion', u'zenith', u'azimuth_solar' # ]] df=df[[u'timestep', u'predicted_accident',u'is_p2',u'is_present', u'geohash', u'geohash_code', u'month',u'day', u'twohoursession',u'year', u'HOD_cat', u'DOW_cat', u'DayLight', u'season', u'elevaion', u'zenith', u'azimuth_solar' ]] print('year=1') year_one=df.loc[df['year']>0] display(year_one) print('---------------------') acct=df.loc[df['predicted_accident']>0] print('acc > 0') display(acct) display(df.head()) df = onhot_enoceder_month(df) df= onhot_enoceder_day(df) df=onhot_enoceder(df) df=onhot_enoceder_season(df) df=onhot_enoceder_year(df) df['previous_acc']=df['predicted_accident'] # df=df[['timestep', 'predicted_accident', 'geohash', 'geohash_code', # 'twohoursession', 'year', 'DOW_cat', 'DayLight','m_en0', 'm_en1', # 'm_en2', 'm_en3', 'm_en4', 'm_en5', 'm_en6', 'm_en7', 'm_en8', 'm_en9', # 'm_en10', 'm_en11', 'HOD_en0', 'HOD_en1', 'HOD_en2', 'HOD_en3', # 'HOD_en4']] df['elevaion'] = scalarval(df['elevaion']) df['zenith'] = scalarval(df['zenith']) df['azimuth_solar'] = scalarval(df['azimuth_solar']) #df['acc_count'] = scalarval(df['acc_count']) #df['middlelat'] = scalarval(df['middlelat']) #df['middlelong'] = scalarval(df['middlelong']) print(df.columns) df_normalize = df.copy() #df_normalize.to_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/df_intermediate.csv",index=False) X_train, y_train = create_sequences(df,geohash_dict) #X_test, y_test = create_sequences(test,geohash_dict) #X_test, y_test = create_sequences(test,geohash_dict) #X_train3000whole=X_train[:3000,:] #X_test3000whole=X_test[:3000,:] #pd.DataFrame(X_train3000whole).to_csv("X_train3000whole.csv") 
#pd.DataFrame(X_test3000whole).to_csv("X_test3000whole.csv") np.save("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/traindata/X_train",X_train) print (X_train.shape) np.save("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/traindata/y_train",y_train) # np.save('/data/dadwal/dir_14oct/accident_prediction/training_data_nt/' + city + '/' + method + '/X_test',X_test) # print (X_test.shape) # np.save('/data/dadwal/dir_14oct/accident_prediction/training_data_nt/' + city + '/' + method + '/y_test',y_test) print (y_train.shape) print('done') print(city) train_data('hann_0.003_minpts_2_poi') #city1='new_method/som_clustering30x30' df=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city+"/df_intermediate.csv") df city1='new_method/som_clustering30x30' df=pd.read_csv("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria/"+city1+"/df_intermediate.csv") df_10=df.head(10) df_10 import numpy as np X_train_braun=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/braun/traindata/X_train.npy',allow_pickle=True) Y_train_braun=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/braun/traindata/y_train.npy',allow_pickle=True) X_train_gott=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/gott/traindata/X_train.npy',allow_pickle=True) Y_train_gott=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/gott/traindata/y_train.npy',allow_pickle=True) X_train_olden=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/olden/traindata/X_train.npy',allow_pickle=True) Y_train_olden=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/olden/traindata/y_train.npy',allow_pickle=True) X_train_osna=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/osna/traindata/X_train.npy',allow_pickle=True) Y_train_osna=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/osna/traindata/y_train.npy',allow_pickle=True) X_train=np.concatenate((X_train_braun,X_train_gott,X_train_olden,X_train_osna),axis=0) y_train=np.concatenate((Y_train_braun,Y_train_gott,Y_train_olden,Y_train_osna),axis=0) np.save("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/BGOO/X_train",X_train) np.save("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/BGOO/y_train",y_train) import numpy as np X_test_braun=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/braun/traindata/X_test.npy',allow_pickle=True) Y_test_braun=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/braun/traindata/y_test.npy',allow_pickle=True) print(X_test_braun.shape) X_test_gott=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/gott/traindata/X_test.npy',allow_pickle=True) Y_test_gott=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/gott/traindata/y_test.npy',allow_pickle=True) X_test_olden=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/olden/traindata/X_test.npy',allow_pickle=True) Y_test_olden=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/olden/traindata/y_test.npy',allow_pickle=True) X_test_osna=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/osna/traindata/X_test.npy',allow_pickle=True) Y_test_osna=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/osna/traindata/y_test.npy',allow_pickle=True) 
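# NOTE: np.concatenate(axis=0) below assumes the four city arrays share the
# same per-sample feature shape; a cheap guard (added here as a sketch) is:
feature_shapes = {a.shape[1:] for a in (X_test_braun, X_test_gott, X_test_olden, X_test_osna)}
assert len(feature_shapes) == 1, "per-city feature shapes differ: %s" % feature_shapes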
X_test=np.concatenate((X_test_braun,X_test_gott,X_test_olden,X_test_osna),axis=0) y_test=np.concatenate((Y_test_braun,Y_test_gott,Y_test_olden,Y_test_osna),axis=0) np.save("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/BGOO/X_test",X_test) np.save("/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/BGOO/y_test",y_test) X_train=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/BGOO/X_train.npy',allow_pickle=True) X_test=np.load('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/Baveria_grid/BGOO/X_test.npy',allow_pickle=True) print(X_train.shape) print(X_test.shape) city='braun' df=pd.read_hdf('/data/dadwal/data/DAP_data/dataPrepTrainTestCluster/rawdata_grid/'+city+'/'+city+'test.h5',key='set3') print('whole h5 file') df.head() df.geohash.nunique() def train_data(filename): df=pd.read_hdf('/data/dadwal/dir_14oct/accident_prediction/data/intermediate_results_1x1only/'+city+'/'+method+'/train_geohash.h5',key='set3') print('whole h5 file') display(df) print('year=1') year_one=df.loc[df['year']>0] display(year_one) print('---------------------') acct=df.loc[df['predicted_accident']>0] print('acc > 0') display(acct) display(df.head()) # df = onhot_enoceder_month(df) # df=onhot_enoceder(df) # df=onhot_enoceder_season(df) # df['elevaion'] = scalarval(df['elevaion']) # df['zenith'] = scalarval(df['zenith']) # df['azimuth_solar'] = scalarval(df['azimuth_solar']) # df['acc_count'] = scalarval(df['acc_count']) # df['middlelat'] = scalarval(df['middlelat']) # df['middlelong'] = scalarval(df['middlelong']) print(df.columns) df_normalize = df.copy() X_train, y_train = create_sequences(df,geohash_dict) #X_test, y_test = create_sequences(test,geohash_dict) X_train3000whole=X_train[:3000,:] #y_trainwhole=y_train[:3000,:] pd.DataFrame(X_train3000whole).to_csv("X_train3000whole.csv") #pd.DataFrame(X_test3000whole).to_csv("X_test3000whole.csv") np.save("/data/dadwal/dir_14oct/accident_prediction/training_data_NT_ALL_Cities/"+city+"/"+method+"/X_val",X_train) print (X_train.shape) np.save("/data/dadwal/dir_14oct/accident_prediction/training_data_NT_ALL_Cities/"+city+"/"+method+"/y_val",y_train) print( y_train.shape) # np.save('/data/dadwal/dir_14oct/accident_prediction/training_data_nt/' + city + '/' + method + '/X_test',X_test) # print (X_test.shape) # np.save('/data/dadwal/dir_14oct/accident_prediction/training_data_nt/' + city + '/' + method + '/y_test',y_test) # print (y_test.shape) print('done') train_data('hann_0.003_minpts_2_poi') import numpy as np #city='osna' train=np.load('/data/dadwal/dir_14oct/accident_prediction/new_training_data/' + city + '/' + method + '/y_train.npy',allow_pickle=True) test=np.load('/data/dadwal/dir_14oct/accident_prediction/new_training_data/' + city + '/' + method + '/y_test.npy',allow_pickle=True) train_acc=train[train > 0].size test_acc=test[test > 0].size tot_acc=train_acc+test_acc print('tot_acc in '+city+'=', tot_acc) train_nonacc=train[train == 0].size test_nonacc=test[test == 0].size tot_noacc=train_nonacc+test_nonacc print('tot_noacc in '+city+'=',tot_noacc) percent=(tot_acc/tot_noacc)*100 print("percent of acc/non-acc in "+city+"=",percent) import numpy as np city='osna' train=np.load('/data/dadwal/dir_14oct/accident_prediction/new_training_data/' + city + '/' + method + '/y_train.npy',allow_pickle=True) test=np.load('/data/dadwal/dir_14oct/accident_prediction/new_training_data/' + city + '/' + method + '/y_test.npy',allow_pickle=True) train_acc=train[train > 0].size test_acc=test[test > 0].size 
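# NOTE: the two "percent of acc/non-acc" prints below both say "train set",
# but the second one is computed from the test split (test_acc/test_nonacc) --
# the label looks like a copy-paste slip. A small helper (sketch only, kept
# commented out so the cell's output is unchanged) avoids the duplication:
# def pct_accident(y): return 100.0 * (y > 0).sum() / max((y == 0).sum(), 1)
# print("train:", pct_accident(train), " test:", pct_accident(test))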
#train=train_acc #print('tot_acc in '+city+'=', tot_acc) train_nonacc=train[train == 0].size test_nonacc=test[test == 0].size #tot_noacc=train_nonacc+test_nonacc #print('tot_noacc in '+city+'=',tot_noacc) percent_acc=(train_acc/train_nonacc)*100 print("percent of acc/non-acc in train set "+city+"=",percent_acc) percent_nonacc=(test_acc/test_nonacc)*100 print("percent of acc/non-acc in train set "+city+"=",percent_nonacc) import numpy as np city='osna' method='dbscan' train=np.load('/data/dadwal/dir_14oct/accident_prediction/new_training_data/' + city + '/' + method + '/X_train.npy',allow_pickle=True) test=np.load('/data/dadwal/dir_14oct/accident_prediction/new_training_data/' + city + '/' + method + '/X_test.npy',allow_pickle=True) train_percent=(train.size/(train.size+test.size))*100 test_percent=(test.size/(test.size+train.size))*100 print(train_percent) print(test_percent) ###Output _____no_output_____ ###Markdown rain data for Osna ###Code def getdaynumber(day): day_number=0 if day=='Sun': day_number=1 elif day=='Mon': day_number=2 elif day=='Tue': day_number=3 elif day=='Wed': day_number=4 elif day=='Thu': day_number=5 elif day=='Fri': day_number=6 elif day=='Sat': day_number=7 return day_number import pandas as pd hann_rain_2017=pd.read_csv("/data/dadwal/data/DAP_data/rain_data/osnabruck/osna_2017_raindata.csv",header=0) hann_rain_2017['year']=2017 hann_rain_2017=hann_rain_2017[['geohash_six','year','month','day','twohoursession','val']] hann_rain_2017.columns=['geohash','year','month','day','twohoursession','rain_val'] hann_rain_2017['day']=hann_rain_2017.apply(lambda row: getdaynumber(row['day']), axis=1) hann_rain_2017.head() import pandas as pd hann_rain_2018=pd.read_csv("//data/dadwal/data/DAP_data/rain_data/osnabruck/osna_2018_raindata.csv",header=0) hann_rain_2018['year']=2018 hann_rain_2018=hann_rain_2018[['geohash_six','year','month','day','twohoursession','val']] hann_rain_2018.columns=['geohash','year','month','day','twohoursession','rain_val'] hann_rain_2018['day']=hann_rain_2018.apply(lambda row: getdaynumber(row['day']), axis=1) hann_rain_2018.head() hann_rain_2017['twohoursession'] = pd.Categorical(hann_rain_2017['twohoursession'], ["0-2", "2-4", "4-6","6-8","8-10","10-12","12-14","14-16","16-18","18-20","20-22","22-24"]) hann_rain_2018['twohoursession'] = pd.Categorical(hann_rain_2018['twohoursession'], ["0-2", "2-4", "4-6","6-8","8-10","10-12","12-14","14-16","16-18","18-20","20-22","22-24"]) import numpy as np df_final_2017 = pd.pivot_table(hann_rain_2017, index=['geohash','year','month','day','twohoursession'], values='rain_val', fill_value = 0, dropna=False, aggfunc=np.sum).reset_index() df_final_2017.head() import numpy as np df_final_2018 = pd.pivot_table(hann_rain_2018, index=['geohash','year','month','day','twohoursession'], values='rain_val', fill_value = 0, dropna=False, aggfunc=np.sum).reset_index() df_final_2018.head() # join two dataset df_final=df_final_2017.append(df_final_2018, ignore_index = True) df_final.head() import pandas as pd hann_accident_both_year=pd.read_csv("/data/dadwal/data/DAP_data/twoyearsepData/rawdata/datawithazimuthUTPYE3/datawithazimuthUTPYE3OsnaBruckwithIndex.csv",header=0) hann_accident_both_year.head() join_data=pd.merge(hann_accident_both_year,df_final,on=["geohash","year","month","day","twohoursession"],how='left') join_data=join_data.fillna(0) join_data.head() join_data.to_csv("/data/dadwal/data/DAP_data/twoyearsepData/rawdatawithrainData/osnabruckTwoearwithRain.csv",index=False) ###Output _____no_output_____ ###Markdown 
create train and test data from rain data ###Code def clean_data_city(filepath,storename): df = pd.read_csv(filepath) display (df.head()) list_ = df.columns print (list_) temp_df = df [[u'timestep', u'acc_count',u'geohash',u'year',u'rain_val',u'twohoursession',u'month',u'day',u'elevaion',u'zenith',u'azimuth_solar',u'STRZUSTAND_0',u'STRZUSTAND_1',u'STRZUSTAND_2',u'UART_0',u'UART_1',u'UART_2',u'UART_3',u'UART_4',u'UART_5',u'UART_6','UART_7',u'UART_8',u'UART_9',u'UTYP1_1',u'UTYP1_2',u'UTYP1_3',u'UTYP1_4',u'UTYP1_5',u'UTYP1_6',u'UTYP1_7']] temp_df.to_hdf('/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/'+storename+'.h5',key='set1') display(temp_df.head()) print ("zero accident =",float(df[df['acc_count']==0].shape[0])/df.shape[0]) #f = open("geo_dict_niedersacehsen_fivekm.pkl","rb") f=open("/home/dadwal/DAP/ls_accident/data/oneKM/onekm/geo_dict/geo_dict_niedersacehsen_onekm.pkl","rb") geo_dict = pickle.load(f) f.close() def fun_hash(geohash): return geo_dict[geohash] df['geohash_code'] = df.apply(lambda row: fun_hash(row['geohash']), axis=1) temp_df = df [[u'timestep', u'acc_count',u'geohash',u'year',u'rain_val',u'twohoursession', u'geohash_code',u'month',u'day',u'elevaion',u'zenith',u'azimuth_solar',u'STRZUSTAND_0',u'STRZUSTAND_1',u'STRZUSTAND_2',u'UART_0',u'UART_1',u'UART_2',u'UART_3',u'UART_4',u'UART_5',u'UART_6','UART_7',u'UART_8',u'UART_9',u'UTYP1_1',u'UTYP1_2',u'UTYP1_3',u'UTYP1_4',u'UTYP1_5',u'UTYP1_6',u'UTYP1_7']] temp_df.to_hdf('/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/'+storename+'.h5',key='set2') df = pd.read_hdf('/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/'+storename+'.h5',key='set2') display(df.head()) def week_day(DOW): if DOW > 1 and DOW<=6: return 1 elif DOW==1 or DOW==7 : return 0 def shift(group): df_list=[] for idx,df in group: df['predicted_accident'] = df['acc_count'].shift(-1) df.drop(df.tail(1).index,inplace=True) df_list.append(df) return pd.concat(df_list) def make_binary(d): if d > 0: return 1 else: return 0 def year_binary(d): if d == '2017': return 1 else: return 0 df['DOW_cat'] = df.apply(lambda row: week_day(row['day']), axis=1) df['HOD_cat'] = df.apply(lambda row: time_interval_two(row['twohoursession']), axis=1) # df['T-Accident'] = df.apply(lambda row: make_binary(row['acc_count']), axis=1) df['DayLight'] = df.apply(lambda row: daynight(row['month'],row['twohoursession']), axis=1) df['season']= df.apply(lambda row: season(row['month']), axis=1) df['year']= df.apply(lambda row: year_binary(row['year']), axis=1) # group = df.groupby('geohash') # df = shift(group) df['predicted_accident'] = df['acc_count'] temp_df = df [[u'timestep', u'predicted_accident',u'geohash',u'geohash_code', u'twohoursession',u'year',u'month',u'rain_val', u'HOD_cat', u'DOW_cat',u'DayLight',u'season',u'elevaion',u'zenith',u'azimuth_solar','acc_count',u'STRZUSTAND_0',u'STRZUSTAND_1',u'STRZUSTAND_2',u'UART_0',u'UART_1',u'UART_2',u'UART_3',u'UART_4',u'UART_5',u'UART_6','UART_7',u'UART_8',u'UART_9',u'UTYP1_1',u'UTYP1_2',u'UTYP1_3',u'UTYP1_4',u'UTYP1_5',u'UTYP1_6',u'UTYP1_7']] temp_df.to_hdf('/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/'+storename+'.h5',key='set3') display(temp_df.head()) display(temp_df.columns) clean_data_city("/data/dadwal/data/DAP_data/twoyearsepData/rawdatawithrainData/osnabruckTwoearwithRain.csv",'osntwotwoyearRain') def train_data(filename): #df = pd.read_hdf('/home/dadwal/ls_accident/data/oneKM/onekmfourhour/niedersachsen_onekmfourhour.h5',key='set3') 
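# NOTE: `filename` is only referenced in the commented-out read_hdf below --
# the active read_hdf call and the np.save output paths are hard-coded to the
# Osnabrueck rain data, so a call like train_data('gott') still processes
# osntwotwoyearRain.h5. If per-city runs are intended, build the path from the
# argument instead, e.g.
# '/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/' + filename + '.h5'.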
#df=pd.read_hdf('/home/dadwal/DAP/ls_accident/data/differencitiesaccident/datawithazimuth/h5cities/'+filename+'.h5',key='set3') df=pd.read_hdf('/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/osntwotwoyearRain.h5',key='set3') #df=df.loc[:,'timestep':'UTYP1_7'] acct=df.loc[df['predicted_accident']>0] #acct1=df.loc[df['T-Accident']>0] display(acct) #display(acct1)# the .h5 file contains raw traffic, weather, time, and POI data display(df.head()) df = onhot_enoceder_month(df) df=onhot_enoceder(df) df=onhot_enoceder_season(df) # #df=df[['timestep', 'predicted_accident', 'geohash', 'geohash_code', # 'twohoursession', 'year', 'DOW_cat', 'DayLight','m_en0', 'm_en1', # 'm_en2', 'm_en3', 'm_en4', 'm_en5', 'm_en6', 'm_en7', 'm_en8', 'm_en9', # 'm_en10', 'm_en11', 'HOD_en0', 'HOD_en1', 'HOD_en2', 'HOD_en3', # 'HOD_en4']] df['elevaion'] = scalarval(df['elevaion']) df['zenith'] = scalarval(df['zenith']) df['azimuth_solar'] = scalarval(df['azimuth_solar']) df['acc_count'] = scalarval(df['acc_count']) df['rain_val'] = scalarval(df['rain_val']) print(df.columns) #test = onhot_enoceder_month(test) df_normalize = df.copy() train = df_normalize[df_normalize.timestep <= df_normalize.timestep.max()*5/6] test = df_normalize[df_normalize.timestep > df_normalize.timestep.max()*5/6] #train.to_csv('train.csv',index=False) # scaler = MinMaxScaler(feature_range=(0, 1)) # scaler.fit(train.loc[:,'T-BrokenVehicle':]) # scaled_values = scaler.transform(train.loc[:,'T-BrokenVehicle':]) # train.loc[:,'T-BrokenVehicle':] = scaled_values # scaled_values = scaler.transform(test.loc[:,'T-BrokenVehicle':]) # test.loc[:,'T-BrokenVehicle':] = scaled_values display(test.head()) # train = onhot_enoceder(train) # test = onhot_enoceder(test) print(type(train)) print(type(test)) display(test.head()) display(test.head().columns) X_train, y_train = create_sequences(train,geohash_dict) X_test, y_test = create_sequences(test,geohash_dict) # suppose that we have a directory named train_set; in that directory we create several files per city to ... # ... 
represent its train and test data np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/traintestdatawithrain/X_train',X_train) print (X_train.shape) np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/traintestdatawithrain/y_train',y_train) print( y_train.shape) np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/traintestdatawithrain/X_test',X_test) print (X_test.shape) np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/traintestdatawithrain/y_test',y_test) print (y_test.shape) print('done') train_data('gott') import numpy as np train1=np.load('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/X_train.npy',allow_pickle=True) train=np.load('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/y_train.npy',allow_pickle=True) test=np.load('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/y_train.npy',allow_pickle=True) print(train1.shape) train_acc=train[train > 0].size test_acc=test[test > 0].size tot_acc=train_acc+test_acc print('tot_acc in =', tot_acc) train_nonacc=train[train == 0].size test_nonacc=test[test == 0].size tot_noacc=train_nonacc+test_nonacc print('tot_noacc in =',tot_noacc) percent=(tot_acc/tot_noacc)*100 print("percent of acc/non-acc in =",percent) ###Output _____no_output_____ ###Markdown gott with rain data as baseline ###Code def train_data(filename): #df = pd.read_hdf('/home/dadwal/ls_accident/data/oneKM/onekmfourhour/niedersachsen_onekmfourhour.h5',key='set3') #df=pd.read_hdf('/home/dadwal/DAP/ls_accident/data/differencitiesaccident/datawithazimuth/h5cities/'+filename+'.h5',key='set3') df=pd.read_hdf('/data/dadwal/data/DAP_data/twoyearsepData/h5citiesrain/osntwotwoyearRain.h5',key='set3') #df=df.loc[:,'timestep':'UTYP1_7'] acct=df.loc[df['predicted_accident']>0] #acct1=df.loc[df['T-Accident']>0] display(acct) #display(acct1)# the .h5 file contains raw traffic, weather, time, and POI data display(df.head()) df = onhot_enoceder_month(df) df=onhot_enoceder(df) df=onhot_enoceder_season(df) df=df[['timestep', 'predicted_accident', 'geohash', 'geohash_code', 'twohoursession', 'year', 'rain_val','DOW_cat', 'DayLight','m_en0', 'm_en1', 'm_en2', 'm_en3', 'm_en4', 'm_en5', 'm_en6', 'm_en7', 'm_en8', 'm_en9', 'm_en10', 'm_en11', 'HOD_en0', 'HOD_en1', 'HOD_en2', 'HOD_en3', 'HOD_en4']] # df['elevaion'] = scalarval(df['elevaion']) # df['zenith'] = scalarval(df['zenith']) # df['azimuth_solar'] = scalarval(df['azimuth_solar']) # df['acc_count'] = scalarval(df['acc_count']) df['rain_val'] = scalarval(df['rain_val']) print(df.columns) #test = onhot_enoceder_month(test) df_normalize = df.copy() train = df_normalize[df_normalize.timestep <= df_normalize.timestep.max()*5/6] test = df_normalize[df_normalize.timestep > df_normalize.timestep.max()*5/6] #train.to_csv('train.csv',index=False) # scaler = MinMaxScaler(feature_range=(0, 1)) # scaler.fit(train.loc[:,'T-BrokenVehicle':]) # scaled_values = scaler.transform(train.loc[:,'T-BrokenVehicle':]) # train.loc[:,'T-BrokenVehicle':] = scaled_values # scaled_values = scaler.transform(test.loc[:,'T-BrokenVehicle':]) # test.loc[:,'T-BrokenVehicle':] = scaled_values display(test.head()) # train = onhot_enoceder(train) # test = onhot_enoceder(test) print(type(train)) print(type(test)) display(test.head()) display(test.head().columns) X_train, y_train = 
create_sequences(train,geohash_dict) X_test, y_test = create_sequences(test,geohash_dict) # suppose that we have a directory named train_set; in that directory we create several files per city to ... # ... represent its train and test data np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/X_train',X_train) print (X_train.shape) np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/y_train',y_train) print( y_train.shape) np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/X_test',X_test) print (X_test.shape) np.save('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/y_test',y_test) print (y_test.shape) print('done') train_data('osna') import numpy as np train=np.load('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/y_train.npy',allow_pickle=True) test=np.load('/data/dadwal/data/DAP_data/twoyearsepData/traintestdata/osnabrucktraintestdata/baselinewithrain/y_train.npy',allow_pickle=True) train_acc=train[train > 0].size test_acc=test[test > 0].size tot_acc=train_acc+test_acc print('tot_acc in '+city+'=', tot_acc) train_nonacc=train[train == 0].size test_nonacc=test[test == 0].size tot_noacc=train_nonacc+test_nonacc print('tot_noacc in '+city+'=',tot_noacc) percent=(tot_acc/tot_noacc)*100 print("percent of acc/non-acc in "+city+"=",percent) ###Output _____no_output_____
benchmarks/en-efi/jw300-baseline/en_efi_jw300_notebook.ipynb
###Markdown Masakhane - Machine Translation for African Languages (Using JoeyNMT) Note before beginning: - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running - If you actually want to have a clue what you're doing, read the text and peek at the links - With 100 epochs, it should take around 7 hours to run in Google Colab - Once you've gotten a result for your language, please attach and email your notebook that generated it to [email protected] - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) Retrieve your data & make a parallel corpusIf you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details.Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe. ###Code from google.colab import drive drive.mount('/content/drive') # TODO: Set your source and target languages. Keep in mind, these traditionally use language codes as found here: # These will also become the suffix's of all vocab and corpus files used throughout import os source_language = "en" target_language = "efi" lc = False # If True, lowercase the data. seed = 42 # Random seed for shuffling. tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language os.environ["tag"] = tag # This will save it to a folder in our gdrive instead! !mkdir -p "/content/drive/My Drive/masakhane/$src-$tgt-$tag" g_drive_path = "/content/drive/My Drive/masakhane/%s-%s-%s" % (source_language, target_language, tag) os.environ["gdrive_path"] = g_drive_path models_path = '%s/models/%s%s_transformer'% (g_drive_path, source_language, target_language) # model temporary directory for training model_temp_dir = "/content/drive/My Drive/masakhane/model-temp" # model permanent storage on the drive !mkdir -p "$gdrive_path/models/${src}${tgt}_transformer/" !echo $gdrive_path #TODO: Skip for retrain # Install opus-tools ! pip install opustools-pkg #TODO: Skip for retrain # Downloading our corpus ! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q # extract the corpus file ! gunzip JW300_latest_xml_$src-$tgt.xml.gz # extract the corpus file ! gunzip JW300_latest_xml_$tgt-$src.xml.gz #TODO: Skip for retrain # Download the global test set. ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en # And the specific test set for this language pair. os.environ["trg"] = target_language os.environ["src"] = source_language ! 
wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en ! mv test.en-$trg.en test.en ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg ! mv test.en-$trg.$trg test.$trg #TODO: Skip for retrain # Read the test data to filter from train and dev splits. # Store english portion in set for quick filtering checks. en_test_sents = set() filter_test_sents = "test.en-any.en" j = 0 with open(filter_test_sents) as f: for line in f: en_test_sents.add(line.strip()) j += 1 print('Loaded {} global test sentences to filter from the training/dev data.'.format(j)) #TODO: Skip for retrain import pandas as pd # TMX file to dataframe source_file = 'jw300.' + source_language target_file = 'jw300.' + target_language source = [] target = [] skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion. with open(source_file) as f: for i, line in enumerate(f): # Skip sentences that are contained in the test set. if line.strip() not in en_test_sents: source.append(line.strip()) else: skip_lines.append(i) with open(target_file) as f: for j, line in enumerate(f): # Only add to corpus if corresponding source was not skipped. if j not in skip_lines: target.append(line.strip()) print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i)) df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence']) # if you get TypeError: data argument can't be an iterator is because of your zip version run this below #df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence']) df.head(3) ###Output Loaded data and skipped 6113/377824 lines since contained in test set. ###Markdown Pre-processing and exportIt is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned.In addition we will split our data into dev/test/train and export to the filesystem. ###Code #TODO: Skip for retrain # drop duplicate translations df_pp = df.drop_duplicates() # drop conflicting translations # (this is optional and something that you might want to comment out # depending on the size of your corpus) df_pp.drop_duplicates(subset='source_sentence', inplace=True) df_pp.drop_duplicates(subset='target_sentence', inplace=True) # Shuffle the data to remove bias in dev set selection. df_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True) #TODO: Skip for retrain # Install fuzzy wuzzy to remove "almost duplicate" sentences in the # test and training sets. ! pip install fuzzywuzzy ! pip install python-Levenshtein import time from fuzzywuzzy import process import numpy as np # reset the index of the training set after previous filtering df_pp.reset_index(drop=False, inplace=True) # Remove samples from the training data set if they "almost overlap" with the # samples in the test set. # Filtering function. Adjust pad to narrow down the candidate matches to # within a certain length of characters of the given sample. def fuzzfilter(sample, candidates, pad): candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad] if len(candidates) > 0: return process.extractOne(sample, candidates)[1] else: return np.nan # NOTE - This might run slow depending on the size of your training set. 
We are # printing some information to help you track how long it would take. scores = [] start_time = time.time() for idx, row in df_pp.iterrows(): scores.append(fuzzfilter(row['source_sentence'], list(en_test_sents), 5)) if idx % 1000 == 0: hours, rem = divmod(time.time() - start_time, 3600) minutes, seconds = divmod(rem, 60) print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds), "%0.2f percent complete" % (100.0*float(idx)/float(len(df_pp)))) # Filter out "almost overlapping samples" df_pp['scores'] = scores df_pp = df_pp[df_pp['scores'] < 95] #TODO: Skip for retrain # This section does the split between train/dev for the parallel corpora then saves them as separate files # We use 1000 dev test and the given test set. import csv # Do the split between dev/train and create parallel corpora num_dev_patterns = 1000 # Optional: lower case the corpora - this will make it easier to generalize, but without proper casing. if lc: # Julia: making lowercasing optional df_pp["source_sentence"] = df_pp["source_sentence"].str.lower() df_pp["target_sentence"] = df_pp["target_sentence"].str.lower() # Julia: test sets are already generated dev = df_pp.tail(num_dev_patterns) # Herman: Error in original stripped = df_pp.drop(df_pp.tail(num_dev_patterns).index) with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file: for index, row in stripped.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file: for index, row in dev.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") #stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere #stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks. #dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False) #dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False) # Doublecheck the format below. There should be no extra quotation marks or weird characters. ! head train.* ! head dev.* ###Output ==> train.efi <== Isaiah 9 : 7 ọdọho ke Eyen Abasi edidi Edidem ye nte ke enye ayanam ediwak nti n̄kpọ ọnọ ubonowo . “ Ifịk Jehovah mme udịm edinam emi . ” The New Encyclopædia Britannica ọdọhọ ke Mme Ntiense Jehovah “ ẹdu uwem nte Bible etemede . ” Mmọ ẹkesụk ẹdu ke ini emi wheat ye mbiet ẹkọride ọtọkiet , ndien owo ikokụreke kan̄a ndutịm oro ẹkenamde man ẹnyene mbon emi ẹdisinọde mme owo udia eke spirit . SIO INI NỊM NDINAM ITIE UFAN ỌKỌRI . Ndien ami nyeben̄e ekụri nsiak ifia nnịm nnọ enye edida etem udia . Ini kiet , mma ntọhọ nnyụn̄ n̄n̄wana ye owo unek emi eketiede ubi ubi , nnyụn̄ mmia unamikọt nsio ntop nduọk ko ! Edi Andibot ọmọn̄wọn̄ọ ete ke imọ iyọsọp ida utịt isọk ererimbot n̄kaowo oro odude ke emi ke idak ukara Satan kpa Devil . Ami ye Roy ima idomo ndidu uwem ekekem ye enyịn̄ oro ebe ke ndibuana ke kpukpru usụn̄ ukwọrọikọ ye ubịnikọt oro esop ekesịnde udọn̄ ọnọ . T . Sylvia emi edide nurse ọdọhọ ete : “ Ediwak mbon oro ikakade n̄wed ntre ẹma ẹsika ufọkabasi . ==> train.en <== Referring to what the rulership of God’s Son will accomplish , Isaiah 9 : 7 says : “ The very zeal of Jehovah of armies will do this . 
” The New Encyclopædia Britannica observes that Jehovah’s Witnesses “ insist upon a high moral code in personal conduct . ” They were still in the growing season , and the arrangement for a channel to provide spiritual food was still taking shape . MAKE TIME TO CULTIVATE A FRIENDSHIP . In the meantime , I would borrow an ax to chop firewood for cooking . On one occasion , I got into a fight with a sinister - looking customer but handled him easily . But the Creator has promised that he will soon bring an end to the present world society that is under the control of Satan the Devil . Roy and I endeavored to live up to that name by sharing in all the preaching methods and campaigns that the organization encouraged . T . “ I went to college with many who claimed to be religious , ” says Sylvia , who works in the health - care business . ==> dev.efi <== Edieke anamde ntre , ọwọrọ ke ememek mfọnn̄kan usụn̄ uwem . Akam ekeme ndidi se ọkwọrọ ederi eketịn̄de ọnọ mmọ edi oro . Ẹtịn̄ ukem ikọ oro ke 2 Chronicles 5 : 9 . Ẹkọbi Paul ẹtem ke ufọk esie ke Rome ke isua iba ( ke n̄kpọ nte isua 59 esịm 61 E.N . ) , ndien enye oyom usụn̄ do ọkwọrọ Obio Ubọn̄ onyụn̄ ekpep mbon en̄wen “ mme n̄kpọ emi ẹban̄ade Ọbọn̄ Jesus Christ . ” — Utom 28 : 30 , 31 . Jehovah ama odu ye enye . ” Ikebịghike - bịghi , ye edisio A Facsimile Edition of the Dead Sea Scrolls ( Nsiondi Mme Ata Ata Ikpan̄wed Inyan̄ Inụn̄ ) , ẹma ẹkeme ndikụt mme ndise ikpan̄wed oro owo mîkosioho ke mbemiso mmemmem mmemmem . Esịt ama enem enye etieti . Ke 2014 , obufa ọfiọn̄ emi ekperede usen emi uwemeyo ye okoneyo ẹsidide ukem ukem ediduọ ke March 30 , ke ayakde minit 15 ndimia n̄kanika usụkkiet okoneyo ke Jerusalem . Oro akanam iyom nditiene n̄kwọrọ etop emi . Mbon oro ẹmade eti n̄kpọ kpọt ẹdinyịme . ==> dev.en <== If you do , you will be choosing the best possible way of life . They may even have been told as much by a clergyman . The same point is made at 2 Chronicles 5 : 9 . 59 - 61 C.E . ) , and from there he finds ways to preach about the Kingdom and teach “ the things concerning the Lord Jesus Christ . ” ​ — Acts 28 : 30 , 31 . Jehovah was with him . ” Before long , with the publication of A Facsimile Edition of the Dead Sea Scrolls , photographs of the previously unpublished scrolls became easily accessible . What joy that brought her ! ( 20 : 45 ) , Jerusalem time . The following sunset in Jerusalem ( March 31 ) will come about 21 hours later . All the more reason for us to join in the proclamation . Only people who love what is good will accept him . ###Markdown --- Installation of JoeyNMTJoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io) ###Code # Install JoeyNMT ! git clone https://github.com/joeynmt/joeynmt.git ! cd joeynmt; pip3 install . ###Output Cloning into 'joeynmt'... remote: Enumerating objects: 3, done. remote: Counting objects: 100% (3/3), done. remote: Compressing objects: 100% (3/3), done. remote: Total 2380 (delta 0), reused 0 (delta 0), pack-reused 2377 Receiving objects: 100% (2380/2380), 2.60 MiB | 2.31 MiB/s, done. Resolving deltas: 100% (1670/1670), done. 
Processing /content/joeynmt Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.16.0) Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (7.0.0) Requirement already satisfied: numpy<2.0,>=1.14.5 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.18.2) Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (46.1.3) Requirement already satisfied: torch>=1.1 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.4.0) Requirement already satisfied: tensorflow>=1.14 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (2.2.0rc2) Requirement already satisfied: torchtext in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.3.1) Collecting sacrebleu>=1.3.6 [?25l Downloading https://files.pythonhosted.org/packages/f5/58/5c6cc352ea6271125325950715cf8b59b77abe5e93cf29f6e60b491a31d9/sacrebleu-1.4.6-py3-none-any.whl (59kB)  |████████████████████████████████| 61kB 1.1MB/s [?25hCollecting subword-nmt Downloading https://files.pythonhosted.org/packages/74/60/6600a7bc09e7ab38bc53a48a20d8cae49b837f93f5842a41fe513a694912/subword_nmt-0.3.7-py2.py3-none-any.whl Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (3.2.1) Requirement already satisfied: seaborn in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (0.10.0) Collecting pyyaml>=5.1 [?25l Downloading https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz (269kB)  |████████████████████████████████| 276kB 4.0MB/s [?25hCollecting pylint [?25l Downloading https://files.pythonhosted.org/packages/e9/59/43fc36c5ee316bb9aeb7cf5329cdbdca89e5749c34d5602753827c0aa2dc/pylint-2.4.4-py3-none-any.whl (302kB)  |████████████████████████████████| 307kB 57.3MB/s [?25hRequirement already satisfied: six==1.12 in /usr/local/lib/python3.6/dist-packages (from joeynmt==0.0.1) (1.12.0) Collecting wrapt==1.11.1 Downloading https://files.pythonhosted.org/packages/67/b2/0f71ca90b0ade7fad27e3d20327c996c6252a2ffe88f50a95bba7434eda9/wrapt-1.11.1.tar.gz Requirement already satisfied: tensorflow-estimator<2.3.0,>=2.2.0rc0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (2.2.0rc0) Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0) Requirement already satisfied: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.1.0) Requirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.6.3) Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.27.2) Requirement already satisfied: tensorboard<2.3.0,>=2.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (2.2.0) Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.2.0) Requirement already satisfied: google-pasta>=0.1.8 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.2.0) Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.9.0) Requirement already 
satisfied: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (2.10.0) Requirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (3.10.0) Requirement already satisfied: wheel>=0.26; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.34.2) Requirement already satisfied: scipy==1.4.1; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (1.4.1) Requirement already satisfied: gast==0.3.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=1.14->joeynmt==0.0.1) (0.3.3) Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (2.21.0) Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from torchtext->joeynmt==0.0.1) (4.38.0) Collecting mecab-python3 [?25l Downloading https://files.pythonhosted.org/packages/18/49/b55a839a77189042960bf96490640c44816073f917d489acbc5d79fa5cc3/mecab_python3-0.996.5-cp36-cp36m-manylinux2010_x86_64.whl (17.1MB)  |████████████████████████████████| 17.1MB 200kB/s [?25hCollecting portalocker Downloading https://files.pythonhosted.org/packages/64/03/9abfb3374d67838daf24f1a388528714bec1debb1d13749f0abd7fb07cfb/portalocker-1.6.0-py2.py3-none-any.whl Requirement already satisfied: typing in /usr/local/lib/python3.6/dist-packages (from sacrebleu>=1.3.6->joeynmt==0.0.1) (3.6.6) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.4.6) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (2.8.1) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (1.2.0) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->joeynmt==0.0.1) (0.10.0) Requirement already satisfied: pandas>=0.22.0 in /usr/local/lib/python3.6/dist-packages (from seaborn->joeynmt==0.0.1) (1.0.3) Collecting astroid<2.4,>=2.3.0 [?25l Downloading https://files.pythonhosted.org/packages/ad/ae/86734823047962e7b8c8529186a1ac4a7ca19aaf1aa0c7713c022ef593fd/astroid-2.3.3-py3-none-any.whl (205kB)  |████████████████████████████████| 215kB 61.3MB/s [?25hCollecting isort<5,>=4.2.5 [?25l Downloading https://files.pythonhosted.org/packages/e5/b0/c121fd1fa3419ea9bfd55c7f9c4fedfec5143208d8c7ad3ce3db6c623c21/isort-4.3.21-py2.py3-none-any.whl (42kB)  |████████████████████████████████| 51kB 7.8MB/s [?25hCollecting mccabe<0.7,>=0.6 Downloading https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (3.2.1) Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.7.2) Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.0.1) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from 
tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.6.0.post2) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (0.4.1) Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (1.24.3) Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (3.0.4) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2019.11.28) Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->torchtext->joeynmt==0.0.1) (2.8) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.22.0->seaborn->joeynmt==0.0.1) (2018.9) Collecting lazy-object-proxy==1.4.* [?25l Downloading https://files.pythonhosted.org/packages/0b/dd/b1e3407e9e6913cf178e506cd0dee818e58694d9a5cd1984e3f6a8b9a10f/lazy_object_proxy-1.4.3-cp36-cp36m-manylinux1_x86_64.whl (55kB)  |████████████████████████████████| 61kB 8.6MB/s [?25hCollecting typed-ast<1.5,>=1.4.0; implementation_name == "cpython" and python_version < "3.8" [?25l Downloading https://files.pythonhosted.org/packages/90/ed/5459080d95eb87a02fe860d447197be63b6e2b5e9ff73c2b0a85622994f4/typed_ast-1.4.1-cp36-cp36m-manylinux1_x86_64.whl (737kB)  |████████████████████████████████| 747kB 64.4MB/s [?25hRequirement already satisfied: cachetools<3.2,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (3.1.1) Requirement already satisfied: rsa<4.1,>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (4.0) Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (0.2.8) Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (1.3.0) Requirement already satisfied: pyasn1>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from rsa<4.1,>=3.1.4->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (0.4.8) Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow>=1.14->joeynmt==0.0.1) (3.1.0) Building wheels for collected packages: joeynmt, pyyaml, wrapt Building wheel for joeynmt (setup.py) ... [?25l[?25hdone Created wheel for joeynmt: filename=joeynmt-0.0.1-cp36-none-any.whl size=73768 sha256=89928a71dba6299fa590b2e3aa35c718986d92c848776139e99d4db0c8e19bf3 Stored in directory: /tmp/pip-ephem-wheel-cache-clor59d_/wheels/db/01/db/751cc9f3e7f6faec127c43644ba250a3ea7ad200594aeda70a Building wheel for pyyaml (setup.py) ... [?25l[?25hdone Created wheel for pyyaml: filename=PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl size=44621 sha256=2429b3effea1bb425377daef070f44a92967e98a656cc62766a78bb0b4b2b497 Stored in directory: /root/.cache/pip/wheels/a7/c1/ea/cf5bd31012e735dc1dfea3131a2d5eae7978b251083d6247bd Building wheel for wrapt (setup.py) ... 
[?25l[?25hdone Created wheel for wrapt: filename=wrapt-1.11.1-cp36-cp36m-linux_x86_64.whl size=67430 sha256=61f829831a03970770d2c7b2bec42178fd22cc683c18885c204fa19b3a0cf6b1 Stored in directory: /root/.cache/pip/wheels/89/67/41/63cbf0f6ac0a6156588b9587be4db5565f8c6d8ccef98202fc Successfully built joeynmt pyyaml wrapt Installing collected packages: mecab-python3, portalocker, sacrebleu, subword-nmt, pyyaml, wrapt, lazy-object-proxy, typed-ast, astroid, isort, mccabe, pylint, joeynmt Found existing installation: PyYAML 3.13 Uninstalling PyYAML-3.13: Successfully uninstalled PyYAML-3.13 Found existing installation: wrapt 1.12.1 Uninstalling wrapt-1.12.1: Successfully uninstalled wrapt-1.12.1 Successfully installed astroid-2.3.3 isort-4.3.21 joeynmt-0.0.1 lazy-object-proxy-1.4.3 mccabe-0.6.1 mecab-python3-0.996.5 portalocker-1.6.0 pylint-2.4.4 pyyaml-5.3.1 sacrebleu-1.4.6 subword-nmt-0.3.7 typed-ast-1.4.1 wrapt-1.11.1 ###Markdown Preprocessing the Data into Subword BPE Tokens- One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909).- It was also shown that by optimizing the umber of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685)- Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable. ###Code #TODO: Skip for retrain # One of the huge boosts in NMT performance was to use a different method of tokenizing. # Usually, NMT would tokenize by words. However, using a method called BPE gave amazing boosts to performance # Do subword NMT from os import path os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language # Learn BPEs on the training data. os.environ["data_path"] = path.join("joeynmt", "data", source_language + target_language) # Herman! ! subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt # Apply BPE splits to the development and test data. ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt # Create directory, move everyone we care about to the correct location ! mkdir -p $data_path ! cp train.* $data_path ! cp test.* $data_path ! cp dev.* $data_path ! cp bpe.codes.4000 $data_path ! ls $data_path # Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path ! cp train.* "$gdrive_path" ! cp test.* "$gdrive_path" ! cp dev.* "$gdrive_path" ! cp bpe.codes.4000 "$gdrive_path" ! ls "$gdrive_path" # Create that vocab using build_vocab ! sudo chmod 777 joeynmt/scripts/build_vocab.py ! 
joeynmt/scripts/build_vocab.py joeynmt/data/$src$tgt/train.bpe.$src joeynmt/data/$src$tgt/train.bpe.$tgt --output_path "$gdrive_path/vocab.txt" # Some output ! echo "BPE Xhosa Sentences" ! tail -n 5 test.bpe.$tgt ! echo "Combined BPE Vocab" ! tail -n 10 "$gdrive_path/vocab.txt" # Herman ###Output bpe.codes.4000 dev.efi test.bpe.en test.en-any.en train.efi dev.bpe.efi dev.en test.efi train.bpe.efi train.en dev.bpe.en test.bpe.efi test.en train.bpe.en 1000.hyps 4000.hyps dev.efi test.bpe.en train.bpe.en 2000.ckpt best.ckpt dev.en test.efi train.efi 2000.hyps bpe.codes.4000 models test.en train.en 3000.ckpt config.yaml src_vocab.txt test.en-any.en train.log 3000.hyps dev.bpe.efi tensorboard test.en-any.en.1 trg_vocab.txt 4000.ckpt dev.bpe.en test.bpe.efi train.bpe.efi validations.txt BPE Xhosa Sentences 18 , 19 . ( a ) Didie ke nditọete ke esop mbufo ẹkeme ndin̄wam fi ada san̄asan̄a ? “ Ndi@@ tie n̄kere se Mme N̄ke 27 : 11 , Matthew 26 : 5@@ 2 , ye John 13 : 35 ẹdọhọde ama an̄wam mi nt@@ etịm mb@@ iere ke ndid@@ ụk@@ ke ekọn̄ . Mme itie N̄wed Abasi emi ama anam esịt ana mi sụn̄ ke ini afanikọn̄ emi . ” — A@@ nd@@ ri@@ y emi otode Uk@@ ra@@ ine . “ Isaiah 2 : 4 ama an̄wam mi n̄ka iso nda san̄asan̄a ke ini idomo . Mma n@@ tie n̄kere nte uwem ed@@ inem@@ de ke obufa ererimbot , ke ini mme owo mîdi@@ d@@ aha n̄kpọ@@ ekọn̄ iw@@ ot owo . ” — W@@ il@@ m@@ er emi otode C@@ olo@@ mb@@ ia . Combined BPE Vocab ō ι ⁄ ◀ ˋ@@ /@@ ā Α@@ bless@@ ;@@ ###Markdown Creating the JoeyNMT ConfigJoeyNMT requires a yaml config. We provide a template below. We've also set a number of defaults with it, that you may play with!- We used Transformer architecture - We set our dropout to reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021))Things worth playing with:- The batch size (also recommended to change for low-resourced languages)- The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes)- The decoder options (beam_size, alpha)- Evaluation metrics (BLEU versus Crhf4) ###Code def get_last_checkpoint(directory): last_checkpoint = '' try: for filename in os.listdir(directory): if not 'best' in filename and filename.endswith(".ckpt"): if not last_checkpoint or int(filename.split('.')[0]) > int(last_checkpoint.split('.')[0]): last_checkpoint = filename except FileNotFoundError as e: print('Error Occur ', e) return last_checkpoint # Copy the created models from the temporary storage to main storage on google drive for persistant storage # the content of te folder will be overwrite when you start trainin !cp -r "/content/drive/My Drive/masakhane/model-temp/"* "$gdrive_path/models/${src}${tgt}_transformer/" last_checkpoint = get_last_checkpoint(models_path) print('Last checkpoint :',last_checkpoint) # This creates the config file for our JoeyNMT system. It might seem overwhelming so we've provided a couple of useful parameters you'll need to update # (You can of course play with all the parameters if you'd like!) 
name = '%s%s' % (source_language, target_language) gdrive_path = os.environ["gdrive_path"] # Create the config config = """ name: "{name}_transformer" data: src: "{source_language}" trg: "{target_language}" train: "{gdrive_path}/train.bpe" dev: "{gdrive_path}/dev.bpe" test: "{gdrive_path}/test.bpe" level: "bpe" lowercase: False max_sent_length: 100 src_vocab: "{gdrive_path}/vocab.txt" trg_vocab: "{gdrive_path}/vocab.txt" testing: beam_size: 5 alpha: 1.0 training: load_model: "{gdrive_path}/models/{name}_transformer/{last_checkpoint}" # TODO: uncommented to load a pre-trained model from last checkpoint random_seed: 42 optimizer: "adam" normalization: "tokens" adam_betas: [0.9, 0.999] scheduling: "plateau" # TODO: try switching from plateau to Noam scheduling patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds. learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer) learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer) decrease_factor: 0.7 loss: "crossentropy" learning_rate: 0.0003 learning_rate_min: 0.00000001 weight_decay: 0.0 label_smoothing: 0.1 batch_size: 4096 batch_type: "token" eval_batch_size: 3600 eval_batch_type: "token" batch_multiplier: 1 early_stopping_metric: "ppl" epochs: 3 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all validation_freq: 1000 # TODO: Set to at least once per epoch. logging_freq: 100 eval_metric: "bleu" model_dir: "{model_temp_dir}" overwrite: True # TODO: Set to True if you want to overwrite possibly existing models. shuffle: True use_cuda: True max_output_length: 100 print_valid_sents: [0, 1, 2, 3] keep_last_ckpts: 3 model: initializer: "xavier" bias_initializer: "zeros" init_gain: 1.0 embed_initializer: "xavier" embed_init_gain: 1.0 tied_embeddings: True tied_softmax: True encoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 decoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 """.format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language, model_temp_dir=model_temp_dir, last_checkpoint=last_checkpoint) with open("joeynmt/configs/transformer_{name}.yaml".format(name=name),'w') as f: f.write(config) ###Output _____no_output_____ ###Markdown Train the ModelThis single line of joeynmt runs the training using the config we made above ###Code # Train the model # You can press Ctrl-C to stop. And then run the next cell to save your checkpoints! !cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml # Copy the created models from the temporary storage to main storage on google drive for persistant storage !cp -r "/content/drive/My Drive/masakhane/model-temp/"* "$gdrive_path/models/${src}${tgt}_transformer/" # Output our validation accuracy ! 
cat "$gdrive_path/models/${src}${tgt}_transformer/validations.txt" # Test our model ! cd joeynmt; python3 -m joeynmt test "$gdrive_path/models/${src}${tgt}_transformer/config.yaml" ###Output 2020-04-07 21:16:45,174 Hello! This is Joey-NMT. 2020-04-07 21:17:10,964 dev bleu: 31.00 [Beam search decoding with beam size = 5 and alpha = 1.0] 2020-04-07 21:17:40,602 test bleu: 33.48 [Beam search decoding with beam size = 5 and alpha = 1.0]
homeworks/D047/Day_047_HW.ipynb
###Markdown
[Assignment focus] Understand how to use scikit-learn's hyper-parameter search to find the best hyper-parameters.

Assignment: use a different dataset and apply a hyper-parameter search to see whether you can find the best combination of hyper-parameters.
###Code
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split, KFold, GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor

digits = datasets.load_digits()
x_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=42)

clf = GradientBoostingRegressor(random_state=7)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print(metrics.mean_squared_error(y_test, y_pred))

n_estimators = [100, 200, 300]
max_depth = [1, 3, 5]
param_grid = dict(n_estimators=n_estimators, max_depth=max_depth)

grid_search = GridSearchCV(clf, param_grid, scoring="neg_mean_squared_error", n_jobs=-1, verbose=1)
grid_result = grid_search.fit(x_train, y_train)

# best_score_ is the best negative MSE found by the grid search, not a classification accuracy
print("Best score (neg MSE): %f using %s" % (grid_result.best_score_, grid_result.best_params_))

clf_bestparam = GradientBoostingRegressor(max_depth=grid_result.best_params_['max_depth'],
                                          n_estimators=grid_result.best_params_['n_estimators'])
clf_bestparam.fit(x_train, y_train)
y_pred = clf_bestparam.predict(x_test)
print(metrics.mean_squared_error(y_test, y_pred))
###Output
0.9725969690518025
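###Markdown
For larger search spaces, `RandomizedSearchCV` is a useful alternative to `GridSearchCV`: it samples a fixed number of parameter settings instead of trying every combination. The sketch below reuses the `x_train`/`y_train` split and the gradient-boosting regressor from the cells above; the parameter ranges are illustrative, not tuned values.
###Code
# Randomized hyper-parameter search: sample 10 settings instead of exhausting the grid.
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import GradientBoostingRegressor

param_distributions = {
    "n_estimators": randint(100, 400),   # sampled uniformly from [100, 400)
    "max_depth": randint(1, 6),
    "learning_rate": [0.01, 0.05, 0.1],
}

random_search = RandomizedSearchCV(
    GradientBoostingRegressor(random_state=7),
    param_distributions=param_distributions,
    n_iter=10,
    scoring="neg_mean_squared_error",
    random_state=42,
    n_jobs=-1,
)
random_result = random_search.fit(x_train, y_train)
print("Best score (neg MSE): %f using %s" % (random_result.best_score_, random_result.best_params_))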
scripts/crossref_doi_retrieval.ipynb
###Markdown
Trying to find DOIs through Crossref
###Code
# Imports used below; resultsDF (copied further down) is expected to be an existing
# DataFrame with 'doi', 'title' and 'year' columns.
import datetime
import string
import urllib.parse

import pandas as pd
import requests
from tqdm import tqdm

# Collect failed DOI attempts
failed_doi_attempts = []

# Define Crossref DOI finding function
def crossref_doi_find(row):
    """
    Requests Crossref with a title-year combination and returns the DOI if a good enough match is found.

    Args:
        row: DataFrame row containing year and title.

    Returns:
        string: Probable DOI of a paper
        None: If no good DOI could be found
    """
    headers = {"Accept": "application/json"}

    if pd.isna(row.year):
        return None
    if not 1800 <= row.year <= datetime.date.today().year + 2:
        return None
    year = str(int(row.year))

    try:
        title = urllib.parse.quote(row.title)
    except:
        print("Failed to encode title: ", row.title)
        failed_doi_attempts.append(row)
        return None

    url = 'https://api.crossref.org/works/?query.title=' + title + \
          '&filter=from-pub-date:' + year + ',until-pub-date:' + year
    r = requests.get(url, headers=headers)

    try:
        first_entry = r.json()['message']['items'][0]
        # str.translate needs a translation table to strip punctuation
        # (passing the bare punctuation string would silently leave the titles unchanged)
        punctuation_table = str.maketrans('', '', string.punctuation)
        title, found_title = [s.translate(punctuation_table).lower()
                              for s in [row.title, first_entry['title'][0]]]
        perfect_match = (title in found_title) or (found_title in title)
        if perfect_match:
            return first_entry["DOI"].lower()
    except:
        # JSON decoding error
        failed_doi_attempts.append(row)
        print("JSON failed to decode response for: " + row.title)
        return None
    return None

# Set here the dataframe for which you want to find missing DOIs
doiFixedDF = resultsDF.copy()

missing_doi_count = doiFixedDF.doi.isna().sum()
print("Requesting Crossref to infer %d missing DOIs" % missing_doi_count)

for i, row in tqdm(doiFixedDF[doiFixedDF.doi.isna()].iterrows(), total=missing_doi_count):
    doiFixedDF.loc[i, 'doi'] = crossref_doi_find(row)

fixed_doi_count = missing_doi_count - doiFixedDF.doi.isna().sum()
print("Out of %d initially missing DOIs, %d (%.2f%%) are found" %
      (missing_doi_count, fixed_doi_count, 100 * fixed_doi_count / missing_doi_count))

print("%d DOI attempts failed" % len(failed_doi_attempts))
if len(failed_doi_attempts) > 0:
    print("The following papers failed:")
    for row in failed_doi_attempts:
        print(row.title)
###Output
_____no_output_____
###Markdown
Fixing Unicode in DOIs and saving the file with fixed DOIs
###Code
# Fixing the Unicode encoding in some of the DOIs
doiFixedDF.loc[~doiFixedDF.doi.isna(), 'doi'] = doiFixedDF[~doiFixedDF.doi.isna()].doi.apply(urllib.parse.unquote)

# Saving the fixed DOI file
doiFixedDF.to_excel('../output/megameta_asreview_doi_retrieved.xlsx', index=False)
###Output
_____no_output_____
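###Markdown
Substring containment is a strict notion of a "perfect match", so titles that differ only in punctuation, subtitles or small typos can be rejected. If too many DOIs come back as `None`, a fuzzier comparison may help. The sketch below uses the standard-library `difflib` ratio with an illustrative 0.9 threshold (the threshold is an assumption, not anything Crossref prescribes); it could replace the `perfect_match` test inside `crossref_doi_find`.
###Code
# Fuzzy title comparison as a possible drop-in alternative to the substring check above.
from difflib import SequenceMatcher

def titles_match(title_a, title_b, threshold=0.9):
    """Return True if the two titles are similar enough; the threshold is illustrative."""
    a, b = title_a.lower().strip(), title_b.lower().strip()
    return SequenceMatcher(None, a, b).ratio() >= threshold

# Example usage:
print(titles_match("Deep learning", "Deep Learning."))   # True
print(titles_match("Deep learning", "Shallow parsing"))  # False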
examples/preload-labels.ipynb
###Markdown Preload Labels for 3KB WindowsIn this notebook we're using existing peak calls for **Encode e11.5's face and hindbrain** dataset and search for differentially expressed peaks. ###Code %load_ext autoreload %autoreload 2 import bbi import json import numpy as np import os import sqlite3 import sys import warnings ###Output _____no_output_____ ###Markdown **Configurations** ###Code MAX_PRELOADED_LABELS = 100 NUM_SEARCHES_TO_BE_PRELOADED = 10 CLEAR_DB = True ########################################### # Only change if you know what you're doing ########################################### base = "../" settings_filepath = "config-user-study-encode-e11-5-face-hindbrain.json" window_size = 3000 step_size = 1500 resolution = 25 # 1395142003 is the absolute offset of chr10 target_from = 1395142003 + 57039000 target_to = 1395142003 + 57042000 assert target_to - target_from == window_size # Minimum value to consider a peak annotation a peak for differential accessible peak annotations min_peak_val_diff = 0.75 # Minimum value to consider a peak annotation a peak for equally accessible peak annotations min_peak_val_same = 1 with open(os.path.join(base, settings_filepath), "r") as f: settings = json.load(f) signal_face = "data/ENCFF373NJX.bigWig" signal_hindbrain = "data/ENCFF943PHW.bigWig" narrow_peaks_face = "data/ENCFF545ITR.bigBed" narrow_peaks_hindbrain = "data/ENCFF007GMX.bigBed" broad_peaks_face = "data/ENCFF285BLZ.bigBed" broad_peaks_hindbrain = "data/ENCFF007GMX.bigBed" # Ignore warnings as they just pollute the output warnings.filterwarnings('ignore') # Enable importing modules from the parent directory module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) module_path = os.path.abspath(os.path.join('../experiments')) if module_path not in sys.path: sys.path.append(module_path) module_path = os.path.abspath(os.path.join('../server')) if module_path not in sys.path: sys.path.append(module_path) ###Output _____no_output_____ ###Markdown Extract windows ###Code from server.bigwig import chunk windows_face = chunk( signal_face, window_size, resolution, window_size // settings['step_freq'], settings['chroms'], verbose=True, ) windows_hindbrain = chunk( signal_hindbrain, window_size, resolution, window_size // settings['step_freq'], settings['chroms'], verbose=True, ) ###Output Extracted 87129 windows from chr10 with a max value of 1.0. Extracted 87129 windows from chr10 with a max value of 1.0. ###Markdown **Get the max signal per window** ###Code max_signal_face = np.max(windows_face, axis=1) max_signal_hindbrain = np.max(windows_hindbrain, axis=1) ###Output _____no_output_____ ###Markdown Find differentially accessible peaks much faster`chunk_beds_binary()` extracts only a binary value per window: `1` if a window contains an annotation, i.e., a peak, or `0` if not. 
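As a toy illustration of that binary reduction (the full chunking function for the real bigBed files follows in the next cell), a per-window coverage matrix such as the one `bbi.stackup` returns can be collapsed into one 0/1 flag per window. The array below is made up purely for the example.
###Code
# Sketch: collapse per-bin coverage counts into a binary "window contains a peak" flag.
import numpy as np

coverage = np.array([
    [0, 0, 3, 1, 0],   # window 0 overlaps an annotation
    [0, 0, 0, 0, 0],   # window 1 does not
    [2, 2, 0, 0, 0],   # window 2 overlaps an annotation
])

has_peak = (coverage > 0).any(axis=1).astype(int)
print(has_peak)  # [1 0 1]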
###Code def chunk_beds(bigbed): bins = 11 chrom_sizes = bbi.chromsizes(bigbed) chrom_size = chrom_sizes[settings['chroms'][0]] num_total_windows = np.ceil((chrom_size - window_size) / step_size).astype(int) + 1 num_windows = np.ceil((chrom_size - window_size) / step_size).astype(int) + 1 start_pos = np.arange(0, step_size * num_total_windows, step_size) end_pos = np.arange(window_size, step_size * num_total_windows + window_size, step_size) return bbi.stackup( bigbed, settings['chroms'] * num_total_windows, start_pos, end_pos, bins=bins, missing=0, oob=0, ).astype(int) peaks_face = chunk_beds(narrow_peaks_face) peaks_hindbrain = chunk_beds(narrow_peaks_hindbrain) print('Face peaks: {}'.format(np.sum(np.max(peaks_face[:,2:9], axis=1)))) print('Hindbrain peaks: {}'.format(np.sum(np.max(peaks_hindbrain[:,2:9], axis=1)))) diff_peaks = ( ( np.max(peaks_face[:,2:9], axis=1) + np.max(peaks_hindbrain[:,2:9], axis=1) == 1 ) & ( np.abs(np.sum(peaks_face[:,2:9], axis=1) - np.sum(peaks_hindbrain[:,2:9], axis=1)) > 2 ) ) print('Diff peaks: {}'.format(np.sum(diff_peaks))) same_peaks = ( np.max(peaks_face[:,2:9], axis=1) + np.max(peaks_hindbrain[:,2:9], axis=1) ) == 2 print('Same peaks: {}'.format(np.sum(same_peaks))) diff_peaks_win_ids = np.where(diff_peaks)[0] same_peaks_win_ids = np.where(same_peaks)[0] diff_peaks_with_max = diff_peaks & ((max_signal_face >= min_peak_val_diff) | (max_signal_hindbrain >= min_peak_val_diff)) diff_peaks_with_max_ids = np.where(diff_peaks_with_max)[0] print('Diff peaks with max val >= {}: {}'.format(min_peak_val_diff, np.sum(diff_peaks_with_max))) same_peaks_with_max = same_peaks & ((max_signal_face >= min_peak_val_same) | (max_signal_hindbrain >= min_peak_val_same)) same_peaks_with_max_ids = np.where(same_peaks_with_max)[0] print('Same peaks with max val >= {}: {}'.format(min_peak_val_same, np.sum(same_peaks_with_max))) ###Output Diff peaks with max val >= 0.75: 55 Same peaks with max val >= 1: 1201 ###Markdown Preload Search DB with some LabelsPreload at most `MAX_PRELOADED_LABELS` positive and negative differentially accessible peaks. We are limiting the number to not overrepresent negative examples as there seem to be many more peaks that are equally accessible. ###Code from ipywidgets.widgets import Checkbox clear_db = Checkbox(value=False, description='Clear DB (Make sure you know what you do!)') clear_db from server.config import Config from server.database import DB db_path = os.path.join(base, settings["db_path"]) if os.path.exists(db_path) and not clear_db.value: print('Database already exist. 
Check above to delete!') else: os.remove(db_path) DB(db_path=db_path, clear=True) with sqlite3.connect(db_path) as db: for search_id in range(1, NUM_SEARCHES_TO_BE_PRELOADED + 1): db.execute( """ INSERT INTO search(id, target_from, target_to, config) VALUES (?, ?, ?, ?); """, (int(search_id), int(target_from), int(target_to), json.dumps(settings)), ) for window_idx in np.random.choice( diff_peaks_with_max_ids, np.min((diff_peaks_with_max_ids.size, MAX_PRELOADED_LABELS)), replace=False ): db.execute( """ INSERT INTO classification(search_id, window_id, is_positive) VALUES (?, ?, ?); """, (int(search_id), int(window_idx), 1), ) for window_idx in np.random.choice( same_peaks_with_max_ids, np.min((same_peaks_with_max_ids.size, MAX_PRELOADED_LABELS)), replace=False ): db.execute( """ INSERT INTO classification(search_id, window_id, is_positive) VALUES (?, ?, ?); """, (int(search_id), int(window_idx), -1), ) db.commit() ###Output _____no_output_____ ###Markdown **Make sure to start the server first!** ###Code import requests import time for search_id in range(NUM_SEARCHES_TO_BE_PRELOADED, 0, -1): r = requests.post( url = f'http://localhost:5000/api/v1/classifier/?s={search_id}' ) time.sleep(5) r = requests.post( url = f'http://localhost:5000/api/v1/progress/?s={search_id}&u=1' ) time.sleep(5) ###Output _____no_output_____
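###Markdown
After preloading, a quick sanity check against the SQLite database shows how many positive and negative labels each search received. This sketch only relies on the `search` and `classification` tables and the `db_path` variable created above.
###Code
# Sanity check: count preloaded positive and negative labels per search.
import sqlite3

with sqlite3.connect(db_path) as db:
    rows = db.execute(
        """
        SELECT search_id,
               SUM(CASE WHEN is_positive = 1 THEN 1 ELSE 0 END) AS positives,
               SUM(CASE WHEN is_positive = -1 THEN 1 ELSE 0 END) AS negatives
        FROM classification
        GROUP BY search_id
        ORDER BY search_id;
        """
    ).fetchall()

for search_id, positives, negatives in rows:
    print(f"search {search_id}: {positives} positive, {negatives} negative labels")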
DATA_690_3_Assignment3.ipynb
###Markdown
Create a new module that has:
- myFibonacci() function that implements the Fibonacci series using recursion. Refer to the textbook, lecture slides and Jupyter notebook.
- When the module is imported, set a global variable that is used to count the number of recursions the function makes.
- printNumRecursions() function to return the number of total recursions made in the myFibonacci() function

Modify applyToEach() to use the Python map() function
###Code
# Create a new module
%%writefile fibassignment3.py
def fib(x):
    """ Assumes x an int >= 0
        Returns Fibonacci of x"""
    global numFibCalls   # Global Statement
    numFibCalls += 1
    if x == 0 or x == 1:
        return 1
    else:
        return fib(x-1) + fib(x-2)

def testFib(n):
    for i in range(n+1):
        global numFibCalls   # Try removing global numFibCalls or numFibCalls
        numFibCalls = 0      # numFibCalls reset
        print("fib of ", i, '=', fib(i))   # this module defines fib(), not myFibonacci()
        print("fib called", numFibCalls, 'times.')

# List of files
!ls

# Content
!cat fibassignment3.py

# Import a file and its function
from fibassignment3 import testFib
testFib(6)

# Write a new file called FibA, rename the function to myFibonacci, and add a new function called printNumRecursions
%%writefile FibA.py
TotalRecursion = 0   # set at import time; used to count the recursions

def myFibonacci(x):
    """ Assumes x an int >= 0
        Returns Fibonacci of x"""
    global TotalRecursion   # Global Statement
    TotalRecursion += 1
    if x == 0 or x == 1:
        return 1
    else:
        return myFibonacci(x-1) + myFibonacci(x-2)

def printNumRecursions():
    global TotalRecursion
    return TotalRecursion

def testFib(n):
    for i in range(n+1):
        global TotalRecursion
        TotalRecursion = 0   # TotalRecursion resets
        print("Fib of ", i, '=', myFibonacci(i))
        print("Fib called", printNumRecursions(), 'times.')

# Import a file and call a function
from FibA import testFib
testFib(6)
###Output
Fib of 0 = 1
Fib called 1 times.
Fib of 1 = 1
Fib called 1 times.
Fib of 2 = 2
Fib called 3 times.
Fib of 3 = 3
Fib called 5 times.
Fib of 4 = 5
Fib called 9 times.
Fib of 5 = 8
Fib called 15 times.
Fib of 6 = 13
Fib called 25 times.
###Markdown
Creation of two functions for the Fibonacci and factorial
###Code
# Assumes n is any number, which could be negative or a float. If it is a float
# or a negative number, it returns 0. Otherwise, it returns the Fibonacci of n.
# The Fibonacci functions above were not used because they do not handle negative
# and float numbers, so a new function has to be written.
def fibo(n):
    if n == 0 or n == 1:
        return 1
    elif n < 0:
        return 0
    else:
        return fibo(n-1) + fibo(n-2)   # recurse on fibo itself

# Assumes n is any number, which could be negative or a float. If it is a float
# or a negative number, it returns 0. Otherwise, it returns the factorial of n.
# A new function has to be written to handle negatives and floats.
def factR(n):
    if n == 0 or n == 1:
        return 1
    elif n < 0:
        return 0
    else:
        return n*factR(n - 1)
###Output
_____no_output_____
###Markdown
Modify applyToEach() to use the Python map() function
- Assumes L is a list and f a function; mutates L by replacing each element, e, of L by f(e).
- Another alternative is "i = list(map(f, L))", which means there is no need to indent the next line (the print) because there is no for loop.
- The two functions above are used for the factorial and Fibonacci since the list contains float and negative numbers, for which the returned value is zero.
###Code def applyToEach(L, f): for i in map(f, L): print(i, end = ' ') L = [1, -2, 3.33, 2, 3, 5, 0] print('L =', L) print('\n') print('Apply abs to each element of L.') applyToEach(L, abs) print("\n") print('Apply int to each element of L.') applyToEach(L, int) print('\n') print('Apply factorial to each element of L.') applyToEach(L, factR) print('\n') print('Apply fib to each element of L.') applyToEach(L, fibo) ###Output L = [1, -2, 3.33, 2, 3, 5, 0] Apply abs to each element of L. 1 2 3.33 2 3 5 0 Apply int to each element of L. 1 -2 3 2 3 5 0 Apply factorial to each element of L. 1 0 0.0 2 6 120 1 Apply fib to each element of L. 1 0 0 2 3 8 1
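###Markdown
A natural follow-up to counting recursions is to see how memoization collapses the call count. The sketch below is a standalone illustration (it does not modify FibA.py): it uses the same base cases and the same counting idea, but caches previously computed values in a dictionary.
###Code
# Standalone sketch: memoized Fibonacci with a call counter, for comparison with FibA.py.
numCalls = 0

def fibMemo(x, memo=None):
    """Assumes x an int >= 0. Returns Fibonacci of x, caching results in memo."""
    global numCalls
    numCalls += 1
    if memo is None:
        memo = {}
    if x == 0 or x == 1:
        return 1
    if x not in memo:
        memo[x] = fibMemo(x - 1, memo) + fibMemo(x - 2, memo)
    return memo[x]

for i in range(7):
    numCalls = 0
    print("Fib of", i, "=", fibMemo(i))
    print("Fib called", numCalls, "times.")   # grows roughly linearly, e.g. 11 calls for i = 6 instead of 25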
assignment 2/assignment2.ipynb
###Markdown Data Import and Preprocessing ###Code df = pd.read_csv("train.csv") df.head() # year, month, day-of-the-week, time-of-the-day might be useful parameters for prediction df['year'] = df['datetime'].str.extract("^(.{4})") df['month'] = df['datetime'].str.extract("-(.{2})-") df['day'] = df['datetime'].str.extract("(.{2}) ") df['time'] = df['datetime'].str.extract(" (.{2})") df.head() df[['year', 'month', 'day', 'time']] = df[['year', 'month', 'day', 'time']].astype(int) #drop the unique datatime column df = df.drop(labels=["datetime"], axis=1) df_original=df.copy() ###Output _____no_output_____ ###Markdown Clustering ###Code def set_colors(labels, colors='rgbykcmw'): colored_labels = [] for label in labels: if (label < 0) or (label > 6): colored_labels.append(colors[7]) else: colored_labels.append(colors[label]) return colored_labels ###Output _____no_output_____ ###Markdown K-Means CluseringI cluster the bike sharing demand dataset using K-means method and DBSCAN method ###Code # Fit a k-means estimator estimator = KMeans(n_clusters=8) X = df_original[["humidity","temp" ,"casual"]] estimator.fit(X) # Clusters are given in the labels_ attribute labels = estimator.labels_ #colors = set_colors(labels) fig = plt.figure() ax = Axes3D(fig) ax.scatter(df_original["humidity"], df_original["temp"], df_original["casual"],c=labels.astype(np.float), edgecolor='k') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('humidity') ax.set_ylabel('temp') ax.set_zlabel('casual') ax.set_title('8 clusters') ax.dist = 12 #plt.scatter(df_original["humidity"], df_original["casual"], c=colors) #plt.xlabel("humidity") #plt.ylabel("casual") #plt.show() # Fit a k-means estimator estimator = KMeans(n_clusters=3) X = df_original[["humidity","temp" ,"casual"]] estimator.fit(X) # Clusters are given in the labels_ attribute labels = estimator.labels_ #colors = set_colors(labels) fig = plt.figure() ax = Axes3D(fig) ax.scatter(df_original["humidity"], df_original["temp"], df_original["casual"],c=labels.astype(np.float), edgecolor='k') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) ax.set_xlabel('humidity') ax.set_ylabel('temp') ax.set_zlabel('casual') ax.set_title('3 clusters') ax.dist = 12 #plt.scatter(df_original["humidity"], df_original["casual"], c=colors) #plt.xlabel("humidity") #plt.ylabel("casual") #plt.show() ###Output _____no_output_____ ###Markdown DBSCAN Clustering ###Code #### from sklearn.cluster import DBSCAN #Fit a DBSCAN estimator estimator = DBSCAN(eps=5, min_samples=10) X = df_original[["humidity", "count"]] estimator.fit(X) # Clusters are given in the labels_ attribute labels = estimator.labels_ #print Counter(labels) colors = set_colors(labels) plt.scatter(df_original["humidity"], df_original["count"], c=colors) plt.xlabel("humidity") plt.ylabel("count") plt.show() from sklearn.cluster import DBSCAN #Fit a DBSCAN estimator estimator = DBSCAN(eps=3, min_samples=14) X = df_original[["humidity", "temp"]] estimator.fit(X) # Clusters are given in the labels_ attribute labels = estimator.labels_ #print Counter(labels) colors = set_colors(labels) plt.scatter(df_original["humidity"], df_original["temp"], c=colors) plt.xlabel("humidity") plt.ylabel("temp") plt.show() ###Output _____no_output_____ ###Markdown Dummy Variable ###Code df= df.drop(labels=["casual", "registered"], axis=1) # convert ordinal categorical variables into multiple dummy variables # get dummy variables for season df['season'].value_counts() 
df = df.join(pd.get_dummies(df.season, prefix='season')) df = df.drop(labels=["season"], axis=1) df['weather'].value_counts() df = df.join(pd.get_dummies(df.weather, prefix='weather')) df= df.drop(labels=["weather"], axis=1) ###Output _____no_output_____ ###Markdown Multi-colinearity AnalysisWe can see from the above heatmap, atemp variable and temp variable are highly correlated, the correlation coefficent is almost equal to 1. Therefroe, we need to delete one of the varibales to avoid Multi-colinearity Analysis. ###Code corrmat=df.corr() mask = np.array(corrmat) mask[np.tril_indices_from(mask)] = False fig,ax= plt.subplots() fig.set_size_inches(20,20) sn.heatmap(corrmat, mask=mask,vmax=.8, square=True,annot=True) df= df.drop(labels=["atemp"], axis=1) target = df['count'].values predictors = df.drop(labels=["count"], axis=1) predictors.shape #the datset we are going to use as predictor has 17 rows and 10886 colomns #target is the response array ###Output _____no_output_____ ###Markdown Linear regression Dataset split ###Code #split the df dataset into train and test dataset predictors=sm.add_constant(predictors) x_train,x_test,y_train,y_test=train_test_split(predictors,target,test_size=0.2,random_state=1) ###Output _____no_output_____ ###Markdown Backward Elimination Implement backward elimination to select significant predictors of the linear regression modelWe first include all the predictors into the model and fit a full logistic regression, then we elmiminate the predictors step by step accroding to the p-value. If the p-value is larger than the significance level (0.05),we delete the variable form our model. Finally, we get a set of predictors that are significant: 'const','temp','humidity','windspeed','year','month','time','season_1','season_2','season_3','season_4','weather_1','weather_2','weather_3','weather_4'.The RMSE of the simple linear regression we fit is 143.90089223868023 ###Code x_train_opt=x_train[['const','holiday','workingday','temp','humidity','windspeed','year', 'month','day','time','season_1','season_2','season_3','season_4','weather_1','weather_2','weather_3','weather_4']] #backward elimination: select a significance level to stay in the model (pvalue<=0.05) regressor=sm.OLS(endog=y_train,exog=x_train_opt).fit() #fit the full model with all possible predictors regressor.summary() x_train_opt=x_train[['const','holiday','temp','humidity','windspeed','year', 'month','day','time','season_1','season_2','season_3', 'season_4','weather_1','weather_2','weather_3','weather_4']] #backward elimination: select a significance level to stay in the model (pvalue<=0.05) regressor=sm.OLS(endog=y_train,exog=x_train_opt).fit() #fit the full model with all possible predictors regressor.summary() x_train_opt=x_train[['const','holiday','temp','humidity','windspeed','year', 'month','time','season_1','season_2','season_3', 'season_4','weather_1','weather_2','weather_3','weather_4']] #backward elimination: select a significance level to stay in the model (pvalue<=0.05) regressor=sm.OLS(endog=y_train,exog=x_train_opt).fit() #fit the full model with all possible predictors regressor.summary() x_train_opt=x_train[['const','temp','humidity','windspeed','year', 'month','time','season_1','season_2','season_3', 'season_4','weather_1','weather_2','weather_3','weather_4']] #backward elimination: select a significance level to stay in the model (pvalue<=0.05) regressor=sm.OLS(endog=y_train,exog=x_train_opt).fit() #fit the full model with all possible predictors regressor.summary() 
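# The manual elimination rounds above can also be automated: repeatedly drop the
# predictor with the largest p-value until every remaining p-value is at or below 0.05.
# This is only a sketch of the same backward-elimination procedure (it assumes the
# x_train/y_train split and the statsmodels import used in the surrounding cells).
import statsmodels.api as sm

def backward_eliminate(X, y, significance_level=0.05):
    columns = list(X.columns)
    while True:
        model = sm.OLS(endog=y, exog=X[columns]).fit()
        candidate_p = model.pvalues.drop('const', errors='ignore')   # never drop the intercept
        if len(candidate_p) == 0 or candidate_p.max() <= significance_level:
            return columns, model
        columns.remove(candidate_p.idxmax())

selected_columns, auto_model = backward_eliminate(x_train, y_train)
print(selected_columns)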
x_test_opt=x_test[['const','temp','humidity','windspeed','year', 'month','time','season_1','season_2','season_3', 'season_4','weather_1','weather_2','weather_3','weather_4']] linreg = LinearRegression() linreg.fit(x_train_opt, y_train) y_pred = linreg.predict(x_test_opt) plt.scatter(y_test, y_pred) plt.xlabel("bike demands: $Y_i$") plt.ylabel("Predicted bike demands: $\hat{y}_i$") plt.title("bike demands vs Predicted demands: $Y_i$ vs $\hat{y}_i$") from sklearn import metrics rmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred)) rmse ###Output _____no_output_____ ###Markdown K-Fold Cross Validation for Linear Regression ###Code predictors_opt=predictors[['const','temp','humidity','windspeed','year', 'month','time','season_1','season_2','season_3', 'season_4','weather_1','weather_2','weather_3','weather_4']] #K Fold cross validation from sklearn.cross_validation import KFold predictors_opt_np = predictors_opt.as_matrix() #generate new np array datasets:predictors_opt_np and target_np target_np=target.copy() kf = KFold(len(predictors_opt_np), n_folds=5) scores = [] for train_index, test_index in kf: linreg.fit(predictors_opt_np[train_index],target_np[train_index]) scores.append(np.sqrt(metrics.mean_squared_error(target_np[test_index], linreg.predict(predictors_opt_np[test_index])))) scores np.mean(scores) np.median(scores) np.std(scores) def show_stats(m, ncv, cv): print('Method: %s' %m) print('RMSE on no CV training: %.3f' %ncv) print('RMSE on 5-fold CV: %.3f' %cv) show_stats('Simple Linear Regression',rmse ,np.mean(scores)) ###Output Method: Simple Linear Regression RMSE on no CV training: 143.901 RMSE on 5-fold CV: 140.056 ###Markdown The RMSE on no CV training is bigger than the RMSE on 5-fold CV.We can conclude that the linear regression fits better with K-fold cross validation. 
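The manual fold loop above can also be expressed with scikit-learn's `cross_val_score`; the sketch below is an equivalent formulation, not a different evaluation. Note that `cross_val_score` is imported from `sklearn.model_selection` in newer scikit-learn versions, while `sklearn.cross_validation` (used above for `KFold`) is the older, deprecated location.
###Code
# Sketch: 5-fold CV RMSE for the same linear model without writing the fold loop by hand.
from sklearn.model_selection import cross_val_score   # on older versions: sklearn.cross_validation

neg_mse_scores = cross_val_score(linreg, predictors_opt_np, target_np,
                                 scoring='neg_mean_squared_error', cv=5)
rmse_scores = np.sqrt(-neg_mse_scores)
print(rmse_scores)
print(rmse_scores.mean())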
###Markdown
Regularization of linear regression
###Code
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet, SGDRegressor

ridge = Ridge(fit_intercept=True, alpha=0.5)
ridge.fit(x_train_opt, y_train)
y_pred_ridge = ridge.predict(x_test_opt)

plt.scatter(y_test, y_pred_ridge)
plt.xlabel("Bike demands: $Y_i$")
plt.ylabel("Predicted bike demands $\hat{y}_i$")
plt.title("Ridge Regression - Bike demands vs Predicted bike demands: $Y_i$ vs $\hat{y}_i$")

rmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred_ridge))
rmse
###Output
_____no_output_____
###Markdown
K-Fold Cross-validation for Ridge Linear Regression
###Code
kf = KFold(len(predictors_opt_np), n_folds=5)
scores = []
for train_index, test_index in kf:
    ridge.fit(predictors_opt_np[train_index], target_np[train_index])
    # score the ridge model (not linreg) on the held-out fold
    scores.append(np.sqrt(metrics.mean_squared_error(target_np[test_index], ridge.predict(predictors_opt_np[test_index]))))
scores
np.mean(scores)
###Output
_____no_output_____
###Markdown
The Choice of Alpha in Ridge Regression
###Code
print('Ridge Regression')
print('alpha\t RMSE_train\t RMSE_cv\n')
alpha = np.linspace(.01, 20, 50)
t_rmse = np.array([])
cv_rmse = np.array([])

for a in alpha:
    ridge = Ridge(fit_intercept=True, alpha=a)

    # computing the RMSE on training data
    ridge.fit(x_train_opt, y_train)
    y_pred = ridge.predict(x_test_opt)
    err = y_pred - y_test
    # Dot product of error vector with itself gives us the sum of squared errors
    total_error = np.dot(err, err)
    rmse_train = np.sqrt(total_error/len(y_pred))

    # computing RMSE using 5-fold cross validation
    kf = KFold(len(predictors_opt_np), n_folds=5)
    xval_err = 0
    for train, test in kf:
        ridge.fit(predictors_opt_np[train], target_np[train])
        y_pred = ridge.predict(predictors_opt_np[test])
        err = y_pred - target_np[test]
        xval_err += np.dot(err, err)
    rmse_cv = np.sqrt(xval_err/len(predictors_opt_np))

    t_rmse = np.append(t_rmse, [rmse_train])
    cv_rmse = np.append(cv_rmse, [rmse_cv])
    print('{:.3f}\t {:.4f}\t\t {:.4f}'.format(a, rmse_train, rmse_cv))

import pylab as pl
pl.plot(alpha, t_rmse, label='RMSE-Train')
pl.plot(alpha, cv_rmse, label='RMSE_Cross_Val')
pl.legend( ('Ridge RMSE-Train', 'Ridge RMSE_Cross_Val') )
pl.ylabel('RMSE')
pl.xlabel('Alpha')
pl.show()
###Output
_____no_output_____
###Markdown
Logistic regression
###Code
predictors
target
columns = ['count']
target_bi = pd.DataFrame(target, columns=columns)

# Create a binary response from the numerical response
# The binary response is 1 when the count response is larger than the median
# The binary response is 0 when the count response is smaller than the median
from statistics import median
target_bi['target_bi'] = np.zeros(len(target_bi))
target_bi.loc[df['count'] <= median(np.array(target_bi['count'])), 'target_bi'] = 0
target_bi.loc[df['count'] > median(np.array(target_bi['count'])), 'target_bi'] = 1
target_bi['target_bi'] = target_bi['target_bi'].astype('category')
target_bi
###Output
_____no_output_____
###Markdown
Dataset Split
###Code
x_train_log, x_test_log, y_train_log, y_test_log = train_test_split(predictors, target_bi, test_size=.2, random_state=0)
y_train_log = y_train_log['target_bi'].values
y_test_log = y_test_log['target_bi'].values
###Output
_____no_output_____
###Markdown
Forward Stepwise Variable Selection

We add variables to our logistic model step by step according to the accuracy rate of the model. First we compare the accuracy rate of each model with one variable as the predictor and add the variable with the largest accuracy rate to our model. Then we add further variables to the model together with the predictor selected in the previous step.
We will stop the process when the maximum accuracy rate is no longer larger than the maximum accuracy rate of last step.The predictor we select are: 'time','temp','season_4','humidity','weather_3','season_2','year'. ###Code from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score from sklearn.metrics import roc_curve, auc def caculate_auc(features): X_full = np.array(x_train_log[features]) Y_full = np.array(y_train_log) logit = LogisticRegression() res_logit = logit.fit(X_full, Y_full) Y_pred_full = res_logit.predict(X_full) false_positive_rate, true_positive_rate, thresholds = roc_curve(Y_full, Y_pred_full) return auc(false_positive_rate, true_positive_rate) features = list(predictors.columns) print(features) # first round iteration for p in features: auc_res = caculate_auc([p]) print([p], auc_res) #pick the highest value "time" features.remove('time') print(features) # second round iteration for p in predictors: pred_sec = ['time'] pred_sec.append(p) auc_res = caculate_auc(pred_sec) print(pred_sec, auc_res) #pick 'temp' features.remove('temp') print(features) # third round iteration for p in features: pred_third = ['time','temp'] pred_third.append(p) auc_res = caculate_auc(pred_third) print(pred_third, auc_res) #pick 'season_4' features.remove('season_4') print(features) # forth round iteration for p in features: pred_forth = ['time','temp','season_4'] pred_forth.append(p) auc_res = caculate_auc(pred_forth) print(pred_forth, auc_res) #remove 'humidity' features.remove('humidity') print(features) # fifth round iteration for p in features: pred_fifth = ['time','temp','season_4','humidity'] pred_fifth.append(p) auc_res = caculate_auc(pred_fifth) print(pred_fifth, auc_res) #remove 'weather_3' features.remove('weather_3') print(features) # sixth round iteration for p in features: pred_sixth = ['time','temp','season_4','humidity','weather_3'] pred_sixth.append(p) auc_res = caculate_auc(pred_sixth) print(pred_sixth, auc_res) #remove 'season_2' features.remove('season_2') print(features) # seventh round iteration for p in features: pred_seventh = ['time','temp','season_4','humidity','weather_3','season_2'] pred_seventh.append(p) auc_res = caculate_auc(pred_seventh) print(pred_seventh, auc_res) #remove 'season_3' features.remove('season_3') print(features) # eighth round iteration for p in features: pred_eighth = ['time','temp','season_4','humidity','weather_3','season_2','season_3'] pred_eighth.append(p) auc_res = caculate_auc(pred_eighth) print(pred_eighth, auc_res) #remove 'year' features.remove('year') print(features) # ninth round iteration for p in features: pred_ninth = ['time','temp','season_4','humidity','weather_3','season_2','season_3','year'] pred_ninth.append(p) auc_res = caculate_auc(pred_ninth) print(pred_ninth, auc_res) # None of the result is greater than the maximum in the ninth iteration result. #So we can conclude the best predictor combination is #['time', 'temp', 'season_4', 'humidity', 'weather_3', 'season_2', 'season_3', 'year',] _features=['time','temp','season_4','humidity','weather_3','season_2','season_3','year'] x_train_log=x_train_log[_features] x_test_log=x_test_log[_features] x_train_log['intercept'] = 1.0 x_test_log['intercept'] = 1.0 predictors_log=predictors[_features] predictors_log['intercept']=1.0 ###Output _____no_output_____ ###Markdown Significance AnalysisFrom the summary of the logisic regression, we can see that the p_value of the season_3 variable is quite large. 
So we can make a conclusion that the season_3 variable is not siginificant. We need to delete the variable from our logistic model. ###Code log_model = sm.Logit(y_train_log, x_train_log).fit() log_model.summary() x_train_log=x_train_log.drop('season_3', axis=1) x_test_log=x_test_log.drop('season_3',axis=1) lr = LogisticRegression(C=1e9) # Logistic regression metrics LRm = lr.fit(x_train_log, y_train_log) LRm.predict_proba(x_test_log) LRm.coef_ LRm.intercept_ LRm.decision_function(x_test_log) # Predict confidence scores for samples. y_pred=LRm.predict(x_test_log) ###Output _____no_output_____ ###Markdown Confusion Matrix ###Code confusion_matrix(y_test_log, y_pred) # Accuracy, precision and recall print("Accuracy full:", np.round(accuracy_score(y_test_log, y_pred), 3)) print("Precision full:", np.round(precision_score(y_test_log, y_pred), 3)) print("Recall full:", np.round(recall_score(y_test_log, y_pred), 3)) ###Output Accuracy full: 0.795 Precision full: 0.812 Recall full: 0.784 ###Markdown K-Fold Cross-Validation of Logistic Regression ###Code #cross validation k=5 acc=[] for i in range(k): x_train, x_test, y_train, y_test = train_test_split(predictors_log, target_bi, test_size= 0.2, random_state=i) # Logistic regression metric LRm = lr.fit(x_train, y_train['target_bi']) a=metrics.accuracy_score(y_test['target_bi'], LRm.predict(x_test)) acc.append(a) print (acc) print (np.mean(acc)) print (np.std(acc)) ###Output [0.79568411386593207, 0.78512396694214881, 0.80348943985307619, 0.79292929292929293, 0.79384756657483935] 0.794214876033 0.00587551635374
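###Markdown
ROC Curve

The `roc_curve` and `auc` helpers imported earlier can summarise the classifier with a single curve. The sketch below refits a fresh logistic regression on `x_train_log`/`y_train_log` and evaluates it on the held-out `x_test_log`/`y_test_log` split, because the cross-validation loop above leaves `LRm` fitted on a different feature set (`predictors_log`).
###Code
# Sketch: ROC curve and AUC for the logistic model on the held-out split.
import matplotlib.pyplot as plt

roc_model = LogisticRegression(C=1e9).fit(x_train_log, y_train_log)
probs = roc_model.predict_proba(x_test_log)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test_log, probs)
roc_auc = auc(fpr, tpr)

plt.plot(fpr, tpr, label='AUC = %.3f' % roc_auc)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve for the logistic regression model')
plt.legend()
plt.show()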
ipynb/US-Kentucky.ipynb
###Markdown United States: Kentucky* Homepage of project: https://oscovida.github.io* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kentucky.ipynb) ###Code import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview(country="US", region="Kentucky"); # load the data cases, deaths, region_label = get_country_data("US", "Kentucky") # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 500 rows pd.set_option("max_rows", 500) # display the table table ###Output _____no_output_____ ###Markdown Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kentucky.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))-------------------- ###Code print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # to force a fresh download of data, run "clear_cache()" print(f"Notebook execution took: {datetime.datetime.now()-start}") ###Output _____no_output_____ ###Markdown United States: Kentucky* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kentucky.ipynb) ###Code import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview(country="US", region="Kentucky", weeks=5); overview(country="US", region="Kentucky"); compare_plot(country="US", region="Kentucky"); # load the data cases, deaths = get_country_data("US", "Kentucky") # get population of the region for future normalisation: inhabitants = population(country="US", region="Kentucky") print(f'Population of country="US", region="Kentucky": {inhabitants} people') # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 1000 rows pd.set_option("max_rows", 1000) # display the table table ###Output _____no_output_____ ###Markdown Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kentucky.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open 
source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))-------------------- ###Code print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # to force a fresh download of data, run "clear_cache()" print(f"Notebook execution took: {datetime.datetime.now()-start}") ###Output _____no_output_____ ###Markdown United States: Kentucky* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kentucky.ipynb) ###Code import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview(country="US", region="Kentucky", weeks=5); overview(country="US", region="Kentucky"); compare_plot(country="US", region="Kentucky"); # load the data cases, deaths = get_country_data("US", "Kentucky") # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 500 rows pd.set_option("max_rows", 500) # display the table table ###Output _____no_output_____ ###Markdown Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/US-Kentucky.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))-------------------- ###Code print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # to force a fresh download of data, run "clear_cache()" print(f"Notebook execution took: {datetime.datetime.now()-start}") ###Output _____no_output_____
tmp/Topic Modeling.ipynb
###Markdown Book Recommedation Topic Modeling using LDA LDA - Unsupervised ML modelLDA predicts propotion of topics each document(input)Idea is to make recommedation based on inequality of topic propotions predicted by model ###Code import pandas as pd import numpy as np import json import nltk import re import csv import matplotlib.pyplot as plt from tqdm import tqdm ###Output _____no_output_____ ###Markdown Load CMU book summary dataset for Training modelhttp://www.cs.cmu.edu/~dbamman/booksummaries.html ###Code records = [] with open("booksummaries.txt", 'r',encoding="utf8") as f: data_reader = csv.reader(f, dialect='excel-tab') for row in tqdm(data_reader): records.append(row) ###Output 16559it [00:00, 30035.20it/s] ###Markdown strip summary info as we only need summary for training ###Code summary = [] #summary is present at index 6 according to CMU site for i in tqdm(records): summary.append(i[6]) books = pd.DataFrame({'summary': summary}) books.head(2) ###Output 100%|███████████████████████████████████████████████████████████████████████| 16559/16559 [00:00<00:00, 1598505.83it/s] ###Markdown remove stopwords and punctuationsand Lemmatize words into base form ###Code from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer import string stop = set(stopwords.words('english')) exclude = set(string.punctuation) lemma = WordNetLemmatizer() def clean(doc): stop_free = " ".join([i for i in doc.lower().split() if i not in stop]) punc_free = ''.join(ch for ch in stop_free if ch not in exclude) normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split()) return normalized doc_clean = [clean(doc).split() for doc in tqdm(books['summary'])] ###Output 100%|███████████████████████████████████████████████████████████████████████████| 16559/16559 [00:17<00:00, 954.23it/s] ###Markdown Building Dictionary ###Code import gensim # Creating the term dictionary of our courpus, where every unique term is assigned an index. dictionary = gensim.corpora.Dictionary(doc_clean) # convert our books_summaries into numerical data using doc to bag of words doc_term_matrix = [dictionary.doc2bow(doc) for doc in tqdm(doc_clean)] ###Output 100%|██████████████████████████████████████████████████████████████████████████| 16559/16559 [00:02<00:00, 6537.76it/s] ###Markdown Creation and training of LDA ###Code # Creating the object for LDA model using gensim library Lda = gensim.models.ldamodel.LdaModel ###Output _____no_output_____ ###Markdown Set parameters of LDA ###Code num_topics = 5 passes = 5 eta = [0.01]*len(dictionary.keys()) alpha = [0.01]*num_topics ldamodel = Lda(doc_term_matrix, num_topics=num_topics, id2word = dictionary, passes=passes, alpha=alpha,eta=eta) ###Output _____no_output_____ ###Markdown Prediction using Trained Model ###Code def predict(summary): clean_summary = clean(summary).split() doc_trm_matrix = dictionary.doc2bow(clean_summary) return ldamodel[doc_trm_matrix] ###Output _____no_output_____ ###Markdown 1984 George Orwell ###Code book_1984 = """A man loses his identity while living under a repressive regime. \ In a story based on George Orwell's classic novel, Winston Smith (John Hurt) is a government employee whose job involves the rewriting of history in a manner that casts his fictional country's leaders in a charitable light. 
\ His trysts with Julia (Suzanna Hamilton) provide his only measure of enjoyment, but lawmakers frown on the relationship -- and in this closely monitored society, there is no escape from Big Brother.""" print('for Book : 1984') print(predict(book_1984)) ###Output for Book : 1984 [(2, 0.2600291), (3, 0.73927027)] ###Markdown Harry Potter and the Sorcerer's Stone J K Rowling ###Code book_harry1 = """Adaptation of the first of J.K. Rowling's popular children's novels about Harry Potter, \ a boy who learns on his eleventh birthday that he is the orphaned son of two powerful wizards\ and possesses unique magical powers of his own. He is summoned from his life as an unwanted child to \ become a student at Hogwarts, an English boarding school for wizards. There, \ he meets several friends who become his closest allies \ and help him discover the truth about his parents' mysterious deaths.""" print('for Book : Harry Potter and the Sorcerer\'s Stone') print(predict(book_harry1)) ###Output for Book : Harry Potter and the Sorcerer's Stone [(1, 0.3311144), (2, 0.08902878), (3, 0.16001026), (4, 0.41962406)]
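###Markdown
To turn these per-book topic proportions into a recommendation signal, the sparse `(topic, probability)` lists returned by `predict()` can be compared directly. The sketch below densifies two predictions and computes a Hellinger distance between them (smaller means the two books look more similar to the model); the choice of Hellinger distance is illustrative, and any divergence between the two distributions would work.
###Code
# Sketch: compare two books by the distance between their predicted topic distributions.
import numpy as np

def to_dense(sparse_topics, n_topics=num_topics):
    """Convert gensim's [(topic_id, prob), ...] output into a dense probability vector."""
    dense = np.zeros(n_topics)
    for topic_id, prob in sparse_topics:
        dense[topic_id] = prob
    return dense

def hellinger(p, q):
    return np.sqrt(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2))

vec_1984 = to_dense(predict(book_1984))
vec_harry = to_dense(predict(book_harry1))
print("Hellinger distance (1984 vs Harry Potter):", hellinger(vec_1984, vec_harry))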
dae/topics_vis_demo.ipynb
###Markdown This notebook is a demonstration of how to use our topic model to:1. Obtain topics2. Visualize topic transition heat maps3. Reproduce topic transition chain/graphs ###Code # import dependencies import sys import matplotlib import matplotlib.pyplot as plt import random import torch from torch import nn, optim from torch.autograd import Function import numpy as np import os import pickle import data_analysis_utils from data_analysis_utils import (prepare_text_for_lda, build_reduced_glove_dict, convert_token_2_ids, text_to_topic, compute_topic_transition_matrix, filtere_off_unk_topics) from collections import Counter import dae_model from dae_model import DictionaryAutoencoder import time ###Output _____no_output_____ ###Markdown GPU required ###Code device = 'cuda:0' ###Output _____no_output_____ ###Markdown Fix random seed for python, numpy and torch to ensure reproducibility ###Code random_seed = 42 random.seed(random_seed) np.random.seed(random_seed) torch.manual_seed(random_seed) ###Output _____no_output_____ ###Markdown Load pretrained topic model and stored storium data ###Code final_data_path = './final_pooled' # change to your local dir # pretrained model (the model is trained on pooled data of entries and challenges) with open(os.path.join(final_data_path, 'no_filter_train_30_10.pt'), 'rb') as f: model = torch.load(f, map_location=device) # put the model on specified device model.to(device) # simply load without parallelism model.device = device model.eval() # read the dictionary of entries and challenges pooled by story and then by worlds with open( os.path.join(final_data_path, 'text_by_story_world_dict.pkl'), 'rb') as f: text_by_story_world_dict = pickle.load(f) # read the saved vocab (word embedding + word2id dict) used by the topic model embedding_matrix_np = np.load(os.path.join(final_data_path, 'embedding_matrix.npy')) with open(os.path.join(final_data_path, 'word2id_dict.pkl'), 'rb') as f: word2id_dict = pickle.load(f) # load the previously annotated world labels (and keep track of the unknown topics' id) with open( os.path.join(final_data_path, 'topics.txt'), 'r') as f: lines = f.readlines() topics_summarized = [] idx_filter_off = [] for idx, line in enumerate( lines ): line_split = line.strip().split(': ')[1] topics_summarized.append(line_split) # load the previously inferred topic transition matrices in each world config = 'role' with open(os.path.join( final_data_path, f'topic_transition_matrix_by_world_dict_{config}.pkl'), 'rb') as f: topic_transition_matrix_by_world_dict = pickle.load(f) # count the number of stories in each world world_counter = Counter() for world, stories in text_by_story_world_dict.items(): world_counter.update({world: len(stories)}) print(f'Finished loading the model and text (from {len(text_by_story_world_dict)} worlds in total)') ###Output Finished loading the model and text (from 59 worlds in total) ###Markdown Visualize the topics ###Code with open( os.path.join( final_data_path, 'to_be_removed.pkl' ), 'rb' ) as f: to_be_removed = pickle.load(f) # this is a list of words to be removed from interpreting topc model.rank_vocab_for_topics(word_embedding_matrix=embedding_matrix_np, to_be_removed=to_be_removed) ###Output topic 0 : melodramatic, reenactment, film, thriller, backstory, narration, melodrama, storyline, subplots, subplot topic 1 : delicious, d'oeuvres, lunch, brunch, dinner, appetizer, gourmet, buffet, meal, breakfast topic 2 : beneficiary, liabilities, percentage, average, cent, million, income, total, billion, percent 
topic 3 : freighter, crewmembers, cockpit, airship, propulsion, spaceship, vessel, aboard, aircraft, spacecraft topic 4 : chlorine, gaseous, isotope, temperature, hydrogen, contaminant, runoff, spectrometer, particulate, biological topic 5 : thine, fealty, uphold, valor, infantry, dominion, nobility, sword, valour, righteousness topic 6 : shrimp, sauce, roast, truffle, avocado, cheese, chicken, garlic, beef, salad topic 7 : melodic, likeable, crunchy, tasty, witty, wonderfully, delicious, flavorful, personable, catchy topic 8 : unconvincing, boisterous, overwrought, dissonant, melodramatic, strident, bombastic, cacophonous, overblown, raucous topic 9 : aunt, niece, widower, sister, granddaughter, wife, married, mother, stepmother, daughter topic 10 : milk, liqueur, drink, garlic, cheese, juice, sauce, syrup, yogurt, teaspoon topic 11 : sublight, biosphere, aetheric, gravitational, livable, inhabitable, seawater, microgravity, ionize, habitable topic 12 : terran, overwatch, hone, cyberware, situational, sniper, quarterstaff, tactical, marksmanship, interpersonal topic 13 : touchscreen, portable, laptop, dataport, speakerphone, monitor, headset, projector, bluetooth, wireless topic 14 : greenish, iridescent, yellowish, translucent, vegetation, colored, reddish, whitish, grayish, foliage topic 15 : graham, custody, ledger, adoption, silas, eddie, lovey, stewart, cullen, unclaimed topic 16 : oncoming, ramp, exit, diagonally, roundabout, lunge, uphill, clockwise, uppercut, swerving topic 17 : championship, showdown, congrats, fight, coalition, victory, squad, undefeated, rematch, team topic 18 : wrist, zipper, thigh, strap, shoulder, sternum, cuff, waist, waistband, ankle topic 19 : costume, handbag, sequined, poncho, sequin, raincoat, polka, bodysuit, tote, jumpsuit topic 20 : lethal, fireball, inflict, weapon, assault, deadly, combat, damage, attack, melee topic 21 : rotunda, courtyard, staircase, foyer, tomb, plinth, golem, basement, gargoyle, sarcophagus topic 22 : convoy, hiking, river, reconnaissance, roads, patrol, coastline, coast, airfield, coastal topic 23 : breathe, throaty, raspy, tremble, birdsong, voice, breathy, hoarse, stillness, guttural topic 24 : insatiable, hairy, cock, whore, babe, busty, blonde, pussy, brunette, horny topic 25 : prefer, really, can, have, want, probably, think, could, would, might topic 26 : efficiently, manually, compile, restart, rebuild, optimize, iterate, manage, synchronize, automate topic 27 : tumor, ligament, laceration, mortem, hemorrhage, clotting, diagnose, amputate, scabbed, contusion topic 28 : participate, teens, singing, sessions, socialise, socialising, socializing, dancing, group, dance topic 29 : bless, dishonor, pray, repentance, shalt, kindness, beseech, righteousness, sanctify, repent topic 30 : liminal, supernatural, mythological, fantastical, formless, mythic, mythical, mysterious, unknowable, mystical topic 31 : seizing, ransacking, selling, counterfeit, buying, allegedly, peddling, looting, hoarding, stealing topic 32 : synchronization, decryption, device, apparatus, configuration, firewall, installation, encryption, authentication, security topic 33 : wounding, o'clock, eyewitness, shootout, gunfight, gunshot, hours, murder, homicide, massacre topic 34 : surfing, youtube, skiing, aquatic, habitat, species, mammal, fauna, hiking, wildlife topic 35 : sorrow, compassion, kindness, selflessness, heartbreak, gratitude, heartache, generosity, gratefulness, happiness topic 36 : johnson, florida, london, potion, magick, rune, morgan, 
denver, magic, levitation topic 37 : obfuscate, impersonate, outsmart, assassinate, hideout, outwit, infiltrate, encrypt, decrypt, unmask topic 38 : scumbag, billion, bullshit, fuck, scumbags, bankrupt, buisness, fuckin, shit, shithole topic 39 : brake, soldering, heater, corrosion, headlight, wiring, welding, electrical, plumbing, ignition topic 40 : mailing, send, notification, mail, notify, caller, phone, telephone, voicemail, email topic 41 : espionage, discovery, malware, smuggle, foiling, evasion, forgery, smuggling, vaccination, vaccine topic 42 : paperweight, dominoes, blackjack, chessboard, championship, pennant, card, cards, poker, tournament topic 43 : rueful, grimace, shyly, smiling, scowl, frown, smile, grin, quizzical, smirk topic 44 : commentator, veteran, actor, game, announcer, tournament, skipper, captain, season, player topic 45 : analyze, investigate, analyse, uncover, factual, thorough, forensic, investigative, investigating, investigation topic 46 : snapshot, julian, bailey, laura, klick, slideshow, naval, antoine, dither, thumbnail topic 47 : explanation, undergrad, spelling, reasoning, mathematics, theory, physics, grammar, homework, maths topic 48 : symptom, chronic, shortness, dizziness, fasting, bowel, insomnia, vomiting, nausea, abdominal topic 49 : university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach ###Markdown Visualize different "worlds" ###Code labels = [] sizes = [] size_remaining = 0 for idx, world_tuple in enumerate(world_counter.most_common()): if idx == 0: continue if idx <=7: labels.append(world_tuple[0]) sizes.append(world_tuple[1]) else: size_remaining += world_tuple[1] labels.append('Others') sizes.append(size_remaining) explode = [0] * len(sizes) explode[0] = 0.1 explode[1] = 0.1 explode[2] = 0.1 fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.title('Various Universes in Storium') plt.show() ###Output _____no_output_____ ###Markdown visualize the topic transition adjacency matrix in each worldWe use the world "Fantasy Classic" as an example but there are 58 other worlds in the dataset. ###Code world_name = 'Fantasy Classic' topic_transition_matrix = topic_transition_matrix_by_world_dict[world_name] fig, ax = plt.subplots() im = ax.imshow(topic_transition_matrix) # We want to show all ticks... num_topics = model.K ax.set_xticks(np.arange(num_topics)) ax.set_yticks(np.arange(num_topics)) # # Rotate the tick labels and set their alignment. 
plt.setp(ax.get_xticklabels(), rotation=90, ha="right", size=2, rotation_mode="anchor") ax.set_title(f"Inter-topic transition in {world_name}") fig.tight_layout() plt.show() # PLOT_SINGLE_CHAIN = True def plot_single_chain(starting): starting_topic = topics_summarized[starting] # if starting_topic[:3]== 'unk': # continue print(f'Starting from topic {starting} {topics_summarized[starting]}: ') for idx, (world_name, _) in enumerate(world_counter.most_common()): if idx == 0: continue topic_transition_matrix = topic_transition_matrix_by_world_dict[world_name] # traverse through the topic transition matrix track = [] next = starting track.append(next) while len(track) < 2: all_candidates = topic_transition_matrix[next] desc_order = np.argsort(all_candidates) if next == desc_order[-1]: next = desc_order[-2] else: next = desc_order[-1] # pick the highest-weight topic other than the diagonal (self-transition) track.append(next) track_str = [] for i in track: track_str.append(topics_summarized[i]) # print(f'In the world {world_name}, starting from {topics_summarized[starting]}:') print(f'[{world_name}]: ' + ' --> '.join(track_str)) print() if idx >= 7: break ###Output _____no_output_____ ###Markdown Looking at topic transitions. Since we have the topic transition matrices in different worlds, we can observe and plot transitions by investigating the question: given a particular topic, what is the next most probable topic that follows, in different worlds? Here we start from the topic on education, topic 49, as an example. We can also look at more topics than just the single most probable one (see the short sketch at the end of this notebook); for more details, please check world_vis.py ###Code # Starting from topic 49 plot_single_chain(49) ###Output Starting from topic 49 university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach: [Fantasy Classic]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> thine, fealty, uphold, valor, infantry, dominion, nobility, sword, valour, righteousness [Urban Fantasy]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> analyze, investigate, analyse, uncover, factual, thorough, forensic, investigative, investigating, investigation [The Mysterious Island]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> breathe, throaty, raspy, tremble, birdsong, voice, breathy, hoarse, stillness, guttural [Space Adventure]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> convoy, hiking, river, reconnaissance, roads, patrol, coastline, coast, airfield, coastal [Cyberpunk]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> oncoming, ramp, exit, diagonally, roundabout, lunge, uphill, clockwise, uppercut, swerving [Occult Pulp Horror]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> wrist, zipper, thigh, strap, shoulder, sternum, cuff, waist, waistband, ankle [Steampunk]: university, academy, faculty, school, veterinary, coaching, assistant, undergraduate, graduate, coach --> convoy, hiking, river, reconnaissance, roads, patrol, coastline, coast, airfield, coastal
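###Markdown The chain above follows only the single most probable successor of a topic in each world. Below is a minimal sketch, not part of the original code, of how the top-k successors could be inspected instead; it reuses the `topic_transition_matrix_by_world_dict` and `topics_summarized` objects loaded earlier, and the helper name `top_k_next_topics` and the choice of k are illustrative assumptions. ###Code
def top_k_next_topics(world_name, starting, k=3):
    # Sketch only (not from the original notebook): list the k strongest successor topics.
    # Transition matrix for this world, loaded earlier in the notebook
    matrix = topic_transition_matrix_by_world_dict[world_name]
    row = matrix[starting]
    # Candidate next topics sorted by transition weight, highest first
    order = np.argsort(row)[::-1]
    # Skip the self-transition (the diagonal) and keep the k strongest successors
    successors = [t for t in order if t != starting][:k]
    return [(int(t), topics_summarized[t], float(row[t])) for t in successors]

# Example: the three strongest successors of topic 49 in 'Fantasy Classic'
for t, label, weight in top_k_next_topics('Fantasy Classic', 49, k=3):
    print(f'topic {t} ({weight:.3f}): {label}')
###Output _____no_output_____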
Chap3_Adap_Mar_Plan/Chap3_Modeling_Theory.ipynb
###Markdown Adaptive Market Planning Narrative: There is a broad class of problems that involves allocating some resource to meet an uncertain (sometimes unobservable) demand. Examples: **Stocking a perishable inventory (e.g. fresh fish) to meet a demand where leftover inventory cannot be held for the future.** **Allocating annual budgets for activities such as marketing, where left-over funds are returned to the company.** The "newsvendor problem" is the canonical problem in optimization under uncertainty. Newsvendor problem $$\max_{x} \; \mathbf{E}\, F(x, W) = \mathbf{E} \{p\min\{x,W\}-cx\}$$ * where $x$ is our decision variable that determines the amount of resource purchased to meet the demand * where $W$ is the uncertain demand for the resource * we assume that we "purchase" our resource at a unit cost of $c$ * we sell the smaller of $x$ and $W$ at a price $p$ * $p$ is assumed greater than $c$ If $W$ were deterministic (and $p>c$), the solution is easily verified to be $x=W$. Now imagine that $W$ is a random variable with: * probability distribution $f^W(w)$ * cumulative distribution $F^W(w) = Prob[W \leq w]$ * Then we can compute $F(x) = \mathbf{E} \; F(x, W)$ The optimal solution $x^{*}$ would then satisfy: $$\left.\frac{dF(x)}{dx}\right|_{x=x^*} = 0$$ Now consider the stochastic gradient, where we take the derivative of $F(x, W)$ assuming we know $W$, which is given by: $$\frac{dF(x,W)}{dx} =\left\{ \begin{array}{ll} p-c, & x\leq W \\ -c, & x > W \end{array}\right.$$ * Taking expectations: $$\mathbf{E}\frac{dF(x,W)}{dx} = (p-c)\,Prob[x \leq W] - c\,Prob[x>W] = (p-c)(1-F^W(x))-cF^W(x) = (p-c) - pF^W(x) = 0 \quad \text{for } x = x^*$$ We can now solve for $F^W(x^*)$, giving: $$F^W(x^*) = \frac{p-c}{p}$$ **This chapter**: we are going to address the single-dimensional newsvendor, where we assume that the distribution of the demand $W$ is unknown, but demand can be observed. Basic Model We are going to solve this problem using a classical method called a stochastic gradient algorithm. The sequence is: we pick $x^n$, then observe $W^{n+1}$, and then compute $\nabla F(x^n, W^{n+1})$, updating: $$x^{n+1} = x^{n} + \alpha_{n} \nabla F(x^n, W^{n+1})$$ where $\alpha_n$ is known as a stepsize. It has been shown that we obtain asymptotic optimality, $$\lim_{n \to +\infty} x^n = x^*,$$ if the stepsize $\alpha_n$ satisfies: $$\alpha_n > 0, \quad \sum_{n=1}^{\infty}\alpha_n = \infty, \quad \sum_{n=1}^{\infty}(\alpha_n)^2 < \infty.$$ The second condition ensures that the stepsizes do not shrink so quickly that we stall out on the way to the optimum, while the last condition ensures that the variance of our estimate of $x^*$ shrinks to zero. State variable: For our stochastic gradient algorithm, our state variable is given by: $$S^n = (x^n)$$ Decision variable: ***Stepsize policy*** As with all of our sequential decision problems, the decision (that is, the stepsize) is determined by what is typically referred to as a stepsize rule, sometimes called a stepsize policy, that we denote by $\alpha^{\pi}(S^n)$. A simple example is the harmonic stepsize rule: $$\alpha^{harmonic} (S^n|\theta^{step}) = \frac{\theta^{step}}{\theta^{step} + n -1}$$ Exogenous information The exogenous information is the random demand $W^{n+1}$ for the resource (product, time or money) that we are trying to meet with our supply of product $x^n$. We may assume that we observe $W^{n+1}$ directly.
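Before turning to the packaged model below, here is a minimal, self-contained sketch of the stochastic gradient update with the harmonic stepsize rule described above. It is not part of the original code base: the exponential demand with mean 100, the illustrative price and cost values, and the helper name `newsvendor_stochastic_gradient` are assumptions for this sketch, with the exponential demand chosen so that the analytical benchmark is $x^* = -100\ln(c/p)$, the same formula used as the "Analytical solution" later in this notebook. ###Code
import numpy as np

def newsvendor_stochastic_gradient(price, cost, theta_step, n_iterations, rng):
    # Sketch (not the packaged AdaptiveMarketPlanningPolicy): iterate
    # x^{n+1} = x^n + alpha_n * dF(x^n, W^{n+1})/dx with the harmonic stepsize rule.
    x = 0.0
    for n in range(1, n_iterations + 1):
        # Assumed demand model for illustration: exponential with mean 100
        W = rng.exponential(scale=100.0)
        # Stochastic gradient of F(x, W) = p*min(x, W) - c*x with respect to x
        grad = (price - cost) if x <= W else -cost
        # Harmonic stepsize alpha_n = theta / (theta + n - 1)
        alpha = theta_step / (theta_step + n - 1)
        # Update and keep the order quantity non-negative
        x = max(0.0, x + alpha * grad)
    return x

rng = np.random.default_rng(0)
p, c = 26.0, 20.0  # illustrative price and unit cost (p > c)
x_learned = newsvendor_stochastic_gradient(p, c, theta_step=20.0, n_iterations=5000, rng=rng)
x_star = -100.0 * np.log(c / p)  # critical fractile F^W(x*) = (p-c)/p for Exp(mean=100)
print(f"learned order quantity: {x_learned:.1f}, analytical optimum: {x_star:.1f}")
###Output _____no_output_____ ###Markdown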
Transition Function $$x^{n+1} = x^n + \alpha_n \nabla_x F(x^n, W^{n+1})$$ Objective Function ***Net benefit at each iteration*** $$F(x^n, W^{n+1}) = p \min \{x^n, W^{n+1}\} - cx^n$$ ***Maximize the total reward over some horizon*** $$\max_{\pi} \; \mathbf{E} \left\{ \sum_{n=0}^{N-1}F(X^{\pi}(S^n), W^{n+1}) \,\middle|\, S^0 \right\}$$ subject to the transition $$S^{n+1} = S^M(S^n, X^{\pi}(S^n), W^{n+1})$$ Coding Part ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Changing the path to the functions folder ###Code import os os.chdir("/home/peyman/Documents/PhD_UiS/seqdec_powell_repo/Chap3_Adap_Mar_Plan/functions") from AdaptiveMarketPlanningModel import AdaptiveMarketPlanningModel from AdaptiveMarketPlanningPolicy import AdaptiveMarketPlanningPolicy os.chdir("/home/peyman/Documents/PhD_UiS/seqdec_powell_repo/Chap3_Adap_Mar_Plan/data") raw_data= pd.read_excel("Base_parameters.xlsx", sheet_name="parameters", usecols=["Parameter", "Value"]) raw_data cost=raw_data["Value"][0] trial_size = raw_data["Value"][1] price = raw_data["Value"][2] theta_step = raw_data["Value"][3] T = raw_data["Value"][4] reward_type = raw_data["Value"][5] if __name__ == "__main__": # this is an example of creating a model and running a simulation for a certain trial size # define state variables state_names = ['order_quantity', 'counter'] init_state = {'order_quantity': 0, 'counter': 0} decision_names = ['step_size'] # read in variables from excel file #file = 'Base parameters.xlsx' #raw_data = pd.ExcelFile(file) raw_data= pd.read_excel("Base_parameters.xlsx", sheet_name="parameters", usecols=["Parameter", "Value"]) cost=raw_data["Value"][0] trial_size = raw_data["Value"][1] price = raw_data["Value"][2] theta_step = raw_data["Value"][3] T = raw_data["Value"][4] reward_type = raw_data["Value"][5] # initialize model and store ordered quantities in an array M = AdaptiveMarketPlanningModel(state_names, decision_names, init_state, T,reward_type, price, cost) P = AdaptiveMarketPlanningPolicy(M, theta_step) rewards_per_iteration = [] learning_list_per_iteration = [] for ite in list(range(trial_size)): print("Starting iteration ", ite) reward,learning_list = P.run_policy() M.learning_list=[] #print(learning_list) rewards_per_iteration.append(reward) learning_list_per_iteration.append(learning_list) print("Ending iteration ", ite," Reward ",reward) nElem = np.arange(1,trial_size+1) rewards_per_iteration = np.array(rewards_per_iteration) rewards_per_iteration_sum = rewards_per_iteration.cumsum() rewards_per_iteration_cum_avg = rewards_per_iteration_sum/nElem if (reward_type=="Cumulative"): rewards_per_iteration_cum_avg = rewards_per_iteration_cum_avg/T rewards_per_iteration = rewards_per_iteration/T optimal_order_quantity = -np.log(cost/price) * 100 print("Optimal order_quantity for price {} and cost {} is {}".format(price,cost,optimal_order_quantity)) print("Reward type: {}, theta_step: {}, T: {} - Average reward over {} iterations is: {}".format(reward_type,theta_step,T,trial_size,rewards_per_iteration_cum_avg[-1])) ite = np.random.randint(0,trial_size) order_quantity = learning_list_per_iteration[ite] print("Order quantity for iteration {}".format(ite)) print(order_quantity) # Plotting the reward fig1, axsubs = plt.subplots(1,2,sharex=True,sharey=True) fig1.suptitle("Reward type: {}, theta_step: {}, T: {}".format(reward_type,theta_step,T) ) axsubs[0].plot(nElem, rewards_per_iteration_cum_avg, 'g') axsubs[0].set_title('Cum_average reward') axsubs[1].plot(nElem, rewards_per_iteration, 'g') axsubs[1].set_title('Reward 
per iteration') #Create a big subplot ax = fig1.add_subplot(111, frameon=False) # hide tick and tick label of the big axes plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False) ax.set_ylabel('USD', labelpad=0) # Use argument `labelpad` to move label downwards. ax.set_xlabel('Iterations', labelpad=10) plt.show() # ploting the analytical sol plt.xlabel("Time") plt.ylabel("Order quantity") plt.title("Analytical vs learned ordered quantity - (iteration {})".format(ite)) time = np.arange(0, len(order_quantity)) plt.plot(time, time * 0 - np.log(cost/price) * 100, label = "Analytical solution") plt.plot(time, order_quantity, label = "Kesten's Rule for theta_step {}".format(theta_step)) plt.legend() plt.show() ###Output Starting iteration 0 t 1, Price 26, Demand 314.8739889239466, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 295.7859843857034, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 2.328314555822787, order_quantity 12.244897959183675, contribution 60.536178451392466 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 28.705248888447677, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 92.61075136482178, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 25.911979303246806, order_quantity 30.612244897959187, contribution 673.711461884417 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 12.969956495286967, order_quantity 36.734693877551024, contribution 337.21886887746115 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 6.0033004817091715, order_quantity 42.85714285714286, contribution 156.08581252443847 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 191.97800196472627, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 74.41971122461433, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 70.63009761492481, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 177.69310802559443, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 88.42532367595607, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 107.55531491558239, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 25.27053172699819, order_quantity 85.71428571428572, contribution 657.0338249019529 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 9.626529292855272, order_quantity 91.83673469387756, contribution 250.28976161423708 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 1.5833331207166401, order_quantity 97.9591836734694, contribution 41.16666113863264 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 36.17844195489686, order_quantity 104.08163265306123, contribution 940.6394908273184 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 12.00976399352421, order_quantity 110.20408163265307, contribution 312.2538638316295 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 5.264451655657649, order_quantity 
116.32653061224491, contribution 136.8757430470989 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 10.04264926411079, order_quantity 122.44897959183675, contribution 261.1088808668805 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 40.84581887215743, order_quantity 128.57142857142858, contribution 1061.9912906760933 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 71.85490264168487, order_quantity 134.69387755102042, contribution 1868.2274686838068 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 81.41885316616086, order_quantity 140.81632653061226, contribution 2116.8901823201822 step 1.0204081632653061 derivative 6 Ending iteration 0 Reward 2116.8901823201822 Starting iteration 1 t 1, Price 26, Demand 32.14513855840667, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 90.83494650076909, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 149.62670485623363, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 81.56658496095478, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 48.31775260753281, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 141.6454842573513, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 10.251413802025251, order_quantity 36.734693877551024, contribution 266.53675885265653 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 108.67755852418193, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 117.34943782289558, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 15.274451689163977, order_quantity 55.102040816326536, contribution 397.1357439182634 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 35.61256821029146, order_quantity 61.22448979591837, contribution 925.9267734675778 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 12.90117842686974, order_quantity 67.34693877551021, contribution 335.4306390986132 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 11.263350276909874, order_quantity 73.46938775510205, contribution 292.8471071996567 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 0.3711539148093755, order_quantity 79.59183673469389, contribution 9.650001785043763 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 120.62297227031438, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 196.78870692640257, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 171.55437801960082, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 83.39466099644063, order_quantity 104.08163265306123, contribution 2168.2611859074564 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 71.55940613364413, order_quantity 110.20408163265307, contribution 1860.5445594747475 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 19.17732066502898, order_quantity 116.32653061224491, contribution 
498.6103372907535 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 17.58893510372801, order_quantity 122.44897959183675, contribution 457.31231269692825 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 159.12376218393243, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 58.17350897800585, order_quantity 134.69387755102042, contribution 1512.511233428152 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 19.538612179342262, order_quantity 140.81632653061226, contribution 508.0039166628988 step 1.0204081632653061 derivative 6 Ending iteration 1 Reward 508.0039166628988 Starting iteration 2 t 1, Price 26, Demand 4.432658206176764, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 165.30641636258895, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 19.06380613882751, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 158.7236016077536, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 18.184324356105996, order_quantity 24.48979591836735, contribution 472.7924332587559 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 107.80810319292505, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 20.22832251886964, order_quantity 36.734693877551024, contribution 525.9363854906106 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 187.146511013431, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 67.8209415729502, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 17.28952880941249, order_quantity 55.102040816326536, contribution 449.52774904472477 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 31.75868619183671, order_quantity 61.22448979591837, contribution 825.7258409877544 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 23.96170716752028, order_quantity 67.34693877551021, contribution 623.0043863555273 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 76.32106259008472, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 5.108934092294564, order_quantity 79.59183673469389, contribution 132.83228639965864 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 129.5123128388002, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 142.92247730223067, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 7.185956921911572, order_quantity 97.9591836734694, contribution 186.83487996970086 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 29.745437026909862, order_quantity 104.08163265306123, contribution 773.3813626996564 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 18.921106119888336, order_quantity 110.20408163265307, contribution 491.94875911709676 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 368.9466763361944, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 
derivative 6 t 21, Price 26, Demand 274.18584602020786, order_quantity 122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 46.61579098725777, order_quantity 128.57142857142858, contribution 1212.010565668702 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 3.011332063454097, order_quantity 134.69387755102042, contribution 78.29463364980653 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 87.18911254941344, order_quantity 140.81632653061226, contribution 2266.9169262847495 step 1.0204081632653061 derivative 6 Ending iteration 2 Reward 2266.9169262847495 Starting iteration 3 t 1, Price 26, Demand 214.11681232579616, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 63.21314678329871, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 37.7133608769428, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 50.09450101052112, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 130.08710964222433, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 4.630732572654592, order_quantity 30.612244897959187, contribution 120.3990468890194 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 45.35633397410373, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 152.73560714805416, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 63.380375405121804, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 223.54213356460076, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 23.53830590021698, order_quantity 61.22448979591837, contribution 611.9959534056414 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 92.7080466866632, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 24.762387530969203, order_quantity 73.46938775510205, contribution 643.8220758051992 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 64.28612485938847, order_quantity 79.59183673469389, contribution 1671.4392463441002 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 105.7582414978528, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 20.804025799408738, order_quantity 91.83673469387756, contribution 540.9046707846272 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 44.41367856172228, order_quantity 97.9591836734694, contribution 1154.7556426047793 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 92.57593560469259, order_quantity 104.08163265306123, contribution 2406.9743257220075 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 37.148164128123085, order_quantity 110.20408163265307, contribution 965.8522673312002 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 407.42260239134396, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 
81.58701983381606, order_quantity 122.44897959183675, contribution 2121.2625156792174 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 13.50729939955609, order_quantity 128.57142857142858, contribution 351.1897843884583 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 189.9876099843742, order_quantity 134.69387755102042, contribution 3502.040816326531 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 0.6007593492458815, order_quantity 140.81632653061226, contribution 15.619743080392919 step 1.0204081632653061 derivative 6 Ending iteration 3 Reward 15.619743080392919 Starting iteration 4 t 1, Price 26, Demand 63.480991167470705, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 32.75626867681084, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 13.142349740526921, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 259.48515772361094, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 151.41109230252584, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 81.7417446482246, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 97.8692708205758, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 117.57436411200008, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 244.60276499854632, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 45.29196818064789, order_quantity 55.102040816326536, contribution 1177.5911726968452 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 288.54244421623343, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 47.07995977644027, order_quantity 67.34693877551021, contribution 1224.078954187447 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 98.4493672801954, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 137.86216140480443, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 67.31084867800514, order_quantity 85.71428571428572, contribution 1750.0820656281337 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 137.77514827369717, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 54.51640213121463, order_quantity 97.9591836734694, contribution 1417.4264554115805 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 10.300298429038754, order_quantity 104.08163265306123, contribution 267.8077591550076 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 314.28107217404175, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 111.39907163975636, order_quantity 116.32653061224491, contribution 2896.3758626336653 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 131.79334274030978, order_quantity 
122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 280.9262732365692, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 14.267406018912535, order_quantity 134.69387755102042, contribution 370.9525564917259 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 72.85385288908358, order_quantity 140.81632653061226, contribution 1894.200175116173 step 1.0204081632653061 derivative 6 Ending iteration 4 Reward 1894.200175116173 Starting iteration 5 t 1, Price 26, Demand 34.6134188494429, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 337.15394363193525, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 37.246234389376845, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 4.045620494502602, order_quantity 18.367346938775512, contribution 105.18613285706765 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 171.85551900805848, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 147.16525189794868, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 260.1553789375819, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 5.415769044095973, order_quantity 42.85714285714286, contribution 140.8099951464953 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 94.07533445221506, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 81.41292109564375, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 133.65083815883648, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 286.3545536608168, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 4.953000543127171, order_quantity 73.46938775510205, contribution 128.77801412130646 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 54.80771553567876, order_quantity 79.59183673469389, contribution 1425.0006039276477 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 18.88913611760723, order_quantity 85.71428571428572, contribution 491.11753905778795 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 21.30505561690429, order_quantity 91.83673469387756, contribution 553.9314460395116 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 45.65040478405654, order_quantity 97.9591836734694, contribution 1186.9105243854701 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 518.4237060578756, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 89.35181273746356, order_quantity 110.20408163265307, contribution 2323.1471311740524 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 440.112565350681, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 82.42978625425003, order_quantity 122.44897959183675, contribution 
2143.174442610501 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 239.433695777598, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 45.61595493638745, order_quantity 134.69387755102042, contribution 1186.0148283460737 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 328.4584137480805, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 5 Reward 3661.2244897959185 Starting iteration 6 t 1, Price 26, Demand 115.24973010051211, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 89.60718728056614, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 148.5825361019983, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 207.45136261764844, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 30.683665460098535, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 163.77422963768012, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 40.4033655099858, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 19.283429075983673, order_quantity 42.85714285714286, contribution 501.3691559755755 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 5.265497233882574, order_quantity 48.9795918367347, contribution 136.9029280809469 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 52.3035245816191, order_quantity 55.102040816326536, contribution 1359.8916391220966 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 148.1294880498965, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 3.187294946589135, order_quantity 67.34693877551021, contribution 82.86966861131751 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 22.461865930828054, order_quantity 73.46938775510205, contribution 584.0085142015294 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 53.37825775128777, order_quantity 79.59183673469389, contribution 1387.834701533482 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 117.85938685517326, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 105.86287687826692, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 82.04916152641822, order_quantity 97.9591836734694, contribution 2133.2781996868734 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 200.2616483971966, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 26.265364372304006, order_quantity 110.20408163265307, contribution 682.8994736799042 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 60.3227983152142, order_quantity 116.32653061224491, contribution 1568.3927561955693 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 71.60551762064969, order_quantity 122.44897959183675, contribution 1861.743458136892 step 1.0204081632653061 
derivative 6 t 22, Price 26, Demand 108.44227875747714, order_quantity 128.57142857142858, contribution 2819.4992476944058 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 23.750206577633897, order_quantity 134.69387755102042, contribution 617.5053710184814 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 37.27285132840711, order_quantity 140.81632653061226, contribution 969.0941345385847 step 1.0204081632653061 derivative 6 Ending iteration 6 Reward 969.0941345385847 Starting iteration 7 t 1, Price 26, Demand 31.564537430454624, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 128.06643475347903, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 50.09930936939004, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 116.48459052447804, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 32.75120437994132, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 58.76535538110136, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 3.82395610773818, order_quantity 36.734693877551024, contribution 99.42285880119269 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 59.673553148118415, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 24.271133883906877, order_quantity 48.9795918367347, contribution 631.0494809815788 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 536.3850312292284, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 79.51836350838981, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 64.3201834600805, order_quantity 67.34693877551021, contribution 1672.324769962093 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 71.72628930543067, order_quantity 73.46938775510205, contribution 1864.8835219411974 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 98.67314684698505, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 200.43926601288157, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 146.81230990087525, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 134.14102360183935, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 33.297963123914535, order_quantity 104.08163265306123, contribution 865.7470412217779 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 217.79865663972436, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 4.152203222908589, order_quantity 116.32653061224491, contribution 107.9572837956233 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 75.29442952145176, order_quantity 122.44897959183675, contribution 1957.6551675577457 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 
243.81238480443454, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 37.18772870867736, order_quantity 134.69387755102042, contribution 966.8809464256115 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 17.586946561368926, order_quantity 140.81632653061226, contribution 457.2606105955921 step 1.0204081632653061 derivative 6 Ending iteration 7 Reward 457.2606105955921 Starting iteration 8 t 1, Price 26, Demand 69.4105965305588, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 20.741931082724, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 33.40788661314317, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 29.562032850226107, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 125.02912484128548, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 73.53282628022104, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 481.91206968001944, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 25.259266718114322, order_quantity 42.85714285714286, contribution 656.7409346709724 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 30.05365782839961, order_quantity 48.9795918367347, contribution 781.3951035383899 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 5.119485421803759, order_quantity 55.102040816326536, contribution 133.10662096689774 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 187.13939204281104, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 22.885106974464858, order_quantity 67.34693877551021, contribution 595.0127813360863 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 2.821169503754038, order_quantity 73.46938775510205, contribution 73.35040709760499 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 375.0908177516523, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 289.2901143341878, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 6.883754418325598, order_quantity 91.83673469387756, contribution 178.97761487646554 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 13.4733027342761, order_quantity 97.9591836734694, contribution 350.3058710911786 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 30.318478107086438, order_quantity 104.08163265306123, contribution 788.2804307842474 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 7.472550945961292, order_quantity 110.20408163265307, contribution 194.2863245949936 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 25.097828506934988, order_quantity 116.32653061224491, contribution 652.5435411803097 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 89.9966152386147, order_quantity 122.44897959183675, contribution 2339.911996203982 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 32.64069073368627, order_quantity 128.57142857142858, 
contribution 848.657959075843 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 198.31845308190807, order_quantity 134.69387755102042, contribution 3502.040816326531 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 30.69990998156727, order_quantity 140.81632653061226, contribution 798.197659520749 step 1.0204081632653061 derivative 6 Ending iteration 8 Reward 798.197659520749 Starting iteration 9 t 1, Price 26, Demand 26.491534616486323, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 18.115405418745244, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 78.71856424636033, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 203.37422909354365, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 21.113667542689623, order_quantity 24.48979591836735, contribution 548.9553561099302 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 26.13230639644123, order_quantity 30.612244897959187, contribution 679.439966307472 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 10.9582403424709, order_quantity 36.734693877551024, contribution 284.9142489042434 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 149.51482753488762, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 62.19172834595381, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 232.71531020682525, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 0.9073692153053994, order_quantity 61.22448979591837, contribution 23.591599597940384 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 90.77737895998449, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 31.2290307018228, order_quantity 73.46938775510205, contribution 811.9547982473928 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 69.54759820994407, order_quantity 79.59183673469389, contribution 1808.2375534585458 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 71.08855420019596, order_quantity 85.71428571428572, contribution 1848.3024092050948 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 6.321617406365018, order_quantity 91.83673469387756, contribution 164.36205256549047 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 504.816786035954, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 76.36052230928017, order_quantity 104.08163265306123, contribution 1985.3735800412844 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 72.63713428538952, order_quantity 110.20408163265307, contribution 1888.5654914201275 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 16.555956708615316, order_quantity 116.32653061224491, contribution 430.4548744239982 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 60.523730963748626, order_quantity 122.44897959183675, contribution 1573.6170050574642 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 59.84893516118503, order_quantity 128.57142857142858, contribution 1556.0723141908109 step 
[Output condensed. Each iteration runs t = 1..24 at a fixed Price of 26 with step 1.0204081632653061 and derivative 6. Every timestep prints Demand, order_quantity (0 at t = 1, then rising by step*derivative ≈ 6.12 units per period up to ≈ 140.82 at t = 24) and contribution, and the Reward printed at the end of each iteration matches the final timestep's contribution. Rewards for the iterations visible here:
iteration  9: 1380.39    iteration 10: 2198.75    iteration 11: 1882.45
iteration 12: 2119.90    iteration 13:  817.08    iteration 14: 3661.22
iteration 15: 3661.22    iteration 16: 3381.77    iteration 17: 1328.80
iteration 18: 3661.22    iteration 19:  423.34    iteration 20: 3661.22
iteration 21: 2276.81    iteration 22:  486.34    iteration 23: 3661.22
iteration 24: 3661.22    iteration 25: 3661.22    iteration 26: 2778.47
iteration 27:  514.43    iteration 28: 3441.33    iteration 29: 3661.22
iteration 30:  778.66
Iteration 31 begins with the same fixed price and the same order-quantity schedule.]
318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 665.3910950588187, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 150.2865229094809, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 48.520192316402785, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 179.74343352392412, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 50.233628826048914, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 122.78505519235634, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 186.38727315835638, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 600.7877363826227, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 187.62359927042473, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 181.19339962862244, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 126.59784445638354, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 82.68897302811347, order_quantity 85.71428571428572, contribution 2149.91329873095 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 371.394144885824, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 205.02566638922423, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 254.60130415869543, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 325.9446145614362, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 2.011783926529007, order_quantity 116.32653061224491, contribution 52.30638208975419 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 134.73702639147265, order_quantity 122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 164.1485214554806, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 82.30754495125359, order_quantity 134.69387755102042, contribution 2139.996168732593 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 359.09720650807316, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 31 Reward 3661.2244897959185 Starting iteration 32 t 1, Price 26, Demand 70.30893990083813, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 25.32795694417316, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 31.352900581098474, order_quantity 12.244897959183675, contribution 318.36734693877554 step 
1.0204081632653061 derivative 6 t 4, Price 26, Demand 14.684681452210016, order_quantity 18.367346938775512, contribution 381.80171775746044 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 41.570601585006266, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 5.4425241208264445, order_quantity 30.612244897959187, contribution 141.50562714148757 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 55.97853835170613, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 241.06064096307537, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 66.40762864672925, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 56.44024787684623, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 17.175585536096065, order_quantity 61.22448979591837, contribution 446.5652239384977 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 0.9026804513672682, order_quantity 67.34693877551021, contribution 23.46969173554897 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 68.89908039765905, order_quantity 73.46938775510205, contribution 1791.3760903391353 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 125.69126924874783, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 26.305418709474342, order_quantity 85.71428571428572, contribution 683.9408864463329 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 38.73297305448051, order_quantity 91.83673469387756, contribution 1007.0572994164933 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 5.211210905344186, order_quantity 97.9591836734694, contribution 135.49148353894884 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 71.49006512853417, order_quantity 104.08163265306123, contribution 1858.7416933418883 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 235.48106096292685, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 118.81855464810529, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 126.61253499512743, order_quantity 122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 16.39666899488324, order_quantity 128.57142857142858, contribution 426.3133938669642 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 190.4754020275785, order_quantity 134.69387755102042, contribution 3502.040816326531 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 27.588799048277636, order_quantity 140.81632653061226, contribution 717.3087752552185 step 1.0204081632653061 derivative 6 Ending iteration 32 Reward 717.3087752552185 Starting iteration 33 t 1, Price 26, Demand 61.32866579203584, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 221.8318226640545, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 75.28342164354605, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, 
Price 26, Demand 1.2378535721365582, order_quantity 18.367346938775512, contribution 32.184192875550515 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 158.91583105730692, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 21.150247265037063, order_quantity 30.612244897959187, contribution 549.9064288909636 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 26.342805996644774, order_quantity 36.734693877551024, contribution 684.9129559127641 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 0.7542458397831329, order_quantity 42.85714285714286, contribution 19.610391834361454 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 81.1987524528802, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 14.919787521862307, order_quantity 55.102040816326536, contribution 387.91447556842 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 148.9095522825302, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 47.508821958947614, order_quantity 67.34693877551021, contribution 1235.229370932638 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 158.24287962154622, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 21.547361493040977, order_quantity 79.59183673469389, contribution 560.2313988190654 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 208.88346362752594, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 143.51411849401129, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 38.114791159609105, order_quantity 97.9591836734694, contribution 990.9845701498367 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 1.7613745547504156, order_quantity 104.08163265306123, contribution 45.795738423510805 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 185.07682148448245, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 63.53724448410372, order_quantity 116.32653061224491, contribution 1651.9683565866967 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 1.1749367606704444, order_quantity 122.44897959183675, contribution 30.548355777431553 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 143.68174772286594, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 3.187571685523812, order_quantity 134.69387755102042, contribution 82.87686382361912 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 98.98662161247704, order_quantity 140.81632653061226, contribution 2573.652161924403 step 1.0204081632653061 derivative 6 Ending iteration 33 Reward 2573.652161924403 Starting iteration 34 t 1, Price 26, Demand 265.7619447020758, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 116.81801054554637, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 13.688993603003366, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 
11.227828192326308, order_quantity 18.367346938775512, contribution 291.92353300048404 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 36.65158277537704, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 57.587394770424574, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 319.35645545579945, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 30.425391509283777, order_quantity 42.85714285714286, contribution 791.0601792413783 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 213.6418702787682, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 10.511090631477163, order_quantity 55.102040816326536, contribution 273.28835641840624 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 55.72464012922722, order_quantity 61.22448979591837, contribution 1448.8406433599077 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 92.98432864312818, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 60.728865041663695, order_quantity 73.46938775510205, contribution 1578.950491083256 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 62.58159279868701, order_quantity 79.59183673469389, contribution 1627.1214127658623 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 56.15927279521071, order_quantity 85.71428571428572, contribution 1460.1410926754784 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 6.915248884579943, order_quantity 91.83673469387756, contribution 179.7964709990785 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 110.35381709859418, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 21.271787400685326, order_quantity 104.08163265306123, contribution 553.0664724178184 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 169.49690903642622, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 14.511597428282318, order_quantity 116.32653061224491, contribution 377.30153313534026 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 278.85400013623797, order_quantity 122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 151.76100880760927, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 22.71788152923106, order_quantity 134.69387755102042, contribution 590.6649197600076 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 91.51254418706506, order_quantity 140.81632653061226, contribution 2379.3261488636917 step 1.0204081632653061 derivative 6 Ending iteration 34 Reward 2379.3261488636917 Starting iteration 35 t 1, Price 26, Demand 90.33101884971504, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 44.98738979612245, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 222.27748639660453, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 194.7622321533174, order_quantity 
18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 94.16393487861309, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 26.853143854227817, order_quantity 30.612244897959187, contribution 698.1817402099232 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 309.2061594465508, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 39.35040384821913, order_quantity 42.85714285714286, contribution 1023.1105000536974 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 17.250107687658566, order_quantity 48.9795918367347, contribution 448.50279987912273 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 267.4345680268544, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 0.6632874206876356, order_quantity 61.22448979591837, contribution 17.245472937878525 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 144.9070509123106, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 149.4900033758821, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 13.359120943204191, order_quantity 79.59183673469389, contribution 347.33714452330895 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 92.5562522164034, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 98.4722990269198, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 6.40666462130572, order_quantity 97.9591836734694, contribution 166.57328015394873 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 71.06944957971321, order_quantity 104.08163265306123, contribution 1847.8056890725434 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 21.266115329454927, order_quantity 110.20408163265307, contribution 552.9189985658281 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 60.40580323962271, order_quantity 116.32653061224491, contribution 1570.5508842301906 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 49.54917714898169, order_quantity 122.44897959183675, contribution 1288.278605873524 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 85.31932679877666, order_quantity 128.57142857142858, contribution 2218.302496768193 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 6.193728605993389, order_quantity 134.69387755102042, contribution 161.0369437558281 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 16.41926540174011, order_quantity 140.81632653061226, contribution 426.9009004452428 step 1.0204081632653061 derivative 6 Ending iteration 35 Reward 426.9009004452428 Starting iteration 36 t 1, Price 26, Demand 170.5675310750535, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 9.713616927556998, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 354.55592040520304, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 11.80008178558353, order_quantity 18.367346938775512, contribution 
306.80212642517176 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 8.8961308734777, order_quantity 24.48979591836735, contribution 231.2994027104202 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 23.853414908961383, order_quantity 30.612244897959187, contribution 620.188787632996 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 4.925464228822518, order_quantity 36.734693877551024, contribution 128.06206994938546 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 78.19015799585878, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 219.35779797772796, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 310.00751595307594, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 13.132954882331255, order_quantity 61.22448979591837, contribution 341.45682694061264 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 170.48009162000676, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 24.49913729391867, order_quantity 73.46938775510205, contribution 636.9775696418853 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 174.0335210830101, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 43.68708464936106, order_quantity 85.71428571428572, contribution 1135.8642008833876 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 18.060414983585275, order_quantity 91.83673469387756, contribution 469.57078957321716 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 338.52260935146717, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 196.74024490823294, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 11.966604797036268, order_quantity 110.20408163265307, contribution 311.131724722943 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 71.0110821050851, order_quantity 116.32653061224491, contribution 1846.2881347322125 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 26.06742000177617, order_quantity 122.44897959183675, contribution 677.7529200461804 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 6.735620599692139, order_quantity 128.57142857142858, contribution 175.12613559199562 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 7.391349245862476, order_quantity 134.69387755102042, contribution 192.17508039242438 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 45.15379463018149, order_quantity 140.81632653061226, contribution 1173.9986603847187 step 1.0204081632653061 derivative 6 Ending iteration 36 Reward 1173.9986603847187 Starting iteration 37 t 1, Price 26, Demand 61.62952842229329, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 211.52385548647808, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 22.734211262866964, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 158.23159751787315, order_quantity 18.367346938775512, contribution 477.5510204081633 step 
1.0204081632653061 derivative 6 t 5, Price 26, Demand 323.1241233598178, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 32.519787270629905, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 81.24143613996395, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 83.37755440501898, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 34.491590883958224, order_quantity 48.9795918367347, contribution 896.7813629829138 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 121.02559582585471, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 76.78252215261516, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 142.34810190264363, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 207.3624855562153, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 25.664054156648263, order_quantity 79.59183673469389, contribution 667.2654080728548 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 97.27854949886694, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 21.983530196492005, order_quantity 91.83673469387756, contribution 571.5717851087921 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 12.643101877966567, order_quantity 97.9591836734694, contribution 328.72064882713073 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 144.4400041170064, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 84.33560682499761, order_quantity 110.20408163265307, contribution 2192.725777449938 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 8.698429940442287, order_quantity 116.32653061224491, contribution 226.15917845149946 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 17.426164639121474, order_quantity 122.44897959183675, contribution 453.08028061715834 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 1.2527354973586096, order_quantity 128.57142857142858, contribution 32.57112293132385 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 318.9426186336418, order_quantity 134.69387755102042, contribution 3502.040816326531 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 39.04167326816056, order_quantity 140.81632653061226, contribution 1015.0835049721745 step 1.0204081632653061 derivative 6 Ending iteration 37 Reward 1015.0835049721745 Starting iteration 38 t 1, Price 26, Demand 25.485876913817346, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 118.75981367064354, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 5.292138581269403, order_quantity 12.244897959183675, contribution 137.59560311300447 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 252.80925264504083, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, 
Price 26, Demand 79.01297607742887, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 121.36945066696272, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 35.40389311348504, order_quantity 36.734693877551024, contribution 920.501220950611 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 95.73122830876332, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 101.21483419403683, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 169.44431424543575, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 39.03316174790005, order_quantity 61.22448979591837, contribution 1014.8622054454014 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 5.264907710849567, order_quantity 67.34693877551021, contribution 136.88760048208874 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 8.73349764103727, order_quantity 73.46938775510205, contribution 227.070938666969 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 5.9361575735067325, order_quantity 79.59183673469389, contribution 154.34009691117504 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 47.45578633590638, order_quantity 85.71428571428572, contribution 1233.8504447335658 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 193.90482804395143, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 154.70576879514243, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 84.01658498394966, order_quantity 104.08163265306123, contribution 2184.431209582691 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 42.69917759091318, order_quantity 110.20408163265307, contribution 1110.1786173637427 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 24.046892921261282, order_quantity 116.32653061224491, contribution 625.2192159527933 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 101.30207019637342, order_quantity 122.44897959183675, contribution 2633.853825105709 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 174.93568646996928, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 14.526956205935743, order_quantity 134.69387755102042, contribution 377.70086135432933 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 43.8722623724773, order_quantity 140.81632653061226, contribution 1140.6788216844097 step 1.0204081632653061 derivative 6 Ending iteration 38 Reward 1140.6788216844097 Starting iteration 39 t 1, Price 26, Demand 31.336147152813922, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 58.751320734024205, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 99.13884469692414, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 73.66804224203452, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 82.13137172449831, 
order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 91.975087037724, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 23.5563249541805, order_quantity 36.734693877551024, contribution 612.464448808693 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 52.788283634356546, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 219.62724950258558, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 218.03234514986804, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 1.598263834267522, order_quantity 61.22448979591837, contribution 41.55485969095557 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 27.677711042341357, order_quantity 67.34693877551021, contribution 719.6204871008753 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 39.01014516838889, order_quantity 73.46938775510205, contribution 1014.2637743781111 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 156.354621775748, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 36.021644936915536, order_quantity 85.71428571428572, contribution 936.562768359804 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 100.22729571022128, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 19.997171419670607, order_quantity 97.9591836734694, contribution 519.9264569114358 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 144.49953012189582, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 200.9077852852766, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 91.86950480072838, order_quantity 116.32653061224491, contribution 2388.607124818938 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 46.94745691712455, order_quantity 122.44897959183675, contribution 1220.6338798452382 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 137.029075979776, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 73.24287438496066, order_quantity 134.69387755102042, contribution 1904.3147340089772 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 100.35815646549726, order_quantity 140.81632653061226, contribution 2609.3120681029286 step 1.0204081632653061 derivative 6 Ending iteration 39 Reward 2609.3120681029286 Starting iteration 40 t 1, Price 26, Demand 235.02765171766066, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 49.39363943570485, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 137.2612167203887, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 123.51053163960741, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 82.4492137493338, order_quantity 24.48979591836735, contribution 
636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 3.9640798022216024, order_quantity 30.612244897959187, contribution 103.06607485776166 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 27.709296175132987, order_quantity 36.734693877551024, contribution 720.4417005534576 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 15.402410018898708, order_quantity 42.85714285714286, contribution 400.4626604913664 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 300.9524507964234, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 35.881119633784216, order_quantity 55.102040816326536, contribution 932.9091104783896 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 53.00871244658776, order_quantity 61.22448979591837, contribution 1378.2265236112817 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 298.10198125183985, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 94.73961485178128, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 73.1530314755001, order_quantity 79.59183673469389, contribution 1901.9788183630026 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 67.21264219854466, order_quantity 85.71428571428572, contribution 1747.528697162161 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 62.995090193520255, order_quantity 91.83673469387756, contribution 1637.8723450315267 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 228.25690848302975, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 33.063875442293345, order_quantity 104.08163265306123, contribution 859.6607614996269 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 81.8924525918062, order_quantity 110.20408163265307, contribution 2129.2037673869613 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 67.98821991948961, order_quantity 116.32653061224491, contribution 1767.69371790673 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 51.030073968936904, order_quantity 122.44897959183675, contribution 1326.7819231923595 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 146.7185121798368, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 79.69620715393228, order_quantity 134.69387755102042, contribution 2072.1013860022395 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 150.00120586363434, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 40 Reward 3661.2244897959185 Starting iteration 41 t 1, Price 26, Demand 35.41695011197279, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 4.978806502808332, order_quantity 6.122448979591837, contribution 129.44896907301666 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 9.307890230298012, order_quantity 12.244897959183675, contribution 242.0051459877483 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 21.176517258852073, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 39.80125440604776, order_quantity 24.48979591836735, contribution 636.7346938775511 step 
1.0204081632653061 derivative 6 t 6, Price 26, Demand 110.47761670318341, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 18.678417244966557, order_quantity 36.734693877551024, contribution 485.63884836913047 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 50.081297536764446, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 191.09412726532946, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 21.783066590683926, order_quantity 55.102040816326536, contribution 566.359731357782 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 128.05524580003035, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 242.25996571732207, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 318.64151818935636, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 14.212424127068052, order_quantity 79.59183673469389, contribution 369.52302730376937 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 97.85533067001666, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 68.85360772795379, order_quantity 91.83673469387756, contribution 1790.1938009267985 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 16.079850933184478, order_quantity 97.9591836734694, contribution 418.07612426279644 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 265.1988824090353, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 68.70433611744853, order_quantity 110.20408163265307, contribution 1786.312739053662 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 21.46199097669753, order_quantity 116.32653061224491, contribution 558.0117653941358 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 9.161709407216163, order_quantity 122.44897959183675, contribution 238.20444458762023 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 102.60761841522881, order_quantity 128.57142857142858, contribution 2667.798078795949 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 84.15590599492681, order_quantity 134.69387755102042, contribution 2188.053555868097 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 125.31096844946119, order_quantity 140.81632653061226, contribution 3258.0851796859906 step 1.0204081632653061 derivative 6 Ending iteration 41 Reward 3258.0851796859906 Starting iteration 42 t 1, Price 26, Demand 12.15303816782572, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 9.680673853674227, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 58.81221248470266, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 92.66761537316674, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 87.92869008618254, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, 
Price 26, Demand 138.35148371077256, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 139.5465818038084, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 138.30003690349727, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 48.2175016729445, order_quantity 48.9795918367347, contribution 1253.655043496557 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 21.368549820566127, order_quantity 55.102040816326536, contribution 555.5822953347193 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 220.8905050665608, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 194.38236687779224, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 70.15709777930243, order_quantity 73.46938775510205, contribution 1824.0845422618631 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 86.15209618738756, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 39.36710596949552, order_quantity 85.71428571428572, contribution 1023.5447552068835 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 0.04739368185541928, order_quantity 91.83673469387756, contribution 1.232235728240901 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 26.636758414057216, order_quantity 97.9591836734694, contribution 692.5557187654877 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 161.6573501603038, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 77.9703416884441, order_quantity 110.20408163265307, contribution 2027.2288838995464 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 170.85641232958665, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 86.04512637784059, order_quantity 122.44897959183675, contribution 2237.173285823855 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 153.53518815964358, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 16.80404861552636, order_quantity 134.69387755102042, contribution 436.9052640036853 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 157.98205537046576, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 42 Reward 3661.2244897959185 Starting iteration 43 t 1, Price 26, Demand 94.14982248427513, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 1.2328102682755295, order_quantity 6.122448979591837, contribution 32.053066975163766 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 22.00640051916142, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 37.34264892069087, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 192.2892370769945, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 23.888700533192676, 
order_quantity 30.612244897959187, contribution 621.1062138630095 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 50.466950609636505, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 32.388727914223885, order_quantity 42.85714285714286, contribution 842.106925769821 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 106.28246142324012, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 49.50039906228607, order_quantity 55.102040816326536, contribution 1287.0103756194378 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 176.87109293416546, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 79.95804065677949, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 173.39259931878465, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 5.344246145282281, order_quantity 79.59183673469389, contribution 138.95039977733933 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 27.854793694868853, order_quantity 85.71428571428572, contribution 724.2246360665902 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 72.36753789440128, order_quantity 91.83673469387756, contribution 1881.5559852544334 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 36.872616904457175, order_quantity 97.9591836734694, contribution 958.6880395158865 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 83.06232742096917, order_quantity 104.08163265306123, contribution 2159.6205129451982 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 79.3041914510861, order_quantity 110.20408163265307, contribution 2061.9089777282384 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 294.5247388854612, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 110.74004960967001, order_quantity 122.44897959183675, contribution 2879.2412898514203 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 29.53532379883531, order_quantity 128.57142857142858, contribution 767.9184187697181 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 88.63424032379349, order_quantity 134.69387755102042, contribution 2304.490248418631 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 20.483237029922634, order_quantity 140.81632653061226, contribution 532.5641627779885 step 1.0204081632653061 derivative 6 Ending iteration 43 Reward 532.5641627779885 Starting iteration 44 t 1, Price 26, Demand 208.735421256725, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 174.59199383806182, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 13.275797870174536, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 150.29088567886674, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 32.80978724818689, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 71.41448662575264, order_quantity 30.612244897959187, 
[per-period trace for iterations 44-65 condensed; each of the 24 periods per iteration logs: t, Price 26, Demand, order_quantity (ramping from 0 to 140.81632653061226 in increments of 6.122448979591837), contribution, step 1.0204081632653061, derivative 6]
Ending iteration 44 Reward 3661.2244897959185
Ending iteration 45 Reward 2064.891015611251
Ending iteration 46 Reward 1568.3913730474292
Ending iteration 47 Reward 3661.2244897959185
Ending iteration 48 Reward 3497.17226530997
Ending iteration 49 Reward 2989.6809837877795
Ending iteration 50 Reward 1392.3451993007907
Ending iteration 51 Reward 196.02860361307052
Ending iteration 52 Reward 3661.2244897959185
Ending iteration 53 Reward 679.9008554484237
Ending iteration 54 Reward 527.8679630821001
Ending iteration 55 Reward 1444.9297919648636
Ending iteration 56 Reward 1091.8309496037773
Ending iteration 57 Reward 3661.2244897959185
Ending iteration 58 Reward 3661.2244897959185
Ending iteration 59 Reward 1259.7458352222525
Ending iteration 60 Reward 3084.1449382212522
Ending iteration 61 Reward 3661.2244897959185
Ending iteration 62 Reward 3661.2244897959185
Ending iteration 63 Reward 1694.8944754626825
Ending iteration 64 Reward 3661.2244897959185
Starting iteration 65
derivative 6 t 12, Price 26, Demand 267.78664976333476, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 103.49502401135986, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 1.0966912524548935, order_quantity 79.59183673469389, contribution 28.51397256382723 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 0.27356013183619987, order_quantity 85.71428571428572, contribution 7.112563427741197 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 12.424756315951893, order_quantity 91.83673469387756, contribution 323.0436642147492 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 10.220068188619932, order_quantity 97.9591836734694, contribution 265.7217729041182 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 23.865554114063595, order_quantity 104.08163265306123, contribution 620.5044069656535 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 47.2382103671093, order_quantity 110.20408163265307, contribution 1228.193469544842 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 84.10082613731818, order_quantity 116.32653061224491, contribution 2186.6214795702726 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 71.39463214556568, order_quantity 122.44897959183675, contribution 1856.2604357847076 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 185.59838463134446, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 62.71608338880842, order_quantity 134.69387755102042, contribution 1630.618168109019 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 39.58622640343038, order_quantity 140.81632653061226, contribution 1029.24188648919 step 1.0204081632653061 derivative 6 Ending iteration 65 Reward 1029.24188648919 Starting iteration 66 t 1, Price 26, Demand 157.80364652813063, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 100.74041123216098, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 0.9292525395480102, order_quantity 12.244897959183675, contribution 24.160566028248265 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 141.45827077262118, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 275.4162231999874, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 35.046350549256786, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 69.32531207027672, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 3.576718180629716, order_quantity 42.85714285714286, contribution 92.99467269637262 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 15.2127541770496, order_quantity 48.9795918367347, contribution 395.5316086032896 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 136.16097765797633, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 24.033113774511573, order_quantity 61.22448979591837, contribution 624.8609581373009 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 
350.91723324683625, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 74.44694946420987, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 138.74275701772015, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 471.8569633347563, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 83.77134143545778, order_quantity 91.83673469387756, contribution 2178.0548773219025 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 119.15727855352517, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 7.354860952235952, order_quantity 104.08163265306123, contribution 191.22638475813474 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 23.87288557866381, order_quantity 110.20408163265307, contribution 620.6950250452591 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 157.33491755302964, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 52.8427218089803, order_quantity 122.44897959183675, contribution 1373.910767033488 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 91.17354272414563, order_quantity 128.57142857142858, contribution 2370.5121108277863 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 35.13764980793789, order_quantity 134.69387755102042, contribution 913.5788950063852 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 47.39980152438637, order_quantity 140.81632653061226, contribution 1232.3948396340456 step 1.0204081632653061 derivative 6 Ending iteration 66 Reward 1232.3948396340456 Starting iteration 67 t 1, Price 26, Demand 9.970197943667461, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 38.25340225041002, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 143.66236318915867, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 22.560619882830725, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 121.37346059033511, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 407.1617632339943, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 46.80674160771689, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 2.0518512564509344, order_quantity 42.85714285714286, contribution 53.34813266772429 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 9.875537574678146, order_quantity 48.9795918367347, contribution 256.7639769416318 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 27.899055054806617, order_quantity 55.102040816326536, contribution 725.375431424972 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 15.609910643824737, order_quantity 61.22448979591837, contribution 405.85767673944315 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 189.62325834431059, order_quantity 
67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 36.607569606328774, order_quantity 73.46938775510205, contribution 951.7968097645481 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 39.61299899588812, order_quantity 79.59183673469389, contribution 1029.937973893091 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 33.44668054068161, order_quantity 85.71428571428572, contribution 869.6136940577219 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 213.27952094121625, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 102.42413483407884, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 69.56370811441164, order_quantity 104.08163265306123, contribution 1808.6564109747026 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 91.69089174622738, order_quantity 110.20408163265307, contribution 2383.9631854019117 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 59.32411270835259, order_quantity 116.32653061224491, contribution 1542.4269304171673 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 7.997495412382967, order_quantity 122.44897959183675, contribution 207.93488072195714 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 136.40086732295202, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 60.49184870883519, order_quantity 134.69387755102042, contribution 1572.788066429715 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 19.029823814706443, order_quantity 140.81632653061226, contribution 494.7754191823675 step 1.0204081632653061 derivative 6 Ending iteration 67 Reward 494.7754191823675 Starting iteration 68 t 1, Price 26, Demand 80.21063194303551, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 52.62194059649761, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 10.524062126990092, order_quantity 12.244897959183675, contribution 273.6256153017424 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 28.289415671526264, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 102.19326638428585, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 83.3954138185598, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 106.28090085726443, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 99.65202166398458, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 9.436537836072253, order_quantity 48.9795918367347, contribution 245.34998373787857 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 123.86745689250583, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 140.26953156355876, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 18.083390042920126, order_quantity 67.34693877551021, contribution 
470.1681411159233 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 9.413669044046003, order_quantity 73.46938775510205, contribution 244.75539514519608 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 308.05334605778836, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 33.94217822219452, order_quantity 85.71428571428572, contribution 882.4966337770576 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 75.19402903074695, order_quantity 91.83673469387756, contribution 1955.0447547994206 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 148.23453697596406, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 13.262931455492508, order_quantity 104.08163265306123, contribution 344.8362178428052 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 26.123133489812318, order_quantity 110.20408163265307, contribution 679.2014707351202 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 6.143304182848488, order_quantity 116.32653061224491, contribution 159.7259087540607 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 2.5865609463149073, order_quantity 122.44897959183675, contribution 67.2505846041876 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 95.54962631181373, order_quantity 128.57142857142858, contribution 2484.290284107157 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 110.43040159570097, order_quantity 134.69387755102042, contribution 2871.190441488225 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 97.72820084769968, order_quantity 140.81632653061226, contribution 2540.9332220401916 step 1.0204081632653061 derivative 6 Ending iteration 68 Reward 2540.9332220401916 Starting iteration 69 t 1, Price 26, Demand 56.6670764724587, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 9.134164208741897, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 111.48972701797486, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 201.43545633745967, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 49.92266777354734, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 26.260183647319845, order_quantity 30.612244897959187, contribution 682.764774830316 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 29.685562112588165, order_quantity 36.734693877551024, contribution 771.8246149272923 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 120.48419765079944, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 11.439270251377472, order_quantity 48.9795918367347, contribution 297.4210265358143 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 10.708107716618793, order_quantity 55.102040816326536, contribution 278.41080063208864 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 44.98929494402263, order_quantity 61.22448979591837, contribution 1169.7216685445885 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 301.53264626345646, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 
1.0204081632653061 derivative 6 t 13, Price 26, Demand 154.33477074347797, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 46.930842499835826, order_quantity 79.59183673469389, contribution 1220.2019049957314 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 21.267984435339336, order_quantity 85.71428571428572, contribution 552.9675953188228 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 170.09040827061875, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 70.07684851547515, order_quantity 97.9591836734694, contribution 1821.9980614023539 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 36.37236185595826, order_quantity 104.08163265306123, contribution 945.6814082549148 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 63.65192239703307, order_quantity 110.20408163265307, contribution 1654.9499823228598 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 47.33728308744783, order_quantity 116.32653061224491, contribution 1230.7693602736435 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 68.91898590668484, order_quantity 122.44897959183675, contribution 1791.8936335738058 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 106.45943238553046, order_quantity 128.57142857142858, contribution 2767.945242023792 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 6.068983010812733, order_quantity 134.69387755102042, contribution 157.79355828113106 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 29.416885209667754, order_quantity 140.81632653061226, contribution 764.8390154513615 step 1.0204081632653061 derivative 6 Ending iteration 69 Reward 764.8390154513615 Starting iteration 70 t 1, Price 26, Demand 187.81510287435137, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 64.85247629471512, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 88.53606532450488, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 58.3847434624769, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 102.6349211016017, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 5.336002329624148, order_quantity 30.612244897959187, contribution 138.73606057022786 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 25.58421977281452, order_quantity 36.734693877551024, contribution 665.1897140931776 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 173.681211112317, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 57.45728619583069, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 67.43422224453747, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 2.6807502573795836, order_quantity 61.22448979591837, contribution 69.69950669186917 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 24.303571415131405, order_quantity 67.34693877551021, contribution 631.8928567934165 step 1.0204081632653061 derivative 6 t 13, 
Price 26, Demand 52.914285587829504, order_quantity 73.46938775510205, contribution 1375.7714252835672 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 60.82079360758521, order_quantity 79.59183673469389, contribution 1581.3406337972153 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 107.32749106916506, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 55.94504909260019, order_quantity 91.83673469387756, contribution 1454.571276407605 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 68.89554608490324, order_quantity 97.9591836734694, contribution 1791.2841982074845 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 63.51226175283611, order_quantity 104.08163265306123, contribution 1651.318805573739 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 108.54700702001364, order_quantity 110.20408163265307, contribution 2822.2221825203546 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 40.86440809759899, order_quantity 116.32653061224491, contribution 1062.4746105375737 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 3.7738388558952445, order_quantity 122.44897959183675, contribution 98.11981025327636 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 218.0144445006285, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 163.87846068264332, order_quantity 134.69387755102042, contribution 3502.040816326531 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 59.29968438174426, order_quantity 140.81632653061226, contribution 1541.7917939253507 step 1.0204081632653061 derivative 6 Ending iteration 70 Reward 1541.7917939253507 Starting iteration 71 t 1, Price 26, Demand 7.786784099687527, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 313.19939290051377, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 87.82552718108033, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 315.9469890767805, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 66.94806271551698, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 30.87019229186641, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 11.23745282062024, order_quantity 36.734693877551024, contribution 292.1737733361262 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 32.37761469658446, order_quantity 42.85714285714286, contribution 841.817982111196 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 27.11437574388015, order_quantity 48.9795918367347, contribution 704.9737693408839 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 30.49594340588599, order_quantity 55.102040816326536, contribution 792.8945285530357 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 204.233976396561, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 258.7551922022073, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 17.313494631744454, 
order_quantity 73.46938775510205, contribution 450.1508604253558 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 30.166526055104402, order_quantity 79.59183673469389, contribution 784.3296774327144 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 102.37412626452974, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 75.32660211163852, order_quantity 91.83673469387756, contribution 1958.4916549026016 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 31.493585534690776, order_quantity 97.9591836734694, contribution 818.8332239019602 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 155.31300565018054, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 47.93452693800218, order_quantity 110.20408163265307, contribution 1246.2977003880567 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 40.716722370781916, order_quantity 116.32653061224491, contribution 1058.6347816403297 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 373.80069965488383, order_quantity 122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 34.91187800054243, order_quantity 128.57142857142858, contribution 907.7088280141031 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 125.53088317981127, order_quantity 134.69387755102042, contribution 3263.802962675093 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 13.234684144681253, order_quantity 140.81632653061226, contribution 344.10178776171256 step 1.0204081632653061 derivative 6 Ending iteration 71 Reward 344.10178776171256 Starting iteration 72 t 1, Price 26, Demand 3.887415217839569, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 130.08080824507485, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 39.66210472546776, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 236.40465404469418, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 160.01255562321973, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 341.3748179004642, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 24.018610624947062, order_quantity 36.734693877551024, contribution 624.4838762486236 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 15.195714320231279, order_quantity 42.85714285714286, contribution 395.08857232601326 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 5.138150799078782, order_quantity 48.9795918367347, contribution 133.59192077604834 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 226.85507387311551, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 85.21994535101133, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 8.595050551471408, order_quantity 67.34693877551021, contribution 223.4713143382566 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 79.0678157376888, order_quantity 73.46938775510205, 
contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 16.228750440553828, order_quantity 79.59183673469389, contribution 421.94751145439955 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 96.45933989545455, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 97.66925154981705, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 45.04164946922744, order_quantity 97.9591836734694, contribution 1171.0828861999134 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 282.31414022244667, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 17.268726172679514, order_quantity 110.20408163265307, contribution 448.9868804896674 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 63.01995983762421, order_quantity 116.32653061224491, contribution 1638.5189557782294 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 5.368636227278818, order_quantity 122.44897959183675, contribution 139.58454190924928 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 68.94075477293926, order_quantity 128.57142857142858, contribution 1792.4596240964206 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 27.172589316357502, order_quantity 134.69387755102042, contribution 706.4873222252951 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 190.66028280822067, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 72 Reward 3661.2244897959185 Starting iteration 73 t 1, Price 26, Demand 138.31051293630358, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 122.60327843948701, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 3.7894776869945237, order_quantity 12.244897959183675, contribution 98.52641986185762 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 63.88167279021948, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 9.893887311744164, order_quantity 24.48979591836735, contribution 257.2410701053483 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 7.22102611906415, order_quantity 30.612244897959187, contribution 187.7466790956679 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 15.183719733868484, order_quantity 36.734693877551024, contribution 394.7767130805806 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 30.785036608425862, order_quantity 42.85714285714286, contribution 800.4109518190725 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 1.6056059128940148, order_quantity 48.9795918367347, contribution 41.745753735244385 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 133.89464048101487, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 40.0785086749406, order_quantity 61.22448979591837, contribution 1042.0412255484557 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 151.28678268580924, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 116.13560865765417, order_quantity 73.46938775510205, contribution 1910.2040816326532 
step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 114.7411024970562, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 185.1107506527635, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 20.742187427986174, order_quantity 91.83673469387756, contribution 539.2968731276405 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 49.51708529371107, order_quantity 97.9591836734694, contribution 1287.4442176364878 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 48.093090882010856, order_quantity 104.08163265306123, contribution 1250.4203629322822 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 291.59768363953134, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 56.086203956106495, order_quantity 116.32653061224491, contribution 1458.241302858769 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 36.19344815059351, order_quantity 122.44897959183675, contribution 941.0296519154313 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 337.00843538229014, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 1.419874557198968, order_quantity 134.69387755102042, contribution 36.91673848717316 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 45.352540852408566, order_quantity 140.81632653061226, contribution 1179.1660621626227 step 1.0204081632653061 derivative 6 Ending iteration 73 Reward 1179.1660621626227 Starting iteration 74 t 1, Price 26, Demand 296.06461409193787, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 83.63272001498936, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 71.95833188323697, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 0.6498049448987442, order_quantity 18.367346938775512, contribution 16.894928567367348 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 65.83030382317068, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 171.04080064596974, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 77.13168223590363, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 338.47650514583205, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 27.190362249404913, order_quantity 48.9795918367347, contribution 706.9494184845278 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 53.544782029513804, order_quantity 55.102040816326536, contribution 1392.164332767359 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 5.477290334012803, order_quantity 61.22448979591837, contribution 142.4095486843329 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 442.3174765255155, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 0.2540425751261929, order_quantity 73.46938775510205, contribution 6.605106953281015 step 1.0204081632653061 derivative 6 t 
14, Price 26, Demand 19.449750006503297, order_quantity 79.59183673469389, contribution 505.6935001690857 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 100.72020605711278, order_quantity 85.71428571428572, contribution 2228.571428571429 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 117.73122483680032, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 111.05987592352855, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 56.72358053265969, order_quantity 104.08163265306123, contribution 1474.813093849152 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 142.5833322628541, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 8.715828946671083, order_quantity 116.32653061224491, contribution 226.61155261344817 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 9.169871992856091, order_quantity 122.44897959183675, contribution 238.41667181425836 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 30.296022058740878, order_quantity 128.57142857142858, contribution 787.6965735272628 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 114.12133380676286, order_quantity 134.69387755102042, contribution 2967.1546789758345 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 398.7430769578623, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 74 Reward 3661.2244897959185 Starting iteration 75 t 1, Price 26, Demand 5.2977430341503995, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 211.43883530136787, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 123.09895505432003, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 12.503388678665992, order_quantity 18.367346938775512, contribution 325.0881056453158 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 3.9832308792956113, order_quantity 24.48979591836735, contribution 103.56400286168589 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 78.01488963119266, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 139.31434036811712, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 127.82512215095278, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 154.08036374929208, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 16.953980262590584, order_quantity 55.102040816326536, contribution 440.8034868273552 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 24.954603234479197, order_quantity 61.22448979591837, contribution 648.8196840964591 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 6.075837033272421, order_quantity 67.34693877551021, contribution 157.97176286508295 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 194.93644684041007, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 
57.35276464445816, order_quantity 79.59183673469389, contribution 1491.1718807559123 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 53.88706735142994, order_quantity 85.71428571428572, contribution 1401.0637511371785 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 70.83530158428127, order_quantity 91.83673469387756, contribution 1841.717841191313 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 25.826134367820675, order_quantity 97.9591836734694, contribution 671.4794935633375 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 314.98038849897523, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 49.83562985710425, order_quantity 110.20408163265307, contribution 1295.7263762847106 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 30.29338897847131, order_quantity 116.32653061224491, contribution 787.6281134402541 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 0.09751414906963535, order_quantity 122.44897959183675, contribution 2.535367875810519 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 63.38146950170972, order_quantity 128.57142857142858, contribution 1647.9182070444526 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 97.75212491524262, order_quantity 134.69387755102042, contribution 2541.555247796308 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 197.29403885892518, order_quantity 140.81632653061226, contribution 3661.2244897959185 step 1.0204081632653061 derivative 6 Ending iteration 75 Reward 3661.2244897959185 Starting iteration 76 t 1, Price 26, Demand 10.611242208207235, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 52.35473389610957, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 32.47632864421768, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 22.697810331259827, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 13.077682574471055, order_quantity 24.48979591836735, contribution 340.01974693624743 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 357.78053651665283, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 140.20254597591705, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 9.456162550199423, order_quantity 42.85714285714286, contribution 245.860226305185 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 42.3308714183272, order_quantity 48.9795918367347, contribution 1100.6026568765071 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 130.99916572132545, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 59.799469956116624, order_quantity 61.22448979591837, contribution 1554.7862188590323 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 402.5844659697574, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 248.99490197949805, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 19.366338010625938, order_quantity 
79.59183673469389, contribution 503.5247882762744 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 31.589628260277475, order_quantity 85.71428571428572, contribution 821.3303347672144 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 170.8001553212301, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 67.42261338112858, order_quantity 97.9591836734694, contribution 1752.9879479093431 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 155.97578023931803, order_quantity 104.08163265306123, contribution 2706.1224489795923 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 15.237244052822781, order_quantity 110.20408163265307, contribution 396.1683453733923 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 17.222812098564074, order_quantity 116.32653061224491, contribution 447.7931145626659 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 58.03373014855463, order_quantity 122.44897959183675, contribution 1508.8769838624205 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 236.14413663652113, order_quantity 128.57142857142858, contribution 3342.857142857143 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 58.915066288787976, order_quantity 134.69387755102042, contribution 1531.7917235084874 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 96.73293645371274, order_quantity 140.81632653061226, contribution 2515.056347796531 step 1.0204081632653061 derivative 6 Ending iteration 76 Reward 2515.056347796531 Starting iteration 77 t 1, Price 26, Demand 78.76356390328182, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 125.40785123896092, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 10.705757570581914, order_quantity 12.244897959183675, contribution 278.34969683512975 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 102.6412859014689, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 127.12221732840226, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 23.418961031384367, order_quantity 30.612244897959187, contribution 608.8929868159935 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 84.33983098717495, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 149.93108194830035, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 24.705829461214368, order_quantity 48.9795918367347, contribution 642.3515659915736 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 36.20986739291191, order_quantity 55.102040816326536, contribution 941.4565522157096 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 206.10543758268452, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 260.35908373631395, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 169.15648655032274, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 117.29465585061092, order_quantity 79.59183673469389, contribution 
2069.387755102041 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 8.11994985336837, order_quantity 85.71428571428572, contribution 211.11869618757763 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 87.17024066708156, order_quantity 91.83673469387756, contribution 2266.4262573441206 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 119.43567507588835, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 2.163926520137327, order_quantity 104.08163265306123, contribution 56.2620895235705 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 159.86945211771751, order_quantity 110.20408163265307, contribution 2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 165.18442726130283, order_quantity 116.32653061224491, contribution 3024.4897959183677 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 389.88057524962596, order_quantity 122.44897959183675, contribution 3183.673469387755 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 40.91854554398171, order_quantity 128.57142857142858, contribution 1063.8821841435245 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 107.20877022119866, order_quantity 134.69387755102042, contribution 2787.428025751165 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 117.98991964128615, order_quantity 140.81632653061226, contribution 3067.7379106734397 step 1.0204081632653061 derivative 6 Ending iteration 77 Reward 3067.7379106734397 Starting iteration 78 t 1, Price 26, Demand 58.11641764207556, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 76.05739862224232, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 50.582931206649754, order_quantity 12.244897959183675, contribution 318.36734693877554 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 54.00978111869294, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 120.6905524727892, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 7.747850522138528, order_quantity 30.612244897959187, contribution 201.4441135756017 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 90.33663937396412, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 9.998160816969113, order_quantity 42.85714285714286, contribution 259.95218124119697 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 21.55033815815443, order_quantity 48.9795918367347, contribution 560.3087921120152 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 318.96536585400867, order_quantity 55.102040816326536, contribution 1432.6530612244899 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 42.58550923293077, order_quantity 61.22448979591837, contribution 1107.2232400562 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 157.05889983878157, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 5.3523043144844085, order_quantity 73.46938775510205, contribution 139.15991217659462 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 130.87996623498418, order_quantity 79.59183673469389, contribution 2069.387755102041 step 1.0204081632653061 
derivative 6
[Iterations 78-98, Price 26: per-period log of Demand, order_quantity (0 to 140.81632653061226 in increments of 6.122448979591837), and contribution for t = 1..24, each entry tagged step 1.0204081632653061 derivative 6; per-iteration rewards follow.]
Ending iteration 78 Reward 91.00214685155031
Ending iteration 79 Reward 3661.2244897959185
Ending iteration 80 Reward 2712.469191418846
Ending iteration 81 Reward 640.6725411665863
Ending iteration 82 Reward 3661.2244897959185
Ending iteration 83 Reward 493.27356879583687
Ending iteration 84 Reward 128.87902059056668
Ending iteration 85 Reward 3661.2244897959185
Ending iteration 86 Reward 270.50927360418325
Ending iteration 87 Reward 3321.613307298341
Ending iteration 88 Reward 902.6669248787376
Ending iteration 89 Reward 371.525386191183
Ending iteration 90 Reward 869.1155462823219
Ending iteration 91 Reward 582.1859847484054
Ending iteration 92 Reward 1541.4089335273313
Ending iteration 93 Reward 1201.7272216276592
Ending iteration 94 Reward 126.86090247791407
Ending iteration 95 Reward 612.5279487500597
Ending iteration 96 Reward 3661.2244897959185
Ending iteration 97 Reward 1062.7893600214827
Ending iteration 98 Reward 3661.2244897959185
Starting iteration 99 t 1, Price 26, Demand 20.479231229505483, order_quantity 0, contribution 0 step 1.0204081632653061 derivative 6 t 2, Price 26, Demand 16.507226584494905, order_quantity 6.122448979591837, contribution 159.18367346938777 step 1.0204081632653061 derivative 6 t 3, Price 26, Demand 4.081701634562154, order_quantity 12.244897959183675, contribution 106.124242498616 step 1.0204081632653061 derivative 6 t 4, Price 26, Demand 45.68402850781048, order_quantity 18.367346938775512, contribution 477.5510204081633 step 1.0204081632653061 derivative 6 t 5, Price 26, Demand 62.28838970299264, order_quantity 24.48979591836735, contribution 636.7346938775511 step 1.0204081632653061 derivative 6 t 6, Price 26, Demand 182.3044281769016, order_quantity 30.612244897959187, contribution 795.9183673469388 step 1.0204081632653061 derivative 6 t 7, Price 26, Demand 232.033665989077, order_quantity 36.734693877551024, contribution 955.1020408163266 step 1.0204081632653061 derivative 6 t 8, Price 26, Demand 83.16405242425088, order_quantity 42.85714285714286, contribution 1114.2857142857144 step 1.0204081632653061 derivative 6 t 9, Price 26, Demand 461.2556998249619, order_quantity 48.9795918367347, contribution 1273.4693877551022 step 1.0204081632653061 derivative 6 t 10, Price 26, Demand 54.5605794906732, order_quantity 55.102040816326536, contribution 1418.5750667575032 step 1.0204081632653061 derivative 6 t 11, Price 26, Demand 75.01355234352044, order_quantity 61.22448979591837, contribution 1591.8367346938776 step 1.0204081632653061 derivative 6 t 12, Price 26, Demand 211.9283570055846, order_quantity 67.34693877551021, contribution 1751.0204081632655 step 1.0204081632653061 derivative 6 t 13, Price 26, Demand 173.03838942765418, order_quantity 73.46938775510205, contribution 1910.2040816326532 step 1.0204081632653061 derivative 6 t 14, Price 26, Demand 1.1030802026317, order_quantity 79.59183673469389, contribution 28.680085268424197 step 1.0204081632653061 derivative 6 t 15, Price 26, Demand 39.989961945827574, order_quantity 85.71428571428572, contribution 1039.739010591517 step 1.0204081632653061 derivative 6 t 16, Price 26, Demand 169.69469314696605, order_quantity 91.83673469387756, contribution 2387.7551020408164 step 1.0204081632653061 derivative 6 t 17, Price 26, Demand 292.6344798078025, order_quantity 97.9591836734694, contribution 2546.9387755102043 step 1.0204081632653061 derivative 6 t 18, Price 26, Demand 6.514945917092973, order_quantity 104.08163265306123, contribution 169.3885938444173 step 1.0204081632653061 derivative 6 t 19, Price 26, Demand 188.4863092170037, order_quantity 110.20408163265307, contribution 
2865.3061224489797 step 1.0204081632653061 derivative 6 t 20, Price 26, Demand 18.184687842329353, order_quantity 116.32653061224491, contribution 472.8018839005632 step 1.0204081632653061 derivative 6 t 21, Price 26, Demand 98.31488617474469, order_quantity 122.44897959183675, contribution 2556.187040543362 step 1.0204081632653061 derivative 6 t 22, Price 26, Demand 40.06043633193066, order_quantity 128.57142857142858, contribution 1041.5713446301972 step 1.0204081632653061 derivative 6 t 23, Price 26, Demand 40.54771716555463, order_quantity 134.69387755102042, contribution 1054.2406463044204 step 1.0204081632653061 derivative 6 t 24, Price 26, Demand 44.717858553295976, order_quantity 140.81632653061226, contribution 1162.6643223856954 step 1.0204081632653061 derivative 6 Ending iteration 99 Reward 1162.6643223856954 Optimal order_quantity for price 26 and cost 20 is 26.2364264467491 Reward type: Terminal, theta_step: 50, T: 24 - Average reward over 100 iteratios is: 2034.696423937267 Order quantity for iteration 31 [0, 6.122448979591837, 12.244897959183675, 18.367346938775512, 24.48979591836735, 30.612244897959187, 36.734693877551024, 42.85714285714286, 48.9795918367347, 55.102040816326536, 61.22448979591837, 67.34693877551021, 73.46938775510205, 79.59183673469389, 85.71428571428572, 91.83673469387756, 97.9591836734694, 104.08163265306123, 110.20408163265307, 116.32653061224491, 122.44897959183675, 128.57142857142858, 134.69387755102042, 140.81632653061226]
src/nlp-part3-word-embeddings.ipynb
###Markdown Word Embeddings
A word embedding is a vector representation of a word in which the word's context is preserved: if two words appear in similar contexts, their vector representations should be similar. A few points to consider for better understanding. **E.g. sentences/documents: 1) It is a good day 2) It is a great day** 1. The simplest vector representation is a one-hot encoding of a word: we create a vocabulary from the above documents and one-hot encode them. 2. The words "good" and "great" have similar context, but with one-hot encoding they are completely independent of each other. Word embeddings help us achieve this. How do we get this word vector representation? 1. A word vector representation is a by-product of a language model **(a typical language model: input word -> Language Model ML/DL -> output word)**. 2. We pass the input word through a neural network (with linear activation) and obtain the target word. The weight matrix that is obtained from the model is the word embedding matrix (we throw away the model after obtaining this). What methods do we have for this word vector representation? 1. DTM 2. Word2Vec 3. GloVe and others. Word2Vec
Word2Vec is a statistical method for efficiently learning a standalone word embedding from a text corpus. We have two different models as part of Word2Vec to obtain word embeddings: 1. CBOW (Continuous Bag of Words) 2. Skipgram. In CBOW or skipgram the problem is that, if you have a very large corpus, training takes a very large amount of time and resources because you have to train a very large set of weights. Another problem is that skipgram uses a softmax activation function, which is costly. As a result we have two approaches in skipgram: 1. Subsampling 2. Negative Sampling. I haven't explained them in detail (just model building), but these resources are helpful: * http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/ * http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/ * https://www.youtube.com/watch?v=BWaHLmG1lak&ab_channel=Rasa ###Code from zipfile import ZipFile zip_path = '/kaggle/input/quora-insincere-questions-classification/embeddings.zip' zf = ZipFile(zip_path) zf.filelist ###Output _____no_output_____ ###Markdown Custom Word Embeddings using the gensim Library ###Code # pandas and numpy are used in the cells below
import pandas as pd
import numpy as np
from gensim.models import word2vec ###Output _____no_output_____ ###Markdown **NOTE** The Word2Vec model has an argument `sentences` which takes a list of lists of tokens, so we split the documents in the code below. If you pass the list without splitting, you won't get an error, but you will end up with unique characters as tokens (imagine it like a letter embedding; I'm not sure, but just for understanding). ###Code docs = ['it is a good day', 'it is a great day', 'it was bad day'] docs_words = [doc.split(' ') for doc in docs] docs_words embedding_dim = 50 # dimension of word vector representation # min_count: minimum frequency required for a word to be considered # window : window_size as in how many words to be considered in training sample before and after the word # sg=1 : tells to use skipgram else CBOW model = word2vec.Word2Vec(sentences=docs_words, vector_size=embedding_dim, min_count=1, window=2, sg=1) vocab = model.wv.index_to_key vocab print(model.wv.similarity('day', 'bad')) print(model.wv.similarity('good', 'great')) df_embedding_matrix = pd.DataFrame(model.wv[vocab], index=vocab) df_embedding_matrix ###Output _____no_output_____ ###Markdown Let's custom train on a larger dataset for better results ###Code data = 
pd.read_csv('/kaggle/input/quora-insincere-questions-classification/train.csv') data.head() docs = data['question_text'].str.lower().str.replace('[^a-z\s]', '') from gensim.parsing.preprocessing import remove_stopwords docs = docs.apply(remove_stopwords) print(type(docs)) docs docs_words = [doc.split(' ') for doc in docs] docs_words[:4] len(docs_words) embedding_dim = 50 model = word2vec.Word2Vec(sentences=docs_words, vector_size=embedding_dim, min_count=50, window=2, sg=1) vocab = model.wv.index_to_key df_embedding_matrix = pd.DataFrame(model.wv[vocab], index=vocab) df_embedding_matrix.head() df_embedding_matrix.shape ###Output _____no_output_____ ###Markdown Pre-Trained Word2Vec Models
Let's use the GoogleNews pre-trained model. 1. A pre-trained model is nothing but a model which has already been trained on a large number of samples, so the word embeddings are readily available for use. 2. It is similar to transfer learning in Deep Learning. 3. Google has used a vector size of 300. ###Code zf.filelist # Used to load the pre-trained model from gensim.models import keyedvectors embedding_file = 'GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin' embeddings = keyedvectors.load_word2vec_format(zf.open(embedding_file), binary=True) print("Total no. of words or Vocabulary size = ",len(embeddings)) print("Vector size:", len(embeddings['market'])) embeddings.most_similar('good', topn=5) # computer_programmer + (woman - man) embeddings.most_similar(positive=['computer_programmer', 'woman'], negative=['man'], topn=5) ###Output _____no_output_____ ###Markdown Text classification using pre-trained word embeddings ###Code data = pd.read_csv('/kaggle/input/quora-insincere-questions-classification/train.csv').sample(10000) data.head(3) docs = data['question_text'].str.lower().str.replace('[^a-z\s]', '') docs = docs.apply(remove_stopwords) docs from sklearn.model_selection import train_test_split trainx, testx, trainy, testy = train_test_split(docs, data['target'], test_size=0.2, random_state=10) ###Output _____no_output_____ ###Markdown * We have to convert tokens into id's * When converting, there can be a large sentence/document in the corpus; the dataset can be something like this: [[This movie is really good, the plot is perfect], [This movie is superb]] * So the remaining length of the document should be padded with 0's **(I have used post padding)** * There is another way as well: you can cap the maximum number of words in a document at, say, 50 and drop the remaining words. * All these scenarios can be checked for better model performance ###Code from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences tokenizer = Tokenizer() # Here the tokenizer learns how many words are in the vocabulary tokenizer.fit_on_texts(trainx) vocab = tokenizer.word_index print("Total no. 
of unique tokens in document: ", len(vocab)) # Here we convert the tokens in documents into id's trainx_seq = tokenizer.texts_to_sequences(trainx) testx_seq = tokenizer.texts_to_sequences(testx) trainx_seq[1] doc_sizes = [] for doc in trainx_seq: size = len(doc) doc_sizes.append(size) pd.Series(doc_sizes).plot.box(); ###Output _____no_output_____ ###Markdown From the above box plot we can take the maximum document length to be 11 ###Code max_doc_length = 11 trainx_pad = pad_sequences(trainx_seq, maxlen=max_doc_length, padding='post') testx_pad = pad_sequences(testx_seq, maxlen=max_doc_length, padding='post') trainx_pad[:5] trainx tokenizer.word_index['network'] ###Output _____no_output_____ ###Markdown Now let's create the word embedding matrix and also see which words are not available in the pre-trained embeddings ###Code gnews_embedding_dim = 300 # In keras tokenizer id's start from 1, there is no 0. vocab_len = len(vocab)+1 words_not_available = [] word_embedding_matrix = np.zeros((vocab_len, gnews_embedding_dim)) for word, wid in tokenizer.word_index.items(): if word in embeddings: word_embedding_matrix[wid] = embeddings[word] else: words_not_available.append(word) print("Percentage of unavailable word embeddings: ",len(words_not_available)/len(vocab)*100) ###Output Percentage of unavailable word embeddings: 15.972596412901238 ###Markdown How to handle these unavailable words? First try other pre-trained models like fastText or GloVe and check which gives you better accuracy, f1_score etc. If there is not much improvement then the methods below can be used: 1. Identify synonyms of the unavailable words and check if they have word embeddings. 2. Perform lemmatization so that you get the root form of the word and check if it has a word embedding (a small sketch of this option follows below). 3. There will be some words like shouldnt, couldnt (since apostrophes were stripped); we can expand them to should not, could not and see if those have word embeddings. 
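###Markdown The sketch below illustrates option 2: it falls back to the lemmatized form of a word when the word itself is missing from the pre-trained vectors. It reuses the `embeddings`, `tokenizer`, `word_embedding_matrix` and `words_not_available` objects created above; the use of NLTK's WordNet lemmatizer is my own assumption (it needs the `wordnet` corpus downloaded), so treat this as an illustrative sketch rather than part of the original workflow. ###Code # Illustrative sketch (assumption): back-fill missing embedding rows via lemmatization
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
still_missing = []
for word in words_not_available:
    lemma = lemmatizer.lemmatize(word)   # e.g. 'cars' -> 'car'
    if lemma in embeddings:
        word_embedding_matrix[tokenizer.word_index[word]] = embeddings[lemma]
    else:
        still_missing.append(word)
print("Still unavailable after lemmatization:", len(still_missing)) ###Output _____no_output_____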
###Code from keras.models import Sequential from keras import layers model = Sequential() model.add(layers.Embedding(vocab_len, gnews_embedding_dim, weights=[word_embedding_matrix], input_length=max_doc_length, trainable=False)) model.add(layers.Dense(10, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.summary() ###Output Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding (Embedding) (None, 11, 300) 3897600 _________________________________________________________________ dense (Dense) (None, 11, 10) 3010 _________________________________________________________________ dense_1 (Dense) (None, 11, 1) 11 ================================================================= Total params: 3,900,621 Trainable params: 3,021 Non-trainable params: 3,897,600 _________________________________________________________________ ###Markdown Adding Flatten() Layer ###Code model = Sequential() model.add(layers.Embedding(vocab_len, gnews_embedding_dim, weights=[word_embedding_matrix], input_length=max_doc_length, trainable=False)) model.add(layers.Flatten()) model.add(layers.Dense(10, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) model.summary() model.compile(optimizer='adam', loss='binary_crossentropy', metrics='accuracy') model.fit(trainx_pad, trainy, epochs=10, validation_data=(testx_pad, testy), verbose=1) loss = model.evaluate(testx_pad, testy) loss ###Output 63/63 [==============================] - 0s 2ms/step - loss: 0.2689 - accuracy: 0.9275 ###Markdown Model Prediction ###Code test_data = pd.read_csv('/kaggle/input/quora-insincere-questions-classification/test.csv') test_docs = test_data['question_text'].str.lower().str.replace('[^a-z\s]','') testx_seq = tokenizer.texts_to_sequences(test_docs) testx_pad = pad_sequences(testx_seq, maxlen=max_doc_length, padding='post') test_data.shape testy_pred = model.predict(testx_pad) testy_pred test_data.columns testy_classes = [1 if val>0.5 else 0 for val in testy_pred] pred_result_df = pd.DataFrame({'qid':test_data['qid'], 'predicted_class':testy_classes}) pred_result_df ###Output _____no_output_____
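###Markdown The insincere-questions data is heavily imbalanced, so plain accuracy can look strong even for a weak model; a quick F1 check on the held-out split is often more informative. The sketch below is illustrative rather than part of the original notebook, and it re-pads `testx` because the `testx_pad` variable was reused above for the unlabelled competition test set. ###Code # Illustrative sketch: F1 score on the held-out validation split
from sklearn.metrics import f1_score

valx_pad = pad_sequences(tokenizer.texts_to_sequences(testx), maxlen=max_doc_length, padding='post')
valy_pred = (model.predict(valx_pad) > 0.5).astype(int).ravel()
print("F1 on validation split:", f1_score(testy, valy_pred)) ###Output _____no_output_____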
solutions/06-custom-interactivity-with-solutions.ipynb
###Markdown 06. Custom Interactivity In the [exploring with containers](./03-exploration-with-containers.ipynb) section, the [``DynamicMap``](http://holoviews.org/reference/containers/bokeh/DynamicMap.html) container was introduced. In that section, the arguments to the callable returning elements were supplied by HoloViews sliders. In this section, we will generalize the ways in which you can generate values to update a ``DynamicMap``. ###Code import numpy as np import pandas as pd import holoviews as hv hv.extension('bokeh', 'matplotlib') %opts Ellipse [xaxis=None yaxis=None] (color='red' line_width=2) %opts Box [xaxis=None yaxis=None] (color='blue' line_width=2) ###Output _____no_output_____ ###Markdown A simple ``DynamicMap``Let us now create a simple [``DynamicMap``](http://holoviews.org/reference/containers/bokeh/DynamicMap.html) using three *annotation* elements, namely [``Box``](http://holoviews.org/reference/elements/bokeh/Ellipse.html), [``Text``](http://holoviews.org/reference/elements/bokeh/Ellipse.html), and [``Ellipse``](http://holoviews.org/reference/elements/bokeh/Ellipse.html): ###Code def annotations(angle): radians = (angle / 180.) * np.pi return (hv.Box(0,0,4, orientation=np.pi/4) * hv.Ellipse(0,0,(2,4), orientation=radians) * hv.Text(0,0,'{0}º'.format(float(angle)))) hv.DynamicMap(annotations, kdims=['angle']).redim.range(angle=(0, 360)).redim.label(angle='angle (º)') ###Output _____no_output_____ ###Markdown This example uses the concepts introduced in the [exploring with containers](./03-exploration-with-containers.ipynb) section. As before, the argument ``angle`` is supplied by the position of the 'angle' slider. Introducing StreamsHoloViews offers a way of supplying the ``angle`` value to our annotation function through means other than sliders, namely via the *streams* system which you can learn about in the [user guide](http://holoviews.org/user_guide/Responding_to_Events.html). All stream classes are found in the ``streams`` submodule and are subclasses of ``Stream``. You can use ``Stream`` directly to make custom stream classes via the ``define`` classmethod: ###Code from holoviews import streams from holoviews.streams import Stream Angle = Stream.define('Angle', angle=0) ###Output _____no_output_____ ###Markdown Here ``Angle`` is capitalized as it is a sub*class* of ``Stream`` with a numeric *angle* parameter, which has a default value of zero. You can verify this using ``hv.help``: ###Code hv.help(Angle) ###Output _____no_output_____ ###Markdown Now we can declare a ``DynamicMap`` where instead of specifying ``kdims``, we instantiate ``Angle`` with an ``angle`` of 45º and pass it to the ``streams`` parameter of the [``DynamicMap``](http://holoviews.org/reference/containers/bokeh/DynamicMap.html): ###Code %%opts Box (color='green') dmap=hv.DynamicMap(annotations, streams=[Angle(angle=45)]) dmap ###Output _____no_output_____ ###Markdown As expected, we see our ellipse with an angle of 45º as specified via the ``angle`` parameter of our ``Angle`` instance. 
In itself, this wouldn't be very useful but given that we have a handle on our [``DynamicMap``](http://holoviews.org/reference/containers/bokeh/DynamicMap.html) ``dmap``, we can now use the ``event`` method to update the ``angle`` parameter value and update the plot: ###Code dmap.event(angle=90) ###Output _____no_output_____ ###Markdown *When running this cell, the visualization above will jump to the 90º position!* If you have already run the cell, just change the value above and re-run, and you'll see the plot above update.This simple example shows how you can use the ``event`` method to update a visualization with any value you can generate in Python. ###Code # Exercise: Regenerate the DynamicMap, initializing the angle to 15 degrees dmap = hv.DynamicMap(annotations, streams=[Angle(angle=15)]) dmap # Exercise: Use dmap.event to set the angle shown to 145 degrees. dmap.event(angle=145) # Exercise: Do not specify an initial angle so that the default value of 0 degrees is used. hv.DynamicMap(annotations, streams=[Angle()]) %%opts Ellipse (color='green') %%output backend='matplotlib' # Exercise: Use the cell magic %%output backend='matplotlib' to try the above with matplotlib dmap = hv.DynamicMap(annotations, streams=[Angle(angle=15)]) dmap dmap.event(angle=145) # Exercise: Declare a DynamicMap using annotations2 and AngleAndSize # Then use the event method to set the size to 1.5 and the angle to 30 degrees def annotations2(angle, size): radians = (angle / 180) * np.pi return (hv.Box(0,0,4, orientation=np.pi/4) * hv.Ellipse(0,0,(size,size*2), orientation=radians) * hv.Text(0,0,'{0}º'.format(float(angle)))) AngleAndSize = Stream.define('AngleAndSize', angle=0., size=1.) exercise_dmap = hv.DynamicMap(annotations2, streams=[AngleAndSize(angle=30, size=1.5)]) exercise_dmap ###Output _____no_output_____ ###Markdown Periodic updatesUsing streams you can animate your visualizations by driving them with events from Python. Of course, you could use loops to call the ``event`` method, but this approach can queue up events much faster than they can be visualized. Instead of inserting sleeps into your loops to avoid that problem, it is recommended you use the ``periodic`` method, which lets you specify a time period between updates (in seconds): ###Code %%opts Ellipse (color='orange') dmap2=hv.DynamicMap(annotations, streams=[Angle(angle=0)]) dmap2 dmap2.periodic(0.01, count=180, timeout=8, param_fn=lambda i: {'angle':i}) ###Output _____no_output_____ ###Markdown If you re-execute the above cell, you should see the preceding plot update continuously until the count value is reached. ###Code # Exercise: Experiment with different period values. How fast can things update? dmap2.periodic(0.00001, count=180, timeout=8, param_fn=lambda i: {'angle':i}) # Exercise: Increase count so that the oval completes a full rotation. dmap2.periodic(0.01, count=360, timeout=15, param_fn=lambda i: {'angle':i}) # Exercise: Lower the timeout so the oval completes less than a quarter turn before stopping # Note: The appropriate timeout will vary between different machines dmap2.periodic(0.01, count=360, timeout=3, param_fn=lambda i: {'angle':i}) ###Output _____no_output_____ ###Markdown Linked streamsOften, you will want to tie streams to specific user actions in the live JavaScript interface. 
There are no limitations on how you can generate updated stream parameters values in Python, and so you could manually support updating streams from JavaScript as long as it can communicate with Python to trigger an appropriate stream update. But as Python programmers, we would rather avoid writing JavaScript directly, so HoloViews supports the concept of *linked stream* classes where possible.Currently, linked streams are only supported by the Bokeh plotting extension, because only Bokeh executes JavaScript in the notebook and has a suitable event system necessary to enable linked streams (matplotlib displays output as static PNG or SVG in the browser). Here is a simple linked stream example: ###Code %%opts HLine [xaxis=None yaxis=None] pointer = streams.PointerXY(x=0, y=0) def crosshair(x, y): return hv.Ellipse(0,0,1) * hv.HLine(y) * hv.VLine(x) hv.DynamicMap(crosshair, streams=[pointer]) ###Output _____no_output_____ ###Markdown When hovering in the plot above when backed by a live Python process, the crosshair will track the cursor. The way it works is very simple: the ``crosshair`` function puts a crosshair at whatever x,y location it is given, the ``pointer`` object supplies a stream of x,y values based on the mouse pointer location, and the ``DynamicMap`` object connects the pointer stream's x,y values to the ``crosshair`` function to generate the resulting plots. ###Code %%opts HLine [xaxis=None yaxis=None] # Exercise: Set the defaults so that the crosshair initializes at x=0.25, y=0.25 pointer = streams.PointerXY(x=0.25, y=0.25) def crosshair(x, y): return hv.Ellipse(0,0,1) * hv.HLine(y) * hv.VLine(x) hv.DynamicMap(crosshair, streams=[pointer]) %%opts Points [xaxis=None yaxis=None] (size=10 color='red') # Exercise: Copy the above example and adapt it to make a red point of size 10 follow your cursor (using hv.Points) # Exercise: Set the defaults so that the crosshair initializes at x=0.25, y=0.25 pointer = streams.PointerXY(x=0.25, y=0.25) def crosshair(x, y): return hv.Points((x,y)) hv.DynamicMap(crosshair, streams=[pointer]) ###Output _____no_output_____ ###Markdown You can view other similar examples of custom interactivity in our [reference gallery](http://holoviews.org/reference/index.html) and learn more about linked streams in the [user guide](http://holoviews.org/user_guide/Custom_Interactivity.html). Here is a quick summary of some of the more useful linked stream classes HoloViews currently offers and the parameters they supply:* ``PointerX/PointerY/PointerYX``: The x,y or (x,y) position of the cursor.* ``SingleTap/DoubleTap/Tap``: Position of single, double or all tap events.* ``BoundsX/BoundsY/BoundsXY``: The x,y or x and y extents selected with the Bokeh box select tool.* ``RangeX/RangeY/RangeXY``: The x,y or x and y range of the currently displayed axes* ``Selection1D``: The selected glyphs as a 1D selection.Any of these values can easily be tied to any visible element of your visualization. 
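###Markdown As a small illustration of the list above (not one of the original exercises), the sketch below uses the ``SingleTap`` stream: each tap in the plot moves a marker to the tapped location. It assumes the same Bokeh backend already loaded in this notebook. ###Code %%opts Points (size=10 color='green')
# Illustrative sketch: mark the most recent tap location
tap = streams.SingleTap(x=0, y=0)

def mark_tap(x, y):
    return hv.Ellipse(0, 0, 1) * hv.Points([(x, y)])

hv.DynamicMap(mark_tap, streams=[tap]) ###Output _____no_output_____ ###Markdown 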
A more advanced exampleLet's now build a more advanced example using the eclipse dataset we explored earlier, where the stream supplies values when a particular Bokeh tool ("Box Select") is active: ###Code %%opts Scatter[width=900 height=400 tools=['xbox_select'] ] (cmap='RdBu' line_color='black' size=5 line_width=0.5) %%opts Scatter [color_index='latitude' colorbar=True colorbar_position='bottom' colorbar_opts={'title': 'Latitude'}] eclipses = pd.read_csv('../data/eclipses_21C.csv', parse_dates=['date']) magnitudes = hv.Scatter(eclipses, kdims=['hour_local'], vdims=['magnitude','latitude']) def selection_example(index): text = '{0} eclipses'.format(len(index)) if index else '' return magnitudes * hv.Text(2,1, text) dmap3 = hv.DynamicMap(selection_example, streams=[streams.Selection1D()]) dmap3.redim.label(magnitude='Eclipse Magnitude', hour_local='Hour (local time)') ###Output _____no_output_____
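###Markdown ``Selection1D`` only reports integer indices, so any statistic about the selection has to be looked up in the underlying data. Below is a variant of the example above (my own illustrative sketch, not from the original notebook) that also reports the mean magnitude of the selected eclipses. ###Code # Illustrative sketch: summarize the current box selection
def selection_summary(index):
    if index:
        mean_mag = eclipses.iloc[index]['magnitude'].mean()
        text = '{0} eclipses, mean magnitude {1:.2f}'.format(len(index), mean_mag)
    else:
        text = ''
    return magnitudes * hv.Text(2, 1, text)

hv.DynamicMap(selection_summary, streams=[streams.Selection1D()]) ###Output _____no_output_____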
notebooks/tf2/time-series-encoder-decoder.ipynb
###Markdown Forecasting Time Series data: Encoder / Decoder Approach* Nice tutorial: https://www.tensorflow.org/beta/tutorials/text/time_series ###Code import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt # plt.xkcd() # plt.style.use('ggplot') %matplotlib inline import matplotlib as mpl mpl.rcParams['figure.figsize'] = (20, 8) # for local # url = 'opsd_germany_daily.csv' # for colab url = 'https://raw.githubusercontent.com/jenfly/opsd/master/opsd_germany_daily.csv' time_series_df = pd.read_csv(url, sep=',', index_col=0, # you can use the date as the index for pandas parse_dates=[0]) # where is the time stamp? cols_plot = ['Consumption', 'Solar', 'Wind'] axes = time_series_df[cols_plot].plot(marker='.', alpha=0.5, linestyle='None', subplots=True) for ax in axes: ax.set_ylabel('Daily Totals (GWh)') ###Output _____no_output_____ ###Markdown Encoder / Decoder* Encode input using RNNs* Store into latent representation* Decode using RNNs again* Use one neuron at the end to combine to a sequence of single values ###Code days_2016_2017 = time_series_df['2016': '2017']['Consumption'].to_numpy() days_2016_2017.shape plt.plot(days_2016_2017); # derived from here: https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/ # split a univariate sequence into samples def split_sequence(sequence, n_steps_in, n_steps_out): X, y = list(), list() for i in range(len(sequence)): # find the end of this pattern end_ix = i + n_steps_in out_end_ix = end_ix + n_steps_out # check if we are beyond the sequence if out_end_ix > len(sequence): break # gather input and output parts of the pattern seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] X.append(seq_x) y.append(seq_y) return np.array(X), np.array(y) #@title Prediction from n days to m days n_steps_in = 30 #@param {type:"slider", min:1, max:100, step:1} n_steps_out = 30 #@param {type:"slider", min:1, max:100, step:1} X, Y = split_sequence(days_2016_2017, n_steps_in, n_steps_out) X.shape, Y.shape # reshape from [samples, timesteps] into [samples, timesteps, features] n_features = 1 X = X.reshape((X.shape[0], X.shape[1], n_features)) Y = Y.reshape((Y.shape[0], Y.shape[1], n_features)) X.shape, Y.shape # Gives us a well defined version of tensorflow try: # %tensorflow_version only exists in Colab. 
%tensorflow_version 2.x except Exception: pass import tensorflow as tf print(tf.__version__) import tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Dense, LSTM, GRU, SimpleRNN, Bidirectional from tensorflow.keras.models import Sequential, Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.layers import RepeatVector, Input ENCODER_SIZE = 128 DECODER_SIZE = 128 ENCODING_DIM = 256 model = Sequential() # ENCODER model.add(Input(shape=(n_steps_in, n_features), name='Input')) # model.add(LSTM(units=ENCODER_SIZE, activation='relu', name="encode1r", return_sequences=True)) model.add(LSTM(units=ENCODER_SIZE, activation='relu', name="encoder2")) # LATENT SPACE model.add(Dense(units=ENCODING_DIM, activation='relu', name="latent_space1")) model.add(Dense(units=ENCODING_DIM, activation='relu', name="latent_space2")) # DECODER model.add(RepeatVector(n_steps_out)) # return_sequences=True tells it to keep all temporal outputs, not only the final one (we need all of them for our predicted sequence) model.add(LSTM(units=DECODER_SIZE, activation='relu', return_sequences=True, name="decoder")) model.add(Dense(units=1)) optimizer = Adam(lr=1e-3) model.compile(optimizer=optimizer, loss='mse') # model.compile(optimizer='adam', loss='mae') model.summary() %%time EPOCHS = 500 BATCH_SIZE = 50 history = model.fit(X, Y, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=0, validation_split=0.2) plt.yscale('log') plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.legend(['loss', 'validation loss']) final_week_2017 = X[-1] # final_week_2017 first_week_2018 = model.predict(final_week_2017.reshape(1, -1, 1)) first_week_2018 ###Output _____no_output_____ ###Markdown Results are not bad, but funky at their ends ###Code plt.plot(days_2016_2017) plt.plot(first_week_2018[0], color='r'); known_x = np.arange(len(days_2016_2017)) pred_x = np.arange(len(days_2016_2017), len(days_2016_2017) + n_steps_out) plt.plot(known_x, days_2016_2017) plt.plot(pred_x, first_week_2018[0], color='r'); ###Output _____no_output_____
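###Markdown To sanity-check the windowing performed by ``split_sequence`` above, it can help to run it on a tiny series first. The cell below is an illustrative sketch (not part of the original notebook) using 3 input steps and 2 output steps. ###Code # Illustrative sketch: split_sequence on a toy series
toy = np.arange(10)                                  # [0, 1, ..., 9]
toy_X, toy_Y = split_sequence(toy, n_steps_in=3, n_steps_out=2)
print(toy_X.shape, toy_Y.shape)                      # (6, 3) (6, 2)
print(toy_X[0], toy_Y[0])                            # [0 1 2] [3 4] ###Output _____no_output_____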
CNN_for_MNIST.ipynb
###Markdown Load MNIST Dataset ###Code # standard PyTorch / torchvision imports used throughout this notebook
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.autograd import Variable
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))]) train_set = dset.MNIST(root='./data', train=True, transform=trans, download=True) test_set = dset.MNIST(root='./data', train=False, transform=trans) batch_size = 128 train_loader = torch.utils.data.DataLoader( dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( dataset=test_set, batch_size=batch_size, shuffle=False) ###Output _____no_output_____ ###Markdown Build CNN Model![](images/CNN.png) ###Code class LeNet(nn.Module): def __init__(self,n_class=10): super(LeNet, self).__init__() self.conv1 = nn.Conv2d( in_channels = 1, out_channels = 20, kernel_size = 5 ) self.conv2 = nn.Conv2d( in_channels = 20, out_channels = 50, kernel_size = 5 ) self.fc1 = nn.Linear(4*4*50, 500) self.fc2 = nn.Linear(500, n_class) def forward(self, x): x = F.relu(self.conv1(x)) # x:[batch_size,1,28,28] => x:[batch_size,20, 24, 24] x = F.max_pool2d(x, 2, 2) # x:[batch_size,20,24,24] => x:[batch_size,20, 12, 12] x = F.relu(self.conv2(x)) # x:[batch_size,20,12,12] => x:[batch_size,50, 8, 8] x = F.max_pool2d(x, 2, 2) # x:[batch_size,50,8,8] => x:[batch_size,50, 4, 4] x = x.view(-1, 4*4*50) # x:[batch_size,50,4,4] => x:[batch_size,50*4*4] x = F.relu(self.fc1(x)) # x:[batch_size,50*4*4] => x:[batch_size,500] x = self.fc2(x) # x:[batch_size,500] => x:[batch_size,10] return x ###Output _____no_output_____ ###Markdown Training ###Code model = LeNet() criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=1e-3) for epoch in xrange(10): # training ave_loss = 0 for batch_idx, (x, target) in enumerate(train_loader): optimizer.zero_grad() x, target = Variable(x), Variable(target) logits = model(x) loss = criterion(logits, target) ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1 loss.backward() optimizer.step() if (batch_idx+1) % 100 == 0 or (batch_idx+1) == len(train_loader): print '==>>> epoch: {}, batch index: {}, train loss: {:.6f}'.format( epoch, batch_idx+1, ave_loss) # testing correct_cnt, ave_loss = 0, 0 total_cnt = 0 for batch_idx, (x, target) in enumerate(test_loader): x, target = Variable(x, volatile=True), Variable(target, volatile=True) logits = model(x) loss = criterion(logits, target) _, pred_label = torch.max(logits.data, 1) total_cnt += x.data.size()[0] correct_cnt += (pred_label == target.data).sum() # smooth average ave_loss = ave_loss * 0.9 + loss.data[0] * 0.1 if(batch_idx+1) % 100 == 0 or (batch_idx+1) == len(test_loader): print '==>>> epoch: {}, batch index: {}, test loss: {:.6f}, acc: {:.3f}'.format( epoch, batch_idx+1, ave_loss, correct_cnt * 1.0 / total_cnt) ###Output ==>>> epoch: 0, batch index: 100, train loss: 0.203049 ==>>> epoch: 0, batch index: 200, train loss: 0.092486 ==>>> epoch: 0, batch index: 300, train loss: 0.076701 ==>>> epoch: 0, batch index: 400, train loss: 0.078407 ==>>> epoch: 0, batch index: 469, train loss: 0.062016 ==>>> epoch: 0, batch index: 79, test loss: 0.028026, acc: 0.986 ==>>> epoch: 1, batch index: 100, train loss: 0.043321 ==>>> epoch: 1, batch index: 200, train loss: 0.057436 ==>>> epoch: 1, batch index: 300, train loss: 0.051468 ==>>> epoch: 1, batch index: 400, train loss: 0.047724 ==>>> epoch: 1, batch index: 469, train loss: 0.034381 ==>>> epoch: 1, batch index: 79, test loss: 0.024080, acc: 0.989 ==>>> epoch: 2, batch index: 100, train loss: 0.035867 ==>>> epoch: 2, batch index: 200, train loss: 0.035561 ==>>> epoch: 2, batch index: 300, train loss: 0.022927 ==>>> 
epoch: 2, batch index: 400, train loss: 0.037693 ==>>> epoch: 2, batch index: 469, train loss: 0.046425 ==>>> epoch: 2, batch index: 79, test loss: 0.019010, acc: 0.991 ==>>> epoch: 3, batch index: 100, train loss: 0.024797 ==>>> epoch: 3, batch index: 200, train loss: 0.028838 ==>>> epoch: 3, batch index: 300, train loss: 0.024071 ==>>> epoch: 3, batch index: 400, train loss: 0.024293 ==>>> epoch: 3, batch index: 469, train loss: 0.026609 ==>>> epoch: 3, batch index: 79, test loss: 0.016128, acc: 0.991 ==>>> epoch: 4, batch index: 100, train loss: 0.016640 ==>>> epoch: 4, batch index: 200, train loss: 0.014833 ==>>> epoch: 4, batch index: 300, train loss: 0.024794 ==>>> epoch: 4, batch index: 400, train loss: 0.014885 ==>>> epoch: 4, batch index: 469, train loss: 0.012730 ==>>> epoch: 4, batch index: 79, test loss: 0.018735, acc: 0.992 ==>>> epoch: 5, batch index: 100, train loss: 0.010845 ==>>> epoch: 5, batch index: 200, train loss: 0.010690 ==>>> epoch: 5, batch index: 300, train loss: 0.016850 ==>>> epoch: 5, batch index: 400, train loss: 0.013051 ==>>> epoch: 5, batch index: 469, train loss: 0.013661 ==>>> epoch: 5, batch index: 79, test loss: 0.014239, acc: 0.993 ==>>> epoch: 6, batch index: 100, train loss: 0.010584 ==>>> epoch: 6, batch index: 200, train loss: 0.011003 ==>>> epoch: 6, batch index: 300, train loss: 0.011142 ==>>> epoch: 6, batch index: 400, train loss: 0.015583 ==>>> epoch: 6, batch index: 469, train loss: 0.013250 ==>>> epoch: 6, batch index: 79, test loss: 0.023883, acc: 0.991 ==>>> epoch: 7, batch index: 100, train loss: 0.010461 ==>>> epoch: 7, batch index: 200, train loss: 0.005613 ==>>> epoch: 7, batch index: 300, train loss: 0.014455 ==>>> epoch: 7, batch index: 400, train loss: 0.011743 ==>>> epoch: 7, batch index: 469, train loss: 0.009485 ==>>> epoch: 7, batch index: 79, test loss: 0.025568, acc: 0.991 ==>>> epoch: 8, batch index: 100, train loss: 0.005729 ==>>> epoch: 8, batch index: 200, train loss: 0.005440 ==>>> epoch: 8, batch index: 300, train loss: 0.013062 ==>>> epoch: 8, batch index: 400, train loss: 0.006019 ==>>> epoch: 8, batch index: 469, train loss: 0.011531 ==>>> epoch: 8, batch index: 79, test loss: 0.016222, acc: 0.993 ==>>> epoch: 9, batch index: 100, train loss: 0.004132 ==>>> epoch: 9, batch index: 200, train loss: 0.002503 ==>>> epoch: 9, batch index: 300, train loss: 0.007654 ==>>> epoch: 9, batch index: 400, train loss: 0.009748 ==>>> epoch: 9, batch index: 469, train loss: 0.009823 ==>>> epoch: 9, batch index: 79, test loss: 0.020044, acc: 0.992
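###Markdown Once training has finished, the same model can classify a single image. The cell below is an illustrative sketch only, written in the same Python 2 / old-PyTorch style (``Variable``, ``volatile=True``) as the rest of this notebook. ###Code # Illustrative sketch: predict the label of one test image
x0, y0 = test_set[0]                              # first test image and its label
x0 = Variable(x0.unsqueeze(0), volatile=True)     # add a batch dimension -> [1, 1, 28, 28]
logits0 = model(x0)
_, pred0 = torch.max(logits0.data, 1)
print 'predicted: {}, actual: {}'.format(pred0[0], y0) ###Output _____no_output_____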
Feed Forward Neural Network/.ipynb_checkpoints/FeedForward_NeuralNetwork-checkpoint.ipynb
###Markdown Loading the Imported dataset ###Code from sklearn.datasets import load_breast_cancer from sklearn.preprocessing import StandardScaler cancer_data = load_breast_cancer() m, n = cancer_data.data.shape scaler = StandardScaler() # Training Dataset train_data = cancer_data.data[0:400] scaled_training_cancer_data = scaler.fit_transform(train_data) training_cancer_data = cancer_data.target[0:400] # Validation Dataset validation_data = cancer_data.data[400:500] scaled_validation_cancer_data = scaler.fit_transform(validation_data) validation_cancer_data = cancer_data.target[400:500] # Testing Dataset testing_data = cancer_data.data[500:569] scaled_testing_cancer_data = scaler.fit_transform(testing_data) testing_cancer_data = cancer_data.target[500:569] ###Output _____no_output_____ ###Markdown Build the Layers of Neural Network in Tensorflow ###Code import tensorflow as tf from tensorflow.contrib.layers import fully_connected, dropout # No. of neurons Initialization n_inputs = n n_hidden1 = 46 n_hidden2 = 30 n_outputs = 2 # Placeholders to hold the data X = tf.placeholder(tf.float32, shape = (None, n_inputs), name = "X") y = tf.placeholder(tf.int32, shape = (None), name = "y") # Weights Initializer he_init = tf.contrib.layers.variance_scaling_initializer() is_training = tf.placeholder(dtype = tf.bool, shape = (), name = "is_training") # Adding a dropout layer keep_prob = 0.5 with tf.name_scope("dnn"): X_drop = dropout(X, keep_prob, is_training= is_training) hidden1 = fully_connected(X_drop, n_hidden1, weights_initializer = he_init, activation_fn = tf.nn.elu, scope = "hidden1") hidden1_drop = dropout(hidden1, keep_prob, is_training= is_training) hidden2 = fully_connected(hidden1_drop, n_hidden2, weights_initializer = he_init, activation_fn = tf.nn.elu, scope = "hidden2") hidden2_drop = dropout(hidden2, keep_prob, is_training= is_training) logits = fully_connected(hidden2_drop, n_hidden2, weights_initializer = he_init, activation_fn = tf.nn.elu, scope = "outputs") ###Output _____no_output_____ ###Markdown Optimizing the Linear Regression Function ###Code from datetime import datetime with tf.name_scope("loss"): xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = y, logits = logits) loss = tf.reduce_mean(xentropy, name = "loss") lr = 0.01 with tf.name_scope("train"): optimizer = tf.train.AdamOptimizer(lr) training_op = optimizer.minimize(loss) with tf.name_scope("eval"): correct = tf.nn.in_top_k(logits, y, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) init = tf.global_variables_initializer() saver = tf.train.Saver() # Creating directory name now = datetime.utcnow().strftime("%Y%m%d%H%M%S") root_dir = "tf_logs/" logdir = "{}/run{}".format(root_dir, now) # Information to create logs mse_summary = tf.summary.scalar("LOSS", loss) file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) ###Output _____no_output_____ ###Markdown Applying built Neural Network for Predicting whether people in Wisconsin State having Breast Cancer is Benign or not ###Code n_epochs = 50 with tf.Session() as sess: init.run() for epoch in range(n_epochs): sess.run(training_op, feed_dict = {X : scaled_training_cancer_data, y:training_cancer_data, is_training : True}) acc_train = accuracy.eval(feed_dict={X : scaled_training_cancer_data, y:training_cancer_data, is_training : False}) acc_validation = accuracy.eval(feed_dict={X : scaled_validation_cancer_data, y:validation_cancer_data, is_training : False}) summary_str = mse_summary.eval(feed_dict = {X : scaled_training_cancer_data, 
y:training_cancer_data, is_training : False}) file_writer.add_summary(summary_str, epoch) print(epoch, "Train accuracy", acc_train, "Validation accuracy", acc_validation) save_path = saver.save(sess, "./my_model_find.ckpt") with tf.Session() as sess: saver.restore(sess, "./my_model_find.ckpt") acc_testing = accuracy.eval(feed_dict={X : scaled_testing_cancer_data, y:testing_cancer_data, is_training : False}) print("Testing accuracy", acc_testing) ###Output INFO:tensorflow:Restoring parameters from ./my_model_find.ckpt Testing accuracy 0.9130435
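###Markdown One note on the data preparation above: ``StandardScaler`` is re-fitted separately on the training, validation and testing splits, so each split is normalised with different statistics. The more conventional approach, sketched below as a suggestion rather than a change to the original notebook, fits the scaler on the training split only and reuses it for the other splits. ###Code # Illustrative sketch: fit the scaler on the training split only, then reuse it
scaler = StandardScaler()
scaled_training_cancer_data = scaler.fit_transform(cancer_data.data[0:400])
scaled_validation_cancer_data = scaler.transform(cancer_data.data[400:500])
scaled_testing_cancer_data = scaler.transform(cancer_data.data[500:569]) ###Output _____no_output_____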
NATHAKIT_KEAWTOOMLA_Pandas_III.ipynb
###Markdown Descriptive Statistic[Short Description](https://docs.google.com/presentation/d/1TfJ1BpWhyPgegSpCFntezZo5e6egiKr-aSVgSGu64ww/edit?usp=sharing)With the given data ###Code import pandas as pd data = { 'section 1':[13,8,18.5,8.5,12.5,12,8.5,7.5,6.5,13,14,8.5,8.5,15,11,12.5], 'section 2':[10,7,12,15,11,13.5,13,13,3,7.5,15,8,11.5,5,10,11], 'section 3':[16,14,16,15,15,19,15.5,17,12,15,17,15,16,17,14,13], 'section 4':[12,6.5,12.5,16.5,17,20,10,12,2,5,9,10,18,12.5,7,13] } frame = pd.DataFrame(data) frame ###Output _____no_output_____ ###Markdown to get all the description, use the function describe![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKoAAAAcCAYAAADx5STqAAAFZ0lEQVR4Ae2bTSh1XRTH9/P2lhT5HPlIJqQomUgmUgwxxAgpGSgDExIlpaQoA2RASAwUGSgKE4mBMhIGko+R74nh+/bbPeu073HvPcdzPzy39i7PuWfvtfb6n/9ae611Tj2/Pj8//1N2WAb+cgb++cvxWXiWAc2ADVQbCAnBgA3UhHCTBWkD1cZAQjBgAzUh3GRB2kC1MZAQDNhATQg3WZA2UG0MJAQDMQ/U4+NjlZycrP8qKyvV1dVVQhAjIMHf09OjPj8/ZepHr/AHj2NjYwE41tbWvswFCHzjJthezPH3U+Nfv4bX19fV9fW1amtrU3l5eb7UXl5eNHn7+/uqqqrKl44V+lkGOJirq6tqcXExAEh9fb32fUFBwY/40jNQPz4+1NzcnCotLVVJSUkB4L1unp6eVHp6uiouLvYStes+GSgqKlInJyc+pb8nJomlv79fZWZmBihzPzExoUZHR7U/3esBwjG48Sz9ZMOWlhZVVlb2bfPPz8/q7e3tix5llHLK6aWE0Rq42wKzZWCdewbXgYEBrY/O9va21jXLM4Q3NDQ4Lcd3Sxby0q7U1tYG4Bfssu4uwW7cbttubCZu9kLetC/PDQjhCtvufQWkKWPqeuFGf3d3V2fLUNWPQ1JSUqLlxF68rp6B2tTU5LvUC2ghGifv7e2p3Nxc7Xi3U1kvLCzU/R92NjY29BY48+LiQs9DMIcFXeYZk5OTqrW1VaEzPz+vVlZW1Pv7u7q7u9M6w8PDeh3dh4cHXcpMpwnOYFfkKH3oiW1TbmpqSuXk5Og18Dw+PjpBw/3MzIw6Pz93sDc3NzvqrNM6gZ29+ZuentbciFB7e7u6ubnRawsLCxoLcgwyHb+HhoZEPOA6MjLi8OnmLBxuNmHfo6MjVVNTE7Cn+4Z15ASTez1W956B+ieGcQ4PAll1dXWO0yHaHBAujoQAnI4eZaWjo8MRpXWghaCVYHR2dqry8nL9G6dnZWU5sgRramqqamxs1HPshczh4aEjE+oHtgnSYKUPHQnMrq4uvQWZjb1Nx1FBzs7OgpogY9HjCbZgQjxbb2+vXqqoqNAH0G9QmHxKu3V5eekLNzY47CaXwfCxjpxfTMH2+JO5mASqXyDm6aXcmNnFLGFkZAj3M2g3yLgEqJRnslQ0Bgfl9PTUqRDsb7YG2KSPwz5rtB8EtwwyJdmYtVCjurraWafULi0tfekXQ+ma89jgUDC8cIvM6+uruUXQ39nZ2XpekkZQoRhMer5MxcCm55a0DpRgyi/Ox9mSZTyVf2fc8fFxx+F+dPzKkKl2dnZCBo/5ssNzUOp5g+Y5aHMI1ngMd4b0wk0AZmRkeEKTAJWA9VSIksCPZtRwz0A2kMzDVwe/GRWH3N7eqq2trXDbOy8mZu+KPTKetAl8szQPSH5+vkpLS9NfQcJu/ntRMprIUso3NzedF0OZj8WVNgOsYPaDm2dHnooUbrCOnPgmnGw01zwD9eDgQA0ODqrZ2Vndm3DlE8X9/X00cQTsxTc7gk3Kd0pKiu9PXOiQweg1IVP+zIAMMOa6of9EFr2+vj7FSwg9L4M5MjW9tOzLVd7ARU/WCHJaATAxyLbLy8s6+EXGfOvXQiH+oarIlwxemmhnTNuoMS/70jdLVWEuHG50kaHtgLdw/SeH2GxPQsCN+vQv+19Ros5pwm7IYaBV4WUy2CcqKgxJisMrhy9eD+uZUeMFxNr5eQYIPoLU/BQoqAhiKkx3d3fcgxQMNqOKJ+zVYYBWhpc+83OitDfyOdERjtMPG6hxItqaiYwBW/oj489qx4kBG6hxItqaiYwBG6iR8We148SADdQ4EW3NRMaADdTI+LPacWLgf6OfumzzR2usAAAAAElFTkSuQmCC) ###Code frame.describe() ###Output _____no_output_____ ###Markdown We can get only some described statistic by calling the related 
methods![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJwAAAArCAYAAACNd+GAAAAFeklEQVR4Ae2dTyhtXxTHt19vosjfEUkmZCYTyUSKIYYyQkoGysCEASWlpCgDycALyUSRgaJkIjFQRmIiiZG/o2vm9dm/1mnf49xz78M959dv7133nXP2Xmuvtb/ru9fa5wy8nI+Pjw/lmkMgIgT+iciOM+MQ0Ag4wjkiRIrAr/f390gNOmN2I+AynN3xj3z1jnCRQ263QUc4u+Mf+eod4SKH3G6DjnB2xz/y1TvCRQ653QYd4eyOf+Srd4SLHHK7DTrC2R3/yFfvCBc55HYbdISzO/6Rr94RLnLI7TboCGd3/CNffVrCnZycqNzcXP1raGhQ19fXkTv5HYP4PzQ0pBKJxHem+TFd8APH6enppDk3Nzc/9SUJ/MVD0Fz08Yu7/Qpz4Pn5WYNweHioGhsbw0Td2H8EATbYxsaG+v37d5JHbW1tqqenR1VWVsYay1DCPT4+qsLCQlVTU5PkvHv4OgLV1dXq9PT06xOEaEqCGB0dVcXFxUmSPM/OzqqpqSkdT/94knAWH0JL6tPTk3p9ff1knvJEmWI3URoouf5ya5ZixnmmcR0bG9P66Ozu7mpds+wBXHt7u1fK/7YUIC/HgJaWliT/xXcZ95c2v99+237fTL+ZC3nTvqwbJwQrbPvnFSdNGVM3nd/o7+/v6+yVqhpB9traWi0n9qK+BhJOACNYBwcHqry8XAfQHxzGq6qq9Pmos7NTbW1taf8JyuXlpe4HKEoyuvTT5ubmVHd3t0JneXlZra+vq7e3N3V3d6d1JiYm9Di69/f3ukSY4IeBhBwlBT2xbcrPz8+rsrIyPYY/Dw8PXvB5XlxcVBcXF57vXV1dnjrjlCV8Z25+CwsLGhsR6u3tVTc3N3psZWVF+4IcjczD/fj4uIgnXScnJz08/ZiF+c0kzHt8fKyam5uT5vQ/MI6c+OQfz/ZzIOEAGYdYdGtrqxc8ADMbwElAWAjBQ4903dfX54lSkinNlGhaf3+/qqur0/cEr6SkxJOFdPn5+aqjo0P3MRcyR0dHnkyqG2xDtqCSgo4QbGBgQE9BpmFuMwBk9PPz80ATZBDOQOJbkBBrGx4e1kP19fV6I2UaXBNPOcZcXV1l5Dc22LQmlkH+MY5cpj4FzfGdvkDCZTqhuZtI4+ZuN0sDGRLgMmmUcTIgRJOyR9b4iQbhz87OvIzN/GbJxSbnHOwzRlmHpNLIXGRHxlK1pqYmb5wStrq6+uk8lUrX7McG5Kal81tkXl5ezCkC70tLS3W/bP5AoSx2hr40fNUuJZnSRlkjiARNdn0mc5IlZmZmvMBlopOpDJljb28vJQnMQz3roITyxsc6OD5AuiiaP2Ol8xsiFRUVpXVNiCbES6vwwwLfynBhvrA7JRMsLS1lnOEA9vb2Vu3s7IRN7x3AzbMd9shAUn755mUSvaKiQhUUFCj8yaRJhhFZSuT29rb3AiT92bhSvvEVnzPxm7UjT4UIa4wjJ7EJk83GWFYIxzcfSCNlMS8vL+NPK+iQUTiLAYr8TGKFAcH5DFn0RkZGFIdtzoQ0+sicnDVlXq7yxih6MgZZKbH4RCP7ra2taRKLjPmWqoVS/EOWlzdvXg44Jpi2UaNf5uVcKVmevjC/0UWGcg5uYeczNqNZ9lO4m7XunEQi4f7UQ9bgjXZiSM0RgJemoE8jZHy+w7EJZRNF66FSWclwUS/C2fsXAUgE2cxPUIINZCTjDw4OxkY2fHEZTiLyP7pyRODlxvyMJccG+YwV13Id4eJC3lK7rqRaGvi4lu0IFxfyltp1hLM08HEt2xEuLuQttesIZ2ng41q2I1xcyFtq1xHO0sDHtWxHuLiQt9SuI5ylgY9r2Y5wcSFvqV1HOEsDH9eyc9z/RBMX9Hba/QMN2aLxJF+awAAAAABJRU5ErkJggg==) ###Code frame.count() ###Output _____no_output_____ ###Markdown In addition we can find the described statistic for only some paramter ###Code frame.skew() frame.min() ###Output _____no_output_____ ###Markdown All descriptive funciton which can be called is 
provided![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAdAAAAF/CAYAAAD5F5g7AAAgAElEQVR4AeydCViVxfrAf4ddFhEVtxRzATN3IrxqhJCiECqmISlJWhbWtdRIs6tmuaWSpuY/b7dccLliSppLmmuQWohe0XABBRUFFQXZlOXA93++s8A5h8PilppznseHOfPNvO87v3ln5ps5n9+rkCRJQnwEAUFAEBAEBAFB4I4ImNxRaVFYEBAEBAFBQBAQBFQExAIqHEEQEAQEAUFAELgLAor4pzuJI9y7ACeqCAKCgCAgCDzZBMQO9Mnuf9F6QUAQEAQEgbskIBbQuwQnqgkCgoAgIAg82QTEAvpk979ovSAgCAgCgsBdEhAL6F2CE9UEAUFAEBAEnmwClSygL9HoUBwdz/6Xei5mwDM4bD5Ix5StNOptVz2x3tNpkxJHm69eqr5sWQmNzkPTsSrLe1ISL9Iw5jAdU47RMeUo7WOXUd+nwX1ofBfqbT9A++0fYF6tNCvMP1lL+8S11Ovw5PVAtXhEAUFAEBAEDAhUsoBqSpk2wvqFRmDugnUzS6AWZg1sDERovtbriv2cz6nXz9H4dZFbBQFzFGYKuLKVs2+sIt+qE02mjaFW9ateFTLlS5fJ/+0AWb/FU2K0pCOWof+iyScvoaCIkrhDZMUcIj+tyGhpkSkICAKCgCBQTkDeXlb82DpiXtsUsMO64zPQpR216sjfrTCtZ12xPKAYFopToAs5l1dw45TRIiKzWgISpb+u5trvvtj16kjtbjbcjs6vtlblBa5RMGsClysr4NCfRh++Su3jV0inlNJfFnP5l8oKi3xBQBAQBAQBXQLGF1BzG0wsQFIqsHB5FnPXllgpdKo9O4ynlr5HvWa1kLKOcGlsHLXHdEEuYj9uPW3+dxowxfyl6XRICYf0nSQP+IR8x9fK66Xv58Jbn5BjM4Lm/x6BfR0TpFITyNDR80QmM7j152Wk3i5YdW6GmfNbtAjzppZVCcVHlnPutX1Y/Tuc5l5PoSg5xWW/8eS/NpsWwZ0wN8vgxnfHsX3LE8WVXMxJI5dnqM3PJE6F5t/6YZZXjKmtgsJdy7le73XsZQ9wHUOHQ85c/KM9TgPg2tsjuO40UaO3mMJ9izk3Kp36v82hgV0hJdY2mJakkvHJaNJ/uPRE9pJotCAgCAgCVRzhFlCQegNF847U7dwURep5bpeYYGr7FDZjQ6hXP5HUoQu4adaZRsFppC7+HxL5ZC8I5Mw3KSqyUvoBrhy4gqLx8zj4tFHXa3KJtLHruNXAk6YfBWL37iDs61wna8VO8sXJob5HmnWn4bs9sUhcxukZh1E8F4DjuNdp1LM+t9d+wMl3FpDd8l2cQjrA8Sguzl/C9RMFgDkWtplkbT1GsZ5EE0iN5urxAix7+WCyKIJsJUhHF3Oi20zkmqqPmRcN3/XCKjWSsysvYOE1gkZBmt++LTK4ueYQhaZNqTvgBdVNk7aa+CsICAKCwJNEwPgC2uEpLE1LKb5wGWUtFxzc6lB8+izFkjnmDVyx61AXanWm2drxONiZYVbP0chEWoLy5A6ux15BUh39tsW2rQNknCFvcxz5GaWYtfagTuvacD2eGzP3cCvb+C91T1KHyMfkZg7y78wFlNRuh21dM0w7vsUzk7tjhg1mXOTWTTOsXwmjsasdZh7tsFLcIOe/S7m5eDOFt+UXS5VQFP0dl2b+z+C3zxKUiTu5ticZSVGfWp0q+T27RQdsHCSKTsZx68AZiqiNdaen1N1w4wQ35sdRUKLApFYl9Z+sDhOtFQQEgSeUgPEjXEtzFIoiSo6e4rZHF+zqZZF19BImXjKlAkpul0LGDs52+5hbmjVP8f4Ld4HQFBP54ZmSYkrF2qnh1wTrtg1RlF6l4EQWVlIJRT//i9Pv7dBcN8HkdyXStBDqvT0WjspdWEJpoe5eU0Iq1v1+F10jqggCgoAgIAhUScD4DtTKHAX5FJ+OJe9iEZRc4dbvefKjQphYZ5N/6gbU70Tdoc9g0qMXtdrWgiIlEuZYdvHBuoGxdfkC+WdzwLENtgPcsHFUUHxqP1nncqFBF+pPeglre/lBpSf4Y9WU2tM+otFzdpT8uZ+bPx0l/7qEhasPdm2bYvnKi5hZdaBWvd9I//4oxWa2KK5coFhqQJ03xlBnzAAsa+n+WG3I0hQzlz40eKklilK5T7OQSiRo0JE6/ZqXF045ya1cEyyedcO6RxsspCzyD4vfOssBiZQgIAgIAmBspUPRoqHm/w2e59bZm2B1lvwTKZhfl6hdz4bb478no8N4HD9fR11yuDnndS7+8hvZb3fGoefrOGYfMcI2i7x5y8lsM4YmXzkjyQ8RfbWenPr1yOs0jLqj6qO8VWqk3hOUVacLjUKKKU7aQeqk5SiLi7m2yBPbf/WmxXZvKDzMheIrNPqyH5YK+Yg8kmsztmFRaxbNeg3CqYsbV8OrfgTaxPklGpopKdy1gazYeIrjhmLfw5Omb5ziXKqGtXI3Vxa5Y/NxEK2di1QPEV2NyqP++CeoL0RTBQFBQBCohoCIxlINoL/NZfnlFt/6wuaJnBm752/TLNEQQUAQEAQeFgHjR7gPyxqhVxAQBAQBQUAQeEwIiB3oY9JRwkxBQBAQBASBR4uA2IE+Wv0hrBEEBAFBQBB4TAiIBfQx6ShhpiAgCAgCgsCjRUAsoI9WfwhrBAFBQBAQBB4TAgpJkuRX14iPICAICAKCgCAgCNwBAbEDvQNYoqggIAgIAoKAIKAlIBZQLQnxVxAQBAQBQUAQuAMCYgG9A1gPrmgeR8K96TDzELcA5ZFwWodEkX43CtOjCFGEEpWuvJvaFetIV4ieFoC73wjCf7tR8fq95CiPEN7ai5m/3wRkBv6ERGleh1T4OzNbOBGw6ix3/htDKlEhbuWy7sVGUVcQEAQEgUoIPKQFtJDUnfMIi0w0iBZSiZVPRLbEn+v2c/zWnS8XDxKPdH4X8799li/WfU/YC/UegKo41u05pbpx0BUupSVx+Hwq0UfOkaV7QaQFAUFAEHhECDykBbSYaye2syk19y52F48IuQdhxp8b+fH3+7zLM2pnGltDfXg9NBCXana6JZlX+dO6LvbWD8pV8vhzyRZ+z9F9D3IJWYnxnHz1VTyiz5AqIvUY7UWRKQgIAg+XQCWzYgl5J3/gY/+2KBQKFAoXRkRdRKKEvISVjHCxQ6FwwvvTXaQrJfWRY+twjsinhjpHiCWJy/C1k+vL/1wZsew4eZI8eT+P20f7OfeRG+byNddFxD/xk2QtAt7pQOzO4+SU+YTOUWQZ1xSiQlzx9PbH29MV7wnTmRjUB0+XAcw6dF1zQ7KbL0b1x9NtGIuPZCJJ2SQsexdvLw/cApZwJKeIotvJbLvpQ9SSATTW6JPyjrNspB9+fj4EzP6VDGUiGxas5+rVdXw6NpJEVR8pSY8Kpa5nHwZ598DFex6HskqoUFcyXq6saWWJl3mn/1l2xuneOBSQeuYcrr1eouP1Y5xO0wnNJp1lVUB3xu68
CvkxTHUKZtXZE6wa5sdg/x50H/cTF5XaXbzaBoXqJqGcpdpWX7w8exCw+DA5pVeImT0UL/8AvEdvIl1bvcxGkRAEBAFBoCIB4wvoraMsfWsRBcM3kVsqIUmJLH/FCcWtWBYGrcM54gKlxft45+KnfLjxfKW7SCk3k6SG84grloNzfwyTF7IttT7+Sw8TN68nrebFUSxJSEffp9MTHskMTKj9oj8DY3/h4NWiij2ll1OMVXA4u1aMo/a8M7Sf/QMrptgRHnlEc9zZiZFz17DqIytmrzrMjZSfmLy8LQt2RjG3zUa+3i3fDNXGZ2Av2tlqwReQtH4WC1pNYcMPU+j4zRK2JDsxeFwgDRsG8dlXQ3DRFgWyLF9h+s8rmFL7RyJjUyvWTSpQWaxf7rpeK9RfmvCivzuxW2K5ql24pHSO782nQwdXXFxTSDifW15P0ZSu/Z3YfyKVWxcS2NfWm64tO/D6mu1sWDsdj8jV7E5W6y6vpJsqIOXHBSzvPJudW6bTZt4ydscfZM23Lnwe+SN7vwmgcVUR4XRFibQgIAg80QSMLqAlSYdYe+klBr3sgq3OZFKSdJiNeR54d3FAYeZE977PsGVfAsamRX2qCsyatKKddTKpGdUtDvo1n6hvVm3xHJjKxp8Sqvlt2JJGdWwwtbLBHltsrCywsrElKyufQhUwRxo42NGkdVustx7hUPxhdqdHMXPUWObvOU1McjpKTLA0N6W8ezNJjL2Ia9umWNs8TWevS8QmZlaOv7EjDpZW2NgXkZWfVnldvXIXiQkfTUhICCEjviImSz6yMMGqgycDj27mp8R8tb7ci/wZ25TWTZ14umMpfyRdo/yA15Lmndzgt3jiTh0nv3dHmpsUkhG/mcUzv2VPbgaZuVU9QJXByQNxpP84g1HvzWdP1mmSlc/g77uHqZNXcyRDTbDyhosrgoAgIAioCRiNB6q6ZGqBeeVXBb8HQsCODn1fIu2DNzk/fND90VDbAnN5lewxhoUrX9Ec18rHmfdH/J1JaYRH2Dd4aCspj/CHnLZoS98QJR+8mcpwXyi5lMjB3OMkfBKG9aV0Dl47y42Rz+CoqqfA0tmVPvGL+N62kN4jm2OSuIZRwX8ydMNbDNk4Uyu9ir929BizgJWvNCsrI7VdjtnCifQbVcz+H0bgooJWdlkkBAFBQBCoQMDoDtTUuRtDG//Mus2J5GmP1QBT5+cZZBvD3v9lISkvcnDHafp5taNhk5Z4ZCZw8mJVR2e6us2wq+vIrcvXyZEklNnZ5Ovo0S35pKVNWndniI/2V0kLbBwsyM4vpKQgn+waw8jgWlYuaWdPUdTbla4dn6fXvt85nlnVD811cXF34uipS9zKP8+xfU1xd6lbQ43176GurMKK1i/0xUelrYi0E7EkvDmTiFWr+O+S8XTeeZxzhToOYtearj1PErH5abq2teNm4jH2tOtKt6aW5OodcCiwtLHFITufgpJC8rPlnakjz/Zoxb6Yk2TqiFTYtqbP8EBe3HOMxJvl+90aAhDFBAFB4AkkYHQBxdqV0O/exyoiADsT+QEgJ3yXnabE2p0P1gWRNLw5Jub+rHWezpeDnsak0UuEhZszvVUtFE0GEeFcjzpW5YeDFbla0qLnEAIOjKCeiQkOQRu5LOYsNSaTlvQa6Yut6lt93IcMJGd6CL3DVpPlUJGk0RyHdNZPCSLk37WZ+6Y7Di36M2NmAZ/1HUCwfzCL4/OMVLPCud9ohh7+lMGvzubStMkEOlsZKWcsq9Y91FXLM3H2YuRrdYAcziek8OJzrZCba9rUhe6FJ0lK01kZFQ3p4u0OL3WnSyMr6vcIZFLm17wz4it+q2+uY6ApDu4BhOUs4I3eY1meJbu7FS0GfsTMgjn07ReIv98S4tO38m53P14dvpRaC9+iZ32dH3t1pImkICAICAK6BMS7cHVpiLQgIAgIAoKAIFBDAsZ3oDWsLIoJAoKAICAICAJPKgGxgD6pPS/aLQgIAoKAIHBPBMQCek/4RGVBQBAQBASBJ5WAWECf1J4X7RYEBAFBQBC4JwJiAb0nfKKyICAICAKCwJNKQCygT2rPi3YLAoKAICAI3BMBsYDeEz5RWRAQBAQBQeBJJSAW0Ce150W7BQFBQBAQBO6JQA0W0HySd0awPv7mPSm6+8pKrh/5gf/svajzQvG7l/bI1lReJjo8GBc5vJvTNGL+Vu82VJIRPQsfd1+Cwn/TCdf2yPbGo2OYdIXoaQG4+40g/DfdkG+VmyiHa1s12gM7hR0u7255zMKz6YagM9LGjK2E2rngNdgPd/fXCd97iapCBxiRAGh0tPYiUA5sMEYbqs946QebW017H6xyIf0eCdRgAc3k2NpFbDunE1LqHpVWXv0GMVM9sPNdpok9KZcs4MK+/2PO0Yy/8QJaTNpPMwmO60lUrhLp4jQ8bKp6FWLlBGtyRcpLIfakNnZoTWpUUkbKISX2NNd13ilrtKR0nh3zf6bLF6tYF/YCtY0WelQzS8hLiefk9Uqm6TIGhSSvCqHD1OiKNwg1KVNJ86Xzu5j/7bN8se57wl6oV0kp3ewiUrctZGLBGBKKb3J0alfsdC8bSZcmr2Jgh8+I0QtqbqTgo5ClLOJ2njdjFv/I3v90IyZ4Fj/pxosts7GafpPLqQIsrGTlYv1QfWUiREIQqIZADRbQaiTc18v2tB3wPuHvdKXJI2bZfW2mobDSc+z4dzyvvR1A+7L4nIaF7tf3PI4uHcnQ7ReqCZlWvT7l0W/pPXQrF6p6R70spiSbq3+a4Whf03frVq/7zkuUkLF1LB1eH0Wwy2ii0itZEA0FK4+xtPdYtl8wHiihnIEFTr5TWDPKtcKCVZMyhmq130syr/KndV3srY0NCDk4vQ+vhwbiogoaLtcqJutqGmbNGlPPzBTbRg30QhKq5eqz2GTWmzlrRtLFzpgOrSXV/TVmS3V17uW6KbYd/Rj6Yhy/HM2oKKiafqtQQd7ZdggiNNiTkKjUCpdFhiBgjEAlI6aQ9L1zGdDaTvUi+UER2uPbEvIS1jDavQkK+XhoxEoS8kpAeYk9n/phJx8/yv88lnJSnlSlbE6u/xf+KjkKFHbqiaskcR2jvFzUZRUK6o7dSSbyAOyAo1sgoWE/c8ZgUj73kRvmCgV27u+zKiEbiUpsMdbKRz0vL40zJ9rg6mxfbqmUTcKyd/H288UrIJyYDCWkRxFStzveg/rg6eLHhLkTCPLugYv3PA5lFZIeFUpdzz4M8vbAXdU36jyFanKVQ5i5ETRhFgvWneHquk95MzyGzLzjLBvpi5dnDwIWHyanbDepJCMmnAAvfwZ7jycq7braHi8P3AKWcCTrJBsWrOfq1XV8+qYc21PTYRXsziVxw3esu3qGdZ9OIzJRvRBJGb8yO8AH/8EDGB2VrLJd186QqBSd9shtHM/cicPw9nTFe9ZvZElye1zx9PZX502YzsQgmcsAZh26jlSaxKphfgz270H3cT9xUSmhLCrgz20FeGx8i6IPvRi78yrkxzDVKZhVKZo4oNIVYmYPxcs/AO/Rq9nzg9b2cYTvj9a
v4KDLbw3+0hdPx8G3t/WP4Hb4heLTJxQhAQBP5gAhoDaOG1s+y43Yehb9o9wZ6ROujKlRe2Bug30uZxnnrSK20M27/B4Ncvc+JSNRf6wpuE7cvlTUU7Gsn0aOHsxusHT3Ppzg1+OVRUWt4Ao8Z/wadaxTkZ88gvqBCZqhkKpjQ2kqNnYAjGBuhpyzEwziMtu8Ykn8UzsZTIYFYv3EBIZgqpmZpnTVotO+JOJFHhlziX7UAng5ucPn6J/QsnM3n596SFxpOk7lrkWL5ijLyZDQ76EVxOUCZb1ipJhPwgjrAzrbFv2QgDm464nTnHiRPnOOPWERsDPUxeMavGxgJSL5/hgLMr3SxLUq9prFhF/kViNaQVq2JTSzkFKRc5sHoZ60MSuJ6aXXMATYmpxCCP5v3t2TN7EQERdzXkxtWorSgUBASBvxgBjQG02EZt5YzoSazVwaiFIx3MxLOnJ6FWXNf4b7za/hpRN57Xq1ES+bGBTPT5CfMJ7zLSvAYN5a1w6HeX7zbt47S7A7Z6ylmxgikrtxBwKJzMOD+6VjsulLlR6zCLrqF71amC/Dzy5bo8E3GVbdK6wraJ0/nRfBi+I9uquqzltzqDWQz1nsu3nzRh74DpbItV3jSIH0FAEHjZCGgMoNq2PRhteYSdwbFkqU+KtM1o0yub8OgkDXfdDbEbORe/Xq9UZJiXRnp2IQW3wvjuZFtc2jWpeF71SbslTkPlfPtDNBlSDrfOnODkABc6WliXlB8J557mSZNKwov7W7sNivFWbNx4iJis0uld8aw0i+ycPHKy6/JHOzLkBoaYpGeTU5hLdro6rEJSYy8S0r47PZrLyaxxstoY++7WHNx2GZfuNhg1aYvLG1GEXrqv9uxTHXUuyffSyU2K48Kj9rRt0bD8ZBMbnJwTuHIzg+y4S5xw7k7vnl3ofCGGm49ySLuXUlK3iq06mNl1xvnYz1xKrTDdLZetPKoivzN2TTRF90o2PYwnLKQVPXpYoZuZW1EmDTAwaUB6di6FOdmkK8+aaWAga0Sbfm8x+vVrhMVqTixdSbD4KAgIAn8xAhoDKPoO+G78EL1t3hhpKV8CssJj81UKZX+jr99kGs7viq7MiqHbGmDRWF4zkluL6NNYB932q9Fb8jGDW1dXvzHd/+XPu9cmY6xlhmJHG7Z/6Y2VrKR80r25WOsqXyIKxNbCmL/WPFcf2xGfscJ0O45GOiV/Byp3YKRfJvPfGY7flnhMaqYMaGPi5I1fxgrecZ/KljR11+pg7jKCz1LX8P74rzjVtGRZVKdtL3ytduA3dS/XyuKUDhZdnOmDE4ou5si02jB4gS8584YywOf/2zv7uCirLI7/eDFQUISkwhVNEbS0zVc2RQRmE4FQQHyXQCgTNUuLaClN/SSWghqiG7VhK5mh65CllbtJFvhSiAYqGiAoiIPyNrzqwMxw9vMMA/PMG4yKCXrnD71z7z3nnvM9l7nzPM+de/zgE5+Ntq4KmxqRv2UJvF9MgNnGMLjZKJ9Pcm3GQzFt1RScilqAWWvKsHZLIIaP9Ed04G94beYCrN5X2uKX9VgtX40dA7FlowTrvKZhZtBO5A1ttVWIy60PLHXod+S73cZMw6d+zyEkSozNi5ch/pjm1WM/OM8JQO37IZgSsRtiDryxgzqDbQdwYOlU+M4Kw+aey7Dcvb1L+jYjWIERYAQeMAL3NJ2Z7HQshs8B9v7R3m2/B4zoQ+UOt2M1AN/4fY1dM+wfKs+Zs4wAI8AI6Py+zrAwAowAI8AIMAKMQPsE7ukVaPtDs1ZGgBFgBBgBRqD7EmBXoN03dsxyRoARYAQYgftIgC2g9xE+G5oRYAQYAUag+xJgC2j3jR2znBFgBBgBRuA+EmAL6H2Ez4ZmBBgBRoAR6L4E2ALafWPHLGcEGAFGgBG4jwTYAnof4bOhGQFGgBFgBLovAbaAdpXYya4hLTYITlzqt4Frkd7AP0PxPhlJNcj54jVMdPbBTI9Q7FSkB/sTbGlLI8Y/jvBPGFcxBC+dWduQUpQejsJEZ2/M3/YjznyxBM4eAXh176X2D6Fvk++kApXhp3+EYMOv/JRyt6GbriNtrT+cfUIRe6xSQ1CX3zKUp22Ap7M35sYe67rJ1jU86fitBMV7X0PgzguQdtyZ9WAE9BIwYAFtQOF/k7Avm533qZfiXTdIIfo2GkGZ7kipk4GK18LVonMOZddlGtVfRsYFfWfbqiToeipi3zbGO4e+xf6jnyNMkR5M1a5ZMlSvSq4RhV+E4Jn30lBLtbic8QcqOvF7Q5s9XC7UgKl4L11z0VBZol2S8GwrwpGEY3D98AvseckcB96tRPgnX2H7nKEw4A9IW/Ud1tDVo/gs2w0znW30pBZsXzFd+RFbPn0aHyYnImKSnjOp+SroCg5v+QGjP/wCyRGTYFgeJDnqL2fjQoUMuCPufANus2zweOYY6O2Podv/g19rW8+FvM2xWHdGQJF3qkMMVcjasw3fFSgzhdSm4b2nBrecjcuXlZXgpw0zMZS7gjLqD9dtv0NxTLe+er7sw15uLsDhT7Ix7xV/jLTknSV7T7jU40xCGOZ/X6Rxpq32YHJRIdJ7DYCdTesB7XKUH1qBZ15chCCnJRqJpA3VK8KhcE+8GD4bTiGHYOq9Gl8uGoNeZz7FlPmHUKR+0K62UQbX8OwxHgjvjXFYNLqvwdJcHtGBStt6y2tw47wpbK3MgdpyFF3lUsq1MuGpLD+E8GfmIjzIDSEpvGTmvC53XpThemY6rvk5w+EOV2151Q2c72UDq14GKuD7rWW4nrkgy0LClBX4vkgC3BF3rYE6qODNp9AM/NXQOPcZAcHkU/iRXRh0wJc1t0fAwL8knorew+G3ejUWP2en9u1bnpOCiIRh+LRaBiIR0l8bDe7YeH31PI2sWC9C7rlhGONopWLB3T7duRQCH294+McivVwGcLc2bSZCEDgVbk4+iNwUibkCFzgJYnBS3IjSlHDYuE1FoMAVzqG7kFPfUmcUkoJStNyimxu5AVuTc3EjeQ1eik1HVf1Z7AzzhoebC/zjT6G29QpQnof9W/fhxo1krAldAN9x/TE09hQkTRKc/04C15T3MenSR/D38MVMwRvYl7pfTa+41RPuqmCBD2b6umDiym9RLJOh6VYhvqv2RMoOX/RIi8Wzb63DB61jvfQR0hU5PY/gw0XT4ebkhw0nK0Dt+i4HafghzvuWZ8+X2BcdhFX/EwGKL3Tz4eE7V+0WLJX/gg/8XOAmmI9/pNVyMxflnG2rUvDT/s+QfCMXyWsWI+T1eBzHT4hfGY+04t/V2cmacOv8GVS7bsOOgP6oz9mFMIEH3MYtQPzpMl58WmMm17AnFzVqMvxbtQ0ouliK55yegAlkCl38uIYIf0P6B5xf/hAsOYDS1vnj4Ypx/jtwWnxBFc8VqxE5d1zLIq/3drkEeW1+r8XevOsa81EKWdtc+BABdtwXCr7MSsSmn0Wagnuxhu9vYNPbCyBwGwPBhmMQk1yDlcpvRVz8PeE70w9LUoq14lxLct58ckGBMs6a86Gm7Bd8wNcDKwz+
qzXyRLV6sgy1TmD2PyPQDgHS+ZKQKHUjTXew5D5OCXCgYGExUdlBWmypmG/kEJNJUoWsjMoOLidLRb+WNlgup4NlEj31Mp0jPtSVIiEFYzEJRS1EORby3ETyHLmBTjZUUNrqyeSZeJHkin6zKDG3mgqTFhD8kqhQlk9JfhPo9cMlJBIuJkxJoBxJASXPm6SqCxaSiIpJGDyWgoU5lBnjroyfhAqSFtKkuN+psSaVIu3D1WyQZsaQg0MMZUrrlDInqZgbY04yFTdfo4OLvWh1WqUydK19WueFRkQ5/Xac7bkkDB5Nc5IvUzNJW2wOFlJx21hEpOGn9euHqVKjTt33Kzr8qOL52er7JbomXEL2oUK61sy37xblJs4i+9VpVN90jj6eMpKChYVttomkmRTj4E4xmXVK27hY1WuPmfEVBSOUkoubiOR5lOQfSHFZNVSTGkX2C5MpY39rfPKUMStWt0dLRkiiNjs5H15osYHHrS2uW2No8ZNrKK2+RUBekET+k7ZRVmMZpUZ60EJhEani2cqjmOdPoXJ+FKvA8PzWno/nqKRtLqhEiCdDbXNOydIzkXIV83U4+SXlk6wwifys36TD5Rc1WLX63fLZ8iQXF8UQuuZrAW8+tfqVpxGbUNr60cuk0sMpa5l7qs8xng+syAgYSEDHfSgAtb/h42VH8bfdRTgwrgyfB87FL9wibOuLhLo6LIqdhjlti7IJbH23QZw5UDvzir76NllWUBBQ5MNsglTWdvmHyrwsnBjjjkG9rIBRT+HE/y6h0pvrzd0+NIO5hSVgZQFzEzNYWDVB3KBM8mlnC2uzxzB0jCnePXcVbw5pj3ElLhzPRGnueiw6JUGOuAG2IglgZ9meEKBIdG2Lsb5P4a33NsAx9i3MG2uhR6YR5dmHkbznK6TWlcO2Toa+MIZZD5MOnuOp/BSLG1oeB+j1/ZoOPxoxSMuiCpxLy8bEqZGwU3vEXIW8jBJ4eD4Jix7AY3b6Uu7xFepgd3U8AC4RvRFQmYvjR84it8cynLp5EeI/LHDVC4AiPubKmInU7anQlLGBSA4oLu74Q+sq2zvD1/tdvLfKAbHvBGLghVM4UnoOPRb9ips5f+AP23LItIHo0qSjTq5jPuaj3BPKuaBDRFfVE33RWzFfLWBlYQYTcwtYievRUHJRg1Wr3yboN3YKvN9aj1WO0Xhnnp2OOEswRGs+lWv1mz0gFN4JrXrGwlb3J58uq1kdI6CXgM5pJL+chUMSV/xztDWMTBvQt6/ObnqVsobbJGD1FwwfkY+cK3WAvc1tCuvrboQ+jxgSt95wWb71DtKR9YDdC2vxnekOrJz2BqQ/b8WzWqYQpHlfYlHQeczf/zLmCKO1enRehaYf9Th9VFO7HFIJGbB4a8rpe68xJnc7VK2rAMvjtmOGYgXkbru+qtbK3SbWtocvw+9uAdtBpvitRgLoyoZrNBgvbPocpnFvY9qim/jqRQJcliNu1wzYKdXItIHwB7jPZd1+G9n5YNN3pohb+QoWSTfgRWgw5x5N6LRcs58c9aNtlXp24z9hQ1BX3YBx9o/iXu860Gkeq3wgCNz+M9AHwu0u5oTJEAhCB+Kzzw4ht57bRWOCR51GYeKZXBTdrMGlrIuY6DwUBuybBErLIW4sw6UzUkwe1h8WFpawrmmARN6IhhrNn4U8iqddHHA0/QKqWi9+bweNUR8MmToP8yfnIyOv7aknT0MzqvOykDrib5gwwAx1yotkXodOKtoa6Ec/ODkPwI9a/vbFkFGP48zFEtyU1qCsVLH9rQPbOmD36DC4PJ+D9LPt7XbWsKddGQsMcLRG1pUKNMMIZjriamQ5FFODZ2Nyag6q7Ubj+aO/4myVrl1Zj8DC+hHUNDRCLmlATQee6p6PjrDtUM7ADrbtsTKB5ZDnETx/FFIzamFn0HzVNR/4egpRjXqU5NbBqX+fDu6EGOgD6/ZQEtC5gJo4jkeg5S/44XgpND9yAWP0tLRCU+1NNKIJNU91KawAAAyFSURBVNU32UP4u546veA4OwpbbXZjXG9Txe9Aj/f3xqr55xA1MwhrSsKwZbaj2qYtvUPmb8cy7xBsNgvHUjc7WDv7I6J2KxZOWYHPxVy4e2GY+3QM/DQKK1JKMCjgLURLNsJr2mz4+uxAtq7PW12D0TUcWjoVvrPCsLnnMix3H8LTW6D8faQJ+rnMRlTVdiwO/QjH+vXQpUlRZzrMFeED9yBihRCXb/uXBeYYrOUH389c5Y7jnnCcvQobFf4GImhnjrK+F54KfB2Bv0RiZsC72Neo306VA2baY/L/WIyHIGB9OCTrAjEtyA8+8Vk6/pY07Pn3LUxXk8nm7ZQ2w6C/ucL4SDZKyEQrrs1Fe7F0og9mBSegZ9zL+PuEQKyPlmCdlx+CfIMQn92gMh394DwnALXvh2BKxG6IrXlNeorGjj6GzUfTIXAPt8KnEWuRcpm7WjbgZeygwarVbxnKDr2Jib6zELz5EcQtn4oJWnHWpV9zPmzFDwf4elzR72Y+TqaOgPuzt7MzW9dYrO6hJqD7WamM6s7vpvDxdspNRBMoMvWGsmszNRXspyWKNnvySrxI3LYg1QYFdY366tV7sXd3T0C1IUd098qYhq5IoLmYDi55mT7OadlS0xVN7B42yajycBR5x52mhu5hMLOyixJgCbUfmK9PLc/Y+n/jCRHvudcD4x5zRLEhXlZRgAu3nsAz9pbs1uMdz4lGVORdxq3+jrC/57+7vmMjmWA3IMAW0G4QJGYiI8AIMAKMQNcjoPMZaNczk1nECDACjAAjwAh0LQJsAe1a8WDWMAKMACPACHQTAmwB7SaBYmYyAowAI8AIdC0CbAHtWvFg1jACjAAjwAh0EwJsAe0mgWJmMgKMACPACHQtAmwB7VrxYNYwAowAI8AIdBMCbAHtKoGSXUNabBCcuHyqA9civeFOztbrKs50th389F2drZvpYwQYAUbgzgh0oQVUhvJf9+CfPxUrj4G7M4e6p5QUom+jEZTpjpQ6Gah4LVwt1NKFdKpbVH8ZGRfaO6O1U4frUFlXs6dDg1kHRoARYATAHWzbSa/mq4exLmIf8gw9S1VrXAmKj/0LW86UP3wLaHMBDn+SjXmv+GPkPT8ZpR5nEsIw//si3jmrWsEwvKL8EMKfmYvwILeWBM2GSyp7drI9tz0+E2AEGAFG4M4IdN4CWnYeXxwoRh2783j7kagXIffcMIxxtFLJUg1ydi6FwMcbHv6xSC+XAVy6LJuJEAROhZuTDyI3RWKuwAVOghicFDeiNCUcNm5TEShwhXPoLuTUt9QZhaSglEv7FDIOcyM3YGtyLm4kr8FLsemoqj+LnWHe8HBzgX/8KdS2xa/ltqm6vibU5+xCmMADbuMWIP50FUjWhFvnz6DadRt2BDyG0p82wd/DF7Nf3YeCGk3dnA1j4CbwhcBtDAQbjqEi71s1e1Q5XRp5uvYin/tidjwWi3xdlf7K0Vz4JRYI/OHr7ImVB69ApvBRXb+Y5BD/+hH8xrm2cLMJR0ppo4YfZShLj1XYPVPwBlJK+afCq0LCSow
AI8AIqBHQeUav7CIletkrD5IfTtOjj5BIymW6l1Fdzj56+4XhyjZHWigspBsHl5MlFElZlPVeFJel/8Dr5rpztPdtP3JQylgu5DLQ11FmjLtSHgTHRZR4vpq4UZuLk2lOq37HINp2qoKa1Wy0JMeF/6bzdYpj7ansRBzNcbRU6lIehN9cTeeTltN4S87O0bQwMZvqOOVd4SUSUjAWk1AkbbNGnptIniM30MmGCkpbPZk8Ey+SXNFvFiXmVlNh0gKCXxIVyvIpyW8CvX64hETCxYQpCZQjKaDkeZNUdcFCElExCYPHUrAwR8HZISaTpCShgqSFNCnud2qsSaVI+3CeDcrD6fn6vjtKSf6BFJdVQzWpUWTPxU1hUyglFzdR8zUhhdovIeG1JiKdugtIGDySpnx8jiSFSeRn/SYdrqzm2dPmvoYupS2eiZTb5u91ZWe5whY7rk3O+aihv/w8JXp60vqTVdSUk0BTOM4lORp+fEgfveJFq9MqVQawEiPACDACHRDQfQVKDajKd0BMZh2a65LhczYKbwqvgG6eQcLL2yAJPoC6ZgJRHj6fMRiP+W6DODMGDg4xyJRy9T/gtWct1BZq1ZsanEmIxBpJEP7LPe8jQt3nM2CnfOTnEJMJKd1CwWopImJTcZ0AIztvJNRzfeuR88ZNvLPjF1zn2UjSLLyPTxD9XTGo9gTiw37AX5OK0EzFEAY34Xq1BNJ8Id59uwlv5tyCtOgfwKoYfH3ZkLyPKsvvWcnMAtbWTZDKuDWfe8lRmZeFE2OGYVAvKwwd9RROZFxCpaLNBn17m8HcwhKwsoC5iRksrJogblAm27SzhbXZYxg6xhSHzl3VkUKrZYSWfytx4XgmSr9ej0XLtiBV/AcKRRopqPj6DiQj9chZfB29DMu2/A/i9EKIFBdrj6CHaTPE507gwEQ3jLfj0oHp020Gu8esYGZuAStxPRoaW33m2yXX0KVse6IvevP9lZUh+8AORCccQV1BFeoUadA09JfkIePEYDw1qDd6WNu2JJcuz8VxNT+aMMDrKfznvQ344vSNDpjx7WRlRoAReJgJmHbkvJHl0/AJGImIozm4PrwQe0r+jrgXnGB5p3tc5FdwfE8NZsUJ4KD3eZ85Bj49AjbrCiGSA3amfdBXYakFnMZPwOOx3Af3IJXppo9j6AgzZFytQtPlLBySuOKfo61hhNYciI0o+e0nZLwYhqSB5jAlZ3hNjsPJ/Gq8OORxlZ77VbL6C4aPyEfOlTrA3qaTrDBCn0c6DC+A3nBZvhW7Zth3MK4R+pibwQgCLI/bjhl2St2lKW1ycmkTpGY9YNo2NzR1X4Wqd5uYzoK2Ls1uDchLikLQWX/sDw+A8BXN9o7ea/hBtRjdcwdWTnsD0p//hTCnXh0pYO2MACPwkBPQfQXaHhQT7mqjvQ6GtJnCrIeJIR0VV2P1OXuwUuAEIyMj9Bj3FgoMlFR1k6LqxjWUbvo7rLifiRgPxty9VZBI73jHk0p1Z5RMhkAQOhCffXYIufWcTSZ41GkUJp7JRdHNGlzKuoiJzkPxqCFjlZZD3FiGS2ekmDysPywsLGFd0wCJvBENNZrP9h7F0y4OOJp+AVW6LgS58fj6vAIgeD4H6Wd17eBV2vzjrzhbxflggG69/mjq0tWxCnkZeRgxYQwG9OBSu+t59RuMUaMKcbGoDlJxOUq5brbD4KLph1EfDJk6D/Mn5yMjr1qPMlbNCDACjICKQDsLaAMqam6BZMU4cfg8nncZhscdJ2C+3Q9I/iYP9RofuMa9beBwswwVtXJAVovqBj2Lk8mTcJlvgd3JP6JAsViojNFduoGf47fgcuj3aCaClLtVrLujotZk8Cj4mh/BgZ+v4mZFMQoruUWjJ+yHPwv7Of/COeVtY6KLSPDt346mP7OpFxxnR2GrzW6M622q+B3o8f7eWDX/HKJmBmFNSRi2zHY0bMt0/nYs8w7BZrNwLHWzg7WzPyJqt2LhlBX4XMyFuxeGuU/HwE+jsCKlBIMC3kK0ZCO8ps2Gr88OZGuGja9PMAkB68MhWReIaUF+8InPVtvJa+wYiC0bJVjnNQ0zg77Erekd6FYg5ttT0LYDW11XIrI1137YwSVkFqo2L0No/An00xcuk+EIjHZHarAvpi7bjnyun7GDuh/bDuDA0qnwnRWGzT2XYbl7F7groc8fVs8IMAJdh4DOZ6TSTIpxaN0UZEfjw3crN+hobiKyJ6/Ei8Rt3aGmfNq/ZFLLZiLLOZSYe0unaq5SaxORF7c5pGUTUcvmFiJpZgw5OMRQplSqsSkIhNFxlCXhbHSnmMw6IuLLyqju/Je0wsORLMdOpskOjhQsLCaSltAvcS8rNxGB8GwMZUq6yi4ivahuo0G50UaxYeg2xPR27Wx9egf6kxqaSZqbSNMVG7MepLj/SfjYMIwAI6BFQHdCbdlpxA6PAPYeRMRYy66z2htkSRNqqmXo07cXUHsSH3itAW3Zh3ef62uQdPftxP3s5FX0/8YTol0zWjbL3JUzna3vroy5C+FanI4NQUSGMVBogimbtiBSMAB3/RTiLixioowAI/BgELh3C6g8G9vGT8Lrv9erkxodh6xTr+FZQx+Bqkt3/E5t3OGYHr0dCZEC2Kl2tnSsg/VgBBgBRoARYAQ6IKB7Ae1AiDUzAowAI8AIMAIPO4F2NhE97GiY/4wAI8AIMAKMgH4CbAHVz4a1MAKMACPACDACegn8H0EmOUCBKDVMAAAAAElFTkSuQmCC) In some statisticl data, like correlation, and covariance are compare from pairs of parameterLet's download the data of the stock prices and volumes obtained from Yahoo! 
finance and calculate for the covarience![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZoAAABWCAYAAAD2W1AYAAAemklEQVR4Ae2dC5BWxZXHD68wD4bHAIIyoJDoxCDBJEQzIYlilWygVoTNA4NuEtQysbZISWlMtCwpjaWbxF2ssFtBKlkwrpYEUyqVwpSixmQtotEsmLhmJIqEGZHXAIPMDA9h69dwrj2X+333fq/57jdzuup+9363u0+f8+++fbrP7dunX2dn53GxYAgYAoaAIWAIlAiB/iWia2QNAUPAEDAEDAGHgCkaawiGgCFgCBgCJUXAFE1J4TXihoAhYAgYAgNLBcHBI/1KRbqkdGsH2SurkgJsxA0BQ6DPIWAzmj5X5SawIWAIGAI9i4Apmp7F20ozBAwBQ6DPIWCKps9VuQlsCBgChkDPIlByRXNkzzHZeMkO+es1bXLMPtnJuXbvueceeeSRR3LOV2iGzs5OWbRokWzYsKFQUqnKX0653njjDfn6178ubW1tqcIkTczQ3mh31JOF3oNATopm9erVctddd0lLS0vFI9DRfET+8uVdgiK0UBwErJMoDo5KpZxKUXmwsyFQDAQSrTprb2+X+++/X8477zwZPHhwTuUOGtlfzn92TE55LLEhYAgYAoZA70Eg0Yzm2Wefla997WsyZcqUnCR/98GD8vuh29yxZcn+bnmJ23ZvuzOrYVrb9VhHt3T7/+eQkMenwbUGNckpfd80h4mO/0qDNH+c/I4wi9F8r3z6Xdn7VJf8YWKrK1fjlX6mM6PMW2+91ZmU5syZI9XV1cLZN4cwsue+Hr75CTMYByaxqHgdxWrcnXfe2Y0VytFySQMdP0Bb83L2y/bTRV37tOvr6+VnP/tZt2SZ5MIkdOGFF8oll1zi8pCXsn1cfNphvtWktHnz5kC2sPnEl8unC4NhzMJ5/bKj5MpGW02XfppcMPUxmzp1quzbt68bpj5dMAQLAuUqr+CqdUp6DT7tXOv6/Y7jsumbbfKb6hZ3cM09DW2/PxTEkablgQ+ePU0TdVa8/DitH8XNrw/49mXSfA8//HCkzBpv58pCIJGimTt3rjQ0NOQs2dh/rpXPt4+Xs/+zPjLvuyvfk4/82wipmjhI9vy6Sz7x/Bg5+NrhwJzVsrRdOv921NH41B/Hyrv/ddApCxTJmzfvk7ELh7i46TtO8Lb1X9u7lfPq7J1S/ZGBLs2oL9fKjoc7RGdY0Bsxs0o+s2Wci//0a2dITeOgbvkz/Tlw4IDccMMNcu+997pOrqmpyc34SM9D9Prrr7v7PGAoaR4+7mtYuHChTJw40aVZuXKl8FCRlnDffffJGWecEeS//fbbNZs7P/7447Jq1SoX39ra6hSJPsB0UuvWrRPuQ48D3pIE0i5ZskQWLFjg8sHvtddeG2TNJtc555wjL774opOVPKSF3tq1a11nCZFsfBPf3NwsV111lcMU/rdu3SobN2505SMfGKlc8AivlEFYv359YNdXnJ944gkXFydXHG2IUF9btmxx5YXryxWS4Yf6uPvuu2XTpk0uL+fhw4cHqYmvra11cfC5ePFiWbZsmft/yy23OBzBkzZEPMcVV1zh8merj6CALBe7ftMlH/5+nXyxs0Eu3TPOpdy+psOdGYxt+fcD8rn/HePiSdPwjdos1D6Iol2DVaYQrg/qlLrVNkw+Bjg7d+508oLZQw89FCjgTHTtfroRSKRoSiUCnX/NR0907qcvrJX+Nd0/8hz9lVo58/tDXfFVEwZK1aSBcmTXMTnwymHp2nJERs6udnH9q/vJhJvruikpIhoWDxWUHaH+0io51PJ+0RYkoBDoYAkXX3yxvPPOO+7BYBR69dVXu/v8NDY2us5l9+7dwT2Uh3YYn/zkJ2X//v0uLx0PSupb3/pWkDZ8AW3KIHBGkdApa6DD5sg1aKd++eWXR2ZNIldkxpM34/gmmWIaluu3v/2t0PGq3DNnznSYbdu2zVG/7LLLgrpghDx9+vSgs4uTK442BdDZM7Ag+PXlbmT5+dWvfiVXXnllwFs4Ke0H3jXkQrvQ+hj7T9Uy5OSzN6Cmn4ycMVgObj6qrMjhPcdk34uHg/9JL84888wgqc5uUC608ZEjRwp1VldXJ9rOkIOBA/Wgwcd7/PjxctZZZ8mf/vQnjbZzBSJQVkWTD16dW048DMyCeECyBZSLhmGfGywf/Xm9oJRKEejseaAIPGBq6hg3blxeHX8mHhn5KW3OvmmNjovOWk0tYRNSJppJ7xciVza+KR+FzKEBxYIyBlOUuMqEzGFMUdCYnRQXZiBJQhLa0EFxQZsAxr/4xS8CpZeknExpKJ86Ur6jTGuZ8nK/kPp4769H5PnG7YF57C/f3hsUxax/yooR8ubd7S5+wxd2BlaGIFGGC5QJM35mKoSOjhOzpGHDhsmoUaNkz549snTpUoefyp2tvkjDDN9CZSNQcYqmeuKJ9QvMaHybMjOdo23lW0HGSI6HAnsznaqaeTj7HWghzYUOlZG1b0oJm9aY4dCBcfCA3nzzzYECLKTsQuRKwnccb77MyIapjk4fE9JNN93kzE4qN+atXEIm2rnQyCctgwKCmhrDprVsNAupD0xjf75ur3z41qGBaey85SO6Fcds56Lm0138+Gtq5eV5uxMpG5QJioYZCGY0AjNsZjE8HwTfvKp1xsAiKhDPQMOfKUWls3vpRqBiFM3ORzucuazm3EHCQdiz7sQMgnc221cedO9sGI0lCYNGDXCKqeP1I0mSZ0xDJ8HI0h/1qtIhE6v1kpqyeEh5Wazp6Uz8GQv0TjvtNGeC4BqFFo73GdUH3b+X6ZqR6Ntvv+1MG6ShEwwvBoiTK0zDLysXvv18dE5gC8ZgnSloR4RSY8SsIcyTL1dS2kor01lna/DoB/B/4YUXnKJXhRheDMBgAD7oUHk/48dzn3jfrOTTj6sPP23Udc2kE4M2ZjfMXjIFTZcpPuo+71gwBdbU1Dj+VdEw6GL2r+/QovL69zB9kr5YgzWftl33HAKJeuXnnntObrvtNlm+fLmztXKO+55GV36x4mvzv7QJL/a5Dq8+yybqrjUH5YUxLS4fCwcmrxntXuajTLjmHjRJM7hhQPA+JhtNjYPGxDuHCwsG/FVpGh93VlMOZhxszPrOhfcHPBjYnukohgwZkvghIQ8jO6XNS1V/dM4I/rOf/axgYoE2L1F5gawBxcR9PYi/44473H9Nk+kMbd4nKG06SX+2lESuMA1dHRbHdyae9D7YgjFYq2xqFlQbv2LG7IZFBRrCPIXlykZbaeR79t9DzJo1y9WtKkRofulLX3KLJJAJOc4///xuiwVIw/s6VWSko44JSerDJYz4oe0zS3lp5i5nGmN2M/6aIUHK8IqzP1/b5kxpSQZx8EhAQfB+BWXDQhAd9CAni1lom1qXnJFRAwMc0nEf5U16/luoXAT6lcrDZqG7N7M0mVnKOf8xomTvVaKqLW73ZkaemKPo+JKu6Ioqx+4ZAoaAIdBXEEg0o+kr
YJichoAhYAgYAsVHwBRN8TFNHUXeD/gfefomCzVPpI5pY8gQMAR6DQKpNZ2VC+E401m5+LJyDQFDwBCoVARsRlOpNWd8GwKGgCFQIQgk2lQzH1lsZpAPapbHEDAEDIHeh4DNaHpfnZpEhoAhYAikCgFTNKmqDmPGEDAEDIHeh4Apmt5XpyaRIWAIGAKpQqCsioavfnWprX7xrOj4S3J9Px0an8uZjyz5ktz/+jiX/GlMq1+L69f3SXlUXPXL+qT5KC/XspLSLmc62l1465hy8pNv2Wy90xvcRNPOcm2b+WCWb5+Q7/NDOyulXNAP96H54FKqPIkUTVdXl6xYscJtQ8NWNLh0LkZguxUq3N/qROmy5QT+TIq5KaXSTnLuqQafhJdwGho7nSObQfo+X0iXZr7Dctj/8tYXbSjNnVMp2wd73rGPXHh3j0p9ftiSKOzXp5T45Uo70aozwD/77LPluuuuE3XrzP5nM2bMyLU8S18EBPBtgwOtfDYaVAVeBDaMhCFQkQjQn3Gwh1quIa3PD3zhiJE9KOkX+J+mkGhGg0JRpTJ06FCZMGGC84AXJ4hOT9U8VsqpY5gXneJSNqCHdyKmoSlfnPlPwAQR55a4nHKF5dT/Sfhm9KoyZzIX+Wl8XLQcPWs6f0Ss98jnm9kULzCmXOKjzKHkj7qvZUado0blfnnk8dsCZfs8K03lK5vMmtY/K+7k08OX3W9nvmyaj81A/U0k/bx+OVHXPu0oXzZ+ffhl6312/sYXjPLttwmfdi6YkC8sQ/heuD78clVOf9NNv77CecN9iuIaJRO0aRvQxprid8aaL1t9KG7QjuJZ252WHcZBZeOs7Q1slC9k0bw+faVLWs3n16fSZQPZc889V5566im9lZpzIkWTL7fZXOzmSzNJPiqmlG6JyyVXNtlpZHHulNWRmL8jtE+TB4mHUH3pgGPYtEB6P53uWs1D4Odl01Hf3TL5eIjZxRe6uAfHA2WhAXqFug6mw1W+MEfyMNOhxQXkYGt/dtDmmrwf//jH3ciSTozOy3fljLmG3aWhnaS+spUfph32ZUN8JjfR2g4wWdMW4J1DfcLAX5w78ky86SxbXV2QDjcHtCPt2OPceqN41ZUzmNKu4Akew8819MGVQBrw5T9pace0S19R4eETb5/Kp8t40qFdIc8P5bHZLuY4rjnCZm0ti/aFjx341edLTXnk4z7xPt/kTfL84O1XXVNoeWk456xoXnvtNdm8ebPzERInQDYXu3F5C4mPc99Lg49zt5yt/ELk0hGJjlw4R41OspWPl0I8FpK3WIHGHTXSC9PHfzvpwlu3J3GJTMemisl3f61lEKcOzfRe3Nnfdh9seTh5WHNxHezzpR2Q31Fm4oFOCx8+bIVPIO+YMWOcF0n+4/zLd+WMGwD4TUI7U5l6P61uonm2GGSoDx3aFUoLlwga4tx6+66cwRQzMeZi9U3D+wgC7Z9ZAPQph5E8+IIzAV5Qnn7HW4pnh7KUt2xu2En3gx/8wCmRH/3oR8Hzq4pF8yIXGPp8k9dvp1HPD2nwv6Su4fmflpCTomlpaZHHHntM5s2bJw0NDbEy6HQU4DiyuWyNJVbkBH6HH3YNHFdUIXLpAggd9XBO2rlquTzEjKTBtFiBB/n48eOBU7Uouk8//bQbtfmjU9IhQ5y7ZdLxcGiARjFk4MEqtutgX3kpv1FnHNX169cv8GevCkSVFTMt3zRFxxc24UbRLcY96sQ3xUSZ1rKVU8jzgeLVzh9MMOcwg9PALEP7BM7ZnPdpHj1TN9navTqS0/ThM76iShGgG8cbdY9TPt9JIrzw7L300kvd/C0xewmHJM8PbVJphvOX839iRYOSYRQLSJMnT47lGS1dqIvd2ELyTMCol8auJqJcVrYVKpf/AOvDlnRGo+YWGlzYNp0nFEE2GuiIEd3d+QaRJy8uvfRSefXVV08xR2g6zBz5KFDNn88ZvovtOlhnQ3H8UH90LqpM6BzCtn/fNKXYqLkkjn4h8b45iXLDprVstAt5PqCLwzNm3CgZBkV+B8lgKc4deTbe6NCRRwMzFN8rKQMePz6sWJIOIpR+0nMSuszUGFSibOh//MDgRPsjbSf5DMRQWgRVOH4Z5bxOpGh8JaOLApIyrRVAA/Nd7CbNn0+6bO57lR586cgoyt1ymIbm03O+chUyo9Gys02P4/hWGuEzo21GnjRu/0ENp4M+q1v8hwUck7hbDtMK/6eDS6p0w3mL5ToY8wudJJ2lBp1JhpW7mmgZfGjn4CsROlhwIn+mkG998U5JTSs6+PE7XMrT0T28Ua/heJ9GmL+45yOc3v9Pe8D0w/spBgFqytI0+br11pmivuxGLsy4lEX7ZSbFzMCvF43XZz3bswN/+dYHvKHU4lxUs3qXQQDKVtuFKmb6oUJDqUyDhfKVSNHQoPmW5plnngm+pYlz5UzF0wAY5VHJYRe7PBysyiDOX/1CZ0PQ6TVmrTVr1jgXw0k7IUb+pXJLHCdXoRVSaP6w7LryhYdSTSmMwMEc7JlhaUAJ0jkhI3Ec4ZEXaSmDh4W61fy8X6G+o9wtK/1SnOGRQOeSr+tgxQJatHXffp6NZ+1Afbx8zFA64KTusYnT+lC6mepL4zOdi+Em2qcBb1qXhbiJVn7peDErsjgC2hqQN5s7ck0XdQZnrCooD2jyn/aq7/2g/eCDD7pOnHjaIm3SV/7aqauyCpeTqT7inp8wb5QfrmstC35YQEK7oL8jLW2O2RjXemhfqPmSnJlBhk1zSfKVOk3J/NGUmvG+TJ+REIqeTowGbqE8CKCE6fR8xcQ9OuzwYonycFi+UhlIMmrnA2867zSF3lpHae4XEs1o0tRIjJcP7K9qjzVMyoNA2P4PF4wofbNTeTgrf6mYgcKLAMrP1QkOmFFwFMNUlRaZ1Hx6/fXXp3LwWXGKxje56RTTP+v0Py0NoBR8MIuhQTH1zjQ9L0W5RrM7AmHTE+0Q84c/w+meI9k/RqaYif127V/nY1JJVnJ8Kp4vnxf/mrbIElzugQMzmrQGeIPHKNNwWnnOxhemwLCZMFv6no4z01lPI27lGQKGgCHQxxCouBlNH6sfE9cQMAQMgYpHwBRNxVehCWAIGAKGQLoRMEWT7vox7gwBQ8AQqHgETNFUfBWaAIaAIWAIpBsBUzTprh/jzhAwBAyBikfAFE3FV+GpArzfcVw2fbNNflPd4o7m2/afmqgMd3TZbq5L0FnOG97+pSfZb2vrJ3PmfEgWLRok3jZbRWGB5bW6RDhq54vDe/fKs3PmyNtF8mpbFKZPEoEneIPHXEM55dr1hz/IuqYmad+8+RS2/eXb4XaqX/GXsy2ewnCF3EikaHANwBe+ehTLlXOFYFRxbG5f0+F4vnTPOPliZ4M03jWsR2Sg07SHMHeo2WyR78OS7uKtJWhn/eiECcLxZ287IdKgCDQu3LEeZdfw73wniOeae36AnubPV6H49JJcowS0TD37Zfs8Ee/HaV7OBJUxSrZMvOhehFE+m9jqhq1o2PYmbk+zTPT76v1EiobdmtnyhANlw+62uHK2kE4EDm4
+KiNnDJYBNf1SxSBbkdCZ8jBXUqivPy5r1x6WZcuOiLdtV4+KUHvmmd3KYzS+fvZsmXTllfLlv//dHWNnzBDtZFEyO154QeY2N7u4j91wg7x8441u9kEH/Mr3vidjpk93caQhvH7SgZh20B3btwf5P/fAA/L3xx/vxgN/as86S/pXVZ1yP+mNsFyjP/MZx9NFv/yl1J9/vszZtEkuWbtWPuTtLN54/fUuDXKPbmqS5uXLXXEHt26VsRdfLJwJHS0tcmTfPvlQXd0p7FSNHi1VJ7fUPyUy5gabpWZztheTvU9GJ1I0PjJVVVVud1v/nl2nBwHMZl0t70cydGTPMdnwhZ2BSQ3zGukJam5r+/0hwdSG2e35xu3y3l+PRNLyb6pJjE02M7kl9k0SUV+2+5sWYkrKtuOB0tKvusN5fZOHxpFW80WZqHx5/OtHHhkg1dVV7rjnnoF+lGBSw5y2YUN/ufDCwS5Nsc1rdLB0tHTAftj66KMyfs4cOWv+/OA2aTiY6bQ++aScu2iRDDypGRvmzHFKYX9zs+zdtMnl4R6BNKRtb252eYk/+Pbb8ok77gjyw8dHFi4MyuKCsi/8yU+CNN0iY/5kkism2ynRKFcUos7GRl1wgRx46y33v/2NNxyPHa2tcqyrK8gLRmHlFUTaRUkQyFnRtLe3OzerbHdtIT0IqBJ5emSrbF/dIX/59t5uygJF8n837pPx19Q6cxpmNcLf7m7vJsRLM3dJ7dkDXZqxX6mR1v8+YYbrlij0R2cq+KNRMxAdvO/KVk0SbFESDqRNgxvcMF/8v+KK96Wzs0tWroxWuFu39hMU0JNPHpbW1kPC/40bc36soorOeA9FsmvDBqGTjQpdJ32S+CN2lEnN6afLu88950b8XKsSggZpD7W1CYqINMwU/FlEVDlpuAevzMxUFmYqg+rq5MDmzdK5Y4fUeu4e0sBvX+Uh8ROBqQyzGfs44U8iiYfNvgpqOeQeNLK/NP3uNEGBnD6/Rs5bPsIpi4uaT5chHx0k+185LB1vHZUx/3hiy3bMah/+fp2899oRQUlpmHhjnTR8o9b9Hf0PVW52pLMeTVPsc1rc4OYj1/Dhx+Xee48I5jWOpqZjTtnkQyvfPP57C0xnh/bskYFDh55i0qqbNMkVwYhfr7VMzF+YwcJB33vwPiT8Diictqf+N//0p8F7HGYzOjNDLsLIadPc+6nqMWNkQAlsnfisUQ+iPSVzpZeTWNHg8Ezf06BoVqxY4XzUVDoAfYn/mkkDpX919vc2KBcN9Z8fLFNX1Zf8XQ+7IMfteNwTbnBV7ko7T7nlFvcuZfzcuY71wSNHytH29m7mIiJUwaBktFNWWTEtkYe8fsDMxLuQaT/+sX+7rNf+OxpmM7+bP7/byrdhjY2CuWzoOee4mRpKt5iBGTyD7lmzZgU+fIpJvzfSSqxofOExmx06dEgOHz7s37brlCPAjOZY54l3MrB6eNcxOezNZsrFvnorzVZ+T7jBzVZ+muJ09oHZKCqoyUxNaKThHQajf16+c/jvNYjXtORFEWGaw0SX9nDGzJkyuL4+4B/ZMPlNX7VKhp4076NAVb5iyMP7PjyW4hKi0ha2FEP+fGjkpWjwQsisZmiRRwr5CGB5kiFQ97FBLuGOX59Ywoo5bNvPD7p3NpjdihEq3Q1uMTDoCRq8j5i0YIFgQtJVZn65dLRDGxvl9WXLgpfkLWvXuiQjpk4VRvy87Nd7KCHSjps1y3XSdN4EXc3l0056rQtESr3c/Z2nnnJmwpqGhqSsFSWduskuCrE+QCRRD6PvZ/Q7GnCZ76126QM4VbyIKJNpj41yyoUVZSwaqGoYELyPKYaAmBR8F9q6csz3IRTltjstbnDDGPBZCavIWHW2cOEgufPOgZGrz8L5euI/Jq2Zzzwjf1y82L2veLyxUbZ5y48xp/HCn/u8X2Gp86d++EP30hxFxHLltx56KMiLCUpXsBH/hdWr3axHv2V5+bvfzUksXCZfcMEFglmUxR5Jg74Tev6rX5W2jRtl7dSp3b6VgY7/jgYZWB1HYJZmIZ0ImD+adNaLcdVHEMAME3YH3VtEZzl5MRzBlQMPluDzrUyUaYw6M7NZbrWSaEaTG0lLbQgYAn0ZATpi9bJZqLfRNOKIkpk4cWIaWUstT6ZoUls1xlhfQUA/cs3lQ9I0Y9PU1OTMZbwwR+FUUtCPeheGPk5FBt3rDEU68+R7rEqSrZy8mumsnOhb2YaAIWAI9AEEbEbTByrZRDQEDAFDoJwImKIpJ/pWtiFgCBgCfQABUzR9oJJNREPAEDAEyomAKZpyom9lGwKGgCHQBxAwRdMHKtlENAQMAUOgnAiUVdHoUkKWQIZ9lPhfkxe67NP3SVJOsK1sQ8AQMAT6IgI5KZquri63azO7OLe0tBSMVzYfJWxLgj+T1tZWaWxsLLisXAmwVr7U+zTlypOlNwQMAUOgEhHISdHQ+RKGDesZH/SVCKjxbAgYAoaAIdAdgcSKhhnMyy+/LNOnT+9OIcs/NVlhGuPoyRmCb3pjdsTX137QbTKUN1WiuutsNrfE5ZTLl8GuDQFDwBCoBAQSKRpMZuvWrZNp06YJu7ImDevXrw+UCx0/4YknnkiaPe90KIIlS5bIggUL3FYYlI0/Ew38x0Me6ThwQcz7Iu4ncUtcLrmUfzsbAoaAIVBJCAxMwuybb77pkrGHUS7Ozi677LKAPDMHZkPsiFrqgGtgwuWXXx5ZFDOcq6++OojjHdDw4cNl9+7dQlxcKJdccXxZvCFgCBgCaUQgdkbT3t7uRvyzZ8+WqqoP3PwmEUbNUGqeitqoLgmdUqTxV7yNGzdOmpubExeTZrkSC2EJDQFDwBDoIQRiFc22bdtkx44dsnz5cucnm22/9f/q1aszsokZ6qabbpLFixcHJqqVK1dmTN+TESyl5p0MK9owneWysi3NcvUkhlaWIWAIGAJJEYg1nU2ePFlYzqyBGc4DDzwg8+bNk4YE7lPVHzyzgKVLl8rcuXOVVMnOvkth3rncd999bjEA72w0wBczLcL9999/yowmTEPz6bkccmnZdjYEDAFDoJIQiJ3R5CsM7zro2Fm9RYfO7Oaqq64KyDEzwNUvcVHufXVVGGatNWvWyNSpUyXph5thl8I4Kbr99tuDsvElgYtZeKT8IUOGnPKtTpiGuiWOkysoxC4MAUPAEDAEHALmj8YagiFgCBgChkBJESjZjKakXBtxQ8AQMAQMgYpBoOIUjW9yw+wVPlhNZsEQMAQMAUMgPQiY6Sw9dWGcGAKGgCHQKxGouBlNr6wFE8oQMAQMgV6MgCmaXly5JpohYAgYAmlAwBRNGmrBeDAEDAFDoBcjYIqmF1euiWYIGAKGQBoQMEWThlowHgwBQ8AQ6MUImKLpxZVrohkChoAhkAYEYvc6g0mcnq1atUrwS6NhypQpMn/+fP1rZ0PAEDAEDAFDIBKB/wc4oyaRyE5nqAAAAABJRU5ErkJggg==) ###Code import pandas_datareader.data as web all_data = 
{t:web.get_data_yahoo(t) for t in ['GOOG','IBM']} all_data ###Output _____no_output_____ ###Markdown and then create the data frame of price![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfgAAAAsCAYAAACJ8KryAAAWDUlEQVR4Ae2de4xXRZbHi5fd0CBIA47SPBoDrA8eSwQGWGcBI7sxS4txnVEgriITwx9OIDpmYYwEJZAYJpD4B2J01CAMhGWBdqObVsDZBBl0lqWZOCyw0ggN7ojdAq00L2HzKffcqa6+93fv79G/7v71Ockv9/5uVZ0651t169Q5VffeTo2NjdeNkiKgCCgCioAioAgUFAKdC0obVUYRUAQUAUVAEVAELAJq4LUjKAKKgCKgCCgCBYiAGvgCbFRVSRFQBBQBRUARUAOvfUARUAQUAUVAEShABNTAF2CjqkqKgCKgCCgCioAaeO0DioAioAgoAopAASKgBr4AG1VVUgQUAUVAEVAE2pyB37Rpk+nevbvZu3dvQbYOej399NOmsbGxIPXLt1Kp8CSNvlRRUWHq6+sTi0ZeyqTbTtSXbl2JhcpxxpUrVxrutXwQfR0saQt+1O3Tmd//3rw3aZI5f/Son5T4/9XGRrPvF78w8PKJa/8yeHBomp83yf/L33xjdlVUWJ7Zyp2kvrA8UZhJ/xW86ZdKHROBlAZ+8+bNZvny5aa2trZjotPKWstkR25UjunerJkO5P4gQd1tzXgdOXLErFixIjAebnMhP7rv2rXLVFZWmr59+wbJYJiu8Q4Kd9CTXGBGW2DsFy9enBaKGH2MKMYbI55PkokBkwPXkN9w001memWlqaiuNr1HjsynSLF10dfp89wD8+fPj82vGQoXga5hqp0/f96sW7fO3HXXXaaoqCgsS4tde+SRRww/pR8Q4AZ9+eWXg6jG9OnTrdGaNGlSi0PUp08fU11dbUaMGNHidaVbAYbilVdeMXPmzAmV7+uvvzbIPzKDwVcGyHRl0vzZIVDcv78p7tevGZP6/fvN8J//3Hz7+efmQm2tuXH48GZ54i70//GPzT+eOBGXrUn68c2bzZ/WrDEzdu60dTK5qPntb02PsjLTtXv3Jnlb608UZq0lj9bbthAI9eCZaT/66KNm1KhRiaVlwF2yZIn1MPH0fI9PwnR4AnhWpE+cONHghUGux+hedwWgLOXk54YY3fKku2kuj1yfiz6ut+3X7aZhoDMljPqbb75pNm7cGIT4Xd4ubnL9xRdfNE888USAmRse9TFz0+JkFI/u4MGDth3B3C0fxVv6yRtvvGFlogw/ysNTSOTneljk4MCBAzbrAw88IEVij/Q1MKINXn/9devV+/zdel19XOZuHl/usHzkF3LLunrF3R9uebed5XrcUfgjLz/6hUv+vSVtkQQzn3cm0REMMB4xnrFLGNU/79lj+txxh7lp9GiDsXfJDZVvHznSnNy+PUh2vW888HRC9PA9tmGDGb96dTChwKgPnzcvsXH/48qVtk7f+0dAWU4QuQj3U6eQqxd5mGz4FIWZn0//d1wEQg38rFmzTFlZWdqoNDQ0mIULF5pVq1ZZA4RBIhLgEoNreXm5TaeerVu32mTxmk6dOhXqdTHgwBuPkgGFn3j6nC9dutTMnj3bXocHRlAGKbf+ljhnsKypqbF1I9+GDRuCiQsyIAsyISeTp2xoyJAh5osvvrC8GHxLSkrsObwXLVpkvVrBhuMLL7xgJwWc83PDo9u3bzdvvfWWvY58yJoOZhjJ559/3rz//vu2XT7++ONA71S86ScY6H379hny0R+YuHz00UcWGh8z2pX2RX4h8k6ZMsUaK7kWdyQSQZ20AZERJiHwdEP49CmuIU8YYaDd9iRvWDTFzSf9NIleUfdHmCzpXFuzZo259dZbrW7ITL8QAodDhw4FaeDD5IbrSTD78MMPgyUPykA7duwQ9lkd8divnj9vw+AlQ4ZYYy9heo7/tXSpGTZnjvXOZx0+bAbNmhXUJ147njuh9L5jxwZpcSfnDh+2WTINv2OQL3z5pUEm6r9j4ULzh2eeCYx4bWWl6XHLLTaNdHdy4+uF7Ew2mLAoKQLpIBBq4NNh4OdlIJGQ7tSpU83p06ftwCH5GFhkwAtLl3zukQGJQRUDJrzd9JMnT5pevXoZ8eaYLGAUxGC4ecPOxYMU74ZjOl4SxoLJBzRo0CAzdOhQU1dXZ/VGbowqMuWCSktLbegZXmAxc+bMgO24cePMuXPnmuAdJIaczJs3L5AL+TBUTB6Ezp49a8aMGWONKJj4ntno0aPtZI6yrt6Uj+NN+xQXF9vJ3IwZM6RKe6TdXMxIRy/aGaI/0K+Y7EQR+Pfu3TutCUAUL7mO8UrSnkzwyMfkyW33OL2oJ+7+4N5hkhJ2H4ic/pGJIAb8qaee8pPsf2SkvYRY1mB5g2WOJEQfFHnoJ0y8mPDmgvDYbxw50nr2GFuMPUYf+qa62h7LKipyUVXOeIj3P2z27MDbR8aSoUONTByo7MzevYHBdytHvxt69TKiF1ENJjH/u3u3m03PFYFYBHJu4P0axduU6xh1IQwK66gMCqmIAR0+UQM6g/nq1auDkCv8CEsnJYwJdbi/dAdRvy7XUPpp2fxHVwwvhLzu7mSMsaQlqQOPEqzk54dtZQ1ecPHbasKECdawUxc8SBdvNo53lHzUhfHGkxW5Bg4caA7/v0clekfhKyFljKkvb1Sdqa5f+f6qkR8Gb+u2fzV3Txhvr4WV++CDD8xzzz1ncXCNexK94JfJ/REmR7rX3Emuj3ccL8Fc2iudey8VbzxZwvM/mjbNZsPQYez9MH0qHq2VVtS3rykqLY2sfujPfmb6T5pkKseMsWF8NwR/qa7OfL5+vWHJQUL4f/jlLyN5aYIiEIVAixt4jDI3fjZE+SjjLnzdkKsYJDccLfnCju7gJoNUOh58GM84ecPKJLmG4RJMiZZAeJbozPIARjkJMSgTdSAcK3i5YdskPKLy5IK3KxfyuROuVP0BT5K8GEo/4hAlb6rr3bp0NfIrv22YefKJeabm82P2Wli5++67z7AvgQmOu/YueVPpJXnyfURO5JVlJI5JNyfS95599lkbXZN+FLW8ka5eeLJ1n35qfvfTnwaG7vDatU3C9OnyTJof43zxzJkmHnfSsuS7VF9vMNRC1y5eNN8dPy5/7XHU4sU2RC8heNfIs9Qg4X1C+PzIr6QIpINAixl4bnwMZ7rrpGHCM6DDB37w9YnBCI8u03W/XHrwyIAsyITcrHtilCExfL784gHFGSQGYtasxWuHD/yph8EVj9X34Fnf3rNnj0336x0wYIAh5A8xwPsevJ8/nf+Z8o5ra2QQXKO8ePKgV9RyBWnHjx8PQv7p6HX77bdbnME7iuDPPhSiSuAKJdErip97nT6Q7uSzX79+tl9IFAQeflvLpJG62DcjeaXuOMxkQktfRu9cEAaydPz4JoaOHe3f1tTYMD1GWM6p79Ca
NU022WUjAzv1B1VUmD+uWBGE0YkoHP3Nb2If1SPSgHd+bOPGIO/pqiobor9pzJhmYnUuLrZpksBSBJMB1umVFIFsEAg18Lt377abp1599VU7SHJM+jy8hFYJ87HOKuvtcUIyEDIIUm7Lli127dcdyOADP9LJx4+BCiIUynon656SxlEG17i6s013d2T7a6+se4pueDp43ewXSEo+bza0yXrnQw89ZA0+uoLB2LFjm3nw7r4E8jFJguAxefLkYI0dudnjkAvKlndYW/uTHzx0ZA6b8MXpgHw8Xif7C2Q3O0ZbJk+EmTGCLmbwZTLIpAq8SeMX1s+og7bmfhDMk+gVJ3sm6ciK3HJvsj7uetnscWCyJDr17NmzmQcfhRlluC+FN3187ty5mYjZrAxrzjdPmRKsY5OBR9R6lpfbMD1G+La5c03VvfdaD7/XsGFm5IIFzfhkegGP2Q2jV02fbm6+5x4rj+zQJ8T+5a5dVgb3OXnKsolOwuxskvvrZctsWSYKPNMv4XfykJewPcQE4W/eftturJM8HHWTXaYt2XHLdWpsbLyeC/UZHFl75GaXddhc8G3rPBjcMTTyrHpbl7dQ5JP+RmQnbBKJJ8mkFCOLEcoFsRZPuF4pPQSkrVpzbGDj26eLFplRv/pV8Nhbelq0v9xtAff2h1phSRzqwReWiqpNISKA54y37T6S6OpJWBpKuhPcLavnhYcAIXLWxcNepFN42qpGisAPCKiB157QbhEgbMzLldh7gLfiEl77ggULbBheQvBuup7nHwEJ48uSRUtL4L5ohjfS3f3rXzd7kU5Ly9Aa/Fm2os9zD7DEp9RxEchZiL7jQqiadxQENETfUVpa9VQECgMB9eALox1VC0VAEVAEFAFFoAkCauCbwKF/FAFFQBFQBBSBwkBADXxhtKNqoQgoAoqAIqAINEFADXwTOPSPIqAIKAKKgCJQGAiogS+MdlQtFAFFQBFQBBSBJgiogW8Ch/7pKAhcqbtm9v7kK/Pv3Wvtr/bt79qE6vLa4nQfJeOtjv7b/tqEQp4QPM6InGFv//Oy5u3vypVdzcSJRebIkU55q5P2kjchhrU11yRd3tiZqXDwypZHpnWHlRPd21IfcOVELv9e4r7kkVz/cVzKyWOJUW+1JA9t0Br6hhr4ixcvmtdee82+rpbvfW/evNnVX88VgXaPwLHVDabvT4rM3zeW2V/ZP5XkRaewwSMvFbfjStqagcoFlLx9EWPhvjLY5Svfx8jVB6Bc3nHnGLPHHnsso9dAx/GOS2/t+wPdV6xYYQ08BluId27winH5wJdc58j7BiorK+3HvtauXRuKG68Vhy/880mhBh6Qhw8fbl/1yetnT5w4YXg/vZIiUAgIfH/hurlY+73p/3fFserIp2LlGXj3f2zhDDIwkPA1PAZ4pfwgsHjxVbNv3yUzYkRO3tqdH6HbcS0yuWlrrzRnwsVLs/hOBfehT3x9k09ZR3nigwYNMmVlZaFvz0z1Ui6/nlz+DzXw06ZNM/ygG2+80QwePNh89dVXuaxXeSkCrYbAtcbr5sKxq6H1+6H7Pz153nS+1MW+g56JAf8bPv7eHFv6nQ3t/27kl+bb/74Sysu9yMydjyfxNjf3A0LuW/biwrIS3pbQrVvWrYtz4SWDkV+WdCHx2I4ePWrfgAZ/P0QpeTM5uiHMsLerIaPoxFFkllAuH/3h4z+Sx5Xd5U26mxYnKyF5QvPduxebioobTH39X0L0vBhxyZKuZu/ezjYtLE8cf5EfudwPZ8WVyzbdb2v/y4FRmMl1PsLEB7/kw16+7JnqJfzD8Ehyf/h6uW2NTBhn7gl+7777ru0vfh7pQxylnwneBw4csKfygS65LkfK8D0Fvj2CLOkSHwODpJ50y2eSP9TAZ8JIyygCbR0BDDEGeWfZaXPu08vmkxlnrJFmLR7Dbg34M2fNoCdLbNj+vrqBVqX/WXG+iWqUKxne1eb50cM9zKl3LjRJD/sjnjnfg58/f74N4zFIENrD6EGpwrLkJZrGl+w498u6dTKo4WkwoIqXRGhRynKddAZFIT4Py1fg+Mwt34Pn63K5GIiQc+nSpXZg5Jy60V+I/4cOHQp0Ah/k57p4eoSpCWWL3m50g88n8yVJ0pCbQdsfuKUu/4jHjudeXX3J9OnT3HtvaOhkFi7sZlatumLq6y+aIUOum6qqZEMmBqukpCSQmS81hr1S2ZcpF//dtgYXP8wfhZkban744YctnpQnoiQebTZ6CX/aic9pu5Tk/nD1CuvD77zzjo068+niqqoqKzd9i7zI/d577wU6oZfcGyIHn/WO+7w5cvMZ6pMnT0qxxEcmCPCXz4cnLphFxtje+tlnnxlm9gimpAi0ZwR6/lU387eHbzH31t5qeo+/wUyo6m+N9KT/GGC6lXY25/7zsvXsb/6HH9beuvToZG77517m28+u2AmA6F7+TC8ja/aE+Qn3MzloScLYYnT5/HAqeumll6zxdr9uKIOhlBVPZM+ePdYACT8GUAZaBmIGP+rLlmSSEOUVUde8efOCahhA+/TpExrmDDI5J5SFB5RLuaWKNWuu2NA9y7FTplwzNTV/8fIlT9gRHGfOnBkkjRs3zhoGDEtLEoYMoyZtHVZXNpi1ll5J+vCsWbPs0jI642kXFzddgmMSyy+MaBcmvUwOUhH3Tu/evU1dXV2zbKSxTr9///5maXIB/tTT0v1A6ktp4Gtra822bdvMgw8+aNcWpJAeFYFCRaDHsK6mc/fUg7i7dt/3niIz5q2+hslASxLGlsGBQSSKCP2vXr26mRfCF/U++eSTIOQKD5YKXMKwul4VXnLYZ3jdMrk6x2NHJn6EhaMG4bD68NalLEc/HB1WJh/XGMBZ5hDZCHufPXs2H1XH1pENZq2lV5I+nEpxJiZMYOWDR/4SFHolmdDSnkTCovJy39TU1EQuyZSWluZloidYRBp4jDuhLzz3O++8U/LrUREoaARYm2eNXujymWvmct01+dtqxzjPAsEIfRNOxcj7YWqMN6FRBjL5ETJmwGpNYpkAWUW2sPBtlHx4q2x8IqwvOvnh6KiyLX0dYwLheSJbdXW1jUy0dL1x/LPFrDX1yrYPE5WSfoKRZsmL/xD3QZJ7jPxRnj5pTBymTp3aZFnDbRM8fyIA+brvQg28a9xls50rpJ4rAoWIQK87ulm1/vxvP9z0hN1PvvGdXZMnhJ8LYgZ//PjxtNfwGNzwGnbs2JFSDJ5+YRDG8DGYQ+zuZVBZt25dyrKpEmXw8jdcpSpDmq8vsvmfMHUjE8joe/Dl5eXGX06QegcMGGDr4D8ThbbiwSMPRoSBHOyYTOXDg+/Xr5+tRzBkAuVjEoeZz0OwlmOcXvQ7+onvJUv5VEe/v0jeXPRh4cWRPuUS7ZTKM5e8tCVr8MgZRoToo9LIzz0s+IWVz/W10FGLm4ln4Xfu3Bk8C798+XKD4VdSBAoVAYz43dv6WaPOC3A+KD1lisu6BOvtudCbUCGP4RCyZVCRnfB4epxzjQFZdo3LRjjWl4mosYOXPG5ZXy48FTZ1UQflycuaPJ6HlOU
ovP3yYf/JTzTv4MGDoeuPYWW45uvLwOp62TNmzLCDHvpRR8+ePZssFcBD1u8lj+yMhvfkyZMDLMEGvZPSpk1d7A76MWOKzJYtXczAgUXNdtMn5eXn47lnNrOhE3Kzg5q9BZBMlkijnWlvzkWvuL7g1+X+py7CxBKKJlzsPmufBDOfhzupS6WXK0fYuSwNsAzDLn36p8ubMn5/kfsDfLLpw3IfwIcffWXZsmX2XGTF8+Y6+EcREycmy0w40iXaHdtKPfki/R58vpDWehSBdo4AAzSRgfXr1we7qtu5Siq+IhAggAEmbM9ENmz/iaSzgc/fgQ8T0tnk+vjjj4feH9w/TCDcDbBB5S10EurBt1BdylYRUATaIQLiUapxb4eNpyInRgDPnmWFDRs2BMtbbmGWlwivhxl38vHoHFFuljh8Ytki7A15fr5c/1cDn2tElZ8iUGAIELLleX33eegCU7HdqOOHmiXkzNEPd7cbpdqQoCwR8M55/50FGOiGhgYbwfLFlQkwSw73339/8Nimm2/r1q2WL/zzSRqizyfaWpcioAgoAoqAIpAnBNSDzxPQWo0ioAgoAoqAIpBPBNTA5xNtrUsRUAQUAUVAEcgTAmrg8wS0VqMIKAKKgCKgCOQTgf8DLsox0qtthh0AAAAASUVORK5CYII=) ###Code price=pd.DataFrame({t:data['Adj Close'] for t,data in all_data.items()}) price ###Output _____no_output_____ ###Markdown and create the data frame of Volume![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfUAAABECAYAAACYqax1AAAYXElEQVR4Ae2df4xVRZbHS0VpBBoEZEZpBJwA6xrAkBkZYGcWNbIbs7RtXOPPuMiSGP9wotEhC2MkKgOJkjQZ/lDIGFgBlbBEwB3doPzY3bQMOuvSbBwW3BGUVndQEBq1BR3ZfGr2vKm+3Pvufa/fe/369fckL/e+W1WnTn2rbp06p+pWndPR0XHGiYSAEBACQkAICIEej8C5Pb4EKoAQEAJCQAgIASHgEZBSV0MQAkJACAgBIVAjCEip10hFqhhCQAgIASEgBKTU1QaEgBAQAkJACNQIAlLqNVKRKoYQEAJCQAgIASl1tQEhIASEgBAQAjWCQNUp9RdffNH169fP7dq1q0Yg7lwMynX//fe7jo6OzgH6VxQC+fAkjLbU2Njojh07lpk/cUlTaD2RX6F5ZRaqxBGXLFnieNcqQbR1sKQu+JF3lD759a/dK1OnuvZ3340GFf3/9Gefue2Nje6fLrus5LyzCnVo/XovA7KEZG3MMKHtiIRAKRDIq9TXr1/vFi1a5Nra2kqRl3gUiIANcOzF51roy19s5x3tdMi72hTWgQMH3OLFi3MKI4QX+Sn79u3b3ZYtW9yQIUNywWBYqMLOJe6lN6XAjLpAwc+fPz8VxW86Otzun/zEoRSj9F9LlsQ+j8a74KKL3LVbtrjG1lY3aPz4aHC3/qc90i5pp3Pnzu1WWZR5bSEQq9Tb29vdU0895err613fvn0rWuLbbrvNv/hTp06taL7VmhkvPC8+nSGd4rXXXluwYi+2bIMHD3atra0+b/KPKsdi+ZYiHfIsX77c3XnnnW7cuHFnsfz0008d8o8vojO3Dhf+DGZElUGg7uKLXd2wYT6zPv36ue9Mn+5+39LiUPBGWLzt+/e7IZMn26Oqv/YfPdqdW1dX9XJKwNpAIFapozxuv/12N2HChMylpJNdsGCBVzhYdFHLjnCsI0b8WFCET5kyxWFtQaFlGD4PBSAt6ewXug/D9ISHYSGPUt9beUKrOpp3GIZSLpYY6Kxatco9//zzXtHCJ+Qd4mbPH3/8cXfPPffkMAtdn1HMwrA0Gc1y27t3r69HMA/TJ/G2dvLss896mUjDj/TwNDL5eR7nIdizZ4+PeuONN1qS1CttDYyog1/+8pfeeo/yD/MNyxMyD+NE5Y6LR3yjMG1YrrT3I0wf1rM9T7saf+TlR7sIKfpuWV1kwSzKuxgvyMU//KG3qrGujVDcnx886L4MPIUn9u93ferr3YUNDT4a7nrc9rjY+WHFZyW8AGF8eLXMnu0YODAd8J8/+5n3FsC/7Ve/8vngPbBBBnEs36Spg9G33uqm/OIXjkGKSAhUAoFYpd7U1OQa/v+lKUSIkydPugceeMAtXbrUKx2U0IoVKzqxoEMdM2aMDyefjRs3+nCzjj788MNY64pOBt6h5YhVD9GpLFy40N1xxx3+Hh4oPuuYOglQhj90kAcPHvR5I9+6detygxVkQBZkQk4GTF2hUaNGuffff9/zosPt37+/v4f3gw8+6K1X7s3j8eijj/qBAM/4ha7PTZs2udWrV/vnyIeshWCGYnzkkUfcq6++6uvljTfeyJU7H2/aCUp59+7djni0BwYrO3fu9NBEMaNeqV/kNyLu9OnTvYKyZ2lXLHrypA5CD0jogTDckCeOUMphfSJTnFcpjGftNEu5kt6POFkKebZs2TJ36aWX5toK7cKIAdi+fftyYeDDgIbnWTB7/fXXc9MZpIE2b95s7Iu+orgHjBnjjr39do7H/+7Y4S14lCTK9zcPPeR+0Nzs/vaDD7yb/ZNduzK55nMM89z8bs0ad/kdd7iRjY3uwIoV7kdr1rhv2tv9IIMBwN6f/9zN3LbN540MyBKdO8/DXkFCoCwIxCr1ruRE52Hu0BkzZriPPvqoU2dMZ2KdXFx4XN50nHSkKC3jHcY7fPiwGzhwoDOrjQECisCURBg37t4sRbNiuBZiDaEgGHBAI0eOdKNHj3ZHjx715UZuFCkylYKGDh3q3crwAotZs2bl2E6ePNmdOHGiE965wJibOXPm5ORCPpQTAwaj48ePu0mTJnnFCSZRC2zixIl+AEfasNykT+NN/dTV1fkB3MyZMy1Lf6XeQswIp1zUM0R7oF0xwEki8B80aFBBSj+Jlz1HYWWpTwZ1xGPAFNZ7WrnIJ+394N1hYBL3Hpic0SuDP5T2vffeGw3y/5GR+jJiyoKpC6YwshBt0OShnTDYYpDbVYq64KOu94+2bnW4ti+aNMlnhZU/YcGCs1z2xcoxsqkpx/vyO+90fYcOzbFioPG9u+5y9WPH+mfIgCx4EkRCoDsRKLlSjxbGrEp7jiI3QolkmbekE4dPUidOB97c3Jxzp9Kx4HLOSigQ8gh/hXac0bxC5RgN68p/yoqyhZAXRUt5+aGALSxLHliOlpZr1CUbnVOP1tXVV1/tlTl5kZ5ws1rTeCfJR5lQ2FisJtuIESPc/qCzJE4SvuYuRoFG5U3KM+tzlNyZM2ccA6skeu2119y8efM8DqFCz1IueBbzfiTJUsjzcGAbxTuNj2Fu9VXIu5fGO3TBR13vpL3wkku6xbV98r333G9++tOc+33T+PHu8KZNacVRuBAoOwJlV+ooYl72rhDpkxS68Q3dqXSg/EJXs8WLu4Ydm
nVMhVjqcTzT5I1Lk+UZysowxSsCYUFSXlz/KOIsREeMdwFXq+EVumSz8EiKUwreoVzIFw6y8rUHLEbiohyjnoUkebM+HzZsmLsomPONS3f99dc71hkwqAnn0i1uvnJZnEpfkRN5bYqIa9YFhrS9hx9+2HvRrB0lTV0UU67QBR+63o3Xlx9/nJvj5tkXgafJ4pTr+v2nnvKud1z/9mNtgEgIdCcCZVPqvOwoy0LnPePAoBOHD/zgGyU6ICy3YufxSmmpIwOyIBNyM4+JIoZM2UXlN0snTQnR+TIHbdY5fOBPPnSoWKZRS5356paWFh8ezXf48OE5q5NOPWqpR+MX8r9Y3ml1jQyGa5K1Thys6aSpCMIOHTqUc+dnLReW9xVXXOFxBu8kgj/rSvAegSuUpVxJ/MLntIFCB5wMRmgX5u2AR7SubaBIXqyDsbiWdxpmNoilLVPuUpG54H+3dq078sYbnVa9Y8Uffest91lrq88O9/x769b5efCsC9NsUGDz86di+pe4snz3mmvcb5ctK+l39XH56JkQKBSBWKW+Y8cOvwDqmWee8R0j16zfq5vbFBce86Y2f54mGJ0fHR/pNmzY4F3JYecFH/gRTjx+dE4QnS3zl8xjWhhX61DT8u5qeLiSOjqXyjymlQ2LBuua+f+sFOXNojSbv7z55pu9kqesYHDVVVedZamH6wyIx8AIgse0adNyc+bIzZqFUlBXecfVdXTAgyWOzHGDvLQyIB+fwtl6AVuFjqK2ARMuZBRfiBl8GQAykAJvwvjFtTPyoK55HwzzLOVKk72YcGRFbns3me8OrWnWLDBAsjINGDDgLEs9CTPS8F4ab9r4XXfdVYyYiWlQ3qc+/dQvmrNV70RmPvsvnnvOvfXgg94NvmXSJMfct1nLtjqd5x9v3+62Xnddp01oGhobfZ64zl+/4QY37t57Xd+Ma1/Ig8Vx8LQV8Gx0o4VyidWogAohcE5HR8eZUuRFh8hcIi+4zauWgm+186BDR7k8+eSTvoOvdnlrRT5rb3hw4gaOWIwMRFGsKB5R9yFgddXb+oYsiAubLCgpTiEIxFrqhTBQXCHQHQhgIWNVh58PhnLgcoayruAO0+peCAgBIdBTEZBS76k1J7n9FAIbHrGWAIsnJKzz++67z7vYzb0ehuu+8giYi96mIyovQfXkyLQR7ZJ2yhSbSAiUCoGSud9LJZD4CAEhIASEgBAQAsUhIEu9ONyUSggIASEgBIRA1SEgpV51VSKBhIAQEAJCQAgUh4CUenG4KZUQEAJCQAgIgapDQEq96qpEAgkBISAEhIAQKA4BKfXicFMqISAEhIAQEAJVh4CUetVViQSqBAJfH/3W7frxEfcv/dr8r+0fv6hEtql52JbBhX72xe6K0V33UjPrhgh8eoiccbvwdYM4PsslS/q4KVP6ugMHzqmYCNSX7UgYV9c8s3DbObNY4eDVVR7F5h2XzspeTW0glBO5ou8S7yWfz0Y/nSWdfZ6YtLskcaiDSpU3Vql/9dVXbuXKlX6rWM7LXr9+fVhm3QuBHo/Ae80n3ZAf93V/3dHgfw1/178iZYrrMCqScQ/OpNqUUimgZBdEFES4XW/I186jKNUhSyHvtHsU2N13313UFsxpvNPCu/v9oOyLFy/2Sh0lbcQ2yWzvbYdo2XOu7DWwZcsWf6DW008/HYsbW3rDF/7lplilDrBjx47122yy9esHH3zg2A9eJARqAYE/fHnGfdX2B3fxX9VVXXHoPDhljk5dVBkE5s//xu3efcqNG1eSHbMrI3QPzsUGNNW2nTiDLDay4lwI3sMocaolx0InWdwjR450DQ0NsbtYwi9po6xoPl39H6vUr7nmGscPqq+vd5dddpk7cuRIV/NSeiFQFQh823HGffneN7GyRN3yrbOPOQYBEFf+H/v3U27/Iye82/5fx3/sPv/vr2N5hQ8ZoXNAEbuqhYf0hLvdpblczXVtbtkwbZgX98bLOqBoWsKNzDJ79913/S5n8I+6Hy1uMdfQPRm3gxoyWpm4mszmpuVgHQ7YsTih7CFvwsOwNFlxt+N279evzjU2XuCOHfuT+50NChcs6ON27TrXh8XFSeNv8iNXeDhVWrquhkfrOnoiXxJm9pyDjjhUyw7PispebLmMfxweWd6PaLnCukYmFDLvBL+XX37Zt5doHGtDXK2dGd579uzxt3YIlj23K2k4v4CzPpClUOLALcjyKTR91vixSj1rYsUTAj0JAZQvSnhbw0fuxFun3ZszP/GKmbl1lDlK+7cPHXcj/76/d8lff3SEL97/LG7vVEzS9R/bx8f57i0Xug/XftkpPO6PWeCcpz537lzvoqNjwG2HooPyuVyJi9eME+K4j6YN86Qjw6KgEzVrCLehpeU54XSERhy1yulqHBnLeeqc2laKzgc5Fy5c6DtD7smb8hvxf9++fbkygQ/y89wsOlzQuKmt3KEXg6OIOaGRMOSmo4521pZX9IpljoXe2nrKDR58tpV+8uQ57oEHzndLl37tjh37yo0adcZt3Zqty0RJ9e/fPyczJyDGbWcclakU/8O6BpeoCz8Js9CNfMstt3g8SY/nyCzXrpTL+FNPHE0dUpb3IyxXXBteu3at9y5zDPDWrVu93LQt4iL3K6+8kisT5bJ3w+TgiOy0o8KRmyOdDx8+bMkyXxkUwN+O4s6csMCIqS30nXfecYzgEUYkBHoyAgP+7Hz3l/svcde1XeoG/eACd/XWi71invpvw935Q891J/7jtLfgv/M3f5xLO+/Cc9z3/mGg+/ydr73St7KPeWigszl4XPi48s2atzilvqJgUbQc5ZuPnnjiCa+ww1MDrQO0tGZxtLS0eKVj/Og06VzpfOnw8p1Xb2nSrjYwSLJ+yGvOnDk5NnSagwcPjnVh5iIFN6SFB1RKuS2LZcu+9m55plenT//WHTz4J2ve4sRdwXHWrFm5oMmTJ3tlgDIpJ6G8UGRW13F5dQWz7ipXljbc1NTkp40pMxZ1XV3n6TUGrvziiHphoMuAIB/x7gwaNMgdPXr0rGiEMe/+9ttvnxVmD+BPPuVsB3mVeltbm3vppZfcTTfd5OcKTDBdhUCtInDh5X3cuf3yd9zhXPyQH/V1k1YPcQwAykkoWDoEOo4kwq3f3Nx8lrXBSXVvvvlmzp0KD6YBQkKZhtYT1nDckbZhmlLdY5kjEz9cvkkdb1x+WOWWlmvU1RyXphLP6LSZwjDZcGkfP368Elmn5tEVzLqrXFnacL6CMxhh0GqHCkWnlyhXlkEs9YnHKyku783BgwcTp1uGDh1a9sFdolJHoePWwkK/8sor8+GlMCFQMwgw186cu9HpT751p49+a3+77ZpmQSAYbm1cpSj2qAsahY3bk87LfriD6aS6k5gCQFaTLc41myQfVimLl3DZW5miruaktOV+jgKBsDCRrbW11Xsgyp1vGv+uYtad5epqG8b7ZO0Excx0Fv8h3oMs7xjxkyx6whgszJgxo9OURVgnWPhY+uV872KVeqjQbcFcKJjuhUAtIjDwz8/3xfr9P//xRcelfvjZL/wcO+75
UhAj9UOHDhU8J0eHhnWwefPmvGLw1QodL8qODhxiVS4dyYoVK/KmzRdoHVZ00VS+NIRFy4ts0aNGQw8EMkYt9TFjxrjoVIHlO3z4cJ8H/xkcVIuljjwoDjpvsGMAVQlLfdiwYT4fw5BBUxSTNMyiPAxru6aVi3ZHO4law5Y+3zXaXixuKdqw8eJKmwqJespngVtc6pI5deSMI9zvSWHE5x02/OLSl+JZbE/FC8S36tu2bct9q75o0SKHshcJgVpFAMX9/ZeGeUXOpjSvDf3Q1TWcl5s/L0W5cQPyyQzuWDoSW8GORcc9z+iEbbW3LWZjvhjPGStviROmjcqFRcLCLPIgPXGZY8fCsLRcjXc0fdx/4uO127t3b+x8YlwankXLS2caWtMzZ870HR3lI48BAwZ0mgaAh83HWxxb0QzvadOm5bAEG8qdlV588Ty/8n3SpL5uw4bz3IgRfc9aBZ+VVzQe3yWzII0yITcrn1krANkAiTDqmfrm3sqV1haieYX/yQsXsLmZcQWH38JnwSzKIxzI5StXKEfcvbn9mWJhdT3tM+RNmmh7sfcDfLrShu09gA8/2spjjz3m701WLGyeg38SMVhigMwgo1Ci3tGt5FNO0nnq5URXvIVADSFAp4wHYM2aNbnV0DVUPBWllyOA0sUlz+A1bj2JhbMIL7pyHugIZ6Hq7NmzY98P3h8GDeEi1nJAHmuplyMj8RQCQqBnImCWoxR6z6w/SZ0NASx4pgzWrVuXm7oKUzJ1hOs8TqETj8/c8GYzfRElpiTidqqLxivFfyn1UqAoHkKghhHAHcv39OH3yjVc3KouWtSNbO5krlFXdlUXpEqFw/0ft/MbSvnkyZPeUxUV3Qa9TCfccMMNuU8sw3gbN270fOFfbpL7vdwIi78QEAJCQAgIgQohIEu9QkArGyEgBISAEBAC5UZASr3cCIu/EBACQkAICIEKISClXiGglY0QEAJCQAgIgXIjIKVeboTFXwgIASEgBIRAhRCQUq8Q0MpGCAgBISAEhEC5Eag6pW6fbPChvkgICAEhIASEgBDIjkCfuKgct/rCCy/kgiZMmOBuvfXW3H/dCAEhIASEgBAQAtWHQOp36uwB/9xzz/lzanW4S/VVoCQSAkJACAgBIWAIpLrfOWieDezTiH1v2YkHt7kdTGGb8ZOWcLbgI9zOTw53QLJdefLtjETacAel8ECKMD1xwrA02RUuBISAEBACQqAWEEhV6u3t7e7IkSPeUk8rsG2jt3TpUq/E2SM3etwjpwdxUhNKvqmpybF9HmRbUSadp4xCZ+9pziUmLT/bdJ/7hQsXOjba5x4ebJxPGpEQEAJCQAgIgd6CQKJS37Fjhz92lRNlOH+3oaEhEyZsem/723LEHMc9omiNOHbRlHFcuMULr6RHSXOsovEOw9lIn3NswyMaUfA7d+4Mo+leCAgBISAEhEBNI5Co1Jk/5wx1fij1lStX+jPWC0WDQ+FDpR6eJYslv3z58k5n2sbxJz18Ro0aFRfsz3dubm721r655zmnWCQEhIAQEAJCoDchkKjUQxDGjh3rTp065U6fPh0+znSPIkbRdoVIn6TQje/cuXP94fYMAOw3f/58C9ZVCAgBISAEhEDNI5BJqbe0tHhrvb6+PjMgLFxjQRwHzpdCqcMHfvCN0vjx470lv3nz5miQ/gsBISAEhIAQ6DUIxH6nznz6tm3bciAU8p06C+GMVq1alZs/t2dJVxa1hWk3bNjgJk6c6NasWePn0W0efsSIETkWxp9FdqtXr3azZ892odt9+/btiQfa55joRggIASEgBIRAjSCQ+p161nLi8p43b55fgc5cuUgICAEhIASEgBCoLAKZ3O+VFUm5CQEhIASEgBAQAsUgIKVeDGpKIwSEgBAQAkKgChEomfu9CssmkYSAEBACQkAI9CoEZKn3qupWYYWAEBACQqCWEZBSr+XaVdmEgBAQAkKgVyEgpd6rqluFFQJCQAgIgVpGQEq9lmtXZRMCQkAICIFehYCUeq+qbhVWCAgBISAEahkBKfVarl2VTQgIASEgBHoVAlLqvaq6VVghIASEgBCoZQSk1Gu5dlU2ISAEhIAQ6FUISKn3qupWYYWAEBACQqCWEfg/b4aPxRNEN9oAAAAASUVORK5CYII=) ###Code price=pd.DataFrame({ticker:data['Volume'] for ticker,data in all_data.items()}) price ###Output _____no_output_____ ###Markdown The percent changed of each day is calculated as time series (described in later class) is shown as 
given![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARAAAAA2CAYAAAAPiMaKAAAKgElEQVR4Ae2dS6hPXxTH9/33L0neSnlmgCSPTCQTDJVXUh6llMwUJUUiEoWiGHgVGYgkjwGlPCaSiRgYYIA8BkIeg2t2/312//Wzf8c5v/P4nXvuzz3fXeee89uPtdf6rn3WXnufc9bt6u7u7nFKQkAICIECCPxToI2aCAEhIAQ8AjIgGghCQAgURkAGpDB0aigEhIAMiMaAEBAChRGQASkMnRoKASEgA6IxIASEQGEEZEAKQ6eGQkAIyIBoDAgBIVAYARmQwtCpoRAQAjIgNRgDBw8edHPnznUvX76sgbStRfz69atbv359x2MRxyf627lzp+vu7m4tZIWlbRmQy5cvu/3797v3799XwjIAonzAVerfCNRZ1xiIPXv2uMWLF7spU6Y0FM314MGD3bFjxxp5fX1RyID8+PHDHT582A0ZMsQNGDCgr2VQ/ykI7Nixwz1+/LhpMKY0UXEfInDjxg3f+7Jly/7gYsuWLe7jx4/u0aNHf5T1RUYhA3Lv3j23Zs0aN2PGjFw840pfunTJHwMHDnQcIRB4FkuXLvX5lFGXZPmzZs1yV65ccWPHjvV1zC3HYm/evLmJFm3pjxSWkwdta0s5PBhvxpf17Qk418RzlG+rU/YZvnFZ4c9w4WweWJpczOLICb9hu5BPaJvMnEO5DXcrD8tCGnHX1OUwvKER6po2lBttK7c+k3Qd11dcXiu5qP/ixYsGNowdsLQU8hWOkzR90D7E3GQLsQ/5Cmlb38h/69YtP55pH03krV271l28eLGJ52i9qn4XMiDLly9348aNK8Tjhg0b3OvXr73w586dawCBcnDbAIfrDx8++DIAHzFihLt586Z79uyZW7VqlS+jTt5ZddGiRW7SpEmePjJcvXq1IcO+ffsafGEgUZDdqAwKlApP9Msxb968RtvevPj586dj1jly5Eij31OnTjV1mSQXLi8YgduwYcOa2vADbKFNucm1evVqX4/fSfr4g1BCBro2vMEUY2KYcpOCcRTTMnTdSi5Y/fbtmztz5oy7ffu27//t27fu6dOnXgp0PWjQoAYeW7dudcePH/e/qRDqA1kmTpzo7ty549uCGXVpwzUyz5w50+sOuaB94MCBBt4sRbZt29bABCIYtqFDh7rx48d7mnF/pk6d6r5//+7evXsXV1xpXiED0g6HGzdu9IMWGnPmzPFAADZgsL4ztw3AMSYPHjxop7umtrt373Z2gyxYsMC7gvRNCvlCQdxwnz9/brRHsRx5Uzib2YzEmUGeNTHQbC0c5RsareRK6gO5uYEZ7EY7rFuGPkK+Qky58eibpRV6LjOlyUVf6BaDTN8cTAYYERJYLFmypMFSOEYt0/SBHufPn+8nHsrA7M2bN35c8xuZR48e7b58+eKbPnnyxK1bt66B9+zZs70BCscVfIwZM8Z7ZtZf9Ey/GBmjGy2v8nflBgTAAYCEsi5cuOCVCBhHjx7115RzMIOVmbj5LDFomC2MF8uPO8Mng4aZnvpRlzeujeVhsBjU0aMdD4ZBBj1LReSiPXSYQeNSb+iDmR+6GOaenh43cuTIuK7bykuTK4047dGvjUGWUvCdJY0aNcp1dXU5DAXJDAOGhITnzZg22hivs2fP+jL7Q520RHuMjBm9tPq9WV65AWklDF4AsxNKtINZqhMSN7zxhPK2b9/edBMn8ViGBxKlzU3PIGon0T7JeBjdsvXBzI/R4EYbPny4dVPqOYtcrTpkoiDZOExa/sXRsL7NSDDhRL0slu02juwcTiYs+dIS7dhITdNfGp0yyjvGgGClsai2Ax0nHAOP2cAse7SOWWSWB2V7L2FfWZRs9cv0QBjU7COEXpz1k/fMYIdOuC8R0siiD+rbJmnakox9G1vbM/NOmzataW8h7JvrNF1H69vvNLmsXquzLSG4UfFSs3ogto9ixof2oXHAU8TLZi8kKWEUMA60TUqUsQfSGx5cUp9J+YUMyP37992uXbvcyZMnvSCc230fhEF1/vx5vzZmENgRDkzqYNFtKWG72NTF7UQ5XLO+PnHiRJLMufOjXgT09+7d27YXkJURk5enT+wL2T5OWnvjO3yiET4RgA707KkW2NGGlEUfaf2zMW165KY4dOhQAzP0yI1KP1Yni67T+qS8lVxp7VeuXOmuX7/ueYI39iniNqDj6FCXFMqEbCYXxgQPB32YzKE+aGuG24xRXD9MoGaM48qrzOtSTNQq4c7XFzMNSyVu8nAmy0elb2pjiFjPd8oStAoUMBRMLqGhJA8vjckRw5Ilgd3Dhw+b6Fi7ThsThTwQE0ZnISAEfiNgS+jfOc4/Rcy7Z2VPIuOW83gweG6dMqHIgITa1nXHI2B7LrYECM/R5UDVwtiNHy5hoku3LDwhE0tk3j0K90u4tvdQstCpoo6WMFWgrD6EQD9FQB5IP1WsxBICVSAgA1IFyupDCPRTBGRA+qliJZYQqAIBGZAqUFYfQqCfIiAD0k8VK7GEQBUIyIBUgXKbffBqNI8o4z7iCx9r2lukcd1BIxrKjzxoclYSAkUQKGRAfv365U6fPu1fZ+eVdkIbVpF4Dv43hjTsTb5505O3E/l0PilRHhcij/cV+B6GMuooCYG8CPybtwH1eT138uTJbtOmTY7whnwoxfcxCxcuLEJObVIQ4EYnoFLRZG802otOIR2+G+GVc+pk/cYmbK/reiNQyAPBUJixIC7qhAkT3KdPn1KRxN3GzeawNwjtQyMam6tuZeaSW374URh17GM6Zk9c8ZAWbemPFJaby29tKbfvFUK+rG9PICH8npUlndP4tnZhvyFflIdlJo+1y3KGh1Yh8qDBB2TUoa6SEMiDQCEDkqeDaF0+s2fG46bu7yENzXNoFYqR5U2rEHp4BYZVFMssv7N8uUn4PL7uTAqTkKUf1aknAm0bkOfPn7tXr175tXQWCMPQgWG4uDJC6KX1H4bYi4YGDPnik+qyQhqm8ZQlhF4ajVblfOBl8S2S6uHNUSfuY7CkNsoXAiDQlgHh/8Fcu3bNrVixInOQ5TAYTh1CGqYNM1te2bItTwi9NNqUZwmRRz2CJGWtm6Vf1akHAoUNCMaDGAcYhOnTp5eCVtkh9Eph6n8ifD7Nzc7BbJ01pGEaD+2E0EujTXnW6GkYj6x1s/SrOvVAoJABCY2Hbaa2C5dFYrInBnH00sLcmQvOpmjVIQ3Zy2ADNO5djTS+bYmBccoTQi8Oo2he1hB5nRJjM8q/fnc2AoUMCNGSeBfk7t27jXdBFNIwWdFspsaFYqRFqxB64fIGg2ghAu1pjD3lYflDGXW4Dp8gmWFuFSKP/SdibFJXSQjkQUDxQPKg9ZfWxaAkhchDJAwSyxe9B/KXKrgP2S7kgfQhv+q6AAL2Alnc8hDjwvLF6hQgryY1RkAGpAbKZ1kTFyKPJRCeSZUR5msAd61E1BKmVuqWsEKgXATkgZSLp6gJgVohIANSK3VLWCFQLgIyIOXiKWp
CoFYIyIDUSt0SVgiUi4AMSLl4ipoQqBUCMiC1UreEFQLlIiADUi6eoiYEaoWADEit1C1hhUC5CMiAlIunqAmBWiEgA1IrdUtYIVAuAjIg5eIpakKgVgj8B5HYwedhn3KxAAAAAElFTkSuQmCC)**Hint** tail() function is used to show the last data, the default is 5 data, you may change the number of showed data by add the number you want to show as the input parameter of the tail methods ###Code returns = price.pct_change() returns.head() ###Output _____no_output_____ ###Markdown The corr method, and cov methods will calculate the correlation and covarience of the overlapping, non-NA,aligned-by-index values in two Series![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAATEAAAAXCAYAAABj53j3AAAJyElEQVR4Ae1cW0hWWRRehoy31EobJy95gRQfzJAmM3sogyAC82Gi0ugCPdSDUdCFJIoi9MFAyaCCoLKUpAnsfzAQRnsYs5qKLJgwGc00myHT1CmtaS58e1pn9n88/zn/5fw5f+0Fh3POvqy9ztrfXnuttX/+oPHx8b9JkdKA0oDSQIBqYFqAyq3EVhpQGlAaEBpQRkwBQWlAaSCgNaCMWEBPnxJeaUBpQBkxhQGlAaWBgNaAMmIBPX1KeKUBpQFlxBQGlAaUBgJaA8qIuTF9FRUVFBYWJq7S0lIaHx936vXy1i1qysuj0a4up/KpfvkwPk63d+6kRxUVk0R58uQJ5ebmim/CHe+BRu3t7WQ0H4H2He7IK88XsIhv19PThgZqKSyk98PD+iq/vvuCs8uXLxMuX8ilEWtoaKBjx45Rf3+/L/zd7otJ2rRpEw0NDbnd51M2PHfunDBeNTU1YuG7OzYMGwzc93PnigtAY+LJ5zoYHJQxAYwAJdfrjZEZb+bh6p6enk63b9+m58+fU0ZGhqtm/9ty4KW8vFwYMSxquwgblq+Lyi5Z9HwwT5gvbKJ5eXn6asN3PcYYS4xDPYZQz3XcV8YlNmy0wd0dssLZypUrqb6+3tAou8MfbSYZsdHRUaqsrKSoqCgKCQlxl88X3y509mwKjY110gMmunnFCvq2qoq+e/ZMXMHTpwuPDQC5t38/xeXni/Kizk7R93F1tbjDgP24eTNllZWJ+sKODnrZ3q4BzIy3kxBEFJmWpi8K6HcsYmwmJSUlhEWi6D8NRKSk0LTQUK0gOCyMck+cIOArqaiIFlZWCjylrFuntQmJjaWVP/wgynH/5dIlgdG/Jibow+ioaIdn0Ku7d2lmdrbWV37wBmezZs2i48eP06lTp7x2YCYZsZaWFtqwYQNlZWXJ8lk+8w6GXYxDL9nlhYdVWFio1fFux+XZ2dl05coVSkhIcApxAFiEDDIv9MV4ILmewz45PEI/lo3l4rH5o2SZXbnq3NboPnvxYipwOOirmTO1ahip7vp6ARrUMyWuXk1R8+bRcEeHKEosLBR3gC2ztJRGOztFODDQ3Eyz8/KI+4I3DNpvbW00MTxsypvHYgDLgOU6X+88b6xTOazThz48VxiTPe6uri4ND3JfzBfeHz58qIW7cn/wePDggRB/zZo1Tp9hhQU0lucaeGTPn8uPHj1KW7du1XDKY6MdIgXIz8S4wrvVd4E/LvRhncmYNtMnj2d1xzzDYGHevaXwxESanppK7169EiyCo6IoOj2dRj7iMjgigr5esoTe9PZqQ/iKM2xEmZmZ1NzcrPH05GGSESsqKqLExERPeGhtMfk9PT3CsCD8gpsIYOE6fPgwFRcXi2e4xOxCwhI7HA7q6OigtWvXau4yQh1PdtmCggJKTU0V/PENV69e1eQCMFkuGGmMzeAF+JqamrRxIau7rro2gMHD2/5++r2nh2bl5BjUkgBB+Jw5ToCDJ/duaEgAZqy7e5IHFRITI3gO37tnyttwQBsLobstW7Zo8wmdcZiNuj179lB1dbU211isWMBMnZ2dtHHjRrEDAwu9vb2aYUKbs2fP0sGDB+n69esCFzdv3nQyHjdu3KD8/HxhDJinfHeFBciBueeQDHgELiH/+vXrxf3QoUPEqQOUHzhwQGZt+mz1XVgfjFF5fYDpmTNnDPVpOqAfKoFbeF/RUoohZuFC4YHBkEVnZvphVKJly5ZRW1ubmANPB5hkxDxlILfftm0b7dq1SxTl5OTQyMiIEKqvr48iIyOJd04YLgAIYLSLAD4AEQSFDAwMaAqR5UJeYcaMGTQ4OKgNDfDh8hdxbgG5BD4AMDJSCAMQDvz5/j29ffGCIpKTnUSCkQuNiXEqM+Lt1MAPL9gxk5OTtfmUh+C6BQsWiGLMNQyBHqAwctikUI9NA4aMaf78+cLAoS4pKYlSUlLo1UfPAIYFc4vxXZErLABvkAV8QcjHAKPAp11k9l2yXPL64LH1OuJyf9/fDQ6KtAfwifRH+vbtIqqYGBwUBi0iKYn+GBujX1tbnYybnXLFxMRo9sJTvrYaMXl3BEBra2sFYADAqqoq8cyuNHYlOwmGiwmLgj0DLnN1h5wAHnZvyCaHNq76eFrO7jbyWrzDIX8AQyYT5yDgoeGSXXa0A6hActhqxFvm6Y9neLXx8fEuPSGzOsiDjUQ+TIBh4Q0I9YsWLRLGC8+YE8wle8cwYrLBM/o+IyygHYwfzzP4InVh5+Zl9V1GsnIZb/4wsJBNDjW5jb/uck4MGH1UXq7lXjFmcHi4FhUAe97kvqxkj/2YT5adC6s+XG+rEWOmRnd4Qwg1AEK+PHHVjXjaVYYFwjJhAe7bt0/z4rwdA15TUFAQDd2/b8gCXha8LXhSTGyk0NfIyCFPgRxF2Jw5pryZn7/uCInMSPaC0c7K6Jjx0tdhgZt5Yfr2+nekE3iucfc0baHnZ9c7G2vIhNQKjNqnNGT8HTBSaSUl2gYbnpAgDgqQb8uSQmv9Bsz9vb2z8WJj5gmfT2LEsEMByNeuXXMpG4R//fq1y52RFwIm1m4vThbKaoHKbc2eGQw/V1cb/n4MHtmbp0+p3+EQbGDMHtfUUMKqVcLTQi6tz+HQjrJxWokdMq24mMLj4wXQXPE2k8uTOk5C6xcTQqHGxkbDRYa6O3fuaDkubFzIQyF9gIXqK4EHNhrGg7v80A+RAr4JMrkizL+rsA745LAWOT7kWv1BWAtxcXH+YG3JEzjrrqujb5Yvt2xrZwPoNTo62iuMTDJira2tIql6+vRpEaPi7uvvxeAinz9/XoAZYOJLXhxoA8+M3X0+YURbhHgIR/GMBXHy5Enb9AcwsjzM/8iRI14pUy8Udi/8vAJ5BuQbHNnZImmPdjBySy9
cEIBBXWNGhvi5BZ8k4gRzaW0t/bR7t9YXp5N8WmnGWy+H3e8IwS9evCi8BdYdh+H6OoRsMGAcDtohC8JF+XDGXZ4IWSELn4BDdpabech5W9TD6IH0+ERIjRyXHQSjKp/c26kzzpkCX32NjXR3716BJ/4tGOSXc2LAKDwx4Aye//uxMTs+0ZKH1WGNGYMg9aeIZur5tw5Axg4t522sewVGCywghC44DYQBCgRCyIWQH57V5zgnRnOAU3Q4E8jfwqAGGpnhzNdvm+SJBZpylLxfngbYg6qrq3P66cWXp4nA/2IYN/wkZ8eOHV4bZ2XE3MQB8nC8eOAJBDph90PIbvcJ3afSC7zGsrIycXL5OcyHO3rDSSqHwnIqxp2+U9XGCmf4SY6v6QYVTk7V7KpxlQaUBmzRgPLEbFGjYqI0oDQwVRpQRmyqNK/GVRpQGrBFA/8AAoaLKknnxpwAAAAASUVORK5CYII=)![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAATUAAAAkCAYAAADo4q2OAAAJ0UlEQVR4Ae1dXUhVWRReRj9aaZZKpJYllBiEEQ0lBiMGQQXmw9SUQj/Qq1HQD0kURSTkg5I99BBURpI1gV2oBx+qgRGrmSQJJm6CP6nFUGpppTXNOHx7Wod9j/ce7+3uq6fb2mD3nP2zztrf/s7aa619oJihoaERkiIICAKCQJQgMClK5iHTEAQEAUFAISBGTYggCAgCUYWAGLWoWk6ZjCAgCIhREw4IAoJAVCEgRi2qllMmIwgIAmLUhAOCgCAQVQiIUYuq5ZTJCAKCgBg14YAgIAhEFQJi1KJqOWUygoAgIEYtRA6Ul5dTXFyc+istLaWhoSEfCa/u36fbubk00NrqUz/RN5+HhujBnj30pLx8lCrPnj2jVatWqTnhF/duK319fbR9+3ZX6mYaK3AK3GKegXP2MpE866irozuFhfSpv99HLaxRYWGhpXdTU5NPO3hVVlY26p3x6WTgJiijVldXRydPnqTu7m4DjxxbBCYPAgMkN5YLFy6ohamurlYLGKyOMHQweL8sWKD+QA4ubHS4DQYIdVxAIBCJ2+3GyUk2ywj0u2TJEnrw4AH19PRQVlZWoG4TVo+X/NixY7RhwwaCrqYKXjp/G5Mp+eHKuXPnjuLZ4cOHgxYFTjFH+Je5ZOcQ2nUe4Vo3VswpnadOisyZM4c8Ho96b3fv3j2qK9YuPj6eqqqqRrWZrHA0agMDA1RRUUEJCQk0bdo0k8+NalmxKSkUm5zsM0fsrA1r19IPlZX00/Pn6m/yzJnKo4PxenToEM3Ny1P1RV6vGvv0y+KDjL/t2EHLyspUe2FLC71qaiImm5NsHyWIKD4z017l+vubN28qHTdt2uR6XcdTQX88W/jzz4ojKysqaH5REYFLq86coclxcZZqP1675sMj8Acc/PzuHU1NTKTh169V377mZkpYvNgap1/MWLiQJsXG6lVBXe/du5devHhBdi8uqMFBdnI0atgptm3bRsuWLQtS3P/d4C5fvXpV/bELrU/C7qaiLwrX5+Tk0PXr1yktLc0nJGK3XJeFseye6+0cJurhFMaxbqwXP5sniHtuw6/+LO7j9JuyejUVeDw0dfZsqxsI01ZbSyAa2rmkb9yoSNPf0qKq0gsL1S8ImF1aSgNer3LxXzQ0UEpurjUWsmHg/mpspOH+fkfZ/CzIBLlBetOF141x070fPbRFu32tdPx5/fQ6yL59+7YVjum681rqa6avl10vlss6FRQU0Pnz5wkeBnRD6IQxrIcuC2PtujOfMNYNPNOxCeYaPAKv3nd2Wt3n5ucTjBk4+/f794T7wbY2qx0X4JDdUPp0cLgBVsXFxVRbWxuxMNTRqBUVFVF6erqDioGbdu3aRe3t7UpxhGs8CRAGoQQmhmuEPGgDQdh9bWlpoc2bN6s29EFoFErYAbIuWrRIycccbty4YSl64sQJSy8YbTwbREYB2fECQSc8F3+5ubnW2K+9+NDdTe/a22nOihV+RYBU0+fN89lN4el97Oujt16vIpXdw5qWlKRk9j965Cjb7wMNVgK7nTt3WusJzDgsR9v+/ftVuIF64Ip1ZiOUl5dHjY2NCmeohD5v376lFRpOXq+XZs2aRfPnz/er9dfwjMNtrD/CJDZkCJ3AwWCL23gWrN7cDxEANk6dl4lLlyq+Dba2UtzcuTRlxgzubuwXKQ6sc1dXlzGZuiBHo6Z3DPUaZIGriQKSYhIgLSaCuJpDCZAIBu7evXuhPiJg/6NHj9LWrVtVe35+vnJ38WwUXS+Am5iYSK+/uNtox0uEv0gV7IDIcSCfwQcK2AntRguuPVz8fz59og8vX9KMjAwflWD0YpOSfOr8yfbpEIGbhoYGysjIsNZTfwS3LV++XFVjrZEfYkO2bt06H3ID9+zsbJ8NrLOzk1JTUwPmLvX1FJ7p6Ae+/nXLFsU/T04Opa1fr6KFf4eH6UNPD2GznBIfT923blGCwfylrg28NWxUvb29erWx64gZNezCUB4FO2NNTY3aBTGRyspKy+VHH+y2JgsMGRd4Wuw5cF2gX+iJJCZ2YOilh1GBxoRaz2Eg8mKzviTlYdDsLj5I9nlgQHlw8OL0EAHP5LyHHub6kx2qfqH2hzfuZHSc2mDkYMSam5vVY7Gx6WuHSsh3KsIzJ3T8t3FODfk2pDD0wwKMSFq5Um2k09PTR22m/iWGVot3C7zAhhWJEjGj5qSs7vLDg8JfKCc8TrLDbYMRZJ0A/MGDB9V9OHLhVcXExKhchT858MLgjcHT4sJGC2P9Gb2Pvb00OSGB4ubNc5TN8iL1izDfqSApzF4y+tmJDCMGzw2hKfraT1/Hku/07O+NZ05Y+GvDJphZXGxxD3wC35D31XNmdm76kxVKHfiAtYaHH4ky7kYNpAWx+UTL36SSk5PpzZs3AcNAfjGQnzHt5en6hPNC6XLgTWWWlNCfVVV+v1+Dx/a+o4O6PR41DMbtaXW1Cg0wFjmPLo+HcEqFglzIk1OnFCGnp6Y6ytb1COcaSXLssMBcLwj56uvrR9WjD9oePnxIjx8/VkOQu0IOE+kGyEJhIwZvDV6XPacF4tsNoxo4xj/B8CwpKYk6OjoC5na+NZ6NAcmoZvAMB1g4dYeBG68Co4Z0FPCPRHE0anfv3qUjR47QuXPnlBL4Dfd7NZD24sWLitwgNv/pLwvnXjgM5JMl9EVIiPAV13hBzp49awwXTmCzTpB//Phx6wUM50E4McLnHPisA/k05DNwCIACw7Xm0iVqu3JFtdVnZSmi8UkljtXX1NTQ7/v2WWNx+sknqU6yw9E5mLEI2S9fvqzyp4wbh+32Npxmw6Dphy9Yaxgz8ApG0F7YOLFhtLcHug+GZ9CvpKSEcNoO3fn0E9ffKs/4O7U/Dhygrvp6Apf4OzXGinNqaENqA/xBZIB0x3iUsQ5/wtUhRv6PgtAghMcCD44PIkIb7e7e8KRwuIONDC+8Wwo2G4Sop0+fNrLBuGVegfSAJ4O0h30DCNTfbfVO+ju1mZ
qHo6dm6iEiRxAIBwE+KXdKWYQjX8aOHwI4iEOuWvfWTT9djNpXIIo8Hoco2Hm+9cIfoyI8jOTnLF+LE7BGGgDfEELX76Vw+oU/+nX7vOHpI4RH6I+Pmu0Fazc4OGh96mVvN3Uv4acpJEWOICAIuAIB8dRcsQyihCAgCJhCQIyaKSRFjiAgCLgCATFqrlgGUUIQEARMISBGzRSSIkcQEARcgYAYNVcsgyghCAgCphAQo2YKSZEjCAgCrkBAjJorlkGUEAQEAVMIiFEzhaTIEQQEAVcgIEbNFcsgSggCgoApBMSomUJS5AgCgoArEBCj5oplECUEAUHAFAIxIyMjI6aEiRxBQBAQBCYagf8AqCCKaxgSvBwAAAAASUVORK5CYII=) ###Code returns['GOOG'].corr(returns['IBM']) returns['GOOG'].cov(returns['IBM']) ###Output _____no_output_____ ###Markdown Described statistic Hand ons You can get the data frame of exchange rate between THB and USD using the given source code.The provider of the data is from [Fred, St.Louis Fed](https://fred.stlouisfed.org/), you can search for many data provided by them using the given code and change the reader index to the value you want to get![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA90AAAInCAYAAACFsDpNAAAgAElEQVR4AeydC3xU1bX/f4DvR1GQkCGIUsE36EBIvOCjKoKGK/FWS4tFuYB/Eq5WsLe21iIEpGrLvVWsLYlXoVhuqai9JhYUfFUr1kBglIcPRIFISAii+CoqQv6ftc/ZZ/Y5c85kZpJJJslv/OCcx95rr/3deyaz9lp7HTSk5VXfsOKOHzYsWB9P+KaGBQ9vsgtsalgwtqRhRb19Wr+y4Y6xf2jY6FQ35VnHdzxjFd71TEnDD+5Y2bDLLrvx4R82/MCp6y7boORG21F1x5rlGxqkvpbd0GDqJcfRPqm6Wv/1f2ikrw0Nb8wb3DB48OCGeyNOp2IPdL/X/6HhB1p2g6XTgmeEiUd3p9/ufup+6X4oJo68+Cx1HVFO5JjnsQpbV1wsdB90YZON9MvR2eLpjJXf2CidRd9ov7VeUT4JyHHatFg6bWod470rvWSOuHXwreLtu1PInEcW1x+Mjc6zmPF5WM9nc6waGqz5+8MoQ+HpmesOl/V/cMbOHB91bPBwVDQPIvequTp43hvm1Zhj92fFfdvskxxHvwusPvnpqeo4nzGrnDP/AueRHlPdhqeeZ141KGa6rD0W9mcjHhuTofW9EJUhPd/1zB+c7y93Wc8Y2p8r3X93m+Y8qW9Y4cwDN1vnbNeTDTcPHtww+EdPNnzoXIw9CBonU89g/U2d3LwUS+d7paEh5txRxUeGOQfjjK2SaZaN851s9tPsj6MGD0iABEiABEiABEiglQh0Rqu9zsSEiWcm1vru1/HKlsuQN0CK98C5w/rj3ZrdAHbj9VXvov+wc5FlSzrryvHor6Xa9b4/sod1pcdl+P7wd7HwqTd1CQCXYfqfxuMs44pzuGENVvYbinNV9TORNxzYulPatV/PrcEmORwwHhOUbvpG7PvAm6tQVVWFS17ORW5uLqZW7IktpK8MGIIRWjbexOrnLkPeIH1T3q1+j/jeZXa/e0CO31203NJHigy/HbPtfvfIcYgAgSyBrJHfQ19HhrRxMhx2ZvN+x8/dhbHXjsPYqTvxfZNnXDb9MWGezb5HDvr6yK1f8Xu8Muw/MMIeQp8iAILkNDI//IU5V+tXzMLY3wI3zZM5dTJylA67sXLGOCzc4BTzHDyLOcJB/ZuFlcZ0cRUMGh/0wIiJelxdNeyTyzB9tn0/u1fMXJ+uP1MDxtvjb8+VIdZnLWvQUPTfshNBaqlGzp2m5mrVhS+ouZp7czmCZuu7i26x+zoOYxdEP1dnTbwXE7bdhYULFuHRnHujnw97/sXqaXWv/3hdtgdyTjb6H3ceAUH16te9inedz7B8VgswoZ+WmyQbPcevfQw58xZH+wT57IxvZI4m2ua7eGWdjE5j80AaLcS8qipUTQdm58r3yn1Yr5tJ8j1x/Q3BwnLbY84c37RmGyZcGfSd/i4WTrU+F1Nrvocleg6LuEbG1mgRaOQ7+d1Vr6M+qfFwSecJCZAACZAACZAACaSFwCFpkZqgUDFqpi561y7dHxOC6tXtxLsQY+bZaInh0cO+vQIsMqnXrxfMu8oArYnWxfAh/gY3gPqd24Atz2LqtYucCv3HW4dZI2diHmZh6rXj4hvuds319+di4iPAuIeqUHWzIy7gQAz8u/DoigLM7rUGK4cPwQSYSu9GzZb+yMk2qisDbKdxIeAwLkur3dUbxBBejoUnD8GSADExl4ffjiVi8O1+FjOEyfh7HaNv5YxbsHCLXaOfDTBGgM+F5+7CVGVQmyPoU66RS4HzI269N7FsETBh3mXI6gF8f/g4zFkwBEsmii4e9i45cRZxXOXinGxYhLF3R+f6iDhFnVs+c926J3MFWHn3OKx0Cl/mHPkevH4fcm9YDFy/AFVV03yL6Iti7OrFHX3Neu+BET8ajxlqEcYYv0A93bVjz2SxI8V5dHKOsyjnlpskGzXHeyg9xDAeoRfzlNA3sfDau6KMje+nxNs8ExP+dDsWXnsLxi6yFhL82doS68sxteBOrBp6B1ZUVaG7u6FGz3bX6O9eKZqo/qZYWQAFHhAWg17Ho9uG4iZjqM2S1sLYTIyok7m9Bpsmnml87yY+tvG+k8+auBjTF4yzvq/7jcc807B3K8MzEiABEiABEiABEmhRAmkyui1v9MLHnsWoAQFeuw2LMHXRyZj+p5k4S/3geyy448qgvMztQbVLi9d74Zo3MWGA5WHZ9NQivAvbqJB6tldPe8LdPzSDm5Q7Wb1OBvoNDfzxJob3kpHApgXaIPN6efag/OaRuPPVRI3tqD7iscdvl2MhtmHCj8RQNY3uHsjp9y5q6pRDzKqkjOlo/cCjOCylzllDLsOcNW8iT9oVHZJ99bgMs3++E2Mfex31Iy/D7gW3YOHJt2PJ7DMBMSbjDHNMU8Nvx7ycxzB16iLkmN7zmIJBF+x5GDQ/gqo516OMz5p4O0ZcexcW5ozHVsQzLpzKqR3IosXd2zBh3mKM6GEZI+bIBwr1metWWZkrwIjvuT2zfnL2VEzFyNmrEjK2/eq7r+3Gyt++ir7DgTkzno1+hgL1dNf2nm1KcR4pz/4iw8jbsFwtAFkLGYmziepjRZUsvPv3WDlopu3dlnG6C1vH34slI3tALSYGDlpjbYrhvRgTZB5MNduIagC9KJKisW1JkggaYMTP
5/4ILpKKyMmxbehFiGnO/CwsL9VRct3ipsHP7n+S7C5+UxuMB919tAGMdUzVORo4YpafMtvu0yJbddXLNlCo5d0SFCaB2sq1BEBDtmimjZWR5sZw9rEwqSgqkrbNF1u/5jXx4fLtZtzk/t1AuPecGycvNl33H/yQ3TPlbKchNzNiCOp2AA8xrhig0a1KLGEGJIGDh0scumy7//eIyMz8a+WB19pNskYSgbkhebUEIwyL7N5/9XMhq4SoNl3Xbwo66Pvu5e00ZCMeaV34pw4YNNwILwcvsBJEMEQ53ZOS7vCcatp0n1D6E3o9+8B/yubsDkcDR7uw77gqVXe7+uy/If3zvKYcXMmLNdHtOOcRueXm56Qv6g1ReMcREXA9ZcYgL7jndaAvB1/A9uetv5xqxumP7H+W7i/4/+ebifzfC1R4PngfMuw6Vor3/4caIlxn3/P2XzMuA//rR807TP/jxT519rx30FR/Mqf/q//oX8/yqi7r9XOJe42WJXkO90bTn1QeeIwESIAESIAESIAESyB4Cg/YeajQLaH/joa/KujW/D3Kjhqj99+//UM73KYAgun/985fluwuD3TmB638v+DeZcd1MQ+7phU/Ib3/zmz4i/axzx8uCby6U8RMmOoSPHD4sd33qJmMNdk6KGMvVfV990Cx5ZJ/32sf8a6TfrnpVFv7bI0FZYM2/+54vyF9ee73k5iFYWrecbGqSX634qZkL23zyZFDUchSGZf8fH3yojxsxLN176+qkIu+MNLUdk0373pC9xwJzmkeWny1XTPiUDC6qlIZTh+XNP//MuLxfOXGWlBZWyK4jm6W2/m1p72yRC8ZMl+nj/4cM6nGdLyopkZKK3rnTQQNI0gFcoDFHGBY9Te5zsPhhPi/EDpK6TaMcrLBYZgnl7XKINo4pADhvW0RhLYXIxRZJ99VKCRGNpCJT24aLNdpBe3oN+fS69s0UDvGP9huX3flh/Tx3/AQjMrVO5EO72h7GAcu3WsHtZuyx2+ex7x5/qLHa5bz6qm3UvPILYw3XYGnoL4Q6XoBB0Nv3CnVqOfse4zxY23Prca64uFRaW1uCngntvz4nNh8/91HLo36/9x8eAbD4e41x7hfuQ1XOvce+9s1c6PlH20Wbylz7bvcb2e1+aV69B/qsaNlQ7dltc58ESIAESIAESIAESCD7CYS1dMdz+BCQubk5Aus5rEF7d+027puwlGna/+E++d7ib8m/fespKcF87TBp7Zu/k83vvesI0zBZZeKkyTL/G496ZoFr94s/fF5+8qOlxlp/+vQZwVJpOO+VIMz/4qoZgvWF3Qne93k5TbL6jyvlQNNuGV1+jnxi6heM9Xrdzl/ILzf/h5w/aprsPLxJxlVOko+dc72UFJabIFfVY6+QiSMulmMtB2TE4HG+xuVuP5HHKijsNtznINZswRZ03RLr9vlzxp/nVAkRY18LtY8CKnC1sLttsdpDnj7XtaDH1m7XfdkW0qHqxDjsfHYd4ep2j1/Luceq57H1qk/PtfQErmtsOG5eQhw4sF8OHjggV/3ldZ48tJxdP/bDtW/ndfffi4+7Dbtud3k7r71v9yncGLVvXv3Qa9i628U5dxm7fXsfed3H7rJ2W9wnARIgARIgARIgARIYeASSJrpttMNHjJB75v2DPPmv35Cjhw87l7q7u40QX7b0+/KFf/iywAodKmF5Knz8pOEjR8np7tBzuru6wi93pm2gP39xxdXymc99UZqbAxbwUuvlQPfpLvnj/rckLydXPjX1XqkoHu5EHr/+I3Nkz9Gtxpp9yTnXyXnDp5o53Fo3XkoUF5TJuIK+wdk0D7ckEAkBBE37zx886yz1BVfo62beJFfO6PvCKJJ60ynvQBhjOvFmX0iABEiABEiABEiABCInkBLRjW5OnXaJ3PTJT8lLP14WNL8bQvq13/zKzIeddullkY8oQSUQ6O3GW2bJrNs/LR98sEe6urqkoKBAJk2+QIYNH2FazcvJk7+Y+CnpauuQHFdU9fzcAjl/1CUyYcRUGTQot8/1UN028+XP9AawC5WP50nATQBW5If+5d/cp7PqeCCMMatuGAdDAiRAAiRAAiRAAgOQQGhTcoJh5OcXyKfu+GuZdtnlxq3bbq6hoUGe/tYTfQKY2XmStV86eLCxDD6++LvyxS99RfLy86WlpUXe31ZrhPcHe3Y7XYFALi0ZElJQY63k3Jz8kNediqwdeN/n9xOh3crOXRIgARIgARIgARIgARIgARIggTQi4Fi6q8adLZM/Ui1nrLnMeXm5UljkP4I2ROfQYcNlimsdZMzbHlwxxMxdtsc+bPhw+dt7Pm/aRPAyOxUUFsqvVq6QT35qtulX1Atti8i5551nlg2rGDLEzLVFP0MlzNkuLi2VsVXjpHrqRfKRCy8yY9Io6ZVDh8nIUaNk6NCh0t19WiZNtqNsDzICubS8Uro6WkUE64P3zlk364DZDZ9xn7HzBiLCFxSVSF5RiV2K+yRAAiRAAiRAAiRAAiRAAiRAAhlCwIle3tHebkSk3W9o0/yCAiNY7fOh9iEwT3d1SUdnV58s+QX5ph634MU8brhqn8Z6W0HpjAzKyTEu3O1tbUbABl2O4CAnN0fy8/PNvO5Oj771VhVoMz83V3Ly8vpY4DUforR3tHdIbl6eWatYzwdvoahtER181dfRIIh2rCgW+iWBr3qYiQRIgARIgARIgARIgARIgARIICUEHNGdktbZKAmQAAmQAAmQAAmQAAmQAAmQAAlkMYGUzenOYqYcGgmQAAmQAAmQAAmQAAmQAAmQAAkYAhTdfBBIgARIgARIgARIgARIgARIgARIIEEEKLoTBJbVkgAJkAAJkAAJkAAJkAAJkAAJkABFN58BEiABEiABEiABEiABEiABEiABEkgQAYruBIFltSRAAiRAAiRAAiRAAiRAAiRAAiRA0c1ngARIgARIgARIgARIgARIgARIgAQSRICiO0FgWS0JkAAJkAAJkAAJkAAJkAAJkAAJUHTzGSABEiABEiABEiABEiABEiABEiCBBBGg6E4QWFZLAiRAAiRAAiRAAiRAAiRAAiRAAhTdfAZIgARIgARIgARIgARIgARIgARIIEEEKLoTBJbVkgAJkAAJkAAJkAAJkAAJkAAJkABFN58BEiABEiABEiABEiABEiABEiABEkgQAYruBIFltSRAAiRAAiRAAiRAAiRAAiRAAiSQl+4I6vbtlUVPPConTzaZrt7/0MNSPfViWfb8c/K739YEdf+a62bKnHu+GHSOByRAAiRAAiRAAiRAAiRAAiRAAiSQKgKD9h5qPJOqxvtrt729XVa/+iu54aabpbCwUCDAX/nFCvnsF+aZY7v8f7+4TD5+5QwZd/Y59mnukwAJkAAJkAAJkAAJkAAJkAAJkEDKCKS1pRtC++ZZtztwGhsbpLikpI/gbjrRKG2trTJi1GgnL3dIgARIgARIgARIgARIgARIgARIINUE0lp0Aw6s3Uu+u1i2bnpPLrz4Epn35Qf6MFu/7m2ZdunlfcR4n4z9nMjvPtVPDl4mARIgAR
IgARIgARIgARIgARIgAf8E0tq93D0MuJf/6Pnn5Ev3z5fyiiHmMqzcK3/2knz67s/GLLrPHlnhbpLHJEACJEACJEACJEACJEACJEACJBA1gYyKXg738eHDR0jTiRPOgGHlHjO2KmbB7VTIHRIgARIgARIgARIgARIgARIgARKIE4GMEt1HDh2Uo0ePSHlFwCINK3ft1s1y+cevjBMOVkMCJEACJEACJEACJEACJEACJEAC8SOQ1nO6IaqffOwRObi/3ox48OByefBrjziu5bByDxs23DmOHxbWRAIkQAIkQAIkQAIkQAIkQAIkQAKxE8ioOd2xDzd8DZzTHZ4Pr5IACZAACZAACZAACZAACZAACURGIKPcyyMbGnOTAAmQAAmQAAmQAAmQAAmQAAmQQGoJUHSnlj9bJwESIAESIAESIAESIAESIAESyGICFN1ZfHM5NBIgARIgARIgARIgARIgARIggdQSoOhOLX+2TgIkQAIkQAIkQAIkQAIkQAIkkMUEKLqz+OZyaCRAAiRAAiRAAiRAAiRAAiRAAqklQNGdWv5snQRIgARIgARIgARIgARIgARIIIsJUHRn8c3l0EiABEiABEiABEiABEiABEiABFJLgKI7tfzZOgmQAAmQAAmQAAmQAAmQAAmQQBYToOjO4pvLoZEACZAACZAACZAACZAACZAACaSWAEV3avmzdRIgARIgARIgARIgARIgARIggSwmQNGdxTeXQyMBEiABEiABEiABEiABEiABEkgtAYru1PJn6yRAAiRAAiRAAiRAAiRAAiRAAllMgKI7i28uh0YCJEACJEACJEACJEACJEACJJBaAhTdqeXP1kkgIwmsra2TXfUNGdl3dpoESIAESIAESIAESIAEkkmAojuZtNkWCWQJgV31jbLoJ+ulubUjS0bEYZAACZAACZAACZAACZBAYghQdCeGK2slgawmsGt/g7F0P7tyY1aPk4MjARIgARIgARIgARIggVgJUHTHSpDlSWCAETh0vEWaWzvNqGve3UNr9wC7/xwuCZAACZAACZAACZBAZAQouiPjxdwkMOAJvLxmu+ze3+hw2LLrsLPPHRIgARIgARIgARIgARIggWACFN3BPHhEAiTQD4Fd+xuDrNubd1J094OMl0mABEiABEiABEiABAYwAYruAXzzOXQSiIbAoYaWoGJbdh8JOuYBCZAACZAACZAACZAACZBALwGK7l4W3CMBEvBBAHO6kc4bO8RssXRYtkYxx9hWvLnDjHNZTa3gg3nsTCRAAiRAAiRAAiRAAiTgl0Ce34zMRwIkQAKbe+Zvlxbly5XV4wwQzO+Gy/lFE0ZmHSCMFxHaIbR1XfKpE0bKzMvGZ91YOSASIAESIAESIAESIIHEEKClOzFcWSsJZCWBlp51uSdUVcrsGZMcob0lS+d1t/REaQ8luGc+sFyWrarNynvNQZEACZAACZAACZAACcSHAC3d8eHIWkggqwks+fl7JmJ5c1tgqTBYtcuKC+SiiSON+/XabfUy58bqrGIA67a6lsOVHhb9ebOmCV44IK2trTNbrFnORAIkQAIkQAIkQAIkQAKhCFB0hyLD8yRAAg6B1Rs+CJq3fUV1lbl2RfU4gas5LMH4qCB1CmbwTmA98g4zgnm3XmK29vhKiwvMOV2zPIOHyq6TAAmQAAmQAAmQAAkkkABFdwLhsmoSyAYCCJJmB0obVVkaJK4hRLFWt1rBs2HMGINtwfaarz6hJ5Ac1ynPljvOcZAACZAACZAACZBAYghwTndiuLJWEsgaAgiSZicEEsuUFGq+NV4ivLxme9hhqAUblnyvBPd6TfZLCT3HLQmQAAmQAAmQAAmQAAmAAEU3nwMSIIGwBHSJMM105YUB13I9VitwOgZTQ/RxzEdHwjgwRxtzsSHG19bW6xA8t2rBtl3K3Rn1BYT7xYQ7H49JgARIgARIgARIgAQGLgG6lw/ce8+Rk4AvAm7RjXncmZIQ/AziGQHfdtU3mnW2YaHuzzJtj/m2GZMyZbjsJwmQAAmQAAmQAAmQQBoSoOhOw5vCLpFAKghAiNou09oHndu8YO5VgvncmZRUXC9avl4+/tGAhV7PYRyhgr/peuSwZId7yVBWHHA916XUMokN+0oCJEACJEACJEACJJAcAmnvXl63b6985d575HN332E+tVs2OWSaTjTK1+f/kzmPLY6ZSIAEIifw7MqNMn/JG54FnbnNxQVBAdQ8M6fRSV1bG12C0F69YU+f3qm4xoX5z7zuLAM287LxJip7nwKuExPGBpYPgxWdiQRIgARIgARIgARIgAS8CKS1pbu9vV02bdwgTz69RAoLCwUC/JVfrJDzJ08xY3n+uWfkrjlzpXrqxV5j4zkSIAEfBDDHWYOKrX53j4waWipYl1qt3jq3Wedu+6gyLbJoNHVdY9vuFJY8w5zuF2q2CQQ2EgQ45mY/eKcY6/ZtMybbRbhPAiRAAiRAAiRAAiRAAlERSGtLN4T2zbNuN4Ibo2tsbJDikhJz/Oft78uwYcMpuKO67SxEAr0EsN60uo0vXP6OPPjM62YedG+OzNxTSzdeFuj4MJLZV0+SBXOvFriOwwJe8+4e84IBeXCslv3ZMyaZFxCZOXr2mgRSRwAxEey4CKnrCVsmARIgARIggfQgkNaiG4hg7f7OwieMC/lrq16RT9/9WUPu4P56qRw2zHE9Rx7kZSIBEoiMAEQp5mtDhKo4TTd3aQhjey62nxG2tHaabLDYT6ga4hS54sJAILg5N1bLg3dOF7Vo4xjjV8s3yum+U9i1M3ViYPk0203dlYWHJDBgCOB7gKkq855aZV5mYeA4t6ymdsAw4EBJgARIgARIwItAWruXo8Owdn9l/tdM3+Fevvib/ypfun++Od65Y7vjev7aq7+WN994Ta6/6ZNe4/R17sCBA77yMRMJZBuBkhyRB2ZPkVV/qJP/emOX7Kk/Kvg+nGrvMkMtKcwzx17jrqrMNafffb9Orp86zCtLROeOnmiT4RVFThn04ZkVfzCfL9/6UZlyVq+AdjK5dlDmdxsDc7iHlpyR7hFFsrYnT1luuxnLyBKRkSVFzrguHFckc2dOdI5dVXoeHjsWmMvdePJUROU8K+NJEshwAms37ZUVaz8wo/j9pg/M34OVv98ub207KJNGFcrZI8syfITsPgmQAAmQAAlERyDtRbc9rBGjRsvw4SOk6cQJc/r6Gz/huJ6PHlslGzest7NHvD9mzJiIy7AACWQTgWlTco3obmo7Lfg+qAV34rih5thrrIdPBUR3QUFByDxe5UKdO3zqsOw51mHmVaP9F1a974j/+oZuufby/r+nmKf+4ZEW08S4saPk0o+Wysp1+0wguAnnnhWq6Yj7P3jIMCld+UfZd7hZcgrL6Y4ekiwvDAQCuw697wzzWFO74PuBvyVIRWUVMmZMwDPEycQdEiABEiABEhggBNLevdy+D0cOHZSjR49IeUWFXPCRalm75neOSzkE95ixgSWB7DLcJwES6EsA8y01QJp9dXTPkmCHGgKC1b6WrH3MxX57a72JJI6I4ir80
b69H64/KK/JzOkeWiorHr9dFt13rZ6OyxYu6Lqk2I9W0YU2LlBZScYTwLQNfN/M1I6xAc+ULTsPZ/y4OAASIAESIAESiJZAWlu6sQTYk489Ipi/jTR4cLk8+LVHpLxiiPmcN/F8ue+eu821a66bGZNrebQAWY4EMpEABCKW0MKPY3veMiKXI0GURzqHOl4c0Db6pmtgI9I4+rhg6VtOE8ijfXVOWjvrtvWKbut0QnY/c2O16S/6PO/WaU7U94Q0xkpJII0J6Mu6CT1CG12F8GYiARIgARIggYFOIK1FN8T14wufDnmPMH87ljncISvmBRLIcgL649hLuCKgGqzgWD6rpbUj6STQLtKKN3eYLQQ3IqzbCUuczbv1EvuUsw9Lub4wwFgSncDwhkvHG+GNgG8amC3R7bJ+Ekg3AngZhjShKrB+PfZNsMGagJfKHBHB9xN/f9RDJN3GwP6QAAmQAAmQQCIIZJR7eSIAsE4SGAgE9MewjlVdy22LlF7Tc3AH1SjmftbobmnrdMSu1hXNdneP6Mb62kgTxvb+gNf6Vm/4wLiaw/o984HlQe1C+CJBCOtYtFyitldeGJjagrW/mUiABHoJ2FNWILgR2XzJyo29GbhHAiRAAiRAAgOAQFpbugcAfw6RBJJCAEK0ZsMe46Z9xUcDArG0KN/T9VOtVLv2N3gK3lAdxg/qFWt2CJbeijYF1skOWNeffeAmpxq1yONlAYKkIR/aEzlj8kDsqpv82h7X8ttmTAqyuDmVJWBHLer6MsNuAn2li61NhPvZRgDP+Pwlb5hh6csyHaN+d+0pIdjn90IJcUsCJEACJDAQCNDSPRDuMsc44AlAQOOH7rJVtcbSBCAqrt1w9Edyc6s/yzWs4O4f2u46/R6ra7mKWK9yasnGeNRFFUIcybiuYr53ZWnI8XnVGes5iGplgDWJISiQsF3y88itelo+1n6lS/lsG0+6cE2Hfqj1Glu8yJt/5/Q+3cL3Gd9JfGf1u63f9T6ZeYIESIAESIAEspAARXcW3lQOiQTcBCCg3SmU67We13ndKGfmZborsI6vrB5nHUW/ix/lSPiBHiqpCzd+tCPIGhLOQdipINcf9qHqSMR5dcHHiw0VFIuWrzfzVyNpDwx0HJGUS9e8EGNeHgDp2l/2KzIC+O7hmcVLp2UP3+L5sgurBug1DZCo3/XIWmNuEiABEiABEshMAhTdmXnf2GsS8E3AXmbLFqOhXJ5xHhYrJA24pvMyQzU6oSow/xoW9ViS/hAf3RNFPVxdEHLoqwpvuLdirjeSzrEOVz7e1y6a2Bu0DQHoEOxNLfCRtIX7peOIpFw65bUt23iBYC/hlk79ZF/6J2Dfy3C58eIt1N8Uu5zGaMDLGCYSIAESIAESGCgEKLoHyp3mOAckAfxgXrT8HcfSqPOeASOc9Vpdz1UEq8t5KIgaXdzLoh6qjNd5fUGgIt7Oo+7b9jn0T13M8SMe48ULAz1n5030vv1CAwHo1CKvgeFCtf/gM6+bYHAICIcgUxiHfkKVSffzCHAHN3ukLbuPiHsJN4wPLyWY0p+A1/QI3D9N+qKtv78Rml+3eLHkV9BrGW5JgARIgARIIFMJMJBapt459psEfBBAYDMVzsgOF2gz7/Ku6WbfRxXOXGU/eWPNo5Z1L/dyWNIgYNF/vBSApfugWXqoShbed63TdFmPld45kaQdWPnunTVNnl25UQ4eb3G8BCIRFrbYhoV4nrX0UpKGEZdmcJ9wf/DiQQUatrhv4LHgh2/J1PN6PQPi0igriTsB/O3A+vPw4rBf2L28ZodzTl+0+RXd+kINzwEE/fy7+s4Bj/tAWCEJkAAJkAAJpJgALd0pvgFsngQSRQA/anWta20DP4wRFTwSS7AfV2+tP9KtCjItpy8I1NKu57GdPWOSY8VWUY7yELt4maAfr7J2PYnc17bx8kDH0l97Ot/ZfnGAMpnsYq4vGuz7q5Z/vEwAm90HAuux98eH15NHAJ4mOkcbrb7dE6BQvUBqFO4AACAASURBVBZwX/HZsvuw48kQae/UKwblIOj9fk8ibYf5SYAESIAESCCdCFB0p9PdYF9III4EYOVW8YNq1f3Zj0VKRS3K6RzMWLoGYYmP9gc/7uFOrcsMoW51LfdyI8d1iOvbZkwWLAWmLwJaPALExdLPWMvq3Pf+XMq92sFLA71HuA5W0cwJ96o7medUaOMZmn31JKdpCLeFL75j7iFOaj4nA3dSTmDLzsOyYOmbTiA/ffEDYYwAgZ95/FfGOo1jfKIJ+IfvCJ4L/Z5j+gsTCZAACZAACWQ7AYrubL/DHN+AImCsULsOG0HjtnJr1GA/QFTUIq8fkd5fnZi3jI9G9db86C/csSHIVr+7x5y229Z8uoW1G9ZkzEdHALV49E3rjscW/YFw1pcLWqe+UNBjbHEOc7iR9CXHgrlXGWu+BrKLRtTYbaRiv7ktECkfLObdekmw639xIECfjpfCOxV3yF+bENX2/cF3FM81rNOacE5fMOmqB3ot1FafC0Q0x3OO74HdTqhyPE8CJEACJEACmUyAc7oz+e6x7yTgIgDrNn4IwyrsFn7RWqz9CFv9wa0/wF3dcg4hrGHRtZM7oFa4fmJcSOpKbteTLvtzbqyW+c+83m93MPdcxYYyxvgW3HO1rN1aZ6YG6JxodVvvt9I0yOCMqWfZN/t+6zixhQu+CvQ06HbadwHf50eXvmX66Z6KEK/OIxYBEtqa8/gvw1YLwQxhrkm/m3rc3xb58VzDAwYv43Csz0d/ZXmdBEiABEiABDKNAC3dmXbH2F8SCENAIwmr4FaLKYqEi1Yepso+Itkrr/7gRrv4IQ7h72XdheVW+4Z6YPGcM7PaqRLH0fbTqSTFO243cXQHS4hpAht8IDjU4qvXsEV5WIjVNRt5Mympy7/tsYCxwANABbi+pIE7M5M/AhCm+E55fa/81dB/Lg1kqN9nlNDv5w2Xjjf7uI84h2c01qTPgwk+2NAr4GOtl+VJgARIgARIIN0I0NKdbneE/SGBGAhoJGH8QJ55+XipWb/HcQdVoeOneiN8a8S4f/rJb+eBsIZQxA9z/Ki2RTby2YGaEBEZlmEVlrYws+vMtH23tXvR8vWycF6pEdqYG4sEPg/eNd1YxVV82ONEHZgioAHI7GvpvK+iUKNUo69ugWaLunQey0DoG+4XPFBUXGPM+P7X3HinEfj4u4FjfUbnWFAQcyCW57O0Z7qB+2+E1QR3SYAESIAESCArCNDSnRW3kYMggQABde/+zI0BwWtbGyMROnB9hgC2lwmKlrF7HrdXcDC0hWT3N9r20qEcBIqOCf2BqMAyWW5xgXy22LH7jvulwaZUyNrX02kfLuXqatzSM6fbjlKdTn3Nhr64n6NYxoT7hhdlcCfXvx+67B6eTzyHKrjd7SyYe7UgDgHiK0ST7GkT9HqIhiDLkAAJkAAJZAoBiu5MuVPsJwn4IKA/xt1zI1W8+ajCZMGPYQQ6clsow5WHyMRH21ZXdy2jrtRbdh3RU87WWLxnVkf9492pKI12YKmGe/+Dd0434hnixitS
M/KFEi0qdnSedBoNL6grmJutY9O+at+DMvIgagK2KHW/yIq60h7PEy2vfz9sMazXQm2x/CDEdzRJxX00ZVmGBEiABEiABDKJAN3LM+lusa8kEIaAih0Vt3bWSKzcdrlI9iHSkdQqq67uTr+GlgrcSWFNc0dWj4dFPZK+JiMvROeyh28xlkK46CJS+aGGU55NhxI5et90nrRn4TQ4iTnruO8abd2OJRCueyrywuXhtcQQQADDF2q2Od4X986aZlYSSExr3rWGeu69c/MsCZAACZAACWQugYgt3U0nGmXZ889Je3t75o6aPSeBLCSgQZDU0owhwopas/hOY7VO1ZB1rV8sWabWz4EitlQ0q7jQFxB+74XOi9YXGbr1Wz5Z+XbVN5qmEBALSccbqn0NlhdPi22otnjemwCeTft7iO8mYkEkO4Xy8kh2P9geCZAACZAACSSSQMSiO5GdYd0kQALRE1Dho8I2+ppiK6kuo5jbix/1KjSxFJjboq3iK7YWM6O0bf318kbwGoU9LxqCW124vfKmwzkVcX7Hlw59zpQ+KFv0146GH23/3X8n8KIEsSAinYoSbftaDu7pSLpcmZ7nlgRIgARIgASyiUDEoru8YohMu/RyefON17KJA8dCAhlPQOdQ25buVAxKrZwQ21iD1064FipwmJ0vG/eVC8bm9x6pMAJHWJExL1xfYqQTI332tE/ZEhBPx5MOW8RX0O+OvmCLpV94BvFyBNZtFdo4d9uMybFUG3FZ/S6s21YvGmQR69wv6FmTPOIKoyyw5OfvRVmSxUiABEiABEigfwIRz+l+7dVfy4vLlpqadYuD0WOr5KGHHxWIciYSIIHkE9D5wvojNvk96NviZo91mOHy/nZtnRMpuW+p7DwD9/poEoQRpg6o2MYSTbaAd9cJYX6wocW48kO4wCqKYG6JfC50/j6s+fBwULd4d9/8HMOiq275fvIzT/QE4NqN7yOeGU1ubxQ9n+htwCum0QRjTMU0Co0zEUnwyEQzYf0kQAIkQALZQyBi0X39TZ8UfJhIgATSi4CKMrWOpkPvtuwORCqH6LPTsw/cZB8OiH2410ezpjHEss7XBygIEnutZDc8XMfLDrj5q+UQIlxFN67H+xnRZcJgJcWa67ZbvLt/OB5dWWpO2+PSfLDqo7xXHyHM1tXWm4BtC3sC92m5gbDVlxluz4Joxw7BjRccZVUF0VYR13KYbqLz/O0l9+LaiEdl+rcTwlvF97Kv3+J8ZzyK8BQJkAAJkAAJREQgYtEdUe3MTAIkkBQCaqlKl7m0butsqqxnSYHvsxEzf73GZ2YrG8QnhCjcgFdv2NPHZd/KanYhuOGqe7ih13qpc4DxnKxYs91T0LrrieRYRQtEHAQhorWHS/oCAP1BWdtyD9dpjPcil6h2R9sOV3+2XtPvtwrTWMeZjh4F+qzGOrZIyntZ1nFu5tDkB5aLpN/MSwIkQAIkkDkEIp7TjaHBxfxzd98R9Pn6/H8SRDZnIgESSD4BWDKRVMwkvwfBLdr9SKbFKrgX6XWk1t1IewWWcNued+s0p6gdVMs52bOzZfdhE8DOFhI6BxjnvNZJd9cRy/G8WZf4cg+fffUk04xGPLfbNILngeWy8MV3nNPwErDHbY/PyZTlO/pyAi8rdHm2TB+y7dGAfX1W7fNzHv+lzHxgecLGjJc8SPDIwdJpSF5TY8wF/kMCJEACJEACURCIWHRDWNdu3Szffub7ctecuXL/Qw/Lo99cLJ+4ZTbnc0dxA1iEBOJBYEvP3Gn7h2o86o1HHenYp3iMK9I6VDyjXCTzu1Fu/l3TjZDVFxihLJ0QY/ho0vx6DHfzwNzZBj0V81bFrx2My0+lsIrjZQLKw4qtSevDse1+ri7s2o5a17XcQNlqMDW8rMgWBhiTjgvPhTvpM7363T3uS3E51pgE+K7p3yt4iyCgGxMJkAAJkAAJxINAxKIbjQ4bNlwKi4qd9keMGi27d+6gpdshwh0SSC4BXW4HPxrTLQ2kZcH6Y6+WSszv9psgAnRZJXUvVhHirsMWrBAx6tavc4B1Trmdz11HtMeRuiojvwat0rXc3W3DAomxYqsC0xlTz9rgKIM8thU83ZdWc48zkmOIUkw1wHiX9KyLHkn5dMyLMdliu2bxneYYY0SMAE3uFzR6Pp5bfEfxPUPbifiexLOvrIsESIAESCBzCEQsulVst7e1mojlB/fXZ85o2VMSyEICsP7ApRgpXUS3bcntb35vFt6SkENS0RwyQz8XLpo40uTQYE/u7CrGYQ2GiNHnAZY825qs7rTu8n6P7brU+qgWQr91IF+vgG5wRPPu/cHTlBb88C150LI46osL2wqO/sx7apUjkuB6nc1LQOlUAzerSNhnQl48p8tWBUS3rnP/Qs0251mJ1xj0+6DPsD6X8aqf9ZAACZAACZBA5KK7sFDOmzhJmk6ckCFDKuVXP/+Z3HfP3XLs2NEg6zfRkgAJJIaALbhgjVm4/B3HpVh/NCamZf+12pbcSC2g/lvJvJyfubFaEHU72h/1Wk6tvqEIXFk9LugSxJk9d9o9Pzoos48DlIewhUVZ5xaXRrkkmrrAw9qNceGZRtLz9lghvHRuvC04ESUfLxzgDqxWbnxPNHq7jyFlVBb9TimrjOp8BJ2Fh4eKbUTGxxJnGPOKNTsiqCXyrPo9i7wkS5AACZAACZCAN4GoopdfOeMap7bvPPu8s88dEiCB+BOA6MB8Vlgv8WMbFrxRlSXG5die24sfpOmWdP5tuvUrVf2B5Vmtz7H2AeJDxVeouvQljIozBImCVQ+iGZ9oxQXqgKUZQheiCILILfRD9cl9Hp4QqM+2TMMjYNF91zrCXssY19+eKRQ6JmxtYa4vAVAGQbnUNV/rGOhbvJgARw0Ylu488Bw0t3Waly279g8xz+3btXVGgKvXQ7zHYL6nlYGl+vCMx+s7G+9+sj4SIAESIIHMIRCxpTtzhsaekkB2EMC8TbjXQkyo+/Ci5eudfR1lOoqL/kSh9p1b/wQcC7DLDbu/GvACBOJYvRBsodpfWfu6znPVZ1Hn40YrTNRl3m5D68JLAfca78inL3M0ujnmryMaeqYISXus8d7X++NVL+4Zrr+9tc7rclqeg7DGyyM8E7r+O7wc7BeOsXRcn2O1qGtd+gzqyhB6nlsSIAESIAESiIaAL0t33b69suiJR+XkyaaQbYweWyUPPfwoI5iHJMQLJBAbgZbWTtEfgLDu/WhVrYy2AqepVTO2VuJTGsHT5ki1TKgKv15zfFobuLXAhXpnXYOJbg4KXgH1YDWGVVrFtq4X7ke0QKC90DOn9pG5VxnLukbKR3sq5GO5A9ov1AHRDIu5WrFxDsIbMQIgsHVOPNz0Fyx9y8QywIsdOwgXrNtYzxxJeZiDLPsH7CE+YbnGfcLLCbCCe/2yr9/iOVr9+6GC0jNTGp+040NgSa9Qnhp4fvy+8FMmibKapzFOdo0ESIAESCCJBHyJ7nFnnyOpciN3C34sUVY99WITKf3Jxx4RDeRG0Z/Ep4ZNpYyALXhUWKAz+AGeTj+k8QIgnV4
CpOyGJaBhDVKnS0bZ4kKDi9nPAvZxXl/QqHCBS3d/4mTt1jonOBlEOu6pbUmdf+f0mEeI/mGeOxL6hvGMkuAo/PDiWPbwSMe6qV4daqW0O4Hl1ZDw/VAe9vVs2df7rvcDL2DAD0zgFRNKkGby+HXMGMPuA8EB93AOY8dLB/w91Mj4mTxe9p0ESIAESCB7CPgS3akabnt7u2zauEGefHqJFBYWCgT4K79YIedPnmK6dMGUj8o3HvuWuZaqPrJdEkg0AYgjTbYFUM9hC8sf08AgAMsw5mOrezieCeyHstRBfEEsqwiDcFErKaYpID4ABBvcsyFUjGjbsEfgWWEv5dXSE+BMKcOlO1Sbmsfv1s8LGvTbzgd3YMQ6QH/tlwxoc+bl4x1rt/YBQhT5UEcoUQoBa7ehZTNhi2cA40LCMlt6vzOh75H0UZ9dff5RFvdtXU9wP3wfYL3uT3QjKnrNhj2ORVxfZkXSF+YlARIgARIgAb8EfIlut7XZq/JEWJohtG+edbvTXGNjgxSXlBiRfeTQQSkqLqbgduhwJ9sJYK1lLP2EBMtgpoqDbL9PyRyfChCIjlAC2Et8PTr3arl38atB0b3V3RyCRZdpsseiQcnUeuxVr50/0fsYL15Iob9u0V1WlG+ahyhHgkCDZ8DfzvyosQZjH1ZhNzO4amPeO/LZVtVEjyVe9ePlCRJeROAeanBFHae+OMlkgWnfF31JUrN+T9BLFowfkevVI8KLLwQ38h2SFnN59ozJXtl4jgRIgARIgATiQsCX6E6lezms3Uu+u1i2bnpPLrz4Epn35Qecgb/9+zfk1V+tNMfXXDdT5tzzRecad0gg2whAcKvVW12Es22MHE//BHRpLszhhWjAvF5YpSNJEKlww8a8aE36bOkUBgg2CBEVrOplgTaR3EJX60nWNuCK3ruEmN2uiky1hi76yXrjSo++Y7kpjCXUiwqs+Y3o2Av+7qo+otxuI9X7GL/eM335YvcJ1u612+rNPHiNraD31p5Hb5fJhH297/D2QLyBFcUFzssjxATAdwFjf3trvSO6sZb8ydYO80IFY8RzgWcBMQIQDwD7fImZCXeffSQBEiCBzCXgS3Sncniwdn9l/tdMF2BxX/zNf5Uv3T9f3C8Clj3/nNRu2WTme6eyv2ybBOJJQIUO6rR/YNvWnni2x7rSnwAEpQbNcluk1bKrlt5wo/GyAs57apUTrAzCzBYiagkPV2cyr/XnPqx9gcBS8a3reeMaXOdh1dYEEa4JIgyrBmC5qngl1I81xuP1ssL+G2Dvo78qwnXs2bReud4ziG77nmHcuIZ7B9GNOf2YdgNPiIXL3zGeC/DOACsth5UAQnlsqDeAegfE6zlgPSRAAiRAAgOTQFSiG+L2208+FkRMA5wFnYzzwYhRo2X48BHSdOJEnyjp0y693ARVQ5C1aNOBAweiLcpyJJAQAu9/2DdY0F9MHiZ8VhOCOyMqLcvtkpHjiswzMLTkjOnztt0H5MCBYY64LMlp8/WMXHDWEPnTh41yycRhsu9wiymvArW5udnUkdPZZtrYf7RJ3tn8Z7N/1ohSX/WnEqiObcXvap1u6NhwwgjSDz6UksLAf4PHjgV/1xpPnorrGFf/351y9sgyubp6tNOfWHZwfzR1dHSIjhfn/uGTk+VffvQHOdXepVkE9+zDIwEvBb23zsUM3Pmbv5ogre3dpucr1n5gxqd/F6/66Gh5a9tBOXz4sIysKHLG/spb2wz/9dv2mXLnDC8IeY9HlQemKGzeXifjh+VmICF2mQRIgARIIJ0IRCy64e69ccN6+fYz33eEb9OJRln5s5dMgDNYphOVMI/76NEjUl5REdQE+vTaqlfk+hs/EXQ+0oMxY8ZEWoT5SSChBA6fCv6xB8vM3918WULbZOWZQ2CceT62SdeZXLH/ftn74UbzT/+zSJrbOo0FFm7rtz38spN9wtmjTJ2BP4vvyNETbVJUFvjbW1leGtSeUyiNdqace8C8UKh5r75Pr3QZtc37ThnrKAT4f78VEGI3XBoIwrbvcHNcx7j5gw1yJqdA2rccM3Ot1QW+T+d8nrhocrdAbCJNObc3sjsCzF14wXj5578JLLMG7wec+/L/nG6WF0P+srKyuI7NZ5fjmu3vbu79/7q5Y5BZVk6f+7+/tVya2t4xHNDoX19XbaKa/7GuWT59wxipP77B9OVjHzlXxoyp9OxXWdkxcz4bWHkOkCdJgARIgASSSiBy0d3WajpYWFTsdFT329ta4xrYDGLeXhZs8OByefBrjxix/9qrv5YXly11+nDXnLl0LXdocCfTCcA9EtGh4SqpCT+cF8y9Sg+5JYEgAmrF1bWsgy6GOAgn/Gw3aLzswfQGLCGGpK63IapNi9PusamYxlhumzHJzGeHizJckvHiQfnp0moYBKZ3uF23oxkcvsf4rGutN3WiDnf/QtWLcva90HylxQW6a/qoc7y1XkwfuG1GY2Du+tghZqoA5uljzGatdqd05u/AjVxdxjEa8LKnBig/xMXAPdW/q8oq8wlwBCRAAiRAAulOIGLR7SWwIbaR9Fq8Bl1eMUQeX/i0Z3XX3/RJwYeJBLKNAH4UYq6u+bFd2bte8YJ7ro6LAMg2XhyPmPWoF/wwEBRNI1ZHw0XFtbusCnldQiwTAnGBg67/jfFoAK4rLhwnOuddI7Gr4NZxKwddm1zPR7tFYDYkjdGAAGcI4NVfQr8wzx79sUUkyukYsA8R3RskbYhTLdqY4xyJiQUwb/Eq60x27EJUzxw6PuRgcO+R8OJIYxOAKRMJkAAJkAAJJItA5KK7sFAwf/r++z4f1EfM6U6ka3lQYzwggSwmoD8KseauWmJg9baDWmXx8Dm0KAio5Q7iOFRgqCiqdYrAkojAVCoa1XLoZEjDHVio3d8Zd/A15abR32ENnz1jkmM1jUcQLTB7oWZbECGIafNSbWjvS7WgDD0Har3Vlx52Hv3bgHMI0HYRBHY/Qh5M4C0Dy/5ASra3gr6ccD8bA4kHx0oCJEACJJB8AhGLbnQRwcp+8OOfJr+3bJEEBgAB2+qm+9nmDjoAbmNShqgWPDQGwYhlwGJJtriz64bIxosfTHtAygTR3R8HjfCtwhb54VquYt1YResbnWWn+qvP6zoE9/wlb5iXFZgegvnVYAwLO9oNZ51FWay7bfoz0dsqW7P4Tl/i3e6bLdbt89m+r94L+gzrMmqhxq1L8x20pviEysvzJEACJEACJNAfgZz+MvA6CZBAcgnsqg9EUbbdH2HJYiIBNwHbgmfPRXbn83usdUAg2nWjPKyoEKpItiD3W3e65VMXebzYUgu+Cq149RVrgqN+cMP0EHyn1Rrd3zJe8HiBNRz3Ipz3Qja8AIkX73D1uKdd6P0PVUZfTugUhFD5eJ4ESIAESIAE/BCIyNLtDmymDSRjuTBti1sSyHYC+iNPf5xjvPxhne13PfbxxcsbAsJw/p3eFnO4Zy94/s0+gjz23ie/Blg6V2/A0mGNxvKMHqjQUiso5mJj32tNcz89Vis63PPhztwyY5IR3iiLgGbhkrpBu8ViuD
K8FpoAAubhHmDNbkwn6O9vqr5YgscDEwmQAAmQAAnESsC36FbB7Y4SHup8rB1jeRIYqAT0Rx7nHA7UJyB148Z8Zvtlj7sneCZjdWF315mqY/1+Ya6626qvkcF372+Ut7fWRy269busXisq3tW1/dmVG+XeWdM8Eahbc3/i0LMwT3oSwEuVZx+4SfrzMkBhPBM6FcDP/HvPBqM8Cc8LPHvY6jMTZVUsRgIkQAIkkCYEfLuX79v7gfzVdTP7LMuFCOPz/vGrsnbN7wTrZTORAAlET8Bxcy3Kj74SlhxQBCDa4hVozy0+vUBmiwiAAAM3JP3e6XjVyonjLbujs3RqPAYINzdXWL6RXl6zvU/b2gf1eFHru57nNnYCfp9hfeGxa39D7I1GUANe1jz4zOuyaPn6kM9HBNUxKwmQAAmQQBoQ8C26D+6vl9Fjqzy7PGLUaCkuKRFdOswzE0+SAAn0S0Ajl/OHdr+omKGHANxmw1mnCSo0AXBb8tUbjcs3onqr9VutnCgJK6cK6NA19b2iEcJVuNk5IPrU+q3WcPs69vW8/QLAnYfHiSWgz8OPVtU6Ee0T22Kgdo3rgZdBjy4NLAWYjHbZBgmQAAmQQOII+BbdiesCayYBElACukRRWTEt3cqEWxJIJAG84MIa2G7rp4pitI3l+9zJbR13X+/vu6yCTgWWlsc8cLtut5Vc83GbeAI6tx+u3jrHPpGt4uUOvB/spHEB7HPcJwESIAESyDwCFN2Zd8/Y4ywmoD/A+4usm8UIODQSSAsCF1nLdK3dVi8zH1huPhBGi5a/Y5YCc3cUVnFN/X2XVdDZogp1v7CqVtTjxRb+Wi+3ySNgL6Fn36dE9QAvdzDPv2ZD8EueZLSdqDGxXhIgARIggQAB34HUkP3bTz4Wkhtcz2eFvMoLJEACJEACJJA5BGCJVtFli+l5T61yBrFsVa0gajzyQjBt3nnYd6A5DdaGuduIZI4o5S+v2WHcmEvX7DBt0OPFQZ2SHXuaj7r7h+oIvBNgEddl9fxESHfXtXrDB+aUPm8acA9WdvWMcJfhMQmQAAmQQGYQ8C26r7/pk4IPEwmQAAmQAAlkOwHMxZ536zQTvRwRzu2EtbNb2jplWU2tlL1ZID/6+s0mIjbEM4KkoSys40ihlnJTEQWBtWDpm6YMhBoSomujDQ30Zk7yn5QQUOGLxmFx1vvm7syCpW+Zefg3XDpedh9olCs+WhU21gJE+rraejkjYtZhR932tALUj7Yg5GH5ZtwGN3EekwAJkEBmEaB7eWbdL/aWBEiABEggSQQwz/vKC/sGELVdzyGU5i95w1l3G+tA49zBY81GOPsJhAaBDfFtiy6siW5bWpM0ZDbjIoBAhermv6LHA8GVxRxCHCPhBQ2mCfTnEg7PCDwrcCfHy5vVPXED4PEA4Y6EZ8AsWxZlMD9TCf8hARIgARJICwK+Ld1p0Vt2ggSynADX5s3yG8zhZRwBCG+IYgQ0gys4BNjsGZOlubXTiTAOkaXpthmTTN4Vj99uxFe4QGiInI4o5xDmWCJKLd2was68LCC8tF5uU0MA9wEW53sXv2o8ELys3XhZYr8wQU/7c0fXe41ymKagCfE8cP9b2jrMKYjwFW/uMFMP5t813Uxj4LOhtLglARIggcwhQEt35twr9nQAENC1eb2WGRoAw+cQSSAtCUDsQAhBcEOA4aPCB4JcE5Yds6Og92epxnXUBWGOCOpLHrhRUIfWrfVym1oC+HsMizfSCld0cZzTwHfYh5Va53WHs3brC1bk19gBKF/as3LFg3dOx6HT7rpt9cYbwiuSvsnIf0iABEiABNKaAC3daX172DkSIAESIIFUE1AhbYthWCAX3netlBXlG9dyCCzNF01/1SJeVlUQTXGWSTAB3Hu4gW/ZdaRPS+rpgGcCL2gWvviOmYuN83ip4pX0BevMy8cbDwo91hc1+jxA8Ou88geXvG6Et1d9PEcCJEACJJDeBOJi6V72/HPy9fn/JE0nAnOa0nvI7B0JkAAJkAAJxEYAogiCCiIJVvB5s6bFViFLpzUBiF94NcAdXKOLa4fVVVyXehw9tNRc0vOaz94iEJ8me97/6MpAWb2G7ZXV48yhu107D/dJgARIgATSm0BcRPece74ojy98WsorhqT3aNk7EiABEiABEkgAAbVQJqBqVpkmBDSAHrwa1LqNrqkbuUaq91qD3T0ELa/TC3Ad0xe8phZpve46eEwCJEACJJA5BOIiujNnuOwpCaQvAVgx1MXQy9qRvj1nz0iABEgg+wmorKqcjgAAIABJREFUJRtLui344VtO8DS1WmOqAZLO0dbzOGcHWlPBrfngKVGz+E4zr9+LIoQ5BLnGD1CR75WX50iABEiABNKTgC/R3d7eLs997ztSt2+vMwrsf+Xee+Rzd99hPrVbNjnXuEMCJBA5AQTIUfdBL2tH5DWyBAmQAAmQQLwIqMUZ67Hjb/Wi5etN1Sqi1dtBt3oemRYtf8exjuvLVbWI++kfAu1pvciP+eX4MJEACZAACWQGAV+B1NrbWqW4pERGjBrtjOqN11bJ5+/7slRPvdjM5V75s5fk/MlTpLCw0MnDHRIgAf8E1HqhkW/9l2ROEiABEiCBRBNwB0WDxfvlnmjmarV29wEWbsz/RwC2RT9Zb+b+17z7gcmmlnN3mVDHZT2RzddurTPLiCEf5oPHEsAvVFs8TwIkQAIkEF8Cvizd7iZh5W49dcqIbFwrLCo2WSDOmUiABKIjoNaPR+deHV0FLEUCJEACJJBQAu6Xoroet9s7SfOtWLPDzPmG+IblG+uxQ6wj2dHw/XRaRbq+oEWZXfUMYOuHHfOQAAmQQKoJ+LJ026Ialuw//bFWzpt4Pq3aqb57bD9rCNgRcd0/3rJmkBwICZAACWQ4AVi7d+/vFbpwNUdyW8F1ya8Vb+4QXZMb1nD9+478uh8pErv9SMsyPwmQAAmQQGoI+BPdhYUy7dLL5f77Pm96OXpslTz08KNOj48cOmgs3yrOnQvcIQES8EVgV8+POATLYSIBEiABEkhPAphXjfW4YaVesPQt00n83XZbrdXdHC9UV2/YY/I9eNf0PuI8klGW9riX22Vg9Z5jn+A+CZAACZBAWhLwJbrRc8zd/sGPf+o5iHFnnyNf/NJXPK/xJAmQQP8Etuw8bDLZ67X2X4o5SIAESIAEkkkA4hofiGlEE0fk8dtmTO7TBV2r274Q66oUaBeiH/9fvF1bF2Rxt9vhPgmQAAmQQPoR8C2606/r7BEJZA+BXfsbzGDs6LTZMzqOhARIgASyiwDcx5994KawLuLqTh5q3nekRNAm3NLxQST1+c+8HmkVzE8CJEACJJAiAlEFUnP3ddnzz8nX5/+TiWLuvsZjEiCB/gmoezkt3f2zYg4SIAESSAcC4eZkQxTDCo4PEqcOpcMdYx9IgARIIHUE4mLpnnPPF1M3ArZMAhlOwA6iRkt3ht9Mdp8ESIAERASu5BpcDYJb53jHC466quuqF/Gql/WQAAmQAAkkhkBcLN2J6RprJYGBQWDZqlozUFpCBsb95ihJg
ASyn4BtBYe122uOdywUtP5Dx1tiqYZlSYAESIAEkkTAt6X7tVd/LS8uW+rZrbvmzJXrb/qk5zWeJAESCE0AP5iwpAySO/pt6FK8QgIkQAIkkCkEYPFOxtQhrAPe0tYpWCNclyyLJyPUT2+seBJlXSRAAgOJgG/RDVEdSlhjTjdEeajrAwkox0oCkRCoeTewlMwNlwYi4kZSlnlJgARIgAQyg0AiRLB75It+sl4gjO+dNc0zoro7fyTHi5a/I1iebNnXb4mkGPOSAAmQAAn0EIiLe/ms2z8tB/bXS3t7O8GSAAlEQOBgj2vghKohEZRiVhIgARIgARIIEEBcECQIbqR1tfVmG49/UDcEN14QwzNLXxTHo27WQQIkQAIDiUBcRHdhUbFh1t7WGnd2dfv2ylfuvUc+d/cd5lO7ZVOfNmBl/87CJyj6+5DhiXQnoEFw6LKX7neK/SMBEiCB9CKgcUCw+sWcx37pdM5YpFfVGpGsQtwO2Olk9LGzaPn6IKFN0e0DGrOQAAmQgAcB3+7lHmWdUyq2VXw7F2LcgeV808YN8uTTS6SwsFAgwF/5xQo5f/IUc4zqIcIbGo7LsGHDY2yNxUkg+QRUdJcV5Se/cbZIAiRAAiSQsQQwTxxrgG/ZeVjgLYX/T0qL8s287mU1teYaBDhe6kJ8X1FdJQvmXm3GC6t1aXF+2LnfKLu2ts7UiWBwz67caOrOWGDsOAmQAAmkkEBcRPfKn70kY8ZWOUI4XuOB0L551u1OdY2NDVJcUuK003SiUTZuWC8fv2qGvF7zqpOPOySQrgRgJdCAabblgZbudL1j7BcJkAAJpCcB/L8Ba7dOT1pbW28ENgK3QXRDNCP1Wrs7zXFXV5e8v+egrKutky/fcamcOXPGGeDhhlNSWpQnpcUF8sa7O6S7s00+cfV5MvOSKvn/f7pOdnxwQJqbm5383CGBTCAwaNAgwSc3N1fy8vLMNhP6zT5mFwHfojtV0cth7V7y3cWyddN7cuHFl8i8Lz/g3IE1v/utYD5504kTzjnukEA6E4ClANYGBNXRpcIQaZaJBEiABEiABCIhgBe4+hIX4nvqxJECr6nmtk6RmkBN+P/lthmTzbxsWMK7u7vl33/2rry7/ZAcO9Emg3L+IJt2HTeCBCXwMrggP1cK8nJNBUOHVMhrG/fL65sPSG5+kTn32SdrnOuBVvgvCaQvgdycQVJemi/jR5XJZ2+YLPn5+TJ48GAjvtO31+xZNhLwLbrDRS9PJBhYu78y/2umCbiXL/7mv8qX7p8v+/Z+IOeOnyDlFUPiJroPHDiQyKGwbhIwP2iWvfKeXPXRUc5SYXOvnyB89vhwkAAJkAAJxEJgZAlKd4vkdjnVTBxdKheOC4hluJT/7t335U/7jkllaaF89faLTL7ZV02U/NxB0tV9WppOdRjhLoNEhg4ulJxBg5y66o62mDxFBXkyujIQy8e5yB0SSFMCeNH09Ipa2VHXIO+9956MGTNGKioqHK/ZNO02u5WFBHyL7nQY+4hRo2X48BGyv77OrBl+cH9whM69e/fIQw8/aoR4NP3FF5GJBBJFQF39Vm/cL5JbYJrBUmHTLzo/UU2yXhIgARIggQFIQOd2X3HReCMyFMFLv/9A9je0yiNzLhWRQTIoJ0ea23pF+qCcXGnpOG2y5+WdljHDyrSoFBZ0Snd7p3SeFikupuh2wHAn7Ql8fc5fyN8/9VspKyuToUOHyrBhw6SkxLylSvu+s4PZQyCjRPeRQwfl6NEjMrZqnDy+8GnnLmiAtc9+YR7fXDlUuJOuBOC+t+LNHaZ7n7mxOl27yX6RAAmQAAlkKIEVj/fGw8EQ4H6OoGu79jdITl4gcGdBfp7k5uVJe0eX5OQMktOne+d243jU0LKgua8Q6IMG5QimgGNuLBMJZBKB7tNn5PTp0+ZjxzHIpDGwr5lNIK1FNwKlPfnYI6IW7cGDy+XBrz0StSU7s28Ve58qAghCE49AZ3DtsxOs3KOGltqnuE8CJEACJEACCSNw5ky3FBUUGxfy4uIiKSsplPy8XMG8172Hmozw7ujqllGVZVJYELyqRnFhvpxqD1jFc3LisuJswsbJiknAJgCxjXTy5EkT14Ci26bD/WQRSGvRjfnatkU7FJRxZ58jX/zSV0Jd5nkSiIkAosDOmVkds/BW0Q2LA6LLagCcmDrHwiRAAiRAAiTQDwH8nwNLNxKCpI0ZHggkBYu1CuiJVZWC6OUtbR0yrKKv+3huTo4R57AY4gOhzkQCmUQA7uVMJJAqAlG9qsTa2J+7+w7zwT7cu5/73ncEkcaZSCDbCGAZlvlL3nCWXYl1fPjxgzVPaeWOlSTLkwAJkAAJ+CFg/3+Tnxf6px/W7rbncdt1Q4gjiBpSW0fvPHA7D/dJgARIgAS8CYT+y+ud3whrrI397We+L3fNmWtyIcAZ1s9ub2sNUYqnSSCzCWAeNoQ3tv0lrMWtCfmxTBiSBlLDsi5MJEACJEACJJAsArboDtcmArCpsHbno2XbTYTHJEACJOCfQOSiu0dYFxb1dT3y3yxzkkBmEFChjN5CQC9a/k7YjkNwwx0d88CRHl36lry8Zrvg/LxZ0+TeWdNkdCXncYeFyIskQAIkQAJxJQAPq4X3XSsrn7gjpnoHDRpklhGjY3lMGFmYBEhgABKIeE43xHbrqVOCSOKasI9zFOJKhNtMJAChPO+pVabr+HGCHymazhs7RHbvb5Qtu47oKc8tBDfmbqOeK6rHGes2LAcTxg4xc8LjEZDNs2GeJAESIAESIIEwBPB/WnNzc5gc/V+CFfxUW6e0tHVJSVFwoLX+SzMHCZAACQxcApGL7sJC+cSnZsuiJx6VkyebDDmNKl5YWDhwSXLkGU/AtmpDgOMHypadgcAzV1aPM2Ia1m6Iai9XPVizNVgaYKytrTNMMH+bYjvjHw8OgARIgAQGPAEEU2MiARIgARKInEDEohtNIFr4d559PvLWWIIEUkQAAvhQwymZffWkkD2wBXNLa6dxJbeFOIQzor/i3Myh4427+W0Pv2zqW/b1W4IENyzjqONQQwujlIckzgskQAIkQAKZRKCoMCC6m061y4ghnGaYSfeOfSUBEkgtgahEd2q7zNZJIHICb2+tl9Ub9sjmnYdlwdyrPCuYd+slxiKNedu79jcIopZrmlA1xOxCdCMwGqzgyKPpYEOLEygN50xdY4eYOsqKCzQbtyRAAiRAAiSQsQQGFxdKTk6ztHd0SWdXt1njO2MHw46TAAmQQBIJRCW6X3v11/LisqVB3Rw9tkoeevhRwdraTCSQbgS27A64iavLd6j+qdu4LbiRt9QSznAxx5xtbDXBDR1zvpEg6nU+ONfiVkLckgAJkAAJZAOBwSWFcqK5TRqbO2jtzoYbyjGQAAkkhUDEorvpRKPUbt1slgyjwE7KPWIjMRLA/GzbdTxcdWUhAsMgEFqLJbJVcCNIWktbp4lYjnrhVo4AakwkQAIkQAIkkI0Eykvze0R3W1Si+8yZM4Io6EwkQAIkMJAIRBURY9iw4YxUPpCekgwfqz0vG0MJJ8A1
4BnEs+2GDhdx29qNpb9qFt8pt82Y7NC54dLxsui+a51j7pAACZAACZBANhA4ffq0dHV1mU9xfq6cPt0tbe0d8uHhk32Gp/n6XBARCO7Ozk6z9boe7TnUi3bRTzvhuLu72z4V076209HRYdrDsTtpHndf3PlwrFy96tNr4OU1Br2ufdB2lb+99WrbPoe86AM+aM9OONZr/Y0J/XTnRb/0nFf9dtvu+sNds/vIfRLIBAIRW7ph3S4qLjZLhiGgGhMJpDsBzMO2E+Zfw40cYhzXRlWWBgU7g+CGeEYgtDkzq6W0OLAsim0FRzRzJJ3rjTrm3zXdbob7JEACJEACJJA1BCCI8vICPxsryoqNtfvkqXY51VYUtHxYTooinKN/+BQUBMdRUVEa641APRCgubm5hgPawnF+fn6Q5R7CE9f644A8EJVaH/aRwFgFNOrAdVwza6T3sEUbKsRxXZO7Tc2j17222le9tzhGOdSLLc6rZwLGi309tuvTtrz4oy67n1rO3TbGqWMId03Lc0sCmUQgYtGN+dyv/mql+dgD5Zxumwb304UA1s3WtbUhjBFNXBPmYeP61Akjg0T3sw/cZLLAuo3lvjTBCg4RjqRzv9X6befT/NySAAmQAAmQQDYRUEFUXlogJ091GHF54NhJOW9spRFoXmIMAlIFmfs6hBU+SLgGYabnVATimte5UFwh3Oyydj70BXVhq+1p/9C29s/rHK7ZAhvH2ndtQ/up9eh5ry1Y2vWhzyq8UQ/q0HGgb7gGQat9s/OjfuS320U+fLQOHbdb/Oo99eqjV17UabeDcu623HW58+t19MluA/lwDn0Kd03Lc0sCmUQgYtF9/U2fFHyYSCATCCxbVWu6CcENoQzRrXOzDx7vFeB+x+IW17B+uy3lfutiPhIgARIgARLIRAIlhQEPMPT9ZEubYwFW0aRjghiDdRQiCtdUfOt1CCskXIeoRH6IMJxX8YXrKBdKuGld2KoQDSUMtS9oD3UiH8pga4s87HvVEa4PyK+CH+X9JLs+lNfkbtvuL8qo+Nb8XlvtA8oi4RgfW+SGKqdl3NfRL69r2l+MX9u1XyjgvCa0b/fBZmDvI799bO9rXdySQCYRiGpOdyYNkH0lARC4orrKiSi+qz4QZdy2ekdLyVi/LWt4tPWwHAmQAAmQAAmkOwGIJ3wGyRnJzekNhtbZLUZIuQWZimUIWwgttbrqOHGMD8phC8EGcYVjFW8QdPjYQk3Lu7cohw/EtTtp3e72cB5l7BcC2Me5cAkckEfFoJbpr5xXnRgfyqNvSDiOph67brs+nEfdEMJeCQx03jXa9WpbOel43fWgPdwjvBBAWzhGQl04px+MDXUhedWF6/1dMxn4DwlkGIGILN1eS4VhvHfNmUvrd4bd+IHW3YsmjhQV2zUb9sjsGZNM1HFw0KW+omXCZcGiJcdyJEACJEACmUTAFmOVg4ulubVdmpoD841DjcMWVvY+8kOYqTizy6MdiNr+BJpdRvch/CDqVBTredRlt2/voz30Q8Ugythj1Tp0a148WO7fWhbCMtKEfuElAfqtbaJvKj4jrQ/5dRz2GHHefax1o13tu7LTFwBaH/oT7sUH8mv9qA/1uJmjLlzTselW+4Gt1hHump2f+ySQKQR8i24I7gP76+UHP/5pn7Ete/45wXW6nfdBwxMpJIClwpB0GS+4gb9dW2dE9pKfbxS9juW/8MEcbiYSIAESIAESIAFvAioKcXVkZYl0dXdLk4i0tAXch1vau6SsMEfy83qDe3nXFBCGEKsq1iCyIGaRtB0IN+QJJ/bc9UO0oU6ty33d6xhlVChq+yr+3Pm1XluUoo9IsBZrQt9Rp51Pr+kWY1bB7R6jLTp1P1SftD7doo+oz29+LYetzQHHGAc+4caBfNpHrQtt+2kf5TSfva91hrqm7WT7dm1tHZeizZKbHN53pmeQ7e3tRnDPuv3TnsPGeQhy5GMigXQh0NwWcC9TMQ1X8EfnXm26t3rDnqBuLlr+TtAxD0iABEiABEiABMITyNVo2qdPy4HjLXKkoUUON7Y6hVTAqSCDeNOEfQgq5MFWhateh2jUc8iDhDIqejWf1xb5tW29rsfuvqio0+toA/teCW3jOvqGevQDt237g/LIo/lQTtvVenFszzHXurDVvmiZcH3S+nSLvEjuMeC8XtO82Gobes7Oo2W8BDeu2fcHx1qXva/16hZl3Mz1Gsr5uab5s32LJW4XLH0r24c5YMbny9Ld3hb4A1pYVOwJRs8jX2FhoWceniSBdCCAYGpYT9stuieMrUyH7rEPJEACJEACJJAxBEqLAj8jjzf1Cu3Ort51sSH88NE51iqoMEAIUpxX6zDyqWjDdRxDoGGr5cKJOTc01I/8mlBPW0e3oH+n2julrLhQCgt6l/vCdU32vp5DXVqfjgfXkNdLlKLP+Gg59MdOKlj1ul5DXahT+ej5UPOx9bpu3cz0vLajbuR6Hszt8aDP2paW0XuEMuiXeyw4j35rPXYd6I+OVfMpX2zxQkLrRx0oixTumsmQpf/A8/JRl9DGErcXTRjpa8QojyVvdZUdX4WYKSkEfInu/kR1f6I8KSNhIyTgk8BnbqzuK7qrhvgszWwkQAIkQAIkMLAI2CLKHjmWzRw7YogcPXHKOY2gajKoV8BCSKmYRj3YV2Gl4g6F9ZpTUc+OCjQcoqx9rHm9+qfn2jq6pKOrU7DFqiXFhXlyqq1TWjtEzh4VbCiy+6N161br0+NwW1uUap9R3k6hxKvmwXUdq7ss8mh/3NfA2yuFag9tuIW4lg9VF65r3zQv+uFVT6h2tVy4NsJd0/LZtt21v1Egsu30wqpaeUFEHpl7VZ+pkDXv7jECW0X5ijU7ZELVEIpuG2Ca7Pf+VQzTIVivx4ytkpU/e8kzF87jOq3cnnh4MkUE4JbjldTajWtYd7tm8Z2cL+MFiudIgARIgARIoEfgucUdwOAc5naXFPVGxYbIbDgZPN0Q+bS8brW8+xrK46PWUVvcIa99bN8cu149j3N7DpyQDw6eMIIb51vbA4HfKsr6xnGx+6J16FaveW01j241jx5DeLqT5vHaal69psfuLa67U6gyoc67y/M4+QReXrPdiTPk1TpEOD5ra+v7XF79bu90ScQqWlZT6wQO7pOZJ1JKwJfoRg8RJA3C+nN339Hng/MMopbS+8jGPQjgDw/ShLF9rdiwdiNNnejPXcdk5j8kQAIkQAIkQAJ9CJw1slxyrCXEjjW1Svfp3nWn+xQIceLDwydlz4FGOdp40rhluy2dOPYSmiGqC3kagd7gXp6MBMEdjz4no69sIzUE4A6+6CfrTeNbdvZauREI2E4qsOFCjqRiXAMDax1uS7ldB/dTR8DbByVEfyCsKa5DwOHptCIAdxu1dGsgNbuDau0eXVlqn+Y+CZAACZAACZBAhASwZjdecNcfbTau25gLfLypTUYM8Y4FFKr6k6cCFnLMtfZyVQ5VLtR5uJHbacSQUjnWdErOGjk4aJ1xOw/3SSDZBCCiIZxh8VaDUWlRvgn+O+fxXzrdCVi764wlG0v
frliz3VyDaF+2ChbuwKo9WAp3yc/fk3m3XuKU5U7qCUQkulPfXfaABPwR0D9a4XLD2s1AE+EI8RoJkAAJkAAJ+CMA6/G5oyuM6IY7N6zdg0vyHbdu1DKsokgG91iYIYjbOrtk6OCAMLcFsh2MzV/r3rm6z9iB1AbJ0PIiGVJW4GtJM+8aeZYE4k8A87iRnl250al8Ts9vVCx3e6ihRSDCW9o6ZcnKjYJziE+g7uYQ4xDamiDi126rp+hWIGmypehOkxvBbsSPAP74qJU7XK0U3OHo8BoJkAAJkAAJRE4A87vxgYi2hQBqwrkhZZ0ydniZHGtqE1i2W1q7jOXZFsi2AI+8B70l2tp7RfeQsiJj3c7N6Tu/ureE996Jljbp6Aq4yxfkDZKK0iLvjDxLAhESgEXa/T1BFVjmFgm/VSG6b5sxWdSL0/0bd0tP4DW4oyMflsF154mwW8yeAAK+53QnoG1WSQIJIVCzPhBUAm8FmUiABEiABEiABJJLYOSQkiBr8tDyYhlcEphD3djcJsdPthrBjV5BeJ9sbZfDDb3LjuE8oo3Hmuw6hpVHL5SPnmgza5BjHfL6I81m2bFY+8byJAACqzd8IDpHW3+3PnjndNHpj7B4I+gvYhDNvGx8WGijh5b2mydsBSm6CNf4+c+8bl4qpKgLSWmWlu6kYGYjySKAN3u6BvezD9xE9/FkgWc7JEACJEACJNBDAJbuc0eXS92Rk+bMiCEl0tLWIbAYI+0/2my2mAuOgGt7DzaZY/xTVJBnBPeBY80yfkxwICknk8+d1vZOOX3mjAwrLw56CWAX14Bv6EuohHqQ1MX36IlWGTOsLFR2nicB3wRUcKMA5mBDgNriGkuB6XJgbuv11AkjRa3cKD9hbMA6rs8p6vaKa+S7c0nIiD6ueHOHefGA8WRzoqU7m+/uABzb27V1ZtQ3XDqegnsA3n8OmQRIgARIID0IYI43RDM+ELSlRYElurBfkJcrY4aWBYlqiG3kPWdUucmP+asHjzVLU09wtWhG1dGFRcNFRoYImnq44ZS8v/eoY3XXNiD4EUVdPzgPITOsIjD/HPPVj50ItsxrWW5JIBQBuJKrcIbYtAU3ykBsw7IdKrmnRYbKq67p85e8EaqqtDmPlwbKYdf+QCC4tOlcnDsSlaW7dssm+faTjwV15f6HHpbqqRcHneMBCSSbgP4xm1AV29vxZPeb7ZEACZAACZBANhOA2B43YrCxZENga4KYhbVZxTnOV40YLPsONcnRplaRplZjqfZrWUZdaEtdy9EWjr1SQX7A9tTU0iGY840EsQ9R7U6BenptVQeON0tpcb4Zjzsvj0nAiwBcybfsPmICoUFgwo0cCd8BtW7r1qu8upzjGoKpwQKOOiDm8dy6l8FFNHN8VIR71Znqc29v7V17vLk1eLWBVPct3u33/tXzWXN7e7ts3LBevv3M96W8IiBsmk40ysqfvSTnT54ihYXJWffQZ3eZbYAR0AiQ6fwHZoDdEg6XBEiABEiABAwBFbY2DohpW4TjWnlJoYwcUiqHG1tMVnUBh5CGyzqs4bCkI8ESjvya4MaOlJsTEMihBDfyQOwgQbBoqu9xiUefUC/6gL6MrCzps/Y4XgycPaq8T/+1Lm5JQAmsra0zFt3SU/nSfKrDBEdbuzXgnYnfrH6W94KlW6OZq9UbIh3L9TW3dZqttqdbBGFL59/E67b1im7tc7ZuIxfdbYG3f4VFvWsv6n57WytFd7Y+KRwXCZAACZAACZAACcSZgFtwa/UQuXDnxprfKrDh9g3hDbfw/PxcaWvvMqIbVnLUAwt1R2fApRwu7Ejq1q712lvUC1Heffq0CY4G8a0u6agPfagcXOi07xbwyLuzviEiS7zdPveznwDmaNvL2GJ9bSQsD4a5zEgQzX7TFdVVAgOTbRF3i+pF911r5oaj3V31jXJF9Ti/1Sc1H6zwcC3XFwn2/PSkdiRJjUUsur0ENsQ2kl6LZ9/r9u2VRU88KidPBoJsqBs7LO5LvrtYtm56zzR3zXUzZc49X4xn06wrAwno2+oyRi7PwLvHLpMACZAACSSawP+4/KxENxG3+iFy7SW+IKDx/3xDcyAgmzaE+dUQ4xDBauHOz+t1Bdd8XlvUCWu5qfdkb73FPS7wKvi1LMQ42lIBDis8xP7witDB2rQst6kjEHjuA4H9ktkLrKdtpyurx5mYQxCYus72RRP9BxDzYxFHezrNEsvozrE7kEb7WAINCS8S9AVEGnUv7l2JXHQXFsq0Sy+X++/7fFBnIIbj7VoOYb1p4wZ58uklpm4I8Fd+scK4sW94Z53c8dd3y1fmf02Q7z//zxLB9XFnnxPULx4MLAJ4a4bkfus3sChwtCRAAiRAAiTQl8Cf9jVIzYY6mX3V+X0vZsCZosLeNbbh8g2RjSXI7GBrsFojdXecNsIYlupwqbw0ILobTrZJW0fASg63cszX9koQ13Bxh/iGJb6lZx6qinMIcrWge5XnudQQwHP/4C1nJ71xuHdrwjra6haOudjzFq8ylt5EWKJhPUay29d+pMsW89uR8NJZcz3EAAAgAElEQVRBRTcs3xpx/cFnXhdEY7h31rSs+F0fsegGHARM+8GPf5rwewYRf/Os2512GhsbpLikxAjwK2dc45yHpf1U6ykpr6hwzmXKDh4uvO1KxBcuUxiwnyRAAiRAAiRAAoknMChnkAwuyZNvLf+D/K+/ucw0CPftTEnG+nzmjAwuKZDhFYHAZ00tbXL69BnBWuDHrQBohQV5cvbIwUZ4hxujmdd95oy0tAbmgsNCPmZYachy5SUFkp87SArzcwVedepZhzZg9d57sNGI7nNHV0hOz7zycHzD9S1cOV7zT+CJF96R8tJ8aWpqksrKShk0yDuwnv8a/ee0Ra/tEg5huWDuVUGu5/5r7T+nGp8QYNgWsv2XTF4ONZRBA+nyZ3Cd1yXS0BNY6jFfPVRCHQhQh5cZGOcVH61KW4Eeseh+7dVfy+ixVUGRymFpfunH/ymzbv+0E1wtFJxIz9tu5BdefInM+/IDThXoy4vLlsrgweXy4NceiXvbTkMJ2MGXAHMt4FqCN18U3bFD1sjl+nYv9hpZAwmQAAmQAAlkD4EpZw+Vc0aUyN4jp+Tvn/qtzLx0nPxm/YcZNUBYpGGF1jnbdudh+W4+1WkEMyzYfsVVZ9dpae/skvbOblMvRH24hHxnzpzpE0AN506f6ZZZV0w0kdrxIzuc8H5vx0F59d0PZPu+7F4qKRzLRF/DNAAI7qrKfHMvcD/8Phex9g2/S/W3Kay1cC23E4SxRjC3z8drP5SQjVf9iaoH4hlJ53i757zrSwRsYQ3XqaWm0Jn09XaNWHR7AYal+dixo16XYj4HazdcyJHgPr74m/8qX7p/vhHY19/0ScFHhfn1N34i6GVAzI0nsIJ5TwVcStDE7v2NTkvzn3ldEGQhG0U4ljTwOxfFARLBzsEeFx513YmgKLOSAAmQAAmQQNYTyM3NlXtu+oi8vWmntLe1y9jSZnlo1jly4sSJjBn78t++L3deNyVkf1vbO6W40N
s1PGShngsNTW1yvLnVV2Ar5K0sD1jb7Xr//Zfvy3/WdMqoyhK58sJxJhCcl/CG4H76ZxvlzqvHyrUTxIh4ux7ux07gwIluaWk5JSWlhXLW0AIpKiqS/PyA+I699v5reHnNdpPphkvHy20zJnsWUFdqz4sxnlQjFKzBtvU4xmr7LY5xr6utD+sWDgs2EgyPSGU90zngcaJz3c0Fcy34JRjmgoMbPAcQNA6aSpN7Dr2eT4etb9GNZcGefOwRObjfO7T7XXPmJtzSPGLUaBk+fIQ0nTgR1BaEefWFF5m+xbJW+IEDB5J2T/QtDhrEPto+1d5l3CjwIP7NX02QGz8W/EbMq3P7DjcLPhecNcRxtfLKl+pzb9YelPc/OGzGmai+HDsWeHnR0RHgmah2WC8JkAAJkAAJZCKB7u5uaW5ulvNGlUpLixhB2NbWFveYPIlk89fXXyiFBb1zu91txRJfaPSIQhk9wt9UReT1ShedN0JWbz4g312xSfJzRS6eONJYVm3hvfHPh+TfV2yWm6eVS3fjh3Jm2LCMugde407Hc+eOFOnoyHMs3Hg2GhsbpbOzU/LyfEugqIf2h+37TdmPTShP6O/fUB0cMTgQTHD3h/j9XR4qW9zP/+69PfKnDxvlT7vrpCSnNzih3ZD+ZsdXGRpoVHngRdnm7XXyh529htySwjxzHVrnv97YJWeNKJWjTW3ypw9PSEVBl5w9skw+f9Nk+f6rgRccHx5qSAlre2yh9n0/cViT+/GFT0vtlk2mrljEbajO9Hf+yKGDcvTokT5zt2Hprt26WWDpjiWNGTMmluK+y+ocBryBguCGW0ROYbnsqgu89UFF7+w4Ln93c2C+VaiKYTnWwAML77tWxozxH/0wVJ3xPI+xwa0G7jNvvLjFLFuQSMbr/hyIcD/l3JGSyHbiyYh1kQAJkAAJkEAyCXR1dQk+mEsMd2h8mOJH4FD7YPnt1mMyvLhDnv3VH+UfZ+fLtPNHOcIbFu5nflErd19zlgzJPSUjRlwgw4cPl5KSkvh1gjUFEYA7OT7w9IDYxjaRCZZYuEZDKCJde3loz4xE9mPalFwjVA+e6Ezq7+LLphwzovv4qUEh2/3n5zeYoQ+tKDV5ysqOmeNDTZ0ON5yYOG6ouX741GF5/8NGOd7S6bjs/+a9g7Jg7tXy6TFj5GMfOddYvCH201UD+Bbd+lAkU2y7rev23O1lzz8nv/ttjXZLYGlPZt+chqPY0YAAcIMeJaXmiwnX6M07e0U3hDlEK9wnIKwRGAD5VWhDZGuQBHRhy87DSXUd8TNsBEOAu7yddEz2uXjtawTRRLrqxKuvrIcESIAESIAEUkEAoiMZVr5UjC0d2jx77DDJLSiU05JnXMdh0f7H2RcZ4W27lAcE9wgjuMvKyozrs20NT4exsA/REVi0/B2nIOZVD7TU33Jl9lz3CWMrDR6dGup2LVd2Oq9b58jjvJ3X1kShprMmUoNoP8NtIxbd4SqL9zW1rnvVizW5U7ku99rauqjnXa/uWZcOlm47quGW3QHRjUiasH7jYUJeuJvjrdmSr97ooMB5BF/AA/Tsyo1iz2HA27V0+JLjRYA7uaMSuq/HcgwWTCRAAiRAAiRAAiSQKgLTzh8tObkF8uGxNvl4dWAZWwjvWz4+Xn65bo8R4hTcqbo7iW8X+gAJc5Xn3zk9yECW+NbTo4XS4uA52O5evd3DyD6vots+h32dix7KoAYjpQpu6CK88Fi7rd7EkIJAxz7uxQuralMeMyutRbcbfLocQwAb67MrCqFX/9xvVVRAI+/ooaXmA5Fcs36PcZeA4EawBUQ21zdlOIeoh0j6YKEPc2ZWO8erN+wxQn3X/gZTD/KHCtrg1c94n0P/ajYEFr1HP9EvvEQwb6gmxLu1QH0Q9EhTJw68t4qJIcpaSYAESIAESIAEIiUwemiZHDzeLIebuhzh/X93HJLbpg/vcSmnhTtSppmS/+2tgdhXiFSuv9lT1Xe1DtsBm1PVF7tdjUoOfXBFdZW5pOIaB2p8tMtgX6OxYx8C216CDedwvGxVrdEaEOMvr9kh0EeaUr1aVGCGvfbGxxbLdOm8bs2OOdVw94Y7eLYn3ERYltX9O9R4Ibbxmb/kDWOlhtiEiMbDgISHDOJQ3+ys2xb4kmKBeH0AkQ8PyLKHb3He9Gh+PHjY1y8U8uLtmrpdvFCzzbSTqn/AR/uCcar7iJ5LVb/YLgmQAAmQAAmQAAkkkgB+n2EOceeZHIHrOCzec2ZUyaiSbhkxgoI7kexTXbd6rdq/5VPVJ7UO9+cJiusaaT0efR1dWWqqsb157XrVSAZG9osJaB6kOTdWO9nVVd05ISLwFHYLbr2u3GH8u/LCgKDXa1t2HzEarj8emj/e24hFt1cHErlkmFd7qTwHa63erNseftlYvFWIq6CElfozj//KCG5cgxheuPwdI7717c7sGZONkFbRrHVCTOMBxJxt7CMUvn5pMG59E6QPG65hKQK4nmOLNz8aoE3748UL/Upk0i8U+oQx6pdGlwhIZNusmwRIgARIgARIgARSRUB/2z310rvyjaXrjMUbwdLGjRsXtznc6sacqjGy3b4E1OCE3+G2mOybM/lnoD9C6QKsdQ2DIvSLfiL9va5aCGJXDYSh2tPzbkbwAFaPXyVku6rr90qFteaxt6qP4FaO5ZdR34K5V5ks6CNeLkCjpeL749u93B3UzB4g9pOxZJi7zVQcw03aTj96dauzKDtuJB40fZia6wNzjCHUIYZVcKO8imc8cLYbBdxR9PpF911rN+XsQ4zbD9z8u6aba7rFA4+3SwjOpg++U7hnB32CMNaH0309XsczLx9vXho4lu6etbTjVb9dj75R0zds9jXukwAJkAAJkAAJkEAyCKixBL8H8ak/dq6cNzawQk48gqZBEC1Y+pbULL4zGcNhGz4JqMFJDU0+iyUlG4Q13LnViozYUDdcNt60DbELF3T1xsVJ6AzVKn46iN/g0EEoF05bqJhXq7ZdN7SC6gUYExF4WoU28s279RLzscu496GrTMys4y3GK3nF47ebLNBOqsNg6MT3x+bhricRx75FtwY1U9fyTIkUHm9oza2dTpV4YGCJnvPYLx3hrYIbmSCmcfNxk/F2xTwEDS3mvFOJiKnDjmhuX/PaxxdG/6B7XccDijYRNGDC3KtC5sVbLeR1v2nyqjPScwgEh1RWFFh3T8U/+OBhD9f/SNvS/Mpe29Lz3JIACZAACZAACZBAsgjMnjHJTCFcu7XOeERu2X3UEThFRUVm3eho+4LfUI8ufSva4iyXQAL4rY+kwjGBTUVVtXrVwjiHKa8IaIYltyCSYYyDrkHfMQ/a1jt+GttVH5hi3N/Y9bc6hL47YTqqGs5i0SYQ2BgD5nSrQVK1EdrEOPGSAfG58F1NhCZxjw3HEbuXQ2wPXMHd4bwlgYBW12/Mw8axHV1cryNyIazceDujb5fcDxKO8TbJ7xul/vLpw4O3SfrWzX3zEe0cX75FP1nvvhSXY+cPT1VgKQBUii8BUqg+xaVhVkICJEACJEACJEACKSSA32H4r
XbFhQHvxd0HGo3QjkVw6+8qDEvXVlcRlcKhsmmLAFyakWxvVOtySnahRzTp729dolh/l8MNG/mgWebdGgjcrFZhLRvpFoZHpJkPLHem5eLY3bZdL74z8TCcfebGajMe6DNNtvbCSwYIb3x/Yh2n1u9nG7Ho9lNptubBWyBNmB+g4hYPKwQ1bujsqycZdwUIchzj4VE3C2yN9bsnUIDWFe8t2rG/ZF71qys2/ojbf8jtvOoCYp/jPgmQAAmQAAmQAAmQQP8E1EiC31mw8EW7DrdZd/ipVeb3Gn57qoBQEdV/T5gj0QT0HquXa6Lb81u/l4jV4M06pRV1qW7B86WCOZKXOrp0sbanzyjqtvUTXkAh2dfNiTj+gz4gCLXqL1St/dK2dexqoY9j8yGr8u1ebteACOYvLltqn5LRY6vkoYcfFbihZ2uCG4Im+2HBGy0V4LBoh0vJWMYLDxY+Kqx1jvfMS8cHPXTaT4hrezw4j3Mr1mz3bX3XurDVL6l+afUa/vPBGyWs363/Eem1WLeh2oy1XpYnARIgARIgARIggWgJ4DcirHvh4uxAsLl/h9nt6e9PnYpoX+N+6gngN6h6jsIQl+5JfzPbQtTex7OI3+t4qeP397pqDrse5YDnXwWwGvr81qt1xLpFe31iINSIWcdbPZFjbaO/8hGLbgRUq926Wb79zPezWmC7weENpT4objGpgttdxusYcweS8WZSLd3oN0Q3vjxwI/H6MuCa+2UA5oNH208tF+4/EC82sZxLRZux9JdlSYAESIAESIAEsp8Aphj+v/beBTqu6srz3vV+6G3JtmwZ2/gJxDaGj5AEGk9CCDAk3TSkuyeZjNMTZjpfYPVkQoMX3zCsReiPpBcLCElmPjuTZEgndIZ0egJNHjSYV4YECMaAsQ3Gb2RbtmTJekv1rvrW/5R3+apUVaqSSlJd1f+sJdWte889j98599bZZ++zj44TIezosbXm8LHz765bl1PA0bEnzGFVUKkNpE13R0Jph73WtHg88wSwPTDaCW2k5tkzX4qJczSC9Nl15yon5LpLr6FfWpfO5oqb7xyEbPRXeENXzbpa0ILTbAes8YY8h3ZDufTZms5yTcq8vLm5RXz+wHSWq+LStu5fNxVhEi/bmWhYdVCgDguygeLB07D7cLfRTqPTWf/w46Ave41bzKfmqT8Keo8K/GqCouf5SQIkQAIkQAIkQAJzkYAK2VCAYKui7HGVjruefPlA2hrw8OmMxSB4qHZb08E5dVY1k6axc7FtylUnbVP19VSudKcjHe1POibPlYd6X0e90G+LCXBMhqCO0CB0Q4Osa6exRZdymgk5aKIy43lS7fv2HcXVcaI0J7pestAN83F/ICDdXZ0TpT2nrqtzBOw7bbegnRtm3dkBs1kQrvFDAE+GW7a+OMYrJn4MSg0qdOuPgt6vD7iaoOj5XJ/6YOa6lutcqfFzpcFzJEACJEACJEACJDAdBCB0YLylWlHNA5aFCLiObZ3wp97JMQbD1k4M9iBgnRiplBJjIsBqVq1j9ELlsyoXoakuJqBvI+hYX+9RwfaVPR1mUgnnrelrvNn4hJUv5CCdZJjuMhRlXn7iWLs89M37ZGhoMFOeZ379VOYYB3N5TTdehOikaBi4nrdqvcdAqPAv5sFxiHH0pkVVt/p4WPSB0U/EGTm7RRqE2se2v5vZYF7vz/7Ej8P2nekfiOwHT2e/MBuGPKwvJ5iwwAmd3gOnC1gfc+e/ubzgw4l0HvrZjswm9/pwZ5eL30mABEiABEiABEhgtghga6av/+h3xqkUxje5lAXQCmK8qdpvaOCw9RGC1Xqw5qx5uXW8Nlv1qvZ8tR0rwWS6UFtAaYi+hPE2gpqQ57pHlXW4hv4Ibfdkx9cqYKuJOdKEaXclBMgccLg2U6EooXvJ0mXy7e89OlNlqrh8MDuDoB0ue/1zxRXYzCKlO/Thk31ideqAje/ViyAeuCvXt5mHUNepwxQEwrDOhKlzgW1PvW1mqCC442HE3ndwDqJMwABp64wY0rY+tLiOzg0hH6btKAOu4xw07Hig8eJ68LarDU784OA7ZoR/8l8/M0ZAt/LGvUgL5UdZreWxxuMxCZAACZAACZAACcwmgZs2rTVjFhXUtCwYT2FfZGgl4akcjtNgnThWUDm3BSs8L0N4mow1oubJz/IQUHNtqyKpPCmXNxVso5XefzutkdZlqPlygZCOLbcwztax/VTG2DpBhPG6CuL58p7N83g2Yd28+dp1ZS9GUUJ3dq7V5r1895G0iTVeinYJNQGvKSpe4tkzSvqyVyEYETGRgIcJ53IFXIOwbATrsxEg0Od6APGg6mb02WlpOvixgAAOJyD6o6Ezu1ZhHQ+pzsphLTh+kKwB9UNAftbJBWscHpMACZAACZAACZDAbBOwjm+w9ayOWzDWUctCFUggeKugkl1ujNWgpLCmlx2H30nASgB9RrXd1vP5jnUcj3H/RNrujFxh2RNc082WQSba5Unvm41PtURB3rC+LfdESslCt9V7+Y7XXjFm5Y2NTdL+wdE56c0cHQ1/lT4zk9050cnxQq49O6OkGma85L/+o9+b6HhZo0PBJKaQwI3IuA4OKiDjAdPv+tJXB2nWzeizy6UTF/ghgdk+AtjiHmisdTuxkXBamMb1Qk4cNF4hM5nsMvA7CZAACZAACZAACcwGAYzHMG5SgRtlwFhMB/g6psoncGuZNZ5+5ycJTERAtd0Yd29YtWCi6OY6vLFDbtDxtt6EpbZq+ava/lyKO+3Xel+2EK7nK+ETvqjUGhi7IpX7GSvZkRqgZHsvn7+wVY4cOiAQyOdawL6KCDrzaJf6oZOjs2i5YXqNhwwvecx0IaiWGg9hrgclu65IA5pm/Gm61u0q1EFaobRQLs0f6aNM37vj+syPjwruOmuGHyfEwR+Cntey6Xctj57nJwmQAAmQAAmQAAlUGgGMpfCXL2AMZV0f/JUbLzGmrjpmy3cfz5PARATQt7CFHdYxFytQ6uQQxts6EYRPtUKdKE9c1zE8jit5vI5tnfXZy+V8upi6FopTstCtW4VFwiGj5e48mV7vXCgTO19T6JU8M1MMXzxcatKBddwQZlU41geqmHQ0jj6sk9muAvlrMJ4D59VkyoL1JqpNR5wtn/uIPPmNzxpNOL7rftx6Pz9JgARIgARIgARIwC4EdPxUqLwYG0FQgcUijiGk65it0H28RgITEcDkTbb2eaJ79DocFyPotsPW8brGyfWpgrZV+M4Vb7bPgYtq71UJWM4ylS50+3yyYtUaGRwYEJiV//qffyG33fIFOXOmZ87t3Y1ZHe1Yc+Flp7OkELJv3rSmnP3IpKWsJvpBQf4Q+rFeArNKCLhHTc91vzyYjCt3nRhQZw64R83TdVbKJMR/JEACJEACJEACJGBjAhgPFauNxFI/jL/wyUAC00FAhWWMu+FYTR1Mq2JS+16+pZ4qf6jwPR1lLFeaqmQ9cqr81tslr+lGpa7c9PFM3eaSV3PM2KjAmO5YO4wpBTobPEXOpaBC7GTrlL1dRanbqGFtefZMGx5KrKXQ
7TFU4EYZcU2dOeAT11QAn8gD42TryPtIgARIgARIgARIYKYJZI+PCuWPsSsEoUJObAvdz2skMBEBCMuqWLP6WULf23x2WzGkkW88jjH8O4dOiy5DnSi/2byuEwO6fLWcZSlZ013OzCstLdWwQuCG0wCsWcBs49dvuSqjca20Ms9WeTKd8mR6JkgdDxTrYj/XDwocGGjAREe2Nh7OHBDgyh9rSXRmDVpzBhIgARIgARIgARKoNgKqoLCDQDMX20aFs3xa3rlYZ62TCuLYzQhB+6Jet35iDJ89rrder6RjtaAt1ny+2LIXremORCKy7bsPy55db8n6jZfKF/7yP8i3H/ym6Jru2++6R9Zt2FhsvhUZT7cGe2z7u6Z8nDWcuJngzRAPnT54hZyDTJSaPqwQuK3O2vQ+Fcr1Bafn1TpBv/OTBEiABEiABEiABEiABKabgG71lk/LO935z0T6MLnWcT4cscG6FfXGRA/G5F19o6YYOo7PVSYo26ZqZZsr3ek4B3njyMl+U7dyyhhFC90H9+8zXsv/50//t5w41i5P/vxxufU//Y0sWbrMfH/6l0/K6rUXis/nm476z0ia0JyiI6EDQfDTPepmJHObZYKtyBDA6s6tL5rjcmicsc4bZiiqSbdiyfUwwxIhV1zrfTwmARIgARIgARIggblIoPXs3sjUdM/F1q2MOsERM4RmaH4xRscf9rRGn1PBGyUtp4A6mzVf2dYoz+2EjFPedd1FC93QaF9y2eWGAbYICwSDUt/QMOY7PJrbWehGZdQFvnqvm81Gr+S8IehiXfYTLx8wxawNeOTWGy+dcpHVw3q+hCDYY7YNZuxYV4IZNwYSIAESIAESIAESqEYCqpDQJXfVyIB1nn4CsGS9KRTNZKR7Wqt1sJpkZyLY+ECVeZBtyhmKFrrLmakd0lKv2nYo62yVEWYiM20qgjUzELzNw79pzThnbLPFgvmSAAmQAAmQAAmQAAmQwFwlYPXHBG0wgi75nCtabtQJddn+8OfK3ox0pJYDKQQ7a8fKEYWnZokA1szcemPaoRrbaJYagdmSAAmQAAmQAAlUDAF14gVtN/5gEajCUMUUkgWZUwSgdIO1qS4tvXjV3HRqrGvZy9F4JWm6H3ng/jF5/vaF7ZnvrYvb5MbMN3sewHEatqvS2Rt71mJul3oqjtrmNhnWjgRIgARIgARIoBoJYNyKtbVYcwth+8nfHTDCEJzSMpDAdBHQ9d3od7q/9XTlNRvpfuXhZ4xDtW1/c11Z/EcVLXRfc/2nBX9zOXzxunVpoduyddVcri/rRgIkQAIkQAIkQAIkYG8C0Dpi69bHtu81XqXtXRuW3m4E5pJpuZW9ejE3kwpt57Y1tsYp5bhoobuURO0aF84ooO2mptuuLchykwAJkAAJkAAJkEB1EcBOLth1x+pMrZxmsXOZJnYtghNlCI4bVi2Qay87v+B+09ksOntHzKmaMjvdys6H32eeAOTBcnox55rurDaEtlv3g866xK8kQAIkQAIkQAIkQAIkUFEE4OPm67dcZRRHD9KkvKS20W2hoM187Nm90tmXFqKLTUS3alOP18Xex3iVT0DbVNt4qiWmpjuLoG69kHWaX0mABEiABEiABEiABEigIglAUztXzXynE/iRU+m9mGEpMBKOGa33FR9qM7vkFJOvWhTMxTXNxdSfcYonQE138awYkwRIgARIgARIgARIgAQqmoB6lIb2liE3ATic27L1xYyX95s3rTURs72+w2R/+xtHcyby6t4T5jz2qOaOOjkR2fpkrT+9T/eMarojkYhs++7Dsm79xQbe44/9aBxEeC+/6577pL4hvW/buAiTPHHiWLs89M37ZGho0KRw+133yLoNG0XLtGfXW+b85zd/ac47epskQt5GAiRAAiRAAiRAAiRAAiRwlsBwOGa8veMr1sRbAyYrNoukhfKT/bJh5XyBp+7s8MqeDnMq17XsuPxuPwIZ8/Kz6/anWoOizMt9Pp98bcvdmbxmyos5BOtdb++UB76zTVAGCOBP//JJWb32Qjm4f59cc90NplyDA/3y3x95UC64aJ0sWbosU04ekAAJkAAJkAAJkAAJkEA1EbDu2y0rq6nmheu67Z/fMltAfeXGSzIabtwBy4CRUGzczUOhqAyHosYzPDTeWIKK71/8xq/NPWpaTrP+ceh4IgeBijYvh6D9mRs/awRulL2/v08CwaD5Dm03/hCgXV+99oIc1eMpEiABEiABEiABEiABEqgeAq3zakxlrd7Mq6f2+WuK/cuhxT58sn+MkH3luiVjdi5Sc2KrY2U1MYegnRbET5hPCOyqEc2fM6/YnQCWEmA5ArzdTzYUpenOTvz5Z34j2Sbm02VebjUjX7/xUrn1q3dkF0eg6Q6HQjJ/Yeu4azxBAiRAAiRAAiRAAiRAAtVCQJ0C63ZW1VLvYutpnYyAVQB4Wb2W63WdvEC62AMdwjocriHAJB33XrF+SbHZMp4NCWBSBRMt255622zJh2UJuv6/1OqULHRDwN275x15ZOsPZcdrrwiE7cbGJmn/4GjZ13OjMlbTdpiXP/x3fyt/ffuWTF4Qyp/6xc/lxs/+RUYjXioEjX/q1Ck95CcJkAAJkAAJkAAJkAAJ2I6AOxU2ZT7e1Scc26abb9/xtJdyfHtj3wm58Ly0D6qLz280jM6cOXcdcfa8f1Tamlzm5msvbZPfv9sl6jgNJzd9qOVsGgkyNpTm5j+3I2EqphMxcLQ32WeqZKEbOTc3t4jPH8jQhYb5peeflfUXb8wIw5mLZTxAPi0t82VwYMDkA4H7xz/YJjf8yU1lyXfRokVlLC2TIgESIBoVnjUAACAASURBVAESIAESIAESIIGZJeD01YvIO3K8e0TqGpuNhhamsdv+5rqqNYU+PZoWoNESfSMxqa2tNY2ysKVJMP7H39WXXyh3bn3RaDaTnhq5fH2brPhdu9z5havkpo4+ufVbz2Ya8vL1K+mxPENj7h7c8++b5aGfvW7W9Wst0Zcms46/5DXdKmxHwiGj5e48mfbcpwWZzs/urk7p6emW+oYG471cBW46T5tO6kybBEiABEiABEiABEjALgRgLn3n5z4i37vjeiMYqpbuiZcP2KUKZS/n7kPntk8DD91ObWXb2F2X1AkdNJrYBuyL160zZcG6bd2KDSe4RVjZm6giE0Q7f/1LV8mnLjvnvd7al0opdMmabph7r1i1xmibYVb+w63fNeu7sd5aBfJSClAoLkzZH7j/XlHBvq6uXu68+16j1X7s0e/L66/93vxpGtw2TEnwkwRIgARIgARIgARIoFoJWLexUq3ca+/OnKKs0rjD+Zk16H7cNQGv9bToOm71Zn7FunNrtlcubjRacKvwPeZmfpmzBLZ8/iOmbs/tPGr2bd98djKmlAo72rv6U6XcMJfjLl3QMJerx7qRAAmQAAmQAAmQAAlUIYHN9/9K4JW7Wk3M1Ww8u+kfvO3qMabC0ILXBDw5NdlY0w1rAQjft/7ppdlJ8fscJ4CJm5vvecJYkZg+8K1njfXDQ7ddXVTNS9Z0F5UqI5EACZA
ACZAACZAACZAACVQEAZhRQ+jGtlkwP7/2svPNZ0UUbgYKoduAbb52nfFEni9L9fye6zq03lbNd644PDd3CcDUHHu8w4pElyeUUttJCd0zuWVYKZVhXBIgARIgARIgARIgARIggbEEYBL96t4OYxqLK9DUFRIwH3t2rzGlvmnTGtsLmtBQ6rp2mAXjOyYfGEigVAK6XVjt2a3jRsKxopMoWei2bhlW3zDW+UDRuTIiCZAACZAACZAACZAACZDAjBC4ct0SwTplaOiw7/Dhjv6CwrTGg6m13bW72984ahhjb20EmIYPh2KC9bm63n1GGoGZzBkCcKyHoL4BiqlYyd7LkWj2lmHFZMQ4JEACJEACJEACJEACJEACM08AWm1oeW/etMZkXqx57O7D3UYzPPMlLl+Oz+38wCRmdS4Hx1gqhJcvJ6ZUzQSsXvFzcShZ6IZ22x8ICLbvYiABEiABEiABEiABEiABErAHgZWL0xq6Iyf7CxZYr8MUG2bpdg3QROKvxj9eY49t1RhIYKoEMIGF52TzN34lW7a+mDe5oszLTxxrl4e+eZ8MDQ1mEnrm109ljnHQurhN7rrnPrOd15gL/EICJEACJEACJEACJEACJDDrBHQdN4SEQsF6/bk3jhrnUYXiV+I1CECdfSOmaLlM5LnXdiW2mn3KhD3d4aBv+46jAsuJFYsbBZNVEMJzLVsoSuhesnSZfPt7j9qHAktKAiRAAiRAAiRAAiRAAiQwZQIQImA6qwL7lBOcgQSg3baa0KtZ/QxkzSyqhACeB/WKjypD0IbQ/eqeEzmF7qLNy+GxfO/uXVWCkdUkARIgARIgARIgARIggblHAKbWCFZtdr5afuqy880ldUaWL16lnT9sMZ+HRlIdX1VaOVke+xJAv0K4eNUC86k+A9SHQHbNiha6s2/kdxIgARIgARIgARIgARIgAXsRUAHUKphaa6AaYpjLXrk+7fFbz1njVfKxbhGGrdLgQI6BBMpNACbl2x/+XGbphT5X+SazKHSXuwWYHgmQAAmQAAmQAAmQAAnYnADWPNcEvLaphQraKLBOEkDgVg2kbSrCgtqWgGq/c20lVtSabq35Iw/cr4fjPulIbRwSniABEiABEiABEiABEiABWxHYfej0uPKOhGPjzlXaiZ88u9fsvQ2TeF1rW3vWlL7SysryzE0Cus57OMfzUpLQfftd98i6DRvnJiXWigRIgARIgARIgARIgATmOAHVxhnN8MrxlX3ydwfMSWiJ1QtzLs3d+Dtn98zuI+nJgiOn+o3jN5RGTX5nt2TMnQREaF7OXkACJEACJEACJEACJEACVUKgdV7aAZTVHFurjnNYkwpnaypw67VK/kSZtT46QYA16QwkMJME9JnJZS1CoXsmW4J5kQAJkAAJkAAJkAAJkECFEtC10OqROV8xIdiqkIs4T7y8X66942dy59YX890ybedR5pvveWJM+pg0eOi2q8ec4xcSmCkC23celce27x2zQ0DRQvc113+apuUz1VLMhwRIgARIgARIgARIgARmmMBzbxw1OcLrtwbVGKtAjvPbnnpb7tz2ohEqcH734W4TXddS670z8ZmtVVSBG47gGEhgJgnUBNLb8WFC6rFn98qrezsy2RctdGfu4AEJkAAJkAAJkAAJkAAJkIAtCaxsS5tdQ1iGWfZDP3vd1APf8Qeh1erxO5fwWhvwGE33ky8fEAi9r+49YdKwar9nCs7hk32ZrFTg5lruDBIezCCBmzetlSfuv1k2X5vepk79I6AIFLpnsCGYFQmQAAmQAAmQAAmQAAnMJgHrNmAQmre/cdSYh//Ds3tNsSA4WAVtCNgIVoH6pk1rzTkI6dkB6SHk2684O/5Uv1v3G8dkAQXuqRLl/VMhgGdH94ZX/wJIryTv5VMpAO8lARIgARIgARIgARIgARKoDALYBkw1cd976m1TKGiKb9q0ZkwBVy5uMmayVqF75VknZbvPastxA+5FmkgL1yEM43M6hWCrA7XtD39uTLn5hQQqiQA13ZXUGiwLCZAACZAACZAACZAACUwjAd27Glq4bG00BGSrlhvFwN7DCFYzbsTRdd9HTvab61+/5SrBHtlIc8u2lwTrw3Npwk3kMv1TLbeWpUzJMhkSmDIB7ZP6DFDonjJSJkACJEACJEACJEACJEAC9iCQrXmGhlqDbnmk3/GpQvdwKGY9PWbdNy5AmN/y+Y+YOBC8IWxAEz6dQZ2oqeZ9OvNi2iQwFQIUuqdCj/eSAAmQAAmQAAmQAAmQgE0JQBuHNdyFggq0qtHWuFg/bRXYVZhXDR/iqVdzvafcn6p9n2iLs3Lny/RIYCICC5vOWoh0pB39UeieiBivkwAJkAAJkAAJkAAJkMAcJACHT7rNEaq3YdW5rcK0umpuDu11tjm61cu5xrd+Ij72K7Y6lLJen+qxmpdj3TkDCVQSgdazyzJGzlqIUOiupNZhWUiABEiABEiABEiABEhgmglg721opGFODsFZtXL5slXttQq5Gg9acqzj1i2ScD7bRB37Fd/6rWfHCeyaBj4hlD/x8n7rKXMMgV3XxGZfhECvzt3UBD47Dr+TwGwR0D65fedRufmeJ7hl2Gw1BPMlARIgARIgARIgARIggdkgAO21bmuE469/6Y9MMbIFZi2bbhuma6j1PAQLrOPWtPQ8PiHYf+XGSzICfbbADoEafwhdfSPyD9vfHaMRhxAOgX3L1heN0J4tfGt6OiFgzZvHJDDbBFToxsQQJoio6Z7tFmH+JEACJEACJEACJEACJDCDBCBcWwVsrMeGgJwvqPl2LvPz7HsggGP7rvu+dJVZL65CsWqlER+abQjU+MP5wx39RjB56B93mOQgpEAIR8C6ccR/6PHXM5ptTQOfE2npTSL8RwIzTKD17JpuZHvFujYK3TPMn9mRAAmQAAmQAAmQAAmQwKwSyN6LG4Up5FBtZVujMSG3CuoTVUA1fbq21Sp0D4fPeULv7BvJmJBDuMY+39hyDII3BHZsRYYAbfhPnk1rxvEdgjoCysZAApVGQPs/ynXTprXirrQCsjwkQAIkQAIkQAIkQAIkQALTR0CdoxWbA4TfK9YtKTb6mHgqFGNtqzjSl7a/cTQTB4K21TO6ru2GhvvWGy8R9YqOG57beVRWLWkyGnII5QhXTrJcmQLwgASmiQAsPtBP8bxR6J4myEyWBEiABEiABEiABEiABOYCgVKFdGud1fwbmm6Yk2eH1/Z2GMEEQvZIOGbWgkPrjq3KVOBGGtB0I2z757cySWAywKpRzFzgAQlUCAF9dih0V0iDsBgkQAIkQAIkQAIkQAIkMNcIqOCMekFIhok6til7dW+H7D58OmNajr22YYaby4Qd90HLjQDhHOvGC5nDzzWGrI/9CVDotn8bsgYkQAIkQAIkQAIkQAIkULEEIDRDwMb2ZLq3N4RmmJJjDTeCCuS5KgET9ed2pp2mPXTb1dRu54LEcxVNoOKF7hPH2uWhb94nQ0ODBuTtd90j6zZsNMeRSES2ffdhaW5ukc23fLmiQbNwJEACJEACJEACJEACJFCNBC
Ao5woQvGF++9DPXs+p4dZ7sG57JBQzwjbNyZUKP+1EoKKFbgjVu97eKQ98Z5v4fD6BAP70L5+U1WsvlO6uTtn2374lN/zxTXKy47idmLOsJEACJEACJEACJEACJEACIkbzDWdTVjP0bDAQtHPtBZ4dj99JoFIJVPQ+3RC0P3PjZ43ADYD9/X0SCAbN9yVLl8k3HvyOLFt+fqWyZblIgARIgARIgARIgARIgAQmIMD12RMA4mXbE6hoTTfoqgn5nl1vyfqNl8qtX73D9tBZARIgARIgARIgARIgARIgARIggeogUPFCN7TdX9tyt2kNmJc//Hd/K399+xapb2isjhZiLUmABEiABEiABEiABEiABEiABGxLoOKFbivZ+QtbpaVlvgwODEyL0H3q1ClrdjwmARIgARIgARIgARIgARIgARIggSkRsJXQDedpPT3dUt/QMKVK57t50aJF+S7xPAmQAAmQAAmQAAmQAAmQAAmQAAmUTKCihe7BgX554P57pfNkh6lYXV293Hn3vdOi5S6ZHG8gARIgARIgARIgARIgARIgARIggQkIONq7+lMTxKmay0sXTI8GvWoAsqIkQAIkQAIkQAIkQAIkQAIkQAJjCFT0lmFjSsovJEACJEACJEACJEACJEACJEACJGAzAhS6bdZgLC4JkAAJkAAJkAAJkAAJkAAJkIB9CFDotk9bsaQkQAIkQAIkQAIkQAIkQAIkQAI2I0Ch22YNxuKSAAmQAAmQAAmQAAmQAAmQAAnYhwCFbvu0FUtKAiRAAiRAAiRAAiRAAiRAAiRgMwIUum3WYCwuCZAACZAACZAACZAACZAACZCAfQhQ6LZPW7GkJEACJEACJEACJEACJEACJEACNiNAodtmDcbikgAJkAAJkAAJkAAJkAAJkAAJ2IcAhW77tBVLSgIkQAIkQAIkQAIkQAIkQAIkYDMCFLpt1mAsLgmQAAmQAAmQAAmQAAmQAAmQgH0IUOi2T1uxpCRAAiRAAiRAAiRAAiRAAiRAAjYjQKHbZg3G4pIACZAACZAACZAACZAACZAACdiHAIVu+7QVS0oCJEACJEACJEACJEACJEACJGAzAhS6bdZgLC4JkAAJkAAJkAAJkAAJkAAJkIB9CFDotk9bsaQkQAIkQAIkQAIkQAIkQAIkQAI2I0Ch22YNxuKSAAmQAAmQAAmQAAmQAAmQAAnYhwCFbvu0FUtKAiRAAiRAAiRAAiRAAiRAAiRgMwIUum3WYCwuCZAACZAACZAACZAACZAACZCAfQhQ6LZPW7GkJEACJEACJEACJEACJEACJEACNiNAodtmDcbikgAJkAAJkAAJkAAJkAAJkAAJ2IcAhW77tBVLSgIkQAIkQAIkQAIkQAIkQAIkYDMCFLpt1mAsLgmQAAmQAAmQAAmQAAmQAAmQgH0IUOi2T1uxpCRAAiRAAiRAAiRAAiRAAiRAAjYjQKHbZg3G4pIACZAACZAACZAACZAACZAACdiHAIVu+7QVS0oCJEACJEACJEACJEACJEACJGAzAhS6bdZgLC4JkAAJkAAJkAAJkAAJkAAJkIB9CFDotk9bsaQkQAIkQAIkQAIkQAIkQAIkQAI2I0Ch22YNxuKSAAmQAAmQAAmQAAmQAAmQAAnYhwCFbvu0FUtKAiRAAiRAAiRAAiRAAiRAAiRgMwIUum3WYCwuCZAACZAACZAACZAACZAACZCAfQhQ6LZPW7GkJEACJEACJEACJEACJEACJEACNiPgrvTynjjWLg998z4ZGho0Rb39rntk3YaN5vj5Z34jjz/2I3P8+c1fkmuu/3SlV4flIwESIAESIAESIAESIAESIAESqCICFS10RyIR2fX2TnngO9vE5/MJBPCnf/mkrF57oXR3dcqRQwdl66M/Nc314x9sM9eXLF1WRc3HqpIACZAACZAACZAACZAACZAACVQygYo2L4eg/ZkbP2sEbkDs7++TQDBovr//3l65YtPHzTHirVi1WnCOgQRIgARIgARIgARIgARIgARIgAQqhUBFa7oBCdrubd99WPbsekvWb7xUbv3qHYbdqZMd0rq4LcMRx2/v3JH5zgMSIAESIAESIAESIAESIAESIAESmG0CFS90Q4v9tS13G04wL3/47/5W/vr2LdPC7c19PdOSLhMlARIgARIgARIgARIgARIgARKoTgIVL3Rbm2X+wlZpaZkvgwMDsmhxm3Se7Mg4VcMxzk0lzG/2TOX2Gb23+0xM7FTeGYVTQZmxnSqoMYooCturCEgVEoVtVSENUUIx2GYlwJrFqGynWYQ/iazZXpOANgu3sJ1mAfoUspyO9qroNd3ZrOA8raenW+obGuSCi9YZR2owP8cfnKrhHAMJkAAJkAAJkAAJkAAJkAAJkAAJVAqBitZ0Dw70ywP332s02gBWV1cvd959r9Q3NJo/OE+77ZYvGJbYMoyeyyulW7EcJEACJEACJEACJEACJEACJEACIFDRQjeE6288+J28LYV9ubk3d148vEACJEACJEACJEACJEACJEACJDDLBGxlXj7LrJg9CZAACZAACZAACZAACZAACZAACZREgEJ3SbgYmQRIgARIgARIgARIgARIgARIgASKJ0Chu3hWjEkCJEACJEACJEACJEACJEACJEACJRGg0F0SLkYmARIgARIgARIgARIgARIgARIggeIJUOgunhVjkgAJkAAJkAAJkAAJkAAJkAAJkEBJBBztXf2pku5gZBIgARIgARIgARIgARIgARIgARIggaIIUNNdFCZGIgESIAESIAESIAESIAESIAESIIHSCVT0Pt2lV6ey79i7e5c88sD9ppCti9vkrnvuE+xFHolEZNt3H5Y9u96Surp6ufPue2XJ0mWZyjz26PflzJkeufWrd4jP5zPnn3/mN/L4Yz/KxPn85i/l3LPcmufHP3mtbL7ly5l7cO2HW787Lr9MhCo9sLYHEFjZlsrTGh9pZbeBIh4c6JcH7r9XOk92iLVvoO1/+8J2jWY+86UxJlIVfSlnewFbvrawIrXmmf3M6rXm5pYxz5v1/mo9VjZ41yFM5dnC/dbnK99zYc3T2lZ8tibuhVZ22e1V6Dk5caxdHvrmffIfb/uqrNuw0WRULG9rntb2QiJ6jc/W+LazPgvW3xBllmt8odesPLPHFshp/cZLx4w/NHdrnvr8adsPDQ1qtJzjmszFKj2wsrO2F3BY28D6jsS1XONBa1rZz4wVrzWetpdexzWOB5XGuU99Rsrxm5X9bNx+1z2Z9+O5HM+957Kf2WLfoda0qu14su2lbWP9zQI7K/PsZ1HZWvPMfv70mr5jaV6u1Kb5E+Cfe+bX8qnrP2MEZ7xUT53sMINyHCNcc/2nBQ3/9C+flL/8q1slEg4ZQeyGP75Jjhw6IH/xhb8cI3TjRa0DmlzFx6Do0e9vlVu+fJsR7tF5LrnscnMPjhH8gYB87MpNY4T8XGlV0zn8+CCALRj+90celC/e8mWpb2gomSfSgiCNti0UrG2De97euSOnwPZPjz/G9soCWc720hfkNdfdUPDZyvfMdnd1yrb/9i3BM
3uy47j8+ec3Z5W2ur+Ws63wrvzJo9+Xv759i3m/5SObr610AlPv47OlJM595msvTArne2eB994978iqNWtl+fkr8z5H+Xjnay8+W+faJftoMuOLYnmiPXKNNQqNL6zlw3P62isv811ogVKovaxjQNzy4x9skxv+5CYz/sDEfPZ4EGlpHDyX1vut77hC7cXxoKVxsg7zvQMnMx60vjOz28Oabb53oLU9ET/fO9SaVrUdT6a98v1mIS0di2c/Z1au+dor1zuW5uVWctN4jIflMzd+NiM0X3DROiPwoiEhfF/+sStN7vMXtppPNBa04N948Duy/uK0psBavL6+XmlsbLKeGnd8rP0DWbf+4syAFAI3OhACNN5Wrfe4m6v4BIRtncxAG6xee4GhMRmeqrkuhBMvX4TVay80n0uXLTeWDXpe78X3cCgk2kf0fLV/lrO9Du7fJ5iR1PbPxbbQM4tBD57ZZcvPz3Vr1Z8rZ1u99Pyz8ic3/3nm/ZYLbqG2ssbns2Wlce44X3vpuynXOwsTjF/bcrf4ff5zCWUd5eNdqL34bGVBtHydzPiiGJ7aHvhNyg6Ffg+tcd9/b69c+KH11lNVf5yvvQAGvK7Y9HEzVkS8FatWm3P5xoOIEwgGpb+/z3DF/bgH562hUHtxPGglNfY43ztwMjwXLW4zShjkgPsx1kC7WoM+c7lkAmu8fO9Qa5xqPJ5Me+X7zYK8BLkJAc8Tnks8X9ZQqL1yvWMpdFvpzeCx/hBBmw3TcQ3ZL1A9n/0J4eve/3KH/Icv/Jl87Su3mNnN7DgQ+KwBQnpodNSY6FnP8zg/AeuLbbI8saQA7YQ/nYWz5jg4MGDaRc/5/AEJBoKC89aw47VXzAsg+8fUGqfaj6faXmjjpuZm80yhvb794DfHPS+TfWarvW2y6z+VtsIPHcK+d/dkni2dbbbmU2xb8dmyUst9bG2vYt9ZuVMSyce72PbKly7PpwlMdXyhHDEJCUEhWzDA9WJ+D9FnoFTQyRlNl59jCWh74Sx4WQOsDLLPWa/jGEIzBAT8ZiHksqwrpr2y0+X3sQSs78DJ8NR2QTupBnVsDmIsXIuRCfK9Q7PTq+bvU20v6yQJOKrS0sq01N8sCt1WejN0rIIXZmQmG/CS/Z8//d/mD2vAYZKuA9HJpsn7xhIAz6d+8XO58bN/MW7WeGzM/N/wktV2emTrD+X5Z582Juv578h9hYOX3FysZ8vRXkjv0IH98sB3tpl2g6XI71563poNj8tAoBxthYFJU9M8005bH/2pHDl0MOfk40TF5bM1EaH0GsOpvgs1F/JWEtPzWY7xBUqGZxSDTNW4Taa0FAwmplaO9lKzZYw1ENRcfOLcGaNYAuX4zdKJYbQTNKi5JvWLKQ/foRNTKkd7XfWJa8xSKUyS6ITWxDkXjkGhuzCfsl/FCxY/ZDrjla3VREeBNnoi03FrwWBuDPOintNd8l+3/GfTOfDSzZ4hhfkR4lFTaqWX+xjtgHVSn7jmusws/1R5QlvQtuQ8o8FG++AhRns5HA4ZDY2aGU6UBjNn+I41QxoweMGsG9tOiYz9LGd7YT23ctY2x48l2gtWJWe6u8dYIkzmmR1b+ur6Vq62gmmeCgRoLzWxLLWt+GwV7n+52gvvponeWflSzeZdanvlS5fn044FyzW+gJYbQbXcGOiXMr5AfKztz2WazrZKE8geD+JstnYNGlWcyxewhhtjRrUmgKCA0HH8WEntlS99nk9PQE11PKjPg/5mQemG8SCWklrHg9FoZMLxRfY7lG00lkCu3ywdy2nMYuQhjCuwVEoVZ7gX6UzlN4vey7UFZuBTX7DWtdRoVF2zA/t/PIAIpazbxY8jtD4tCxaa9aRaFTzkqlnFDyd+jHV9gsbh53gC+sDCeQnaRAMGD1PhiR/Hg/vfl+tu+ONxa+ohQJg1Qhs2jlvroy9rOMRjGE+gnO0FXwuwGsEABs8mnhkMeDBJphNlKMFUn9nxtaiOM+VqK7QN2gWDD7QLnpHjx9rNJBkGM8W2FZ+twv0uX3vh96TQOytfqrl489nKR6u08+UcX6DdX335t8aBl5YCbQ5/FRrQloV+D/FsWn3K6H38TBPI1V64or9BaAMEWPBgLJIvWCfA8F7EGBLvwrr6+pLaK1/61X4+3zuw1PGgVcGGZwnPD8aDcGScvaa+0Pgi1zu02tvIWv9ytZc1TRzjeYWchbFhKWOM7HQodGcTmabvELiwHQO20dAtoDBjgm3DMDOJLcOwBZi6m8fLM1/AQ6fbSyGOppN9Dx5saO1uv+0/mqSwRcRUTNrzlWeunf/5T38sr7/2e/OnddOtAkrhiYdft4JDOtq2aJfsABN2bVNtT42DwUsuhxt6vdo/y9Ve4IhJFvzg3XbLFwxWPDNWAU5Zl/rM6n3V/lnOtrK2Abhi+xXrJJmytsbTZ1DflXy2lFLuz0LtVeidlTu19Fruid5lhdorX7rVfr6c4wuwxEQ+LBkKTf4XGl9gjLLjD6+aXT+qvW1y1b9Qe2X/BmHskeu9pulmt4O+47LHGdnxOB5UgoU/C70DSxkP4jcHkyfYSlG30+NvVmH2k7larvZC3lZZS8flOnawlq2U3yxuGWYlx2MSIAESIAESIAESIAESIAESIAESKCMBrukuI0wmRQIkQAIkQAIkQAIkQAIkQAIkQAJWAhS6rTR4TAIkQAIkQAIkQAIkQAIkQAIkQAJlJEChu4wwmRQJkAAJkAAJkAAJkAAJkAAJkAAJWAlQ6LbS4DEJkAAJkAAJkAAJkAAJkAAJkAAJlJEAhe4ywmRSJEACJEACJEACJEACJEACJEACJGAlQKHbSoPHJEACJEACJEACJEACJEACJEACJFBGAhS6ywiTSZEACZAACZAACZAACZAACZAACZCAlQCFbisNHpMACZAACZAACZAACZAACZAACZBAGQlQ6C4jTCZFAiRAAiRAAiRAAiRAAiRAAiRAAlYCFLqtNHhMAiRAAiRAAiRAAiRAAiRAAiRAAmUkQKG7jDCZFAmQAAmQAAmQAAmQAAmQAAmQAAlYCVDottLgMQmQAAmQAAmQAAmQAAmQAAmQAAmUkQCF7jLCZFIkQAIkQAIk7WVFBAAAIABJREFUQAIkQAIkQAIkQAIkYCXgtn6p9uPu44eqHQHrTwIkQAIkQAIkQAIkQAIkQAIkUEYCjvau/lQZ02NSJDDjBDyJUYm5gjOeLzO0FwH2E3u112yVlv2kvOTfPtAl4VhcLrugVTwuV3kTn8XU2E9mEb7NsmZfsVmDzVJx2U9mCfwMZktN9wzCZlYkQAIkQAIkUC0Ejpzql58+/55IKiWjoZh88rLl1VJ11pMESIAESIAExhDgmu4xOPiFBEiABEiABEigHAQOHOuVplqfXLK6VV5772Q5kmQaJEACJEACJGBLAhS6bdlsLDQJkAAJkAAJVDaBPUe6ZXFLndQFvVIf9MrpvtHKLjBLRwIkQAIkQALTRIBC9zSBZbIkQAIkQAIkUM0EOk4PSdCbXsV2pj8kj7+wr5pxsO4kQAIkQAJVTIBruqu48Vl1EiAB
EiABEpgOAr/bdVzqanwS8HtM8muWNsvRU/3SOxyWebX+6ciyatM80jkg7xw6Lf0DIRkKx2RevU9qA15JpRwyGolJKByTSDQpixfUyvpl82XVeY0ZVu2nByWZSMny1npxOByZ8zwgARIgARIoLwEK3eXlydRIgARIgARIoOoJdPaNSEvDuV0lYGLuEIf8+pVD8sXr1lU9n3wA+ofCcqijX9462CnRaFJWtDXJkgW1RpB2OV3idTqluTFgbn/vaI+8ebBLPjjVL26XS2qDXkklUnLq9Ig4XaMSisbF43ZLwOM0AvX7R3vkQHuvjISj4nY5zR/ij0bjUuP3itfjEJ/XI45UykyWJBJJCcXiUhfwSH2NT5rr/OJFegGPjISi4vE45aMXteWrypjz0WhCnn+zXUKRmNTUeKUx6JW6oF88Lgj6DlmztGlMfH4hARIggblGgEL3XGtR1ocESIAESIAEZpHA8dND8ureDvn4xqVjSrF2WbO89u4JeWPZKfnwBYvGXMOXo50D0nF6WEYjUbn2w+eb68dOD8rQSEQ+dP78cfHn0okXdx6Tgx290tU3YoRlp0OMID04GpG3D4jE4kmBItrlxHm3QCAejkRkXl1ALlw+X7y4UGQYGIlIKpkUj8stNcG0JUIylZSB4aj0DYclkUhIMiISiybE63FLKJKQ3sFB2d/eK06nQ1wul0SiMcF+sy+9eUyCfo9EYgm5YGmzXLVxiTTV+qV/KCKv7DkuhzsGJJ5KGu/1IimJxpJSE/BIMpmSRDJlPpGO2+mQRAplcko8kRKPG9vLpYyG3u1ymImBRfNqZcnCWmmdVysNNT7xuJ2SSIgMjIQFkxXLWhtkZVujmUwoEsWkow2HoqacqENd0GPydJbZUuBwR7/EEglprPVL67yaSZeVN5IACVQGAe7TXRntwFJMgQD3NpwCvCq6lf2kihp7ClVlP5kCvLO3/r8/flUagl5ZvuicGbOmemYgJDv2nZR/d906+diHFonTeU5Y/C/f+634fR6BAOhwOsWZShkhr3coLMlUSj5/zYfkwxe0SrmFGy2b9ROC4PBoVAI+d04hbir95HT/qBw60Scd3UPyQeeAhCJxiSWS0lQTkGWt9UaYtJYl+xgm+rF4wmiJ59WdsybIjjcT3yPRhPQNhaR/NGKE5OFQXDBhkIinpK7GY4TnWr9HFjTVFDRfhxk87kmlUuL1ubHLnIQiEfG6PeJ1O02anX2jMhSKmHiYjPB53aYvhKNxI7zje8DrlotWzJPz5jfIpWsWGgTxeFJiyaQxs59Xn7YSKIbN7kOn5WjnoIQjUUkkHUZLH47EJRxPytBoxHz3e9E/HMYqAVYB9QGPXHT+fLn8glZZ2tpgsimlr3zQhcmNHvmgc1BOdA2Zenu8TmNhEDdMveICYIeYiQz0T0xg9AyGTD9ta6mRC5e3yMc+VJwFQjEcGGdmCJTST2amRMyl3AQodJebKNObcQJ8Uc04cltmyH5iy2ab8UKzn0wN+d//yx5p7xySS1YvyJtQ98CoHDnZL6vPmyc9/aNGa9ndPyKJpMjFK9P3hWNxY47u80DjKQJBtfPMiNTXuOWqi5fKJavTAlXeTKZw4Z9e2i+/3dUuC5uC4nA4pbU5KG3NdcZ8e2FzrSxpqZFGT1xirokF3t1HTptyQ4BPisiZgVGBU7lkyiHBgFuSKZHWhqDU1/qmUOLKuRUCMLTZHrdDAr60Fn0mSwctPrzkQ/sdCseN53ynyynJRFJSZzXni1ugKYc5vUuGwzFjQRD0ekwfc7rErHEfCsWMMJtMJiXg9YjXm9a8u51pM/55deP9EqDuA8MR6R8JSzSeEL8XGnCHrFgYlHmN9TKvISALG2skkURZHPLByX45dWZYDp7sE6/TJQOjYYnGEuJ1uaSlMSBt8+sz6DDxFIslzKQEJqVgfYCCw8ABlgd+n0tc4pDBEeQfFcxlQUMe9Dkl6MfuAT5ZPL9ONq4a/1zGz1od+NznJsAyGfNgxgjwt2fGUM9aRhS6Zw09My4XAb6oykVybqfDfjK327dctWM/KZ3kW/u7ZNehLtl7tNus/b10dWtRiUTjSenuGzXOvqC5hXkyzJcLBQjrvUMhY0a8+dp1OeNDUDrdH5Kjnf1yvGtQhkMx6R0MG63oisWNsmxhnSxZUG8EH2iZvS6HvNveY4S1gyf6jOn20oUNsqAxKEOjURkYDguEMJhUO11uicZi0lTjltqaoEBzmjJCUFzaOwfF63ZJjd9jNNeRGJyYxcXhcogjJWa9NBycLWgKik4mFKorr02NAKwH0L5Yk65O4sKRmPQNRwTm4TANh6CMHtdUGzBCLNrS73MLNNgwYZ9KOHVmSLr6RsXtSInH6zH9EBp9aKyxlr0m6DWWFDU+jzTU+E0ZYao+1RCKxqR/OGL6MSayYrG46b8Q0KEVh/k+JhHwqHlcLnMNFgaYiMDafSfM+cNxmd8QlAXzggLtPq5DiF++qEGWzK+bahF5fw4C/O3JAWWOnaLQPccatBqrwxdVNbZ66XVmPymdWTXewX5SWqv/+Jk98vaB09JSH5CF82rMX2kplB4bgjCE7/7hsBGQfG63uN0OI/BCswhtp8vlNObHo6GYyQBroOE8DAKW2+00whBM1rGlGb7DnDyVTBlhvBihord/UI73hIypMYQ6rEGGnrAm4DbOyzCh4HE6jAd3Ctilt/FcusOViknCMXVhuhxMegZGZSSckFQyIS63w6zr1/XiWM+PSYohTEqEo+LzuM1zFInFM1lDaG+o9QvM2OuCPjlvQa001gZkyYK5L4hHInE51j0ogyOx9BIYcUhbS60saqnN8JnKAX97pkLPHvdS6LZHO7GUBQjkelH1nO6Ut1//vVy44VJZsmxF5u6dr/4fwTWEZStWm+s4PtF+RPa+/YY5D1OtSz7yR9KyoHXMeVzEucuu+FcmHv/Zi0CufhKLRWXH718Sn88/pl21/8ChkNfnl49u+qQEa2pldGRY/vDyCxKNhI1Jn/YTkNi3+y1pP3LQQLH2LXtRYmlz9RNQwTsCbWxt80L9Ide7Rvvb0EC/AW1919iN/JsHuuT37xw3GrWNqxYaE92ZrgM0lRCwsU4cQjW8adfX+qWlLq01nM7y5BKknLFBcQ0cFofAfNgpiYaVkvSkTYRdw8fFFT5tipTwL5BE7Xnm2BnuEfdwuzlG3HjDanPsSETEPfC+OJJxSTndEm+4QFKuqWleTcL8N+MEsvuKI5UQ98ABccRHTVmm0h+s/QSJzURf6RkISd8QrD8iZrIJHukxYXXxmoXyf61plZWLx/tymHHoBTLsHQxJZ29IegdHjZPG4XDcLAeAR35MSsBKBlYAS1rqzcTEyCgsXOC0LykOs0gBicMaIGYm7Or8HsGyE1gJzGvwCZYg+GBJIA7jFBEO+iPxhCxtqZPF82vF48ntwzrXb4/1d8Q6/rT+9ljHKCiZ/s5kj2us41xrWgVQ8VKZCeRu+TJnwuRIYCYJYGDce6ZbFi0Z6zkXLxyE6//035iX0lt/+L0RwBua5knHsQ+M0IUXEQSuw/vfE5x
HoABlMMy5f2jn3W++LuctXykjw4OZ+uEHC+2vwhX6zXvvvGn6Bz7XXLTeTORY+8lAX68MDvTLJz99k0lH+xb6E4P9CWDggzC/dfGYyuTrD10nT5h4ud41GAhd8qlPm0mcMYnZ6MsLOz+QV989aUxkp3Nt9URI4FAKa2tzra+d6N7puO4I90q86SIjHEOYdo10SKqhRhzxEXEmRiXWvNFk6x48JKnYoKScPnGFuyXWtM7c4x44KLgv6W8R1/AxSQTbzLFJa/hYRiCfjrIzzZkj4Ij0ScI/37StEZoHD0oq1jDp/pByBSXetEJSjrT/g+muSUtDQPBnDRDCDx3vkwPtZ6S5ISh1Aa+sWTpPAh63ROJxMzGGOAMjUfnYujbZsHL8bgTwe4Bt78oV9h/rlTcPdMrp3lGzxj0UistgJGKWfDTVBczyAnjAh2MFl8thvPBjHf2yhWkP+PArEQknJNDoEZfTKfPPbtWXXb7u/lEjrGN5STQeN34asJuAy+00y0qwJR8sBGBpA6eAOE45RBprfOYc8k4kUuKC1wdnennKaCQurtigtNaJ+M/7qCxfEJQT7++U997bJ/6mRXLsvTfE37xMfIEWGTlzUnb84TW5/MpPyOhgT85xDYR0KAU2nf3twW8axjZWpVR2vfh9YgLxZFLePdIj3f0hOdE9aNoWy5KwdGRBY8D0G4/HZZwfhqMJodA9MVPGsBkBaLcRIHxbQ2fHcWltS2sXPB6vtC1dLt2dJ432GhqncCg964xz9Q2NgjjQSNU1VPasrbWOPC6eAATiq//1jWaSxSp0Q4BG0EmXhYuXCPrOmZ7TEomEZV5L2hEN7sePFuKjz6A/oc8goP9o3zIn+M/WBNS6Zdcbr2bqgUFMvv6Q710DawkEj7d8A8tMgWbg4IU32+W9D3qkbzBkTEqxvpPhHIFE3fLMl5SnTlKRPvPdERmQhK85IxQlXUHBOXEHJOmuzWiwk74mcUb6BPcipHzpvavx3RHuFgho1HZnENv2AJMqGtCeKXfaIZ8jNlRyf3BgbzenK9O3NN2Z/myq8wv+EPYf75UT3UPmXYF19RCk3U6n2YEADuZ+8dJ+ae8akE9/bKV09gzLv+w4auIGfemt12oDHrOmvg4CuENk+YIG2bB6gTRanA3CB8Sb+7vMEhM4w4vHkkZbDbN4aKxP9Y5Ind9rtqBrqg9KbcAt9XU+WeltMg70iuEDz/BShOX4/Mag4K/YgPJh+QnWtKAeUKunEgmzLWA0mZSRCIT0pDgdfuk7k5TQqQ55PhKT5YGI7O1sl9FUtywPjsrB44MSjg2JV2JyYeOg3PuD5yWe8sjqJedJojchwUQ0U6TentMyr3l+ZrIXY2H8TkHoPnlm2DgLhMUCnAxONmAyYV97r0TjsABwS3fviGBpAhwDxhIpOdY1YBwLLmttlAvObzaTHXAkCT8XfcOjcqwT3vqdMjgSNg4OsX1iMgk/BEnjIwPbCsLPQE2Nz5j1z6tHnwtILB4XlyPtBBA+Exrr/GZCYyL/INn1xG8bHHbCgqoHzhjhz2MoLOFYQnxul1lCAQE6FI6Z9hseichoLC7JhBi/Hig7+mtPfxiPpPQN4t64WaYBywmzE0Z2pvxOAnOVQE1tnRGitX544eAcAgbVmPmDiXm2Zhvn1PR83SUf5sygApyjnxCM4vGYxKJRI0RDqIaA5XF7xO32mGM1NR/s7xNZJjIyPDRmcgYTNehfDHOXAATnfP2h0LsGFhEv/OZJAwb95PI/+kRmsqaSab2864T85rXDsnRBnaxfsdBohiq5vLNdNghQKZffCEOORFhS7nOaQRxDuB4XXF5jmu5IhMxn5rrTLeJwCgQsCt0ZKnPiABMpGLWn3DXixESMNRTRHxAdEzjeyJvmTusSBWtSM3m89ry0lWChPN/cd0qefvWI0ZjDFPviFQulsc4n8URCQmHsCw8zbzihc8qp7hH57TvHjCNEeKDHFnAw98ae7ljIEfBgvTy2F3QaAQfb6LW1NAiE90oMNQGv5Np1PXsZgrXsWJLgGhw1y1LwHnCNDsmGhQvT7xcsVxgMyb9at0AGY37pHhyV99sHpMUzLC//w2vG6mCxt9doXV/v2CtwJhh0hqQ23i9PP/p/pGcwaq5h0gT+LSAEw8M+hEto6xuCPuMEcCgUNQIw1vTDsR7+MDEC7/fwrI9lPghwWuj3ewWeMkbCcfG5HVIT8JkdBOKplGnXV/YcTzunjCWMKb9DHNJUH5AaP3TBDvG73SJJh/mOfMyuCPGEdA+EpKNnWA4d65VQLCYjoZjxMQBfHigDhHhMZGD7QDhChMPAebUB43wznkjKqiVNZtnAskUN4naIvPpuh5zsGZZTPcNG0A94XeIwHgfh+DK9XWQQErND5PSZEbNMYDSWkHq/x5R3aW3aWsHaVoWOqekuRIfXbEUAs3XHOgcEZjkw1XEMjUiL+GRBLGFm2+YtWiX7dr0i7f/8j6ZeVtNfCNyY+YPwDQ05vuMYWnPVnEPwenvHK0bTqRorWwFiYYsigLbFxMvLz/3GxMd3nz8gbo9HVq69yPgK0LXeOmlTVMKMNKcIwKohX39YdeE64yvgmax3DfoSrCs04F1zaN/ezDtGz1fiJwa9SxfWy4oce29XYnlns0xY242/eN05fyKzWR7mXZkEjCCFZQS1SyetqYaQnWy5JFNB6xKFzMkKPLh07SIZGolKMADT7XM7FrhdLqmrwd9Ya6CewVGzL3kklpT6Jp801nmNgF2BVZuWIrmG2yUZXGgm3Yx1Q55cYFWAP2fMKc5wTBYEagTbzYVSceN3oyc6aiYvfL6kEbA3rFwgTte5yYlINC6Do1GBqT+0uh6nWxIJMZMjWKsPgRpr+rFPPZYKJJNRY8XQ3BSQFYvmGTP+PEWb0dOxZEIGh6KCiYJjpwcFWvyRMHaxCMlOR6fA5B9CeH3AayZ72ubXmTX5011ICt3TTZjpl53AEy8fkPbOfnHATQWW48TiMhhOmLUxyAwmLlgjs9Q/LIe6QvLEzl7xu9yCGbahUb8Mh1yysCkg65J90hUKy3N7X5TFvkF5s6tWYjvfFEcqKW2eIfnghd3i86fXJo1GExKPx8U5KvLsK/tE/A1SG/RJ3+CoMXupgRmT2yUNtV6zryteXJh5w9YxWMeE7Ufq67wScHsk5UiZdT54MWLrEJifRBMJ8budEoomzT6uZpuTGp/AQUdzU1DqAz5xOpPiSDlM3Xzecy/JXIBP941INAHzJIfAZAszlyiPnYMxw8J8tkOMuZLWBe2NLWCw1Qv6xLx6n/lhgEkSfmXAoFAAf7yQO3pGxO/F7LlD5i9aJlcvXmbM0HTtNjSbLTWt8qk//jOTnE7CQJDK1mxiWQIF8kLU58Y1TNzl6g8QyK/8xHWZSmISL9cyFawRV18TmcgzeIBtsPDcQDsArRG21cKEUu9QVC5dc24/35ffOW62uKLAPXHjGGdqo6ckXr8qExkab0c8rQXCSRwbLXi2xjsRNQ7YUq4AXKmLJOPYhDn9mUqaNb+ZRHlgawLGmdrgIUkEF2WsF8ZZQE
yiP2CJgrWvVTKkbMG6UFlb6oMi57YtLxR1zl3DRAraVZ0ywhdEse8HNX13hpPGuqZ2YdoKAX4inBG3xC0CN8BBuzsffwXM5bHlIf4qOXicLmluCJi/Sionhe5Kag2WZUICz+44Km+83ymLm2vE63EbpxSOlEvaWgPiz/II6RoOS6M7IIst66c0A0eoWxyhlDj9zRIKjYrbCZOlqHh8fgmkIuJMRuXw8T4ZSQwYIR7Cc70nIuf5RuTEcJ2MxnsllRKzvyzMcLA2p67Gb7aMwUQANO046fa4JBpNpD37plJGqMaelz6fW+BoI5FKGrMeeP5MpFLmGAIgZhOdThG/x2W2sonFkhKE2Y3DafaKxd6bmIXEfTC7MWUQh9k2B2XFeA2zeCiD3+cxXjejsYRJH9uEID2sj2mqh5kQ4ibE7/WY7ULcHocxR0K5MRuIbYDgtbPLOCNxmPUzHrc7vT7FBaEWa3DgQXhU/H6PJBOoZ9zMXtf6veL1OE0ZYOqFmW3UK+D3mK170vukJgTOTOBkwud2GksFxInFk+nfFUlJMp6S4VBEHMZ8zCmxeEpOnRk23orhtAJmZcakyOMy62+wlgeiNuoM1igTEgsGfGZdEa62NAWMl9EGd0iaXEPyL/vfNlsOORwpAToII6lkXC5u7JWuaK28+sQ7Zk0a9vzFBMZF9Wck5fbL328/IHWusMxz9sp7Z9Jr2jyDHdIVa5AXDr4pzY1BUxbYJxnhvntYfD7MIDvNHsOj4agReuAJFWZaMInCPAHqjwkFTNhg71/MLMOUC1sh4TrWhKEPjYSjZuuWSCzNESzdDofZymhgJCT9QxFxOV3i9bmMSRSuB7weM3mDCQf0VZxD+2OLIzDGHrZYi4etjuBnBjPayDTgdRsG6G+RaNLkjWcK5n4o04LGGuO1FSZ/6Jvzm4KSiKcEWgr0BXDvGQpJfU16kgr5RRMp06cxcRWOxgSaDMTDDLXf6zLPeDiCvhiTOjP5JKafwhQOfRR7yiKe0+Uy/aS+1mv6PrZuQt3wHMB5DdZloUzwHAsTNlzDs6N9B6yxhZRb4pJyeY3ZGthiELJ0Qb3UB30SCUWl/dSghKTGzJiDudfjEmwlM3B8tzg9AXnrcL+Eoj3mmYQQ6xjtlgbHgBwNNYvzg/1mjRqeJ+RVGzoq4vbLrqf3mP4bCHiMR1yYyWFSD5NlMLPEc4JyYkIQa+N0z2lM4DXXBcXhdBiG0BJBawTNBtJvOOuUCDP8WE+GVxUc/eBZA2Ostwv4vaafJZJ4B7hN2yOvX71yULCn9VUblsgf3jtp9rLW9yc/cxOwCtxWp1YpX4O4Rk9JMpUwN8KpmhG2nL4xa7Vhco7BtVnn63BL2kTdl/50uDPCWe7cedYuBKwCtwpSKHv22v1S+4PRnIe7jZM2u7BgOQsTyAjclnHsZN4P+fpW4dx5tdwEuGVYmYhCcDlxekBSkh7wYLBvDRhIY9CDQV7/cMR6yXhMxDoCaEoHR2PidUOg8piBXEqSZkCLGRto7PpHImbdgjUBs27B4zID4NHwuf0UNQ5mrCBz9A6kHYVhYAUvimYfUY9TaiCUQduRFOPV0ed3SzgcE2hrERcD/ZFIzJj2aJr4hDdHODKAcID9Us06Dq/bCBihaMw42sFaDAyiIcC4nW5xOFMmLazLUY+zWLfjgUlRwGvWVESiCWmu9wsUsxhgwlw8mogLxit//8we+eiFiwVrYjTkWweDLVowc6xOS6xba2Rvq4FZP922Rbd6wRor67Yeet76I6llmI1PCO9wSoI+hb5jhC2n0wiEbvd4L6YYYGPwDocTmBiIx9JCNQRkpAUhFUI2tOkBv9sI+0gT214gDwzcIZhA4ILjOb8HwpvbCLXo/xjsY6BvbhQ4jfAITHxiiYQk4mLyGIlEjZAOIRIZQqDzuNxGw4wTyfTSGXGkUmfXa6XTwbocOLLQgD4HYQt9Fmmg7+ULELpQb0lEJZJyid/tMuXE+aDfI+74kPEYbDUFtW7vE/W2yKivzTwP7tgZaU6mt5wbTgbkUKhVQuGIqfLKukFZ6B02xeiJ18sHI42CpUGYHEL/N0IrzLXcHgn4TKGN4IOiY4oG/R0WGqgLouMceGNCBM8BAmqZhOAVx57DrrRgLikzKQKBEm0AoRzXMCkDbX8qma4nBES0JSw+0E44j2cT6YEPhFDkgjxUGIX1hdeLvVrTfDGxhfJiOicSSZiJINxjhPR4wvQNr9tthGJMfHjdDkkkHRKPY92WU3yodwqTIXhPOcx6MQjs6CMQXvFOSWAyweM0fQwmIW4HfLom02U3PNLbtoBNyoH0Eccp4XhCPA6HeQ4iiYRhZ54NNIIDE0t4O0N4dZk6oByoMc7GY+lnCOy96GZOsE2YeoA73oGo92J3lxwf8stoKmCesxbvqCwPpB3v9Ua8sneg2UyQ1HrjckFNt7gdCYklnXIosliiCbe0+IalzdNj2hL/hhIBORFfZMqG+oMy3nWwgAE/5IuJETgJwrpHlA+80H6YOMPziXOYKEHAhFF6+Rkm7SBWpyTpcIjrLBf0PbQe1st5vU7DO1OYrAM87x09Q+a5RRuvWTLP/IZlRavar9m/O9bfF4Vi/c2wvlOmskWUps1P+xDI7ivGBDx2bscM1ET7hHUsgrHGRFvIWfuVNR370GFJlUB2P8luW8TTPmF932SPZxHPLHEJ94xZ4pKvb2n+/Jx+AhS6p8j4jfdPyeGOAdnX3mOEYTN4hTbF7TDaJQzrojEMUc2Y2zgewPgZGkwIPkabhcEghA2HwwysMGjGEAoDSmhQMHDUgMGWClhm8IqB7tn7MTDCoFsHbxhkQnDG4EsDBm4QuvBpxvFnTZ0h0EOzhZjQcmCQjHxjSZTNYcoN80NojBAQFwN2k3YKA710vk5HyggZRsmKAa5DjCOFGh8EfeQJITxuBGkMBBEgGGDrBKcb2sqk8TyJUTI0VhhYQwjHYLwBDgucIivbxjrpyH5RmUT5jwSyCLCfZAHh15wE2E9yYuHJLALsJ1lA+DUvAfaVvGh4wUKA/cQCY44e0rzc0rDfe2qXMSmEhmNeQ8AImFhbCyEWAZ78hkZjxsEDzA7h9h9aHZhgwoFAU9057aslWR6SAAmQAAmQAAmQAAmQAAmQAAlUKQEK3ZaGh5klLBHjDpGB4UjajBIb1sMhVgCu592ywO/JOKdqa647a3Z4TpNsSY6HJEACJEACJEACJEACJEACJEACVU6AQrelAyxpqVLXiBYGPCQBEiABEiABEiABEiABEiABEigfAa7pLh9LpkQCJEACJEACJEACJEACJEACJEACYwikvWKNOcUvJEACJEACJEACJEACJEACJEACJEAC5SBAobscFJkGCZAACZAACZC7x2lHAAAGBklEQVQACZAACZAACZAACeQgQKE7BxSeIgESIAESIAESIAESIAESIAESIIFyEKDQXQ6KTIMESIAESIAESIAESIAESIAESIAEchCg0J0DCk+RAAmQAAmQAAmQAAmQAAmQAAmQQDkIUOguB0WmQQIkQAIkQAIkQAIkQAIkQAIkQ
AI5CFDozgGFp0iABEiABEiABEiABEiABEiABEigHAQodJeDItMgARIgARIgARIgARIgARIgARIggRwEKHTngMJTJEACJEACJEACJEACJEACJEACJFAOAhS6y0GRaZAACZAACZAACZAACZAACZAACZBADgIUunNA4SkSIAESIAESIAESIAESIAESIAESKAcBCt3loMg0SIAESIAESIAESIAESIAESIAESCAHAQrdOaDwFAmQAAmQAAmQAAmQAAmQAAmQAAmUgwCF7nJQZBokQAIkQAIkQAIkQAIkQAIkQAIkkIMAhe4cUHiKBEiABEiABEiABEiABEiABEiABMpBgEJ3OSgyDRIgARIgARIgARIgARIgARIgARLIQYBCdw4oPEUCJEACJEACJEACJEACJEACJEAC5SBAobscFJkGCZAACZAACZAACZAACZAACZAACeQg4M5xrqynhkYj8g+/eUfe2ndKorG41Aa98pU/v1xWLZkn/98/vi5/cd16WdraMKU8Dx4/I0++sE+++m8/Kn5v8VWa7H1TKmwJN0djCUkkkhLwe0q4Kx31Fy+8Ky6nU/70ExdKKBwTl8spXo+r5HQme8PgcFi+/b9ek7+6+TJZ1FKXSea3bxwVcMd5a0gmU/LcHw7Jr17eL6FoXOKxhNxw1Vr5s09eZMpujVuJx6mUyEg4KkGfR5xORyUWkWUigVklMDjQLw/cf690nuww5airq5c7775XlixdNqvlKpT588/8Rh5/7Edjotx+1z2ybsPGMefK/SUSiciPf7BNbviTm0rmo2VuXdwmd91zn9Q3NI4p3t7du+SRB+4fc+7zm78k11z/6THnquWLtV9OxOGxR78vl1x2+ZTavxz8Txxrl6d/+aT85V/dKj6fryxNBQ6Pfn+r3PLl28b1Ge1TmlG+vqXXK+0zu/wf/+S1svmWL1daMU158Oxv++7Dcs11N8jSZctztgnqg4BnVuPv2fVWpj54R+E9e+pkR8XWM1NYHpBABRHAO/63L2yX9RsvlVu/esek36/5fsOLl1AnASUSS8j/+Kc35CPrl8hX/uxycThEIIwFA1452T0kuD6vITCJlMfecvjYGVk8v7YkgRspTPa+sblP37dHf/mWrFjcJNd+bFVJmUBYP3qiT264ao2Eown5zuN/kBv+aI1sWL2wpHSmErmrd1h8Xo801Y9tXwjcH1q5YFzSz752SI529MrDd/xr8XlcEo7EJZlK2ULgRmXaT/XL3//qLfmbzVdKfbA8g6BxkHiCBGxKAD9AGNBDqJlugbXciKyCGISdnzz6fTMYzhZmy53vZNKD4LR3zzvyyNYfjhOckB4G6y+9sD3v9cnkafd7drz2inzik9fO6KRDJQt9+drT+hxg4gDP81QGpfnyKfd5lBXPxNZHfzrpAXS5y1QovYP790lzc4t5T+J5nij8/Kc/lnXrL5avbbl7TNTVay80E3d4Z1XyxOaYQvMLCcwiATwrodHRaX1XTKt5eUfXoIxGYnLJhYuNwA2W9bV+cbuccqp7SFqba6U24J0SYmgY97efkQvOHy/IFUp4svcVSrPc177y2Q+XLHCjDEMjERkOR2VxS534vS75f7501aQF7ncPn5afb3+35KodPt4rbVkTIcOhqJzuHZElC8daNoSjcdm1/5R8/LLzjcCNzPw+twQnoeEvuaBlumH54kb5+v99NQXuMvFkMnOLQCQcktGREWlsbLJ1xeYvbJWWlvkyODBQkfVAuYKBoPj8Yyc7UVgVyHNpvyuyMjNUKGgDobnNFX791C8Mt1zXqvkcNLAIeK4rPUDjCyG2HBYBM9Ef3t65w1hTFMMVk5lnzvTk7L+o7xWbPi4vPf9sMUkxDglUPYH+/j4JBINleVfkg/n/A0IyrNY1aQi7AAAAAElFTkSuQmCC) ###Code thusd = web.get_data_fred('DEXTHUS') print(thusd) thusd.describe() ###Output _____no_output_____ ###Markdown Write the code to show the minimum exchnage rate, maximum exchange ratethe standard deriviation, the exchange rate which at percentile 90 ###Code thusd.info() ###Output <class 'pandas.core.frame.DataFrame'> DatetimeIndex: 1300 entries, 2015-10-12 to 2020-10-02 Data columns (total 1 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 DEXTHUS 1245 non-null float64 dtypes: float64(1) memory usage: 20.3 KB ###Markdown get the date which the mininum exchange rate, maximum exchnage rate, and the exchange rate which is at percentilie 90 are ###Code thusd.DEXTHUS[thusd.DEXTHUS==thusd['DEXTHUS'].min()].index.tolist() x=thusd['DEXTHUS'].min() x thusd.DEXTHUS[thusd.DEXTHUS==x].index().tolist() thusd.idxmin() ###Output _____no_output_____ ###Markdown **Hint**: to get the index name of the data you need, you can use the code example ###Code import numpy as np a = pd.DataFrame(np.arange(10).reshape(5,2),columns=['c1','c2']) # get the list of index name which the value in series c1 is equal to 8 a.c1[a.c1 == 8].index.tolist() ###Output _____no_output_____ ###Markdown Loading the gold data from fred (using this data "Gold Fixing Price 10:30 A.M. (London time) in London Bullion Market, based in U.S. Dollars"),Show the covarience, and correlation of the value the gold price (in US) and the exchange rate. and the covarience, and corelation of the percent change for both of them. 
###Code ###Output _____no_output_____ ###Markdown Data Loading Pandas support many data loading scheme and Data sources---There are many format that the pandas supportStart by the very simple one, the csv file---csv is standed for comma seperate value, the value is seperated by the comma you can see the example [here](https://drive.google.com/drive/folders/1SERgw7Ow98SmnVfF3tsH_JCJa6d56aOr?usp=sharing)If you used your own computer, you can refer to the file directly, but as we are in the Colab we need to upload the file first. Open the small tap on your left.![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPsAAACTCAYAAABI1W+RAAAgAElEQVR4Ae1dCXRURbr+YzCAE1kCvgkEgkhAJiDK80FkU4cGZjKjBhcii8Mbos8BnwnIJtuBkQOyBQaJc4wiIAooYR7H4MK8kQZn8thkEASSw6oSCIQDRpYIEgx55yv471R3bnffTrrTt5P/P+fm3q7lr6qv6qv/r7o390ZUVFRUkIggIAjUegRuqfUtlAYKAoKAQkDILgNBEKgjCAjZ60hHSzMFgbpB9t1LyJGeS8XS34JAHUagnh3bXnool5YsWE5535RSWVQ0xfdOo6mjUygh2o61DVKdMEFNzPWoPGW+kzLu9xjtPULpJsp0ZlBX7yklthYhEFTLPm/ePCosLPQLrrK9S2jEC6uIhmbRuo1O2rgui56JXEXpU3OpuNwvVbZK7DcW92eQ0+m8cazOoERKpIzVN387q0F0W6EilalJBIJK9kOHDtHly5f9aE8h5b6eS9G/X0hT+sZTdBRRVHQ8OUZnUJ8D2ZSz96aq8kJyLkqnlGQHOZJTKH1RnutEUFpAy0enULLDQckjXqXc/SWudSgvpjw9/zt7qDTIE4n/WLhW2fSXp3aUF9O6dAcNeqvgZrZSypudTMmTnfSP1xw3PYZcGu9w0JLdppolsBYiEFSy+43XhSO055tY6tc73jVrwyRKW/hH6hNbRkSllDf3D5R94TeUleskZ24WPXj8VRoxN49KVa4Scs4eRxtbZNCKjU7KXTiQSvfs1PSV0s5Ff6Alkc/Qio+d5Fw9h7ruHk+TN4Tbit5LOyJjadDYNIpe+wblolkFq2jJ/yXQyNEOenC0k5zzU4gohTLFQ9DGRe2/tBfZr5TSJYqh6IbuwEdR7H1J1DUuiqjYSTmbO1Ha6GSKjyKiqHgaNCODOm3OIScGdnEeffhFJ0p73kGx8AxiEmnYMMe/FBY7adVfkyjj+SSKiSSimER65ikHFXy+k9zs/7/y2PHKVzvaDqMJTxdT9rLVtPyNdRT77FRKibVjQ6RONYVAQDfosC6Fu6rL/Pnz9Z+0fPlyl99+/yg6TgVxXWlCjJYzpg0lNC6g40UIO04FjRNolB4fiVnhpiA/OemPjzg55MY5sRPBbwiUBB0LC+1I/N0USh42nlZHDaKsx4XpgerbcNUTULIPGTLEZY0OoiOsdevW1vBpGE23UwmVXqmcvKz0xs68+Yb8VSozyVNZy82QxoMoa/1ISvSYoPoR1cbCShV8taO89AaWUWV0FTNZJY/JSiGSprYgEFA3Pj4+njp27GgcAAlEdw/zCF7j9tS1bTFt+sJtB/9KHmWmDKLl+4korg0lFu2hfN3nLimmwrJEahN3M/7CUTqux5drNvuOFhR/YSfl60UEYXOu2lh4BOlmhM92lNLONzJp56/HU0bbjfTqe7xZ50uxxNdWBAJK9uqDFE/Jo1Ko9J3ZtGR7IZWVE5WVFpLztSWU13kkpd5HRLEOSu2bT8vfclIxOFxWSLkLsim/byo54KnG9qGB3f8VX1ayh7KXaS57vIOe6V5MyxeupqMXiKi8hArWpNOItwoC6sZXHwsfGny1o2AVZW7uRBm/S6aUF9Modu0btE4tc4BRC0qgQio8W0Zl2jzoo0SJDnMEbEZ2ouj7M2jFnAepeFk6pQxwUPLgcbQxKo2yZqdQLDbUKJr6THqT0spX0R9SHORISadNd2TQm5P60A0XP4YcUxdS8uklNCLZQSnjNlKLHklaN8WQY9ablPFvm2jcYAc5BgyjBYX9aOrQRNJW9lp6u156aUd5Ia1etI6ih44kB/Yu4gbRqKeLafn8m88qxPWh1IdOU/bgZBr3V90FsmtbpV6BQCBC/sU1EDCKDkHA/gjYzrLbHzKpoSAQnggI2cOz36TWgoDfCAjZ/YZMMggC4YmAkD08+01qLQj4jYCQ3W/IJIMgEJ4ICNnDs9+k1oKA3wgI2f2GTDIIAuGJQEQFUehfJS1vsw7P0SO1DisEbGHZ8/Pzwwo0qawgEI4I2ILsp06doq+//joc8ZM6CwJhg4AtyH7hwgU6d+4cFReH29tiwqafpaKCANmC7D/++CP98MMPdPHiRTp//rx0iyAgCAQBAZeXV8yfNy/gRfCn5HC+fv06TZk6tVIZJSUlVK9ePRWPF1RGR0dT/fr1K6VzD7h69ap7kPwWBAQBDwi4kP2WW26h5s2be0jqfzAIziRH7vJy87dEREZGKrJHRUXRrbfeSjjj8CU8kfhKJ/GCgCBA5EJ2Jl2gCM9khEXH4Y3sIDkTvUGDBpYsu3SgICAIWEfAlOwgXbNmzaxr8ZCSLTtIDrL/9NNPpinhwuOANYf7zodpYi2QJxMtSC6J6MrVciorv07X5fkFGQ8aAi5kj4iIIBxw53G+/fbbtaSkwlwCfPzQyY5rT2TnMlEuvAv2MHyoVxOErzR1Lf7y1XK6VhFBEbdEknqxT10DQNrrEYFKZAfh9EPfKAMpdXH/rcfhWic7LPu1a9fck6jfXB4Tnc+mibVApBNxRaC84rqaLF1D5Zcg4LZmBzl5fc1nhLkLSK4TXb92T4vfnN5TOj1evzbTpYd50qenqWvX6C3Bpa71urX2utxnZ7Jjja1f85qbJwA+80TAZ2tFSqpQIvB9SQm98HwaHTt6JOjV+PHHK/Ty+DG064sdQS9LCvCNgAvZGzVqpDbmYmNjqUWLFtSqVSv13veWLVuqW3K4/833w5nwOIPsQnjfYFc1xdq1a2nWrFl08uRJyyoWL5xP6/+SYzl9qBNiEhqW+gTF3dHYODBRYMJgQZv0eFyjjZwX8Sw80SCM493z4jcmIhwoG+lY9DCU4V4X9zD3uul1YZ2hPKM9LmT3VBmsjXE7rHHjxvTzn/+cmjZtqtb1THgmuxDeE4JVC8cThQsWLCBMwvreSdW0hUeuDz/5Xyo6e4GOnbjx6PSMaZNdCJ/
1xlIVjzQ4nngqlZrGxNDMV+fRJx/lGl7Epx9/pBo88r/TVfzqnPUqPfI/858jlH7k79b9gWoDA+KfPn3K0Am9Y8ZNrLbeQCnAJLY4c741srsX+rOf/UxZfkwAILhu3YXw7mhV/ffmzZvV57Puuecey0ow8GCxFsydTemj/suwhO6W5uiRw9T/4V4qXrdasAD6bwwU3e1ni8lW0t0i6la0XetYWrVyheW66wkbNGhIk6ZMp5OFhbR/31d6lOl1u4T2NOrF0fSXnA8o/8B++kvO+/T8yP8m6Amm4FHvnTu2UdIDPatVFvcb44p+YPEUx1jrac3CtmzeRN2SHqga2bkSePjmtttuMzb1xMIzMoE5Dxw4UC2l/NEGSwfLMmHSVNKtoG5pLpw/T++uWEY56z+iA4e+sUwo1AMWs0WLloZ1hcWEZYVgIpj76kx6MnWwiod1hhWtqjS8rSG1io+nE4X6t7o8a/tl336qLQN+2Zu6J/UgTADBFhg8EB0TK0hZFUG+/8n5QPUF+k73OEBkPQ6ez5SJ49SeC3AHibfm/cMotqTkO2rcpAklJHRQYegTTEa9+jxYPbJDW0xMDDVs2NDFwiNcLLyBv+0uMBjg9mKw8ICxSig0ZtfOHS7rW24gW+DfPPIoB1XrDKuMiUUX3VvhNTfHoy2YaBI7daZHUx7nYEvnz7c4qfPdbQ1PaOBvf2UpHxJhgv3H9n/SG6+/VslT8qUElhhkHjN+ojFpch4QFZ4K2oS2QbDs+O2jKfTV3j3qN9p5+NBBoz8Q3m/Ar430Vy5foUsXL1JMTLPqkx0lwsLjdo9dLPuxY8eoX79+lJiYqI5du3YpYALxR9eNMvC7LgkGNqwJE6Oq1swKZhjsWAvronsrugVEGtxhAOEK8g/QW9l/dlnr6zrMrh/+pcPFssKC+iPwIj77fKux1/Ds8GEGAb3pgSWuoApFRk/pWsfHe4qiuFat6PZGjejo0cOqvWj7vfd1NdJD//nz36vfpht0n376KeXk+OeSYPOOb9mxVeezUXINXiQkJNC2bduooKCAunXrVqnkRYsWGZMBrln0cEwW7hNFu3btaNOmTUo3yqiLgiUBiIYlAKxSsAhfdPIkffnPXeRtsDP+mBhAcFg9WFnk4006TlPVs6fy4XW47wngN/YKQGAQzZfA4jZp0tRrMt3rcp8AUR6WEXDlgRcEEwCLrr8S2U+fPk2HDx+mvXv30rvvvst5fJ5xWw679vpmnc9MIUhw5coVmjBhgioZEwGOsWPHGjXBNYd/9NFHlJmZGZbWO77NnWqthsHhr2BTDO4f8mINfuTwIVMVvKbmSAysE4XHjUGX/ecs0w06vk2lbyyxDj7DvZ0+5WVFXis75lhCgOBwa3mzDhMR9FRX3NvFrjfWwWYCVzqCIlysNbwObIjqm5/IC/e8w90dTT0RJrLeDrQT/YP9CRZY8tLSUtq+bSv9Kvk3LhMQ+giWHxNPJbLj/rrD4VB6duzYQcuWLWOdPs+8O8/uvM8MIUhw4MABdSdBJ7inauCfgXCbMVSyZcsWmjZtGmVnZxPe5oOz1fvtvG7GjjjWtu678Z7aBGKxm468fR39qX2Hu1VykB+DlXeMEQ/rBtceApI9lTqEHuzxHyoNJhxsFPojWCtDP5YJWKvqG4vQ475mh1cBImHTCrvxvCmH9mNzD5NVVSY8vc7Q+dL4l412cd2AFYjv/nwAyPl69tvGulnXZXaNNgJH7it9LwLYAgdeNqGdvN/CulA/GNuNH28wNuY4jicMrP1d3i77zooVhAdocHz77be0fv16ladXr1707LPPcn6PZ7xt5syZM+qeMJ5358c2+Qk8PBt/V7t2lfLr5d5xxx3qfj4mDl+C2x5mgnU07k/PnTuXmjRp4pIEbjqAwW2tffv2qbiVK1eauvpw4bGcmTlzptqE1BXhjTqTJk1SXgJce7tISal8cN0ufWGXemBCenHkc67/z65XDutc3E9/7733aOvWrSrKF+HxL6pMbCa6rjPU13DhsUw5ceKEspKYCEDoyZMn05tvvkkgLdJMnz6dPvnkE1VdTAS42yAiCIQrAlgqYLe/khuvN6hHjx6UlpamgkB4Xy49/g8+lJtyet29XY8bN86w+J07d6auXbsSXo0FAbHhFWDdvnv3bmXZ2cPxptMucbe4/WeiXeol9QgtAlhyeCV7VarHG3RVyRvsPCAy9iRg2a0I0mPCw5ImXCSqXsC7NFyaLvX0gYDXkbF9+3Zavny5UmFl3Y41uZ0359CQPn36KGvNb7HFhh3I37595aetkAZrduQJF7mtfiQ1uDWSxMKHS4/VXD1dXl6hF4u1LLuvVoiOvHgzLNbqdlyvc9uwF5Gamko9e/ZUQV26dDHW7+7rdSTwtHnH+ux4BuFxiAgCOgKmZN+/f796cAQJrRIdaS9dumR7sqOeTzzxhDp0IHDN63Ws2UUEgdqGQCU3HjfnnU6naucDDzxg6ZYbg4J7wWzZ9TPHy1kQEARCh0AlsuMeNB6que+++2j48OGWa3b27Fn1jjm83MIObvzRo0eVq272yKvlRpkk5GfjsQxAGSKCQLggYOrG4/+n8XCLP4K3qOAFC3hklq26P/kDmZafXw+kTtYVTN1chpwFgWAgUMmyV6WQgwcPqmy4z86W3Q7WvSptkTyCQG1FoNpkP3TokPoYI6w6f7KJic7n2gqetEsQCCcETN14qw3Ys2ePut2Gf2/FTjasuv5MvFU9kk4QEASCj0CVyP7NN9/Q119/rV5Cia/G4J9W4MLrRIdVD4dHZ4MPsZQgCNgDAUtkx8Mm33//PRUXF9OpU6eUBYc1B9Fh0d035sR9t0fnSi0EAR0BF7L/foT5ywHxP1848Dawf9dzh/jayr/BhriKUrwgYBsEqr1BZ5uWSEUEAUHAKwJCdq/wSKQgUHsQELLXnr6UlggCXhEQsnuFRyIFgdqDQL2ctWvVmynLysrUK6W8Nc3fXXb+33Y+84stQvkSR2/tkzhBoDYjUK9NmzbqzaX4X/Sffvop4G0F0Znk/H46fBxSRBAQBGoWgXqtW7dWL5bEv7biTTOBFpCdSY4zDry9VkQQEARqFgF1nx3/4YZ71nDlAynsvoPguMZkgmt//6MukHUSXYJAXUXAeKgG3wC/evVqQHHQyQ5XHssEkB1P34kIAoJAzSJgkB3F4rHXQIo72fHsPMguT74FEmXRJQhYQ0BuvVnDSVIJAmGPQJ0kO75h99prr3k9kCZUsnPnTrrrrrvUgddYm73+at26dSoeac0EeZB3zJgx6is3ZmnMwlgvyseLOfEPUBD8MxR0cb34rNcPaZGH4/T80KHrRhr8dpf58+ereru32ZtuHS8uG2fo0sWTbk7jDTOOg169zZyX2+apP7yVbVU3yva3P7l+OLu48XpEbb5Gh4Ds3mT06NGEF27WtGBQv//+++prNHgeAYNo4sSJ6ms8+A3S4XNVHTp0UO8JNKsf0rz++uv06KOPqv9SNEtjFgZc3Mt+5ZVXaM6cOeq/GxcvXkw4WJB+3rx5hA9gokykffnllykpKUklwQ
BfunSpqj/S4qtC+fn5ShcGOL6mi6/x4NPXaDc+L/bYY49R8+bNuQh19qUb5eFfrlkYo7Zt26ogb7r1PJ4wQ93Hjx9PK1asUHXlPDhzWZ76w1fZ3nRDPzDEf5oybnrZ/l7XScvOIGGAmB0cH4ozCA1C8YNHIAP2Pr777sa3vj/++GMaMmSIOjzVD2kg3bt395SkUjgGLYgO3Vx2v3796OLFi1RUVFQpPQL+/ve/G+nxkU2kBfFZmGz4XVhYqG654l+iIe5fyEXZGNi9e/fm7MbZl24j4c0L1Bd1Qf0h3nRzXk+YARfEmREdeX31h7eyfenGhIjXs/Nky3Wt6rlOk90TaLDqbJ08pQlV+KBBg7zWDQNkzZo19OKLLyoL6m894+PjjSzYSMVdGp5ojAgitbTAuweZUJggOnbsqKw4rBnqsWTJEnrooYdUNkxa+N49u+6w+NAdFxen4l944YVKVpPL86Wb0/EZHzdJTk42Ji1vupHHG2aYOHD87W9/M5Ynuivtqz+8le1LN94EBYEnx8sTxo/b6s9ZyG6CFrvwsA5w9/GRx1AIZn64lvjeHFxdK4KBPnToUMvpWScsLj4IAkuEciGwWhs2bOAkLmd3QiESyw248ffffz+NGDFCWUOeNFF/EAauPAYuBB4MW3oX5SY/vOnWk4O4+iSkx3m69oYZJrrPP/9cPRsCLxDuNCQrK8uTOsvhvnTjjVAoGxM3ygZ+mEDh+ldFhOxeUGPLA9c2kITHgMQmD8/W7htZXCUeUOnp6Rzk9YxBgPXdI4884jWdp0jO16lTJ1W3H374gYYNG+bimiMv6o/vAMJa6wI3HJMFCJGZmUkDBgwwLDny4DewRDzq6anduk6+9qab0+AM4sLD4KWIHmd2bQUz7CMwNpic0AbUnydFM71Ww3zpzsjIMCZuTJjYh8GSqCoiZDdBTd+th6uFR4phLQNFeHRaXl6esV+AAeo+ODG4UY8ZM2ZYtn5YQ8MSM1kxKPm3FWuAgQxry/sYKSkpas2or8MBF+rr7m3wBMDLB1h0EB8H3HrkwUBFOMrBOhRLhk2bNpn0gGuQL92cmtNhErEqvjBD2+HhYd8g0OJLN/Y8YN0DJUJ2C0jyw0EWkgYkCRN92bJllSYBbwXA1WWi4gyiwXLAkrI7jfwgPrwKlONJeBcZE4Y+EXkj1Llz51zW9yASCM0PUenWEJPo7t27VbynOujhvnQjrdkkpOswu/aFGfYU4OHxpARrDlyx5LG6BDErF2G+dMNzggcFzCGMvbtH5Ul/pfCioqKK4uLiijNnzgT8gN7Tp09XnDx5sqKwsLDi2LFjFYcPH64ItSxevLiibdu2lqqxbt26ii5dulTk5+dbSl/dREeOHKno3bu3qh/qyMe8efOU6h07dhhhHIczwt0FYaNHj664fPmySxTrYJ0cyeHQhzqgLrpAD/S55+M0en7o0MvmvJ7qnJOTU6ldeh286Ub5iNfTc51w9qVbTws9er0RV1JSUvH4448b9YM+Fvd6cfsQDvFVtjfdyK/r99Q+rouvcwTIzp9sqjQTVDOALSIekcWz8fyPMGbfQq9mUX5l5wdqYP28Cdw33HLp378/4ZtxIoJAOCNQJx+q8dVhmAzg9uKhGuzMiwgCtQEBWbOb9CLIbmVDyySrBAkCtkWgTlt2vt9r296RigkCAUSgTpIdLrov91zfvQ4g3qJKEAgZAnVygy5kaEvBgkAIEZA1ewjBl6IFgZpEQMhek2hLWYJACBEQsocQfClaEKhJBITsNYm2lCUIhBABIXsIwZeiBYGaREDIXpNoS1mCQAgRELKHEHwpWhCoSQSE7DWJtpQlCIQQASF7CMGXogWBmkRAyF6TaEtZgkAIERCyhxB8KVoQqEkEhOw1ibaUJQiEEAEhewjBl6IFgZpEQMhek2hLWYJACBEQsocQfClaEKhJBITsNYm2lCUIhBCBsH5TTTBe3B/CvpCiBYGgIiCWPajwinJBwD4ICNnt0xdSE0EgqAgI2YMKrygXBOyDgJDdPn0hNREEgoqAkD2o8IpyQcA+CAjZ7dMXUhNBIKgICNmDCq8oFwTsg4CQ3T59ITURBIKKgJA9qPCKckHAPggI2e3TF1ITQSCoCAjZgwqvKBcE7IOAkN0+fSE1EQSCioCQPajwinJBwD4I1EqyHzt2jPr160eJiYnq2LVrV8AQ13WjDPwOJykpKaHhw4fT4cOHg17tK1euUHp6Om3fvj3oZUkBvhGolWRHsxMSEmjbtm1UUFBA3bp1q4TEokWLjMkA17qcP3+eBg8ebMTrk0W7du1o06ZNSjfKqAlZu3YtzZo1i06ePGm5uDlz5tAHH3xgOX2oE2ISeuyxx6hhw4bGgYkCEwYL2qTH4xpt5LyIZ+GJBmEc754XvzER4UDZSMeih6EM97q4h7nXTa8L6wz1udaS3ROwGAQTJkxQ0ZgIcIwdO9ZIDkv91FNP0bhx41Scp8nCyBDEi4sXL9KCBQuoUaNGVL9+/SCWZB/VmzdvVgRn4k2cONGF8CtWrFC/0Y84MCnHxMRQZmYmffjhh4YXkZubqxo1ZswYFb9hwwaVHvmfe+45RWzk79GjR7UbD+KfOnXK0Am9kydPrrbeQCuoc2Q/cOAAtWjRwoXgOqgYJJiVzbwBPV1NXGPgDxkyhO655x7LxWHgwWLNnDmTRowYYVhCd0tz6NAhSkpKUvG61YJF03+DdLrbj4GMeLaS7hZRt6Ig4dtvv2257npC6H/llVfo+PHjtHfvXj3K9LpDhw700ksv0Zo1a2jfvn20evVqo56mGQIUiBeobN26lXr16qUwqapa7jfGFf3A4i2O+wtt5v7kvua+QhqE1Tmy5+XlUXR0tKmbDvcda9mDBw8aLjwsB8JDIQMHDqRWrVr5VTTqi06ePn066VZQtzRoz9KlS2njxo1UVFRkmVCoCCbDli1bGtYVFhOkhqDcGTNm0NChQ9U1iA8rWlXBwG/Tpo2qnxUdAwYMUGkx6Hv27EmYAIItDRo0UETHxApSVkWQD5MU+gIY4mCPA0TV42AA4K3oey6YUKdNm6b686uvvlJLTD2+b9++1LZt27pFdoB4+vRpAmDZ2dnKTV+5cqVyueC+f/fdd3T06FE6e/as4cJ3796dZs+erTqgKh1pxzxNmjRRbi9IigMDCxbUqmAAsput52ELnJKSogdX+Rpkx8Sii+6tIB51YUFbMNF06dKFnnzySQ62dP7ss88oLi7O8FhAEKuCCRYk+9Of/lTJU/KlAziCzJiMedLkPBiviEObOA59BSPw5ZdfcjLVXixjkKZ169Z05513qrHMCTDxo451zrIDAKzHMeAhnTt3pq5duxqDF5tuaWlpjBNh4F66dImuXr1qhNXlCwwaDDgmRlWtmRUMMdixFtZF91Z0C4g0sGYgHFzarKwsvybo/v37u1hWGAR/BF7Ezp07jXH09NNPG9fe9Jw7d44qKiqoWbNmHpPBu/EmMEggOQQTINrOngHCHn74YRVXp8gOILBeP3HihGq8+x8GHBZexDMCsEIgGtxOWJ5gER799MUXXyhX3
nNtbsSgPhjksHqwssjHm3S+8vqK90Q2eB0YU7rgN/Y0QGAQ2Zc0b96cmjZt6jWZ7nWZTYBeM2uRdYrsaHefPn0oJyfHWIdjww6Dqn379sraY4bWBwmuEcaegIadrS+xRsPGEQaHv4LBhXw4sAbHZp6ZYGDrRMBk+e233xqT6eLFi0036LBZhLy6C+6uH+7t+PHjFXl1K+Wejn9jCQGCw31Hf/Fmndlyg/NYPbu3i11vtpjueuBiR0REEIjMAq8Dewn65ifi4Hr/4he/MPVEgBE2/jChcjvQTvQP9if8lTpHduyyp6amqg0cPHSzcOFCtX5nMo8aNUqt6/mBHACq35rzF+DqpN+yZYvaeMH+woULF1Q9rd5v53UzBhMGDe/Q+qoPiMVuOvJiUN19990qG8iv78QjHtYNrj0EJBs2bBjde++9qkxMOFgv+iNYK6O+WCZgrapvLEKP+5odXgWIhE0rEBx1gKD9mIgwWVVlwtPrDJ1Tpkwx2sV1A04gofvzASDnO++8Y6yzdV1m12gjcOS+0idCYAsceNmEdvL63EyXt7CIoqKiisjISDUTeUtYlTi4MjjKy8vp+vXrdO3aNXUNKxoI8fTeeGy24f703Llzg2aRsaM9adIkdc8eD9qICAJ2R6BevXph/Z0Iu+Mr9RMEbINArXXjcQsN91rhjuuPu1YXeXgNeCYeulGGiCAQLghEnDlzpiJYlQ2VGx+s9oheQSCcEai1lj2cO0XqLggEAwEhezBQFZ2CgA0RELLbsFOkSoJAMBAQsgcDVdEpCNgQASG7DTtFqiQIBAMBIXswUBWdgoANERCy27BTpEqCQDAQELIHA1XRKQjYEAEhuw07RaokCAQDASF7MFAVnYKADREQstuwU6RKgkAwEAjrf3nDy/5EBAFBwBoCYinGI80AAAAwSURBVNmt4SSpBIGwR0DIHvZdKA0QBKwhIGS3hpOkEgTCHgEhe9h3oTRAELCGwP8DnGSiFzxU+8MAAAAASUVORK5CYII=)and then select file menuu![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAX4AAAFkCAYAAAAnl1ABAAAgAElEQVR4Ae2dCXhU1fnGX4mGghFqFMQKscZQJHEh0poiBNGwu8QloAhYiSigEK3ihv6rWI0oaBGoK8YF0Aq4xA0Qo2BYhCpxA6XEqEQrCsZCEZoo8H/em9zJzTCZ3JDJOUl4z/MkM3Pn3PN95/ed+57lnpk54Msvv9wDT9qzp8pLzzt6KgIiIAIisC8EDjjgAOc07yOf84+au3v3bufx559/dh47duxYrRlXo/no/vH8X375BaWlpdi5cye2bt2KzZs3o2fPniHLOZCZ3eQWyNfe5+77ehQBERABEfBPwBV6nuE+52OzZs2c11FRUY7o832TmnvgUUcd5b8WyikCIiACIhAxAps2bQqM+t3Re8QKD1PQgWHe01siIAIiIAL1SGDXrl1O6ZwBmBzxN6vHOqloERABERCBMATKysqcpR53jd+U+GvEHyYoeksEREAE6pMAb+YyHXjggYElH/deQH3a1Yi/PumqbBEQAREIQ4Ajfne5J0y2iL+lEX/EkapAERABEfBHgKLPZR7+caRvaqlHI35/8VEuERABEYg4AQq/KbH3Oi/h99LQcxEQAREwSMD0TV23ahJ+l4QeRUAERGA/ISDh308CrWqKgAjsG4HPPvsMl156Kf773//WWADzMC/P8ZusLPXYMOoXiPKJgAiIgG0Cd999N1avXo0//elPYcWfos88zMtz/CRb+qsRv5/oKI8IiMB+S2DGjBk47rjjnFF8deLvij5H+szLcxpykvA35OjIt0ZLgLs1+C2JSv4JcPS7atUqLF682PmmSf9n1m/OQw45BE899VS14h8s+szLcxpy8r2Pn1/3+fDDDyM/Pz9kfVJTUzF69Gg0b9485Ps8OHv2bPzwww9h87311lt48803cfPNN9cbPDawlStX4tlnn3W+unTs2LHo0aNHtX7rjcgTYAy++OIL5Obm4qOPPnKm0L/97W/Rr18/sC396le/qpPRdevW4S9/+QvuuOMOJCYm1qms2p7Mb7x98skn8c9//hMTJkzA0UcfXdsimmR+8uCIOVS68847cfLJJzsx+/bbbx1+8fHxjlYwP7Xn17/+dahTjRxzxZ/+c1TPRwo8k3uMI/3GIPr02bfw8+tDTzzxRBx55JFOb8wgMv3hD39wPm7cpk0bME9jSBs3bnQCdNhhhzn+h+us6rs+7Ojmz5/vCET79u0jZo6jEK4zUvSGDRsWsXIjURBHwy+88AKef/55p3M/9dRTERMTg3/96194/PHHHcG86qqr0Lp160iYM14GP4hD31u1aoUWLVoYt19fbaq+K8Lrke01Li4OHTp0qG9ztS4/lPizEHd5p7GIPn32Lfz8LonTTz/dgcXRP7/kn+mCCy4IO8p3MjWwfz/99JMz87j66quNjwYbGAor7ixbtswR/TPPPBMXXnghoqOjHT84C/j444/x448/1ttsz0SFOQAaNGiQ82fCXmOzwbhzJhaqU/zb3/4WqM5//vOfwPOG8iRY/OlXYxrpuxwPjOQXAhUVFeGZZ57BJ5984vzQwCmnnIKhQ4eCswE38eL+4IMPMHfuXHzzzTc4/vjjcfHFF4PTulDpf//7HxYuXIjXXnvNWQ7o3LmzIxadOnVyPuIcfA5Hk++++64zovzqq6+cGcr555/vLOUcdNBBznLTSy+95JzGpYC2bduGHG3TT9aHy0GsDxvpaaedhvPOOy8wEmXnx9G6u/zF+g4ZMgRHHHGEUz5HXm+88YbzKzgLFiwAp7AnnHAChg8f7tSXS1+uL9dcc03AF/5Ggtc2G1ufPn1w9tlnO364o3lOjf/973879eWsJT09HQMHDsT333+P7Oxs55GjaNo499xznZF/SUmJ4/OSJUscH7mskpGRUSVGwUwj+Xr79u2g7T/+8Y+OXVf0aYNtkbNKb6opnszL7zsh51dffRXbtm1Dr169kJSU5C3G+XRkOKZVMgMIx6mmuLIsxpbLTVyy5K8hMR5nnHFGlXixPXHgxPh+/fXXNeZhucHtMrhtVNemfvOb3zgzKV53vC64/MTrgnFoKDN1Cj2Xi5m4tBMqUQ9YBy6l8bvsOUPIyspyrm/3q42XL1+OqVOnOvwPP/xwXHLJJU7br+vyYSh/GuuxqFtvvfX22jrPi/G9995zTvv973/vLPXwQnniiScc8briiivQtWtXZ1sT128pULzA+fydd97Bhg0bwF6fFyinSa+88ooTQE71uO7LC5SCxJSTk+Pc8OEFQvHbsmWLI1y/+93v9hIrdwmBjYJr9hQ02n3uueecstgz0wYvAnY+I0aMQN++fUGhZafgTRT7e+65x7HBjok/hZaXl4dPP/3UqQ9F/6677nI6Iwo5l7xYJu9PULw4zWddFi1a5HRwFOXu3bs7F19hYSHIjR0EBfu7775DZmamM6OiL+QzefJkp3PgiPiYY45xOj+KPM
tmPTlqXrp0KbguPnjwYCcG5MjlomOPPdapEzl26dLFqSfr3rJlSzz99NNOR8Z1SfpAEWZHcdJJJzlleBnUx3N29hToc845x6lXOBt+4kkhfPHFFzFv3jxHWNnBkdPLL7/sdAicpXLgwbiFY8oZrZv4jYnhOBUXF4eNq9vW2UbYjjlD5uCANy4pRBwccMmUHQjFi4MB3giuKQ/baE31YF1DtSm2RYoh2wPbFDtLMuIAyjswcxnUxyPjwkEIr13GJfiao6izbTCdddZZzqP3NWPNewGzZs1y2g+vSy7bPvbYY057ZxtnZ8t7duwEOOviOVxWZB05yKxrCr6Ry3hSw3g9ctBV22VjMmF7IQv3u3roO3WqNon15KeAec24P8G4Y8eOau8vVbb22lgJkTc2NhY33XST4zwdZ6LQTJo0yRnNMChMBx98MP785z8HRvgU1Pvvvx9vv/124JiTEXCCyM7ihhtuCLzHkZx7k5kNyHvBctRE4aWo9e/f3/ElOTnZEXp2BnxOewweEwU11I0/XoQUbM4u2IhYDybaI1gGl3Y4C7jxxhsDQWJZrAtnJ5dddplzDv0bOXKk01nwANeyOZ3l6J++UKgZeJbN57T9+uuvOx0XOxR3NEahmDZtmtPQ27Vr55TNGQbt0D82ai6RsMNKSUlxpp8cDbJhUliY3CUudh7dunVzGhsfGS/XjpOxHv+xfhzZsWOsKfmJJ9mxQyYrN+bsxMiZFzyTH6aMhZsoityEEI5TTXF1y/I+cjAyatSowBIHO222Fw4IXPENl4fXUE1tI1Sbog9sG4wxB1tsa+wAOBK2MQrm9cE/N5FzdSN8Nw8f2bY5uOGsgJ0nhZL14LIRj3NGRbHjrI8fouJgimLIWWZthdRr130eLPpc02dyb+7ykcd43TX0FDHhZ4/DEQ2XRihqvDAoZOx9CN9NFFOOuN3EDoOiRYHnBepNHKVwhE/hD04cSVGEvcJPu2wMFDo+MrmvGXiO1LwXeHCZ7msGmKLDUYcr+nyPFyoTBZQjDfpN/93E55zdcDbk1oU3qby7Onih8T33e7jdc91H1zaXq9zRjvseH73nsRN0/WM9WTZHTeQSKjEvf3z5kUcewYcffujMyjjV56jPVGJnyd0ZvDhrSn7iyTq5MXZjToHjDIzLa0y1Ycr8fjjVNq4slwLnXdfmzIzXyPr16wPCHy4PZ4Nsl37ahlNxzz/GmO339ttvdwY7nO0x9jaE3+NWrZ4WFBQ4esI98sH75MmN7Z7Xd1pamjNI4mCP7YDLzbw23fZRK6MVmUOJvivwFPvGJv4RE34uT3BZ5qKLLgrcBKaYT58+fV84B85JSEhw1kkb6w4PTuE4Kq1t4qimd+/eIU9jI2TyikjIjEEH2fC5rswRMaen7Kg5deYokzOH2k5Tg4r39ZIjW4omxYt+1LYOvoxUkykcU+8pNXFi3n2Nq9fOvj73Ww9v+YceeqhzHX355ZfOIIt75bkEev3114ec9XrPjfTz6m7u+r2Zy5kvOy5vYswoxJy9cka9du1aZ/mF9eTInzN3LkF7B4re88M9Dyf6PI92G5v4R+wDXByJECqnoxxxMQBc9w8e2XGNkutabmIeTuE4Kg4WAS7FsNzPP//cze706u+//75TduBgxRMuh3DmwZ0hfGRyX3Pq7neLGAPJkdiaNWucqaNrh2vxFExe9NxyRr/pv5v4nOeEqoubJ/iRo0suLbgj+epsc/RLe9WN5oPLJX/yZNnuOXzkGihnLBztcVcTLwqWy/VoE4lLMFxuoPDzxjj9cxNjxdEvL2wuTfiJZ6g8rCe3G7uzrtoyrS9OwbNatmu2b++MK1wev/UIblPkS1HlPSi2W25Q4MYG3mNie20siasF1BiKOR85c+R1w2Ui3jti4qz+gQcecK5RbgmeOXOm07Hxhi/b/b4kdhq87qlt1S3lMDZ8j3mYl+c05BSxET/FjjexuKuHI1XOAPjhnOBEAZ4yZYqzy4SjeK71MWi8URPcGxMiBYqzBu5o4VSVgsHGStHyLrPQDsWathkA3rDkkg97fk75edOvup1DwT5SMPlBIq6/shFx7ZAdGOvDESt34NAOl3R4D4M3bpm4/srRAUdkwXUJtuG+5hIUufFmIutLUXRt0z5vaLu2+T6XB/wkjt65Y8ndccT4cJTEm6AUefrMdXby5CPFwlTiDIPx4T5+3uznvQp3Hz8HBuzwKdp+48mpvRtzLn/xu1JYrpu88ayOqXfQwTXh+uDEAQtvsLI9sc2zXXITBIXfHUCEy+O3HqHaFMWIN0H5mQleUxz5cyDjbtF2WTXkR7Zf6gBv5vMDmOy4KPRc1ydDdgwc2LAtcHcTr3dej+xcuRTD+4v7krgzi5+J4fISBb665Io/RZ/nNOQUMeEn5DFjxjjTR25do0ARdrD4s9FxHY433jgK4Tr5rbfeGlKU2dB5Y5QCwA6Co0DeQKXo87zgxJkGt6jxHgJ3LPCGD0eEl19+ubOcwff9Jtcv3rPg1JG+eLdzstPiqIn14IeOKN4UMPrmbuf0Y4vcuMuCIsiLn2vwoWxz9xHvOdAPd6knXPnseHiRcNcIOyTu/KHPV155JebMmePswOL5vCfB3U3BnWi4suv6HuPAHVcUPbaPFStWOHVix86btBRyd+3ZTzzZqXM5jW2EYkp+nNZzRuGmmpi6+fhYX5w4uGE7cW9kuts52em6wh8uD33zU49QbYqdLROZcDcRrwveIG0swv/G4qWY//yr+NWvmuP8Cy7CmvffdQZ11APeMGeb4XIPB0oUaM4a2Qlwc8O1114bcmDpAPHxjwMudiZ+kiv+fvLazHPAzp07y9dEbHoh2yLQhAlwxMnBEDs7jvZDJT95Qp23vxwbfNHlGH/tVc5yzdOz5+KJxx9oElXnkiRnuxzQcXmWG2E4MAq3CcW7jM3n/ON57jZOzpb52RHO7DmQDJUiNuIPVbiOiYAIiEAkCLz77vvo3v0P2LVrN0aMvBplZT8jOrrqZ28iYWd/KUPCv79EWvUUgUZMgDfcORLm388S/TpHUsJfZ4QqQATCE+A9qgcffDBsJj95whagN0WgFgQk/LWApawiIAINg8DYcTc7Wzb/dMng8u3jUc2QlGjug4gNg8K+eyHh33d2OlMERMASgX79znD28D/19Fx89q9C7N61G2ef1Renn97d8UidQPjASPjD89G7IiACDZDAoIyzHa/GZd2Mb4o/xJYtP2LgWUPwyqtvOMfPHJiGrHGXN0DPG4ZLEv6GEQd5IQIisA8EDml1iPOZj/btj8Q333yLlctfx7Zt/0XvvhkS/jA8Jfxh4OgtERCBxkOAncDBB7d0/jZt+r7xOG7B04h9V48F32VSBERgPyRwkGf/ftu2h++HBOpeZQl/3RmqBBEQgXogwA9phUoF7+UFDr+5aF7geVP5NG+gQvX4RMJfj3BVtAiIQO0JLFz4FoYMHY3fde6Guyc94HxK11vKkUeW/7Qpj7VqVfmlaaf3Kt/R482r56EJaI0/NBcdFQERqIYAvxnT/YW5arLU6fBPOw/EP559Ak89MR133
zMNx/4uZZ/KC/XrerUtiF/01hSTRvxNMaqqkwg0ZgJ7+EM30c5o/u67bsHzc3Pw22PiGnONGpzvB7pff9vgPJNDIiACDZIAv8q5qKio3nx78aXXMWrMdXj7zRfAm7c9e/4RC159ttb26tPHWjvTwE7QiL+BBUTuiMD+TuC8cwfi0ksuxKWZWYH1/drs3tE3d9bcgiT8NTNSDhEQAcMErh9/FbqclIQJt9xVa8vff78Fbdu2qfV5+9MJEv79Kdqqqwg0IgLZd93ifB/PrNmVWzb9uL/y3ffwx5SufrLut3kk/Ptt6FVxEWj4BDJHXIwbb74T69cX+nZ25uOzMWLEEN/598eMEv79Meqqswg0EgInnZSEO267HleMHu98B09Nbj/+xDM48YTOGvHXAErCXwMgvS0CImCXwMiRw9A7LRWnnX4uPvxwbUhn+Atd3PN/0813YsSIi0Pm0cFKAvoAVyULPRMBEWigBP7v1utwzDFHY8BZF4M/vpJ+Tn9nn/+WzSX4+JN1mP/CqzjlD8n4dO0yHH5YbAOtRcNxS8LfcGIhT0RABMIQGDY0AxkXnAX++Mpf/3qfs/Rz2OGxOOH4zph631/BZSElfwQk/P44KZcIiEADIMAPnI664hLnrwG402hd0Bp/ow2dHBcBERCBfSMg4d83bjpLBERABBotAS31NNrQyXGXQOE3i5HQ+kj3ZfljzPFVX+uVCIhAgECthH/jxo2YMWMGxo4di7i4ffi2vPenIe3JozFnejrauS54juHFcRg6w/M1qC3ikHbZLcg6LwExAAoeSENO3BxMPy9wtlsKSlY/jOzJuSgoKUN0XBpG35SF9E48y5PWPYz0cRuR9UI20lpXHme541+ueN0iFslnjseEK1IQG1WZJxLPtmzZgvXr14OPhx9+ODp16uQ8suwdO3agZcuWNZvZlItxQ6chQIn+ZkzA7cOTERNVzihQF7e0c6Yg72pgWtp45LrHKh7T783D4I1VuUfHJmPQLbcjs4tDvdrzsrrC4X7bpFys21oGnpd+/QSMPoW7KgowLS0HR8+ZjnRPuDa9OA534ZaQMQxyzdfLjds346/vzcJTHYqr5k96Bmge1BlUzbHXK6cdLB2E6fNGI7FK7Dchd9xQTENWoO1uX5+LaZNzkP/FdqB1ItJvmlhRbwCeNh2ouveYE8PZSJo+D6MTK92g/fweeUhd5mmPlW8DiZX2yw+TcWVMq/IH4KOtONfTaQUYP6gA6a9OQGqLSoNlS7Mx4LVkzLu3HWZ77Lg52HbYBpQaHwHfwk/Rv/feex2B4uMNN9ywb+JfEyNHpJLLc5WsQ85t4zCl9TzcfkaQiHvLWfcwRt1fgtFTczHlqGiUfTEP1115M8pmTsegoyozrlu6AGgNLFhZgrT+Vbd8JY6t6FDKNiHv/nEYOikL825JdTqcyhL2/Vlubi74x0TBZwdAoU9PT0eLFi3www8/OM/9WUjHlLwsOJS2FiLn1nGYElfJKFCXKoUVAEhEVpAQM8umjQA83Levz8HN10xB3j9ur+ggQ5+H9TkYNakEox9cgOntgLJv8jBl3HWYN/UJDNqHcUEVd32+mFTwPHJ/+BkvtmqF81pvqzxr09PA0TdWvvb7bGsuXlqZicQe0ZVnbMzHS4XRiE6oOFSSh+zr3kTSvU9gQmIsULIK064cgSnXzsH4U8K008oSAZRg3v1zMPCRoYir0skAyVfnOR11dZ1nlWI8MXX4XzcU2WPmYcJprh/h24pTVmwK0rpOQ/77ZUgN1LsMq5blIfmM0YjFV9W2naq+6FVjIeBrjd8r+qwYR6cUfx6v1xSbiIFnJCD/4w1hzJQh/8V5SLoiC2lHlV+s0ccMwi1XALNfC4yLgV3r8M7iFIy/ZQA2vLUKJdWVGN0OaddPRPr7OVgQoepR8N944w0MGTIEOTk5uPHGG51Hiv6zzz7rPK/OnRqPt07wwajGUqpkiOk0EL0T8rG2hk/Jb/9mI0q6pSGtYlgbfVQaho1JBkq2Vymvvl7kf7sWz2xY6hR/2zfNsXuPx9IPC4H/srOrXUrrk4r8ZatQ5jlt3cLZiOk/AK7ub3wtB19lXI2hFH2m2BRkXZ+GvBdXwX/Nk5HSZjYmv7zJY6luT8l/wq3pWDVrAUI23WrbSixSzkiqWu+dq5D/VgoGdKs6QKqbhzq7oRCoUfiDRd913Ij4by3E62+tQ0piR9dsiMdCrH0/EcmJ7ginPEu7TknAxxsQuKw+eBO5nVKQ3LUnBhS+jvzAGyGKjEpEymkb8c4H1XYPIU4KfYjLOhT+c889F3369KmSKSIdp8OoEKknhGNUxWyNL7avfx1vFqYiyVW6as6I6ZKKlLem4PYX16FkV3mmuD5ZGOQsEVVzUgQP3/PBC4HSNpZFYUWzoF9q+vapwPt+n8SccS7SV+cizw09BwwLO2Jgt8MqBgvbsWHdJqScEAQnIQnJq9ci3BClqg9xGHT1aODxaZW2qmbYt1fHpyDti3dQsDnE6WHaSmy3AUhemo9VO8vPK1udj7xT0pDiWRINUaIONVICvpZ6uKZfXFzsjE7denbv3h38i3h6eTzSvOvtGVPCL/OgFGVbQ3gReyQqVxvKkL94AZLPyEQMYtCz/0ZMXroR6RdW5ghRArDLO+4LmaPGg5999pmzlBMs+hzpL1++vMbzQ2fIxfg0d7U+Fqljp2O8Zyls3YyhSJvhnuldplmHaUPTMM19y7O8Ay/3NqnImjrecx8k6Dx3rTk2DdlPx2Leo5Mx4tFNiE5Mx/jrRyMlsLAddF6F3cSxrgP7/jhnw1Is+7ZyRndsq3bokTwJWDscKP26vODtHwJbXgcOH+jb0PadcTi3zwbkrC7BgP6xKFv5EnJPSUduVAGmOAt/27E91LC+dWzlfSu/1tql45ZL38SIh/KRckuq37P85avoiIHwbSVQWOsUpHWZUrHcA2eZJ63/eM9SZ1As3TYQKEBPGhOBGoW/upu4vDl53HHHRb6uXjHyVXpzRIcalZR8i42ouLnHaeviZKSNKZ8VJJ46ACX35mPjhUM9nUMIY1Gedd4Qb/s5xLX7UAy57MO/fUvuui1vOo7CV3FxngsUCL3GT0veTiDIssudNwSv/Apxcd4ZVJjz2vBG8BMYtKsMG1c/jLtGjsOmme4N3b3PK7+5G2S7li937dmNSQXzq5x108kZ5a+PvBT48s7K97jWf3h/ADVObp1zvi3ZjsQzh+GHO/KwsX86vlqWj9Qe4xENd9koBjFeNK6lrSWVs0v3mI/Hduddj2GjxuHh1clI85HfdxbeN3DEP3xbqSwvBqn9UzGFy1xdgfxlaUi91tv+945l5bl61tgI+LsaIlWrKCC6LGgUzVF1M8DbxGpnLgFJXdchP2hZZtP6tShLiHNGYduXLUAeViH7/DSkpaUh7ep52P7NXLxeOWCsanLXOqxaGoeeXeq+vnnYYYc5s6WqBqq+4qyAS2e1T+0w4MIULHhywT6JTkh77QZg8CkLkLMw3FpY+ZkbF0/DvA8qhr9R0YjrloXRfdbhzZU1nxvSts+D9xQ8j+Lt
WwK5e7c/CYPiK2afsWlAq1MC76HsO6C2Sz5xqTi3xUvIX52H3NXpOLebt3XGoGNiO+StDmo8hWtRkHh0+VCjNu08Kg6DxqZh1f05yA81k6isib9nn6xC3jE9kbzX75DU3FaiT0lF6rICrFqZj/zTUpHi2eHjz7hyNRYCZoW/UyoGbJ6LuSsrFlDLNmLenDwkpqVg3yU2GqnnDcJXTz6MvG/KOxXu6rnrUSAzg/teSpC/eBWSr5+HvLy8wN/0DGDB0qCLl1Hjrp7JtyG3ayYG1LAS5CfInBXt2bMHixcvDpmdx6dPnx7yPT8Ho7sNw7CdOXjpEz+5/eSJRuqFw7B91ktYF1guCH1eDDYh54HZKHDXw0tWIX9lNI5ss+/RDG2p8ui3O37EvZ61fb5z4gHpeDq/LPD3+ndD8dHPnr+Nu7B1q+tkZVnVP2uH1DOPxNxJD+OHCwcGbe0E4s7MRNJrD2HOuooyuatnch7ShqeVL/fUsp1Hd8nE+K55yH2reo/8vMNdPdl35iJl+ICQM9ka20qLFKT2WIDs+/MxoE9qHQZjfrxVHpsEalzqcZ2jgHFHSp1Si2RkTc1E9h0jMODW7ShrEYeUi+/D3ecEFoVrLL7q+rW7rDEaj1z7MLKvSUe2u49/6t1I51bOklXI+zgN6ROrihGn87Gj3kTBFeUbqQPl0qeM8ZgzPKXK8kmNjlWTgUtivLHrbuX0rvVT9LnWn5mZ6W8PfygbUXEYkNERg55egMH3DnByBOri5neWcdwXPh6PGYDBCYOQs3gwpnCVBEHruwCcPdx9svFI1BRMGzUA48m9dRxSh09HVmBLoA9btcxyZMtDce1J6bj/w/J7HG13nI6l/4zDUvzsKelYAPwrTxefehBObO0dtbvvVP8Ye1o6Uh7IQcduIXr/2DRMuG87pk0egQGBffxPIMvdylnrdh6DlDHjkfbW7dU7VO07lbGJjkvBoGvnINP1I/icEG2lapZopPRIBZbGILVL1XeqbQPaxx8MqlG8PmAPh6NhEoWJN3bDJe7pVwpPwN3SuXPnTufeCHf0EH2o3T7hS9K7O38pQ5d5V2PLT//D8Vsm4cDdoRbdyznFxhyA2Ve2RHPfQxzxFQFzBPLz8xETE+NsAGnWrBl2796NqKgodOxY/S49V7L56P7xvF9++QWlpaWgxmzduhWbN29Gz549Q1amxsuhQ4cO+z4aDWly/zzIPfvcBcX1fN7w7d27t9MB+Pq07v6JrNpatzgwGjckn48H3vgprOizgGHdoyX61ZLUG/srgRqFv0ePHvsrm4jXm8s+4hkZrJcd1wcpMTXchABwfPugj8VGxrxKEYFGTaBG4W/UtZPzTZqARL1Jh1eVq0cCZnf11GNFVLQIiIAIiIA/AjV6aAwAABVkSURBVBJ+f5yUSwREQASaDAEJf5MJpSoiAiIgAv4ISPj9cVIuERABEWgyBCT8TSaUqogIiIAI+CMg4ffHSblEQAREoMkQkPA3mVCqIiIgAiLgj4CE3x8n5RIBERCBJkNAwt9kQqmKiIAIiIA/AhJ+f5yUSwREQASaDAEJf5MJpSoiAiIgAv4ISPj9cVIuERABEWgyBCT8TSaUqogIiIAI+CMg4ffHSblEQAREoMkQMPq1zMuWLcOKFSv2gscfexkyZMhex3VABERABEQg8gSMCT9/arC63+zlr1Lt2LEDl112WeRrqBJFQAREQASqEDAm/BT2cGn58uXgX3WJP/au3/atjo6Oi4AIiIB/AtbX+A877DBf3nJWwD8lERABERCBuhGwKvz88fHJkyc7P0Jet2robBEQAREQAb8ErAk/Rd9d0+cjXyuJgAiIgAjUPwErwu8VfbeKEn+XhB5FQAREoH4JGBf+uLg49OjRA9zl4018zeN8X0kEREAERKD+CBgXfgr8Pffcg2effbZKrfiax4M7hCqZwr4oRfGyJVizuSLTrs1Yk7cSxeE3E4UtUW+KgAiIQFMkYFz46w9iMRZNHIPH3q1Q/p/WYO7VY/Dy+vqzqJJFQAREoDESMLaPv/7hJGDk4k8x0jXUqh8mfdLPfaVHERABERCBCgLWhL+4uBj33ntvIBB8rSQCIiACIlD/BIwJf8uWLavUhp/kre0HsoLLqFKgXoiACIiACPgiYEz4uVsnMzMTBQUFzvfy+PKuIhMFn1tAteOnNtSUVwREQARCEzAm/DTP7Zr8UxIBERABEbBHoAnt6rEHUZZFQAREoDERkPA3pmjJVxEQARGIAAEJfwQgqggREAERaEwEJPyNKVryVQREQAQiQEDCHwGIKkIEREAEGhMBCX9jipZ8FQEREIEIEJDwRwCiihABERCBxkRAwt+YoiVfRUAERCACBCT8EYC4dxFrkB0fj+z39n6nTkfey0Z85ly43zxdp7J0sgiIwH5LwOgnd5ctW4YVK1bsBbtDhw4YMmTIXsd1oA4ENs1F5qkL0X9FDga3q0M5OlUERKDJETAm/PyBlZycnJAA+WVt/NI29zd4Q2bSQREQAREQgYgQqHGp5/vvv4+IIQp7uLR8+XLnS9z4RW6h/rxf4VxtOV8vwm2DU9A5Ph6dT83Aba94vup512asnD4GfbvGIz6+C/qOmYm1rkscHcdnY+7rtyGj4v2M/1uE4m1rMfNyt7xMzPy4tMI0l3IyMfOVmcg8tTPi4zsj5epZKHTLC+FgcV52Zd7LZ2LNthCZgg/tqLTf5YLbMPejoJM2r8Tfx/RFl/h4xHftizGPrgU93Dw/E/Gn3oQlWIKbTo1H5vyKxaFwDIJt67UIiECTJVCj8D///PP47rvvrAPgrCD81zgXYuaIa1B8/hx8sKEIH8wajOKbh+HvH5e7Xvj4UGR+dDoeXFGEog3vYOLRc5ExeaUjlOU5ZmHh5sF4aEURPn3nDrR/8Rqc3X8GmmctxqcbPsUrWcB9V83E2gCJJZi7JgET3/wURe/Px9gd2Ti7SnmBjChdmY1hk4DLX/sURRtWIafbCmRcU9Na/TYsujkDc9vfgzc+LcKqhwejdGVuZaEoxMyLM/H+aQ9i1YYiFL09ER3mZSB7WSnaZOSgaMUk9EIvTFpRhJyMNs55NTPwFK+nIiACTZZAjcLPmk+dOrVBiH/4KGzD958n4NSTE9A8Cmh+bDomzpqEboeWj9I7XDQfq2YMRkJzAFGt0K13L5TOW+ER8uG4fFgS2jQHmrc/BxdeVIptAy/B8BNaAVHNkdCrP7p9/T7WbnK96IaRl/VCB/7MwKFJGJ41Fm2fWoiV7qTAzYbNyH1sJnrdch26HVpuO2noJRi+ZCFWhrtLu2khnnulF8Zm9UIH+tQmCcMzhwdKxa4OGDx3FR4aVF5ftOqG/mmlmPvPyq6pMnP5s5oZBJ+h1yIgAk2RgC/h54i/4Yt/EtJvbYUZ56cg8/9mYu6yYrQ+sRtObk+lB5ofXIq1j7pLPfGIHzwT2FFVpdlhuKk5734cWH6ue6zqY3PAe4ck8WT0QzG+/7FqLqAYhUuAWZlcEuIyUzziO2diFoD/7QrO63n9dSGWtD8ex7KzcNNB7hN2IM3RqnQtZo6
tWOqJj0fGo0Bp1Sp5TvDHoMoJeiECItAkCfgSfta84Yt/cyRlzsEH+XMw9pTmWPvUUKT0vwmLnBF6KdZMzihf6skvQlFREYrmBn6dNzKB/aXUs2wUXGQHXPdqhV3adv7quNumdA2yB3uWeoqKMP+KYLve1wYYeM3puQiIQIMl4Fv4G2wNXMd+LMTKZYXYdmgCTj57OCY+thj3dJqL55ZxPWUbCv9VjOGjBiPB/QXIn90TI/T4dTHWoAPaekfoTtFt0CG5GG8XeG4083i40T7fb5+AXl9/gs+9Mwivzz8WovDr4Rh7UcVSD4DSX8LVxQCDcOb1ngiIQIMh4Fv4jzjiCFxzzTXgY8NMn+O50VfivryKhfMf1+CTj5qj7SFcrmmO1oc3x6LcRSjeUYrSr5Yg+y4uttQlLcHMR5agmEsrpYWYOykb265IR7e9Voc6IP2yc7D27tswa135rpxtH89C5uC/Y22YZRm0648Lz16CGdPKbZRuXoO//83jc8vWaNtyEXIXFaN0Rym4ayj7H576tOmABHyO4m9LK5Z/6oOBx56eioAINBoCvoS/4Ys+b7D2wz0vjAQeOcvZzhnfewa+HzUfE/u1AtAK/W6fj+Gb/oK+x3dGyrVvI2HUSCShFNvCiW/YMHZDv05rcdvpnRHf+WzMajMRj2edjL10n9YHTsUbd3dA7sgUZ42/5+2F6H/3SCSFyhyw2Qr97p6PwV/fiL6d45EyOhdt+qcH3kWrfpj47HAUT+yLzsen4M9LEnD5ZUnAT9vKl5yiTkb6re0x64LO6PLgmnpiUOmOnomACDQeAgfs2bNnTzh3H3roIZx//vl1HulzK6avvfhhnLnhhhtw3HHHhclh6i3u45+BhLp+Ktb5dC332wcnbsOs4z2A4CL1WgREoMERyM/PR0xMDFq0aIFmzZph9+7diIqKQseOHav11ZVsPrp/PO+XX35BaWkpdu7cia1bt2Lz5s3o2bNnyHK8+1JCZrjgggvQtm3bkO/pYB0JtBuMnKLBdSxEp4uACIhA7QjUuNQTKdFv2dK9q1o7B725I1GGtzw9FwEREIH9kUCNI/5IQYmLi3O+iqGgoMD5Xp7alEvB7969O1hGw0gnY0JR6O8dahj+yQsREAERqJ6AMeGnCz169HD+qndH74iACIiACNQ3gRqXeurbAZUvAiIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb
/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCUg4TfLW9ZEQAREwDoBCb/1EMgBERABETBLQMJvlresiYAIiIB1AhJ+6yGQAyIgAiJgloCE3yxvWRMBERAB6wQk/NZDIAdEQAREwCwBCb9Z3rImAiIgAtYJSPith0AOiIAIiIBZAhJ+s7xlTQREQASsE5DwWw+BHBABERABswQk/GZ5y5oIiIAIWCcg4bceAjkgAiIgAmYJSPjN8pY1ERABEbBOQMJvPQRyQAREQATMEpDwm+UtayIgAiJgnYCE33oI5IAIiIAImCXw/zbqyaAswoLjAAAAAElFTkSuQmCC)create a new folder name examples, and copy the ex1.csv to the folder![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAVsAAACKCAYAAAAE2unTAAAgAElEQVR4Ae2dC1hVVfrG36KBNAdH1LJJcEIcFaxEm85gYtpREUtRA8qULqSp5a3ULLVBuhBeM7XREumCZIFm6OQlozBUwhLsAmWSJehEoviXIQ2K/D/vhn3OPodz43KAo996Ht23tdflt9Z+17e+vTj7svPnz1+ABCEgBISAEHAqgcudmrokLgSEgBBwIQLffvstHnjgAfzvf/+zW2rGYVze40gQsXWEksQRAkLgkiDwwgsv4MCBA7j//vttCi6FlnEYl/c4EkRsHaEkcYSAELgkCKxevRo9evRQrFVrgqsKLS1axuU9jgQRW0coSRwhYIdAVVUVzp8/byeWXDYn8Msvv+D33383P91sx3/+85/xxhtvWBVcc6FlXN7jSLjM0guyiooKrF27FpmZmRbTCA4OxuTJk+Hh4WHxOk9u2LABp0+fthnvo48+wocffoinnnrK4QJbzdDKhQsXLiArKwsbN25ESUkJpk6div79+1uJLaedRYDt8MMPPyAtLQ1ffvmlMkX729/+hpCQELA/XXnllQ3KOj8/H//617/wzDPPwN/fv0Fp1fVmisXrr7+Ozz77DPPmzUOXLl3qmsRFGf///u//lOef7W0ennvuOXTv3h0PPfQQRo8ejVmzZmHr1q1ISUlRtOcvf/mL+S1NemxJVFkAWruqRVsXoeW9V1iqgZubG2688UZce+21yqjDTsTwj3/8A1dccQU6duwIxnGFUFhYqIxU7du3V8pva4Bwdn04uGzatEl5IDt37txo2bFj0G9EkRk/fnyjpdtYCdHqe/fdd7F582ZlUO3Xrx/atGmD7777DuvXr1dE6tFHH0Xbtm0bK8smTeeyyy5Tyu7p6YlWrVo1ad7MzFn9qrEq0rt3b7DNtYEDLVl16NChReqJauGq4sotQ32FlvdaFFsK6qBBg5TEaeXSImS46667bFqzSqQW9h+nKbSwZ8yY0eQWTwtD0WzF2bt3ryK0d9xxB+6++264u7srZaG1+9VXX+HMmTNOm9k0RaVpeERERCj/miI/V8vj5ptvVmaUlsr9n//8x9LpFnHOXHBZKPpo62rRqpWxKLbqRVvbo0eP4q233sLXX3+Nyy+/HLfccgvGjRunjFLqfXyYDh06pEwNTpw4gV69euHee++Fr6+vGsVk++uvv2Lnzp14//33lWlmz549lYeT0w1aD+aBFtOnn36qWE3Hjh1TLPExY8YoboI//elPiivjvffeU27jFPPqq6+2aFWynKwPXQ2sD0fc2267TZneqNYWBxxapaprhfUdO3YsrrnmGiV9WhcffPABBgwYgB07duCnn37CDTfcgKioKKW+dKuoZZk5c6ahLNddd51J3mzgIUOGYMSIEUo5VKu1T58++O9//6vUl9Z5WFgYhg8fjpMnTyIuLk7Z0lJkHqNGjVIs3NLSUqXMGRkZShk5XQ8PDzdpI3OmjX1cXl4O5v/Pf/5TyVsVWubDNuUMShvstSnjVlZWKqz5oJaVlWHgwIEICAjQJgPzNjXnahIZgC1W9tqWabF96cqgS+zs2bNKm9x+++0mbcY+RYOFZTl+/LjdOEzXXj2s9au//vWvyoyB03I+G3Rt8NlgO7SUWen333+PSZMm4ZFHHlHKZt4mbGdqwbp16/Djjz8qxhLj6/V6RXP++OMPpKen4+WXX1ZmSX//+9/x8MMPY+jQocoM3Dy95j52W7BgwUJbhWDn//zzz5UoHKFo9bJjvvbaa4pgsHJ9+/ZVlkDQN0NR4APF/U8++QRHjhwBLRo+EDTBt23bpkDj
tJ4+PIocRYAhMTER2dnZSoek4Jw6dUoRC0Kk60Ib1KkpfWX0wVJEmO8777yjROMIxDzY6Sj4Dz74oNIIFDcKsTZQYBctWqTkwcGgW7duSiN+8803Sn0otM8//7wyAFA86U5hmvQ3Uyw4fWRddu3aBQ4qFMJbb71V6ewFBQUgN4oyRfLnn39GdHS0MnNgWchnyZIliiDT6rv++uuVAYfCyrRZT1qGe/bsAadekZGRShuQI10RXbt2hSrYnK6xnqx769at8eabbyqDB6dALANFj+J80003NVlnJA+K4siRI5W6abmb7zvSphSfLVu2IDU1FRQzDixkRX8fH07OyNhX2Ha2uLIfq+G3336zyaqoqMhm26r9nf2EfZmzQQ7K7MucJnNQpkuOol1cXKwMwnyZZi8O+6m9erCulvoV++OKFSvAPsF+RTZkRMPF/FlSOTT2lsYT256GB/9xmRT/ffHFF/D29lbKxOt8nmhYsa55eXm48847lf5JkU1ISFD6PJ8Z9o+XXnpJYck+zmeQvl4y57sYcqdbKjAwUHkmGlofc78t25IaxueRhk5dXZLGHleHknl5eeHJJ59ULBNatQx8uOPj45URmyAYrrrqKjz22GMGS5Yitnz5cnz88ceGc0pEQLEKKNBPPPGE4RqtFfVFHQVX+4DQMqDYUUiGDRumlIWQKa4UYO4zPwJjoIhZenHCTk+RZGOzwVgPBubHxiVQ5kNrd+7cuYqA8zrTYl048tLJz8DyTZgwQRFoHtMv+eKLLyodjWWhOPLBZNrcZ97bt29XBguKuGpx8MFcuXIl6G/u1KmTkjYtaebD8nGGwKk3BwmdTqeIK60ldgZa0wyq+4SCHRQUpAww3LK91HyUiE7+j3XkixIOSPaCI21KfrRmyEttdw4eZE2/MIMjXNkeaqAQ0dVki5W9tlXT0m5pBNASU/24HCzZZzgQq4JnKw6fI3v9w1K/YhnYP9jONHLY3yi69913X4NfRGrr5+g+RZH/1MBnlAOlrUCrlzPJZ599VpllMi6tcrKkocH7ObixXTjgsg9QdOfMmWN4hm2lb++audDSdcCg9eHyHJ87R0O9xJbWBUdtTrs5YrHCFA++laVprwYKGMGqgSJNoaCo8oHQBo7EtGQptuaBECl8zEcNzJfTUIqL6mJQj2nR0hrRPlDqfeZbQuVDztFUFVrG4YPBQNGi6LHcLL8auE8rnla/WheO1to30XzDzmu0nCwFNW+6QjjCmwftfRx41PKxnkyblgO5WAqMS5fGK6+8olgSnH2ws9KyacrAh4NvljndtxccaVPWS21ntd0pKrSO6L5hqAtXxneEVV3blulSvFWh5TFnIXxODh8+bBBbW3E4Y2HfdKR/MH1tYDuzDy9cuFAxDDizYfs3dNWHNg9H92mAPP7447WiU1CtBbo+OAuYMmVKrShkxn5Pg4oaw7/iYv04q6ELQX1Oat3o4AlLQquKKgW2voJrVC8HC8JonPpyyn/PPfcYXqRRQFetWlWHVGpH9fPzU3xeqp+0doyWfYbTPlpedQ1cRjd48GCLt7HhGbQPrcWIZicpRPQRcsTn1IeDI5fb0JKihVzXKZBZ8g4f0oKjUFEwWJa61sPhjCxEtMVVG90eK8atb9tq86nvvqP10Kbfrl075Vmir5PP5u7duxUXGy0/SzM87b0tZZ+zuldffRXUBWthzZo1Sv/m8k76pzkjpA5x8K1PsCW0TI+iW1/BrdcfNXC0pZXJaQ6tCk5N6cc1t17og6E/TQ2Mw6kvrT/zh47TfKarHe04eh08eFBJW01D3XKqTQubb7O5ZVCPOSXkA+5IIDxaGzk5OTh37pzhFvpWKVJ8yHx8fJRys/xq4D7vsVQXNY75liMup6yqxWotb1p45GTNajVPl/zJk2mr93DLFza0zGnRcDUG/V5Ml9Ovpgqc3nMqS7HltJBlVAPbi1YeHxBOex1pU0txWFcuT1RnGHXl6ixW5jM49m32ce3swlYcR+th3q/Il64bTt3Zd7mOlS+I+d6AfdYVAp8rzpLZb7TPN0WVVi/PcSZM/yndJBMnTlQMQD736kvs+tSTrkQ+99Q2a24CtguvMQ7j8h5HQr0sW4KgM5qrEWiR0dLlYnXzQNFbunSp4lOhtUr/Jl+Y8CWU1iXA+1hwigJHJb6J5xSIoNk5KBTaKTzjUyCZNyvNlz50J9C5zqkkfTjWVjyYl5EixYX19KXR+U5fEAcN1odWGVcOMB+6C+iT5ssvBvrSOArS6jCvi3ke6jHdG+TGF1esL0VIzZv586Wgmjevc9rpSKCVypUWaidj+3BaxZdIFFaWmT5T8uS2odMsR8qkjUNrmm3EdbZ8aUr/s7rOlgMyB1oKpaNtyrfRarvTvcKXLkxXDdo2tcZVO9hzxYQzWNFQ4Esq9in2e/ZNunMoturAbSuOo/Ww1K8oAnzBxPWtfK5o4dKAUJd0qqxa6pZ9ny+9Fy9erLBjHfgMvv3228qKIg4i+/btU/6Ihb57ug84cNN9yFUX9Q1cTcI16/wTXIqqtaAKLoWW9zgS6iW2FDL6Uvjmn8uOCIZ+DHPBJSD6TfnigiMt/Z4LFiywKITsWPTt8IGjKNPS4XSHQsv7zAMtakKlT5hvWek0p9XDEY4PN687GtRy0QfNF1osi3bpFwcKWgasB992UjApGCybuvTLkbzIjW+GKTp82OhTtZQ3Ow59yCyH6kawlT7FngMUfVwcBLhigWXmkprk5GRl5Qjvp4+ZqxXMBy5baTfGNbYFHxwKDfvI/v37lXpxQOWLLoqn6kt0pE05mNJdw35CASNDroqh5awGe1zVeNw6ixWNCvYVvuRlUJd+cbBTxdZWHN7jSD0s9Ss+AwxkwlUQfDa4KsJVxJZ9mm1KvzUHDQ6utGBjY2MVYaXrh6ucGPhegitU2J9oHDVEbGnkMC9Hgiq4jsRlHIt/ruvozRJPCAiB2gToKqARwgHG2lt3R+LUTlnOuDKBevlsXbnCUnYhIASEQHMQELFtDuqSpxAQApccAXEjXHJNLhUWAkKgOQiIZdsc1CVPISAELjkCIraXXJNLhYWAEGgOAiK2zUFd8hQCQuCSIyBie8k1uVRYCAiB5iAgYtsc1CVPISAELjkCIraXXJNLhYWAEGgOAiK2zUFd8hQCQuCSIyBie8k1uVRYCAiB5iBQrx+iaY6CSp5CQAg0PQH+pKH6JZKmz71pc+RPkjoziGXrTLqSthAQAkKghsBlF9Rf5hUkQkAICAEh4DQCYtk6Da0kLASEgBAwEhCxNbKQPSEgBISA0wiI2DoNrSQsBISAEDASELE1spA9ISAEhIDTCIjYOg2tJCwEhIAQMBIQsTWykD0hIASEgNMIiNg6Da0kLASEgBAwEhCxNbKQPSEgBISA0wiI2DoNrSQsBISAEDASELE1spA9ISAEhIDTCIjYOg2tJCwEhIAQMBKQX/0yspA9FyBQcGI3/Npea1rSNr1Mj+VICLRAAlbFtrCwEKtXr8bUqVPh4+NT96IfXAn9612QvCoMndS7NeewZRrGrdb8pFkrH+gfmo/po/3
QBkDuS3ok+iRj1WjD3WoqKD2wFnFL0pBbWgl3Hz0mPzkdYd15lybkr0XYtEJMfzcO+rbG80x39taa41ZeCLxjNuY9rIOXmzFOY+ydOnUKhw8fBrcdOnRA9+7dlS3TPnfuHFq3bm0/m+I0TBu3EgZKLG/4PCyMCkQbt2pGhrqoqY1civQZwEr9bKSp52q2YYvTEVloyt3dKxAR8xciurdC3ep90/tC4R4Tn4b8s5XgfWFz5mHyLV5sLazUJ6JL8iqEaZqreMs0PI/5FtvQrGgOHRaWl+DZz5PwhneRafyAtwAPMwE2jVHrSOkHeyKwKnUy/E3avhhp08ZhJaYb+m754TSsXJKIzB/Kgbb+CHsytqbeADR92lB17TmlDTcgYFUqJvsbi8H8M/unI3ivpj8aLwP+xvyrT5OxsU1N+QNwoK8oz9NtuZgdkYuw/8xDcCtjhpV74hD6fiBSF3fCBk0+agz2HfYBCfUnYFFsKbSLFy9WRIHbJ554on6Ca69cijAEVscqzUdizDQsbZuKhbebCac2nfy1mLS8FJNXpGHpde6o/CEVsx55CpUJqxBxnTFi/p4dQFtgR1Yp9MMoCMbgP7VGxCuLkb58GsbFT0fq/GBF5I2x6r+XlpYG/mOgyFJ0Ka5hYWFo1aoVTp8+rew7lkMYlqZPh0LpbAESF0zDUh8jI0NdTBLLBeCP6WbixyjFhQA03MsPJ+KpmUuR/vbCmkHJ8n04nIhJ8aWY/O8dWNUJqDyRjqXTZiF1xWuIqMdYbFJcBw/iczcj7fRv2OLpidFty4x3Fb8JdJlrPHZ072wa3suKhn9/d+MdhZl4r8Ad7n41p0rTETfrQwQsfg3z/L2A0mysfORBLH08GbNvsdFPjSkCKEXq8mQMf2UcfEyEHQicka4MjtYGLJNkNG2q8J81DnFTUjHvNrUctvuKkpaXDvq+K5F5sBLBhnpXIntvOgJvnwwvHLPad0zLIkd1JVDLZ6sVWiZGK4yCy/NODV7+GH67HzK/OmIjm0pkbklFwMPTob+u+gFxvz4C8x8GNrxvsP+Aqnx8sluH2fNDceSjbJRaS9G9E/RzYhF2MBE7Gql6FNkPPvgAY8eORWJiIubOnatsKbQbN25U9q0Vx+75tn4OMLKbikmENt2HY7BfJvIKTE7XOig/UYjSID30Neab+3V6jJ8SCJSW14rrjBOZP+XhrSN7lKRjTnjgjwuaXE7vBP7HAaZuQT8kGJl7s1GpuS1/5wa0GRYKVWsL30/EsfAZGEehZfDSYfocPdK3ZMPxmgdC13EDlmwt1uTUsF3yn7cgDNlJO2Cx61rtK17Q3R5gWu/z2cj8SIfQIFOjpGEllLvNCZiIrbnQqpGbRHDPFmD7R/nQ+XdTs7WwLUDeQX8E+qsjeXWUTt0DgK+OwNCVD32ItO46BPYdgNCC7cg0XLCQpJs/dLcV4pNDViXZwk2WT9FlQLEdNWoUhgwZYhKpUQYrhVEBgm+wxcgkW7sH5Ye348OCYASo6mLljja9g6H7aCkWbslHaVV1JJ8h0xGhuB+s3NSIpxcdeteQWmGlG/ZfrjMcKzs/vWF67MBRm9tHIexAGtLVpucgvbMbhge1rxmgy3Ekvxi6G8zg+AUg8EAebJkFptn7IGLGZGD9SmNephHqd9RLB/0PnyC3xMLtNvqKV1AoAvdkIvt89X2VBzKRfoseOo27zUKKcqqBBGq5EeijLSoqUqwwNe1bb70V/NfoYets6LX+0/Cltl0IqEDlWQul8LoWxplsJTJ370Dg7dFogzYYMKwQS/YUIuxuYwwLKQBVWvvGYgy7J7/99lvFTWAutLRo9+3bZ/d+yxHSMFuvel+9EDx1FWZr3Cz5q8dBv1q9U+sCyMfKcXqsVC9pXAfQcu8YjOkrZmv82mb3qb5DLz3i3vRC6qtL8OCrxXD3D8PsOZOhMzgqze6rydd/qlqA+m+Tj+zB3p+MM5eunp3QPzAeyIsCKo5XJ1z+BXBqO9BhuMMZlZ/3waghR5B4oBShw7xQmfUe0m4JQ5pbLpYqTqVylFsyX9t6Gd9DOJpbpzDMf+BDPLgmE7r5wY7e5Vi8msEPsN1XDIm11UHfe2mNKwGKC0E/bLbGjWbWlmofMCQgO/UhYCK21l6E8QVPjx496pO+7Xu0AmA7Zs1VD7hbGn1Lf0Ihal6QcEq0OxD6KdXWr3+/UJQuzkTh3eM0gmwhMzeN387CZUdO0RdriSFdCvxXv6D64fjiZhKO+fhoHgrAss+WOWmF1yxnlTtfqjxyDD4+2pmCjfs68mXaa4ioqkThgbV4fsI0FCeoL8Vq31f9gsws7zoeVl34A/G5m0zuerJPePXxtQ8APz5nvEbfbYdhAEwmbMbrZns/lZbD/47xOP1MOgqHheHY3kwE958Nd6guiTZoo0Wj3n+21DiLUs85sO00eg7GT5qGtQcCoXcgvsNR6AdWBNd2XzGm1wbBw4KxlC6UvkDmXj2CH9f2/9ptabxX9upLwLFeWZ/U3QD3SjNrkdbj5YC2WeuWtB8C+uYj02zKX3w4D5V+Poq1Ub53B9KRjbgxeuj1euhnpKL8RAq2Gw0j0yyr8pG9xwcDejfcX9W+fXtlVmCagekRrV+6ZeoeOiH0bh12vL6jXg+6xfw6hSLylh1I3GnLz1J9Z+HulUg9VGPmubnDJ2g6Jg/Jx4dZ9u+1mLeDJxflbkZR+SlD7MGdb0KEb80sy0sPeN5iuIbKn4G6uhN8gjGq1XvIPJCOtANhGBWk7Z1t0M2/E9IPmHWegjzk+nepHt7r0s/dfBAxVY/s5YnItGQxG2vi2N7X2Ui/fgACO5pHt99X3G8JRvDeXGRnZSLztmDoNCsTzFOT48Yh4Dyx7R6M0JIUpGTVOMQqC5GanA5/vQ71lzV3BI+OwLHX1yL9RLWQczXC868C0eF8X1+KzN3ZCJyTivT0dMO/VeHAjj1mDwz5cTXCkhik9Y1GqB0vgyO4af3zk267d++2GJ3nV61aZfGaIyfdg8Zj/PlEvPe1I7EdieOO4LvHozzpPeQbpqKW72uDYiS+tAG5qn+zNBuZWe64tmP9W9NyTsazP507g8UaXy2v3HhZGN7MrDT82/7zOHz5m+ZfYRXOnlULaUzL+l4nBN9xLVLi1+L03cPNloEBPndEI+D9NUjOr0mTqxGWpEMfpa92JdSxn7v3jsbsvulI+8h6iRy5wtUIcc+lQRcVanHGZrevtNIhuP8OxC3PROiQ4AYYQI6UVuKQgIkbQUVC0eCb9AaFVoGYviIacc88iNAF5ahs5QPdvcvwwkiDk89u8qb+SHXKPBmvPL4WcTPDEKeus13xAsK47Ks0G+lf6REWayoAnCp6TfoQuQ9XL3Q0pMsyhc9GcpTOZGput2BWItDdwpdj6rIvre+WQkvfbXR0tGNrbC3l4eaD0PBuiHhzByIXhyoxDHVR4ysuAvXAge31oYj0i0Di7kgs5QwcZv46AMoayyFxeMVtKVZOCsVscm/rg+CoVZhuWD7kQF51jHJt63Z4/KYwLP+i2md99blB2POZD/bgN01KXQHwX3
W4t9+fcGNbrXWqXrG+9botDLqXEtEtyMKI66XHvGXlWLnkQYQa1tm+hunqsq869/M20E2ZDf1HC60XyOoVY9u4++gQ8XgyotVymN9joa+YRnGHrn8wsKcNgnubXrHaB2SdrTmoOh2bfF2XYsCXY7YC19xKsE1AXf51/vx5xdfNlQi0eC2tUrCdklw9/3sleqfOwKlffkWvU/G44g9LTtRqTl5tLsOGR1rDw6IJISyFQPMSMOmW3t7e9be6mrceLSp3rqnl6g36Z/nSbPDgwYroOvRXYy2qJs1fmFZXuOOJwDF46YNfbAotSzr+VncR2uZvMimBFQImlq2VOHJaCDQ7ga+P23EqA+jV2ezPs5q91FIAIWAkIGJrZCF7QkAICAGnEXDeagSnFVkSFgJCQAi4HgERW9drMymxEBACLkhAxNYFG02KLASEgOsRELF1vTaTEgsBIeCCBERsXbDRpMhCQAi4HgERW9drMymxEBACLkhAxNYFG02KLASEgOsRELF1vTaTEgsBIeCCBERsXbDRpMhCQAi4HgERW9drMymxEBACLkhAxNYFG02KLASEgOsRELF1vTaTEgsBIeCCBERsXbDRpMhCQAi4HgERW9drMymxEBACLkjA5MfDG7P8e/fuxf79+2slyR8or/+XZmslJyeEgBAQAi5BwCliy8/AWPuGmfp12YceesglAEkhhYAQEAKNQcApYmvvU9379u0D/1kL/OCkfOvMGh05LwSEgCsSaFKfbfv27R1iROuX/yQIASEgBC4WAk0mtvwA4pIlS5QPIV4s8KQeQkAICAFHCTSJ2FJoVR8ttzyWIASEgBC4lAg4XWy1QquCFcFVSchWCAiBS4WAU8XWx8cH/fv3B1cnaAOPeZ7XJQgBISAELgUCThVbiuqiRYuwceNGE5Y85nlzETaJZPOgAkV7M5BTUhOpqgQ56VkoOmfzJrkoBISAEGg2Ak4VW+fVqgi7Yqdg3ac1avtLDlJmTMHWw87LUVIWAkJACDSEgFPW2TakQI7d64cJu7/BBDWyZwjivw5Rj2QrBISAEGhxBJpEbIuKirB48WJD5XksQQgIASFwKRFwiti2bt3ahCH/oqyuf6RgnoZJgnIgBISAEHAxApdduHDhgjPKzB+iyc3Nhb0/3TXPmyLL5WJ9+vQxvyTHQkAICAGXJeA0sXVZIlJwISAEhIATCLjoagQnkJAkhYAQEAJOJCBi60S4krQQEAJCQCUgYquSkK0QEAJCwIkERGydCFeSFgJCQAioBERsVRKyFQJCQAg4kYCIrRPhStJCQAgIAZWAiK1KQrZCQAgIAScSELF1IlxJWggIASGgEhCxVUk4ZZuDOF9fxH3ulMQlUSEgBFyIgFN+G4H155/r7t+/vxYKb29vjB07ttZ5OSEEhIAQuJgJOEVs+aPgiYmJFrnxB2n4ewnqN8ksRpKTQkAICIGLjIBT3Aj2fnxm3759iI6OtvpP+3OMVnlXFSEjPhq6Xr7w7aVD9Ks5KKsCUFWElGhfDF2TV3NrBTKe7omeM3ahjGeqSpC1agqG9vWFr29vDJ2SgDzDFx447Y9GwrYERPfrCV/fntBNTEBeWRF2PR2O3r6+8O0bjpjtxp+IzIn3RXTiViRM1KGnry969puJpO8qrBa74qskzLyrN3x9fdH7rhhsPaaJenwXYiLVdMIRs82YjyaW7AoBIeCCBJwitg3lQOvX9k8yViDrhfGIw0Ts/uIojmYmot+n4Zi5pQRw80bk/Fh4LlmGlGIA+QmI2xKERU+FwBNAwfpxiP5yEP69/yiOHvkEsV1SEL4kC0Z5zEBKjh9iP/wGRw8m477SOIwb9Bj2D3wR2Ue+QfZLfZA1dRl2KcpdXdOMjQfht+ATfHPkEDZNKkPcmDhkGRM04jiegiljP0a/xdk4euQbbHsYWBYVhxwlbgESHpyJojHJOHTkKA4lRaLoqfF4+Svj7bInBISA6xJokWJrF2dxGtYlDsS8x4Lg6QagXQCi7otCxq4sKB/K6RqF2DnHEfNyEhKWLkPnBRmhRXQAAAmFSURBVLEY2ak6Ve97NiF7dST8PAC4eSJo8EBUpO6HagcDQZjw0EB48yd52/XByIgglPWIxAS9NzzcPNCx/zAMxFYc/M5YyqCHJmBgFw8lvYDxszDVKwk7LahtzoYYlMyZh8iujOsB75AoTGiXhI/zmVYZTn7vh359/ODhBnh0DUNsUjyC2llSbWPesicEhIBrELji5MmT2Lx5s83S3nXXXbj66qttxmnSi8cLkIEkZPRMMs12YIDhOCA6HlFDwhHnMQvbIrwN5z2uqkDOv6cg5vVdKDijnjZ8YAeAB6DxZF95hYdy6ko1qoWtxxWaq24B6DMUWHdKY/oq95Sg4LsK5L06FL6xpolMoPsDAQhb4IlxY3TYP3oihoUMxLCgIHhzMJEgBISAyxO4giI6ZswYrFixAj///LNJha655hrMnDmzZQmtWsLOs7Dtk0dhlFf1Qs22qgwVpQCu/RUVv9OK5fkK5CwJR/T3j2Jb5hr40Xr9PA6+kWb3NuiwAhU2jNGRqw9hxXA6NMyDBwKik3FodAFy9mYh7Y1xiIkdhBVJ8QipscrN75BjISAEXIeA4kZQRZVbNVg6p15r9m1Hb/Q5/jFytC+XFOtQLVkFspbHIGP8CsT7J+DJRNVJUIaC74oQNSmyWmgZ/Tf1nsbaFqHoS8C7g7mgesK7iwd2fZan8Q/zhV1NvmcKkLW3AGXt/NBnRBRi1+3Gou4peGev+r32xiqfpCMEhEBzEDD4bLXiqt1vjkLZzbNLGCaOyEPcc0nI42y9qgx5G6IRvqZGyPITEPN2IOZNGonIObHozJdlx5mqB9p28MCutF0oOleBimMZiHvezBVhN/PaETJeWY2M4xVAVQUK3o5D3JkJCAuiU1gbPBAUMRXeb8QgLr1aQCuOZyAmbCZ2KYff453Jj2BZzTWcycHXX3rg6j+bp6NNU/aFgBBwFQIGsWWBVZGl64D7LTd4ImT5B1jUKQ3R/Xzh220AYgqGIf7+AHhUFSDp6WXwnDMVIe0AdI7ELL4si01BUZUnQhZuQlTxvzC0V0/oHv8YfpMmIAAVKLMx9bfHIWh4APJiB6Bnt54YseFqxCbMQh9LGun/KDZtjsTJ+CHVS78i38TV8+cjpCNfxoVg0bsTgFfuVJaQ+Q5ejZOTNiE2xNxCtlcauS4EhEBLJOCUb5Bx2ZZDa2VtEHniiSfQo0cPGzFaxiWus13tl43EcCqmBCEgBISAZQImlq3lKHJWCAgBISAEGkrAKWLLz5E3NDRGGg0tg9wvBISAEGgsAk5xI7Bw/CGa3Nxc5XcQ6lJYiuytt96KPn361OU2iSsEhIAQaNEEnCa2LbrWUjghIASEQBMTcIoboYnrINkJASEgBFo8ARHbFt9EUkAhIAQuBgIithdDK0odhIAQaPEERGxbfBNJAYWAELgYCIjYXgytKHUQAkKgxRMQsW3xTSQFFAJC4GIgIGJ7MbSi1EEICIEWT0DzM9ktvqxKAfkxyfPnz1stbPfu3a1ekwtCQ
AgIgeYi4FJiy79Ks/bVXhUg//pMvtyr0pCtEBACLYWAS4kt//zXWqBF26FDB/DLvQxNJrhVFSj5MQ9Zn6Zhx8ad8FuYjVk3WyulnBcCQuBSJeBSYmvvE+mqwDal4JZsmYIhGz0RNbYbkF9i+hWGS7VXSb2FgBCoReCie0FGwaUrgYK7fv36WhW2eqKqCBnx0dD18oVvLx2iX81BGT9ZU1WElGhfDF2jflqnAhlP90TPGbvAj0R0HJ2IQ5tXYFZ4EIyflbSaC3CuAClPh6O3b00+q7JQon4ap6oIu54ON5Qh/OmtKOK1M1sxxTe6+tPsNUlXZMTA964kFNnISi4JASHQcghcdGJLtFrB3bhxowO0K5D1wnjEYSJ2f3EURzMT0e/TcMzcUgK4eSNyfiw8+WmdYgD5CYjbEoRFT4VA+YZCnb5+W4KtM0Yg4apZ+OTIURzdn4hBX0ZjytvVklmw/iHMLI5E8sGjOHowGZHFczH+33lAu4EYNSIDOw3fI6tAzp4U9Bkz0DGBd4CARBECQsC5BFzKjWALxenTp5GWlmaIQv8tf66RqxfshuI0rEsciHnfBMGT4tkuAFH3RSEuOQsl4SPRsWsUYuckIfzlJJSdWIbOC/ZgZH2+ePv9TqxLj8S81TX5eAYg6skVOPlpCcrgjbLSAvgF9YGf8kkdP4TFJMO7xBMV8ERQyEhM2Z6FsvCR8KzKw8epAQh73yFb2m71JYIQEALOJ3DRiO2pU6dMxLZO6I4XIANJyOhp9vHHgcYPpQdExyNqSDjiPGZhW0Q9Re5MEfKCAtBV+32yriGY1bW6tAEj58EzagR0WZGYGDIMA/VBCOpcfc2jfyhGTt2BrLKRCDmWhV3dw7ChS51qKZGFgBBoRgIXpRuhXjw7z8K2o0dxVPsvMRKGL4tVlaGilCn/iorf65WD3Zs8/Ccg+UA2kif1hccX6zAueCierP70LuAZhNARW7E/pwJ5e1PQUVwIdnlKBCHQkgiI2LI1Onqjz/GPkXNM0zTqSyvlVAWylscgY/wKxPsn4MlE9WWZJr4ju+28EZCVh++1X/ItyUNWbhG/74uCvVkoKPOE380jEfVsIna/0BUp73yM6g+fV7sSktITkLHTE2H/rKd17Ug5JY4QEAKNTkDElki7hGHiiDzEPZeEPC4xqCpD3oZohK/Jq17KlZ+AmLcDMW/SSETOiUVnviw77lhbVBzLQkZutVyi6zBM1Kcg7sWs6pUOZXlImheOZfkAPQvfp0bjkZUZ1asTqsqQ80U+PDq0Va4xN0/9KERtWY3ViERQjevBsVJILCEgBJqbgIit0gKeCFn+ARZ1SkN0P1/4dhuAmIJhiL8/AB5VBUh6ehk850xFSDsAnSMxa85xxMSmVC/LstOCRbtiMGV9Vo112hEjX9qGCb8sw4BuvvAdFIODA5OReA+tVE+EvLANE35fjTtvqi7D6jMTsGlhzaoH5uMRhEGjK+A3Jgh+dvKVy0JACLQsAi71DbJFixbh8OHDdSLIvyybO3dune6RyEJACAiBxiYglm1jE5X0hIAQEAIWCIjYWoAip4SAEBACjU3gohdb/mGDBCEgBIRAcxNwKbHlbx7UNdTnnrrmIfGFgBAQAvYIuNQLMlaGf35r79e/1ErTqvXx8VEPZSsEhIAQaDYCLie2zUZKMhYCQkAINICAS7kRGlBPuVUICAEh0KwERGybFb9kLgSEwKVCQMT2UmlpqacQEALNSuD/AZwIKUEkFEAnAAAAAElFTkSuQmCC)Now you can see the given file using this command**Note** the `!` infront of the command means we try to use the linux command, not the python command ###Code !cat examples/ex1.csv ###Output cat: examples/ex1.csv: No such file or directory ###Markdown Then we create the dataframe from the given csv 
file: *(screenshot of the notebook cell omitted)*
###Code
###Output
_____no_output_____
###Markdown
Some files may not contain a header row, for example this [file](https://drive.google.com/open?id=1fuFYVsCFo4vqwbEjvdg9K74Y7HvpsctL). We can assign default column names: *(screenshot omitted)*
###Code
###Output
_____no_output_____
###Markdown
Or we can set the names
ourselves: *(screenshot omitted)*
###Code
###Output
_____no_output_____
###Markdown
What if we leave the column names out altogether? What will be returned?
###Code
###Output
_____no_output_____
###Markdown
If we want to form a hierarchical index from multiple columns, you can use this [file](https://drive.google.com/open?id=1mXKSZGxo53IInCL2sWsfHH9EyOOMfeFv) and the code as given: *(screenshot of the code omitted)*
###Code
###Output
_____no_output_____
###Markdown
To write a CSV from pandas, simply use the `to_csv` method. First, read the data from the example [file](https://drive.google.com/open?id=1B2vyFi3KsY2BWsesPujkX_cb2ZYUcWoV):
###Code
data = pd.read_csv('examples/ex3.csv')
data
###Output
_____no_output_____
###Markdown
Write out the file, then refresh the Files pane to see the output: *(screenshot of the Files pane omitted)*
###Code
###Output
_____no_output_____
###Markdown
We can redirect the output to the console to inspect it before writing the file, by passing `sys.stdout` instead of a file name. In addition, we can change the separator using the given snippet *(screenshot omitted)*, and handle missing (NaN) values using the `na_rep`
parameter: *(screenshot omitted)*
###Code
###Output
_____no_output_____
###Markdown
Hands-on: loading CSV
With the given [file](https://drive.google.com/open?id=1mMDzEoVv-umbnDToczq1W-FHPq7nxAo1), create a DataFrame similar to the one shown in the screenshot below.
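###Markdown
For reference while working on this, here is a compact, minimal sketch of the `read_csv` and `to_csv` options covered in this section. Only `examples/ex1.csv` appears earlier in this notebook; every other path and column name below is a placeholder assumption, so adjust them to the files you actually uploaded.
###Code
import sys
import pandas as pd

# Reading: the first row is treated as the header unless told otherwise
df = pd.read_csv('examples/ex1.csv')

# No header row in the file: let pandas generate default integer column names ...
pd.read_csv('examples/no_header.csv', header=None)

# ... or supply the names yourself, optionally promoting one column to the index
names = ['a', 'b', 'c', 'd', 'message']
pd.read_csv('examples/no_header.csv', names=names, index_col='message')

# A hierarchical (MultiIndex) row index built from several columns
pd.read_csv('examples/multi_key.csv', index_col=['key1', 'key2'])

# Writing: to a file, or to the console via sys.stdout, with an alternative
# separator and an explicit marker for missing values
df.to_csv('examples/out.csv')
df.to_csv(sys.stdout, sep='|')
df.to_csv(sys.stdout, na_rep='NULL')
###Output
_____no_output_____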
###Markdown
Hands-on: loading a CSV
With the given [file](https://drive.google.com/open?id=1mMDzEoVv-umbnDToczq1W-FHPq7nxAo1), create a DataFrame similar to the one shown below.
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
###Markdown
Then select only the rows whose total score (the sum of the scores for sections 1, 2, 3 and 4) is more than 50, and write them out to a file named out1.csv.
###Code

###Output
_____no_output_____
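###Markdown
A minimal sketch of one approach (the file name and the section column names below are assumptions about the downloaded CSV, not part of the original exercise):
###Code
import pandas as pd

# load the downloaded file (name assumed)
df = pd.read_csv('scores.csv')

# sum the four section columns row by row (column names assumed)
total = df[['section1', 'section2', 'section3', 'section4']].sum(axis=1)

# keep only the rows whose total is greater than 50 and write them to out1.csv
df[total > 50].to_csv('out1.csv', index=False)
###Output
_____no_output_____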
###Markdown
Reading the Excel file
One popular data source is the Excel file. We can have pandas retrieve the data into a DataFrame for further work. With the [example file](https://drive.google.com/open?id=1XycPSQGocHRTyUK723GDL5nC71hP0cW7), load the file as given.
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
###Markdown
Then we have to select which sheet we want to retrieve the data from.
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
###Markdown
The `Unnamed:` column shows up because some extra data is stored in the Excel file, so we can filter it out using the given code.
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
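###Markdown
A minimal sketch of the steps above (the workbook file name and sheet name are assumptions; adjust them to the downloaded file):
###Code
import pandas as pd

# open the workbook, then parse one sheet into a DataFrame
xlsx = pd.ExcelFile('example.xlsx')
frame = xlsx.parse('Sheet1')

# drop any leftover index columns that appear as 'Unnamed: ...'
frame = frame.drop(columns=[c for c in frame.columns if str(c).startswith('Unnamed')])
frame.head()
###Output
_____no_output_____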
###Markdown
Or we can make this shorter: `read_excel` can load the sheet in a single call instead of two, as given.
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
###Markdown
If the data does not start at the first row and first column of the sheet, we can specify the start row and column. For example, with the given [file](https://drive.google.com/open?id=1FAwZewZknR-eQKTVJELoVrpKdpxjEQ1U):
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
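###Markdown
A minimal sketch of both ideas (the file names, sheet name, and offsets are assumptions about the example workbooks):
###Code
import pandas as pd

# single call: pick the sheet directly instead of ExcelFile + parse
frame = pd.read_excel('example.xlsx', sheet_name='Sheet1')

# when the table does not start at cell A1, skip the leading rows
# and keep only the columns that actually hold the data
offset = pd.read_excel('offset_example.xlsx', sheet_name='Sheet1',
                       skiprows=2, usecols='B:F')
###Output
_____no_output_____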
###Markdown
**Hint:** to show all rows, you may set the display option as given.
![image.png](attachment:image.png)
The default value is 10.
###Code
pd.set_option('display.max_rows', 10)
###Output
_____no_output_____
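###Markdown
A small sketch of the same option (passing `None` removes the row limit, and `reset_option` goes back to the library default):
###Code
# remove the row limit so every row of a DataFrame is displayed
pd.set_option('display.max_rows', None)

# go back to pandas' built-in default for this option
pd.reset_option('display.max_rows')
###Output
_____no_output_____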
###Markdown
Getting data from Web APIs
Some web sites provide Web APIs from which we can receive data as a JSON object. Try clicking on this [link](https://api.github.com/repos/pandas-dev/pandas/issues): you will see the last 30 GitHub issues for pandas. We can also read the data from Python, as given.
![image.png](attachment:image.png)
###Code

###Output
_____no_output_____
###Markdown
The response object shows the response code (200 means it was successfully downloaded). To read the data that comes with it, use the `json` method.
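###Markdown
A minimal sketch of those two steps (the variable names are illustrative; the `requests` library must be installed):
###Code
import requests

url = 'https://api.github.com/repos/pandas-dev/pandas/issues'
resp = requests.get(url)

# 200 means the request succeeded
print(resp.status_code)

# parse the JSON payload into Python objects (a list of dicts here)
data = resp.json()
###Output
_____no_output_____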
Use the json method![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAALMAAAA5CAYAAAB9PxpNAAAHjUlEQVR4Ae2cS6hPXxTH1/2nMPAIGXhmgIE8MhEmKBPlNZBnQspIUVJXIhKFYkgpokSSx8BAHiOJgRhIGMhzIOSVa3b/fXat0/5t9/z2uff3O7977z5r1+l3zn6svdZ3fffa+zz2r+3Pnz+dYskQSACB/xKwwUwwBBwCA9ra2gwKQyAJBCwyJ+FGMwIEjMzGg2QQMDIn40ozxMhsHEgGASNzMq40Q4zMxoFkEDAyJ+NKM8TIbBxIBgEjczKuNENaRuYjR47IpUuXDPEWIADOc+bMkVevXrWgt9ouvn37Jhs3bqzpGz327NkjHR0dtZWbfFWYzJcvX5ZDhw7Jhw8fmqxC1+IAAFAAx1L/QACy7t+/X5YsWSJTpkzJlOZ8yJAhcvLkySyvjJMomX/+/CnHjh2ToUOHysCBA8vQwWQ2GYE1a9bIo0ePagjV5C66FHfjxg2Xv3z58n/Kd+zYIZ8+fZKHDx/+U9asjCiZ7927J2vXrpXp06d3q09G6fbt22Xw4MHuOHjwYE17Iu6yZcuycpYhJM2fOXOmXLlyRcaOHevq+NNmKJt+yp7CVHldLjGVq22+gzjXfF9nbe+3o562RX+mYq4VF36Lzkyh3LDvELNQNjMhbVR39Qd6o5PareX05yf0vHXrVuZzv4xz2q1bt04uXrxYmq+iZF6xYoWMGzcu1C16zZQyZswYpzhA7tu3r6bN9evX5dy5c67848ePDjBAGzFihNy8eVOePXsmq1atEspo70eaO3fuONDIV2drVKjppKSLzZs3y5s3b5xeZ8+ezRwEIQ4fPux0Rzcw2LVrV6Yj5ThcbaLO3LlzMy1//folRLDjx4872ZSdPn06K693QjRGHgfYjR49uqY6+Pj+AGOwJoEheqIv7dUfPmEJRmozAQ5SKvbIePnypQwbNkzGjx9f069/MXXqVPnx44e8f//ez27aeZTMPekJp7148UK2bduW23zLli0ZmICK496+fZtb3y9YunRpNoUy4ufPn++A9uvknYcRTCONRsi8dn7+1q1bHenImz17tnMQJHjy5ImsX78+023WrFkyceJE52htj9M58hKE0vXmggUL3NSM7GYkbPQJqDJv377t9ERfEv5ob2+XBw8eOHKT59sMKYcPHy5fvnxREc53DBbwzEuUQfivX7/mVWkovxQyF9EIYJVI/IbLkHoywimRSFk0+REMkujhR8iYLAaPOg3inT9/3hGAyIUuahekOHPmTCaOupB10aJFrk6R5REDvBlkxm5s1GWbH3VRMEbEzIicE2yPJXChn6JBKyYvLO8VMkNGplOmKyVTuAwJFdVrnRJ37tyZtWWqL5qaEZnr9YUuapP++gOFc83Hsbt373bXeTKJ7Dpw8uoUzSfa0jfLCJYJPqG5OaNMU3cJN2nSJG2a+4t8+sGmMlIpZB41apR8//49m04BLYy8rOlGjhzpbCJKh+WhjNB4BYSBceLEibA497oZkTlPOMsCdEGnIqkeARi03HT5swAydUbzb9CK9OXXYXAofuSzVHr8+LE8ffrUVaNvyM4NW9GBhLxwQPh9cg6ZWTOr38PyRq+jZL5//77s3btXTp065RThN/a8WddcOp0yBfnRk+l23rx5whMLwAI4Iq2fQhl6d04+IKtsblw2bNjgN+21c6Iuywi1C9v8pwbhrIDdBw4cqCGM2sVyADsZfI0mSOQ/WQJDZgWVjT8uXLjgZkt01r79GSWmA+toorkOiK7qF7lJ7Kpd0by2jo4O251dFK0S60E4lhwQuDsk6kolZgYCDgML4rYqMVi5aTx69GjNAKX/ZtqXZ080Muc1tPy+i8DVq1fdU4OiS4RmWaIvS7p6TMrAYjZodKDW09XIXA+dflIWLiNYV4fLl1aYwuChX56l+/cNnOsz9DL1sGVGmeia7JYiYJG5pXBbZ2UiYGQuE12T3VIEjMwthds6KxMBI3OZ6JrsliJgZG4p3NZZmQi0jMz6PWyZxpjsaiMwIGb+379/3Vdh7969c1X5SH/16tWxZg2X99ZbrIYVNwG9hkA0MvMAfvLkye71KK9bITXfa1gyBPoaAlEyL1y4UDhI7AOcMGGCfP78OWpH+FYq/CqOL7N0exBvjvQrMM3vq9umooZbhV5DIErmnmqm7+IhNUf4vXJ/3jbVU0ysXbkIdIvMz58/l9evX7tvbOupxXq3r26bqqe3lfVvBAqTmf/LuHbtmqxcubJHG1xDmPQjc5YYHOEyJKzvXzNY/J3E3dk25cux87QQKERmiMxOanY9TJs2rWEEIGNvbZtqWHkT0GcRiJLZJ7LeCMasCbc89aVtUzHdrbz/IhB9zszOAZ4137171x2YOmjQINm0aVPucsPf8kR9bv7ytk1Rzjb2etumqDNjxgy3tYctPrptivzFixe7bVO/f//m0lKFEbDvmSvs/NRMjy4zUjPY7EkXASNzur6tnGVG5sq5PF2Djczp+rZylhmZK+fydA02Mqfr28pZZmSunMvTNdjInK5vK2eZkblyLk/XYCNzur6tnGVG5sq5PF2Djczp+rZylhmZK+fydA02Mqfr28pZ1tbZ2Wn/nF85t6dp8P+r8fJ+IOWASwAAAABJRU5ErkJggg==) ###Code ###Output _____no_output_____ ###Markdown To filter the data, The json is similar to the list of map data structure, we use the index, and key to get the 
data![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJkAAAAlCAYAAABRVEu0AAAFu0lEQVR4Ae1bf0iUZxz/KLI819w6NfLHzGp6f8Q0sOav2Jitfza8OVgq4gpNsMZsBmJzgX/0T2R/6AjSf0aZIJaD6Y0Za3HRWJg4GTKYnG1zlmZjdYpJt43Bxudhz3G+3p136vt2t3u+8Pq+z/P99T5fP3x/vGLUwsLCP1CkIqBjBGKio6N1NK9MqwgACmQKBbpHQKUx3UOsHMRERUWpKKgI6BoBlcl0Da8yzggokCkc6B4BBTLdQ6wcKJApDOgeAQUy3UOsHCiQKQzoHgEFMt1DrBwokCkM6B4BBTLdQ6wcBAyy06dPo7e315CIuVwu1NfXw2QyiYu+gyHK+9P15NMP/XnS77dvY7CgAAt37nhuB/T819wc7FYrho8dw98au/4MSL3P0tPX7PvXy5eXueLvTsbE25mXKazjRsAgC9bnxMQEDh48CKfTGayqW95utwsANDc3u/f48MVPozB/clhcGZ31GH3wyxI+5QmcCxcuLNn3XJBHmXPnzonge/L8PROAwQJI2iPoqEsbWnpm0yYU22ywjo3heYtFy17zuqKiQpyXMTWaYox2uFZ/BNTH3/Ti6/KTyN2yHa3DNlQPduCrspNI3vjCWs279WOTkhCbmOheB/ogwRKovB5yz27dqofZVdv0mcm0JevUqVNLnDBDWa1WdwqWJU3u5+TkoK+vD6mpqUImLy8PzG4kre1g0vf576/hleQdAmC09d7OV4XN7x78LO7r8SMpP19kFQJGEksnS+jNsjLc6+9Hv8UCljaWRpY6EssU93j9oCnxXFOHurQh5byVNunT8y6zoNTT2qesBDjfP5TIJ8ja29uRkpIiAEFQtLS0LHnv/v5+XLx4UfBnZmYwNDQkLrPZDJvNhrGxMRw4cADkUX94eBhZWVnCxvXr10XPxX1ZTgcGBpbY97ZY+NOF+4tzKM3cI9hc11ztwPRjJ8YfzXhTWbe9+MxMvDk0hNeuXMGLpaUodTjw7t27S8CYUV4u9nafPbvM78vNzUKHurRBXV7UCYTG29sRl5wsdOj7yeysAHUguk9bxivImHHGx8dRV1fn8/1qampAQJF4LygowNTUlE95T0ZJSYkbcGxGi4qKMDk56Smy4vPhq53I6PwAr6fvxEf5b8PhvL+iTrgKMFMSVJYjR8QRYkwmbK+sxG+3bgU1XDyt83sFWSAvw8wlpxXeteXUnw2CmOVT6ldXV/sTX8Y79OV5WMwpcH74KZryrAJgXP9f6Y+HD/FoZAS2nBx3mWXJDRdaVeNPkDQ0NICTCjMYSfZkKx2c5bGxsRHHjx8HJx4Sx+tAMln8BhNSNm7CO1l7BLioqy2hK/kPVz4nzjcGB0XfFW5n8JrJEhMTMT8/D4fDIc5DEGgz1ebNm5GQkCD4zGpavtaGNjBb/5uACNi2tjYt2+ea/djnEyPiMwaFbt77ETOPndi9ZYdPnfVkbEhIwOLkJJ5MTwdtlmWOfdWDGzeC0o1LS0NMfDwcnZ1B6YWKsNdMxh6L35qKi4vFe7Lp9/zmxAa+sLAQnCBJtbW1IjN5HkprIzs7G93d3aIXq6ysdNvev38/qqqqsLi46Knu87nkpVx0vfU+WDJJac+Z1/3zhU/nADgA7KiqwrV9+4SYedcu7O3qQnRsLEZPnBDTo9R3dHTAcvQo2PRLYl/17aFDouxxj0MCm39+O/MsgbN2O+JSU7H30iXhM/fMGWGf06UkqSvXoXqPcrlcIfd/l5w6m5qaQDDKcryaAMoyrP2Yy9K+bds2d7leje1w1WHV6enpQWtrq+iJjTiH13JphGPlI3IiENIgY7nmBBroUCF/bZSnnr+plTzKBPMhWNoPx7v826VsgYw8Q0iWSyMDoHzpH4GQzmT6H195MCICCmRGRDnCfSiQRTgAjDi+ApkRUY5wHwpkEQ4AI46vQGZElCPchwJZhAPAiOMrkBkR5Qj3oUAW4QAw4vj/AoKdKJoQu7XPAAAAAElFTkSuQmCC) ###Code data[0]['title'] ###Output _____no_output_____ ###Markdown So we can create the the dataframe from the given json 
object![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAb8AAABCCAYAAADHaNgBAAASyklEQVR4Ae2dbagV17nHV0O41VST+BJsovZEL/XQD+FI7m1SsZebGvBDSo5CQiqnajUIthCDoalFC5FIbWgqKFGwgkFb21NbGogHej9Iai30YA291mOgclKqSY3JDVXjS1qTEu69/Fb7DGsvZ2bPnv3i9sx/wT57zqy1nnnWb9asZ55n1p71satXr/6fUxIBERABERCBChG4qUJtVVNFQAREQAREwBOQ8VNHEAEREAERqBwBGb/KnXI1WAREQAREQMZPfUAEREAERKByBGT8KnfK1WAREAEREIFc43fhwgXX39/v1qxZ465evSpaDRJ47rnn3P79+xuspeJZBGAJ06x05MiRSvRVOIwfP95/8nhkcbqe+9HXdE8bV/7+3nvuUH+/e+OnP+2Ymn/57W/d0SefdB+1cYxrpl3ohX6v5fT9PFiw/PmnPuVoZyPJ6lG3mWNzPml/t6Vc49dtyo5VfewmwwYFvtMGhrz2c3NCHQxAo+n11193999/fzIolTl+o8dstDztGhwcdKtXr260amb5ZphlCu1AxpIlS/zN6J49ezpwtNYfAr1hv337dt/nWn+E6kg0w9ioYTNCeYb/7i99yT365z+7f//e96z4mPrONX6TJ092Q0ND6qQdOuWHDh3ygwLGkLRu3bqOedy9vb3u7Nmz/njdNjDBA49h/fr1jj6pNPYJfKKnZ0w2spl2TZw9uxQTM2J3fO5zpeo3W+kTd9/tbho3rlkxLa+fafzqhVZibyX2VLhTDz0ZC/+l3W3H4axYttW11mfJtvx2faOH6WptCz0ta5vlbdq0qZQq1H/22Wfdm2++6Y4fP+5lxLJD3hgGjMLu3bvdggULEu4ht5hZqHeeknZcylvICi8Rb9FSlmzOI3raHf6LL77o/w/rm3xjxjHidPDgQTdv3jz/ifNoo9Wl7WGKZXeKWahD1nasG48X4GXJWNO2kJflZ31zLsJ2InP58uX+fNU7H+i0YcMGHz1AH44d64UMyyM/PFaWTo3s/5dJk9yCoSEXD9R4KITewlBcGBplOwzNXf7jH93wihU+3Ebd33/rWz50+F/z5rm3fvELx3cc6jz9k5/48CBhvlA2+ofHjcN4pldYJvbEstpVhM3N9IEXXnAYsTBx3Jd7e92Zl192v37ssWt0t1Ar7aG9MLHENvuoR33kUC5um5VP+6aN1EmTb+XRGd1pQ7elTONXL7Sya9cuNzAwkOopcIHs3LnTjYyMJPnIK5K4ADdu3JjIxhsh3GWDdTOyixy/XpmVK1e6WbNm+XbhqTFIoRNp27Zt7q677kra/Mwzz9QTl5nPwNLT0+MNIIVeeeWVZKCx4x04cMDXxyNi36pVq5x5j3A05uSdPHky0SvWO1OJIAPDYu1evHixe+mll3xuPdkY8Pfff9/r9cQTT/jzSv1jx475+iEzZL399ts1z0lpx/DwsHvggQcCbf6xSZ+gb5jHSrvCdL2ZhbqE27QJrz7sK0RYzKvFoMMBHpR96qmn3NNPP530s1BWo9v1zseVK1fc2rVr3ZYtW/zx6IPcfFjKu+6tTLu+R3fudFdOnfKhuP/82c/cqR//uPCzpD/t2+dmDwy4mf397vVdu9x/7NvnPrp82f3trbe8uhiAD95918te+Mtfuj/96EeJsWCQ51j9IyM+f/aXv+x+v3FjzTPC333jG4luhAlPDQ7W5LeDyT3r17vFo6Nu5uLFDh6EKPmYkTSDi9639fbWqHDrpz/tHjpyxNejPnKoy40H9eoljOeJzZsdrKj32a1b3e++/vXC56Oe/E7kZxq/IgdnUOLiTEsXL15MBri0/Kx9Z86ccRMnTnSLFi3yRRgQMLKHDx9OqpSVzWAS3rViYBq9e8WgmVEhVHj77be7c+fO+TtrDEyrnkmhF4OjpYcfftjNmTPH/0ve/Pnz3enTpy079xuGjz/+eFIm1Nt2jo6OuunTpydeVOyBhe3GEDE4c+7ryYbPI4884g+DcZ47d64d0g+uyDFmtItzHfYrjnHp0iU3ZcqUpB4b7Mfw5YVC282sRqEG/sGbxwhZu8Oq9FHaBQd4kLgWMEKco2ZT3vkw2dyQ0Ney+ll4fqwO36EXTl372I1rWLbMNoP0Z9au9VUZzD8+ebL74Ny5QqKoO6mvz5fFeH086k+h7FtmzHATZs1yF/55g/Y/v/qVu2fDhsQo3LVwYY3hRGhYf/K99/r8//3gg0K63YiFYPOvS5c6jCgJtoQ3L7Wgj3aKR2njx90hicGPTh52cPZx57h161afF4dO8hp3/vx5X8/kIhtvy1IzsqnLHTYDZ/ixsJwdo5FvDDE6tzqhH4bBUjwpJWRiZfK+wzAaRi4eSONnfhiVMIWeFyHIkFk92aGccJubhldffbXG6MahS8q8V3KmWLuZhW1pZBvDhzGjb6clDFRs7NPKXY99ede9RYvCa4tt+suNlAjR3XLnnV5lJpT87Z13asKKQ3191wzy0+bPT0J7GIT5e/cmxvJGantRXfHA8XYt7Gnh16L1u6FcaePHhcsASOcmvMlFERpA7hyPHj3q87mLXbFiReGwDR6ChXzsQgoH47KyW+H5xSetXQMVHjCGgUESvQl7Ef4yHo3M9OOOnHNj4UG+MXatSM3Kjo0u7QsN69SpU92kAmGYuC3dzIxzmpfiGyqYYDC7IeVd9+32/DrVfjN44eSUMKxImI+QoXk9ndKr245DeBcW4Sd+VtttOof6lDZ+oRAGqGnTpoW7arbTLna7mBmUQy+GwZA8e55VIyjlnzTZKcX8rlZ7fjz/uO2229zMmTMdDBi0zKNiIEib8EJ7GUDisGKosw3cPBsL75qtrXg0eNVhQiZh0jA8HOZTlzIk9DY9wzJlt8vKhhv80CcroTNlYu86bi9MzCsJZbWTGUaJCR+NTEhBt7w+Th/lnBP6RD6JZ260IwwZh22Mt7l+qMuH5+etPNfhseLr/np7fnhoGC4mevD86cNgAlGod73t90ZG3F/feMM/J8MLxKt77TvfaevzLIuehA5EPT3JNy+V0GyZRPj3/dOnk2efRWV88gtfcH/Yti15Llq0XljOIjOtnjQVHiNvO9X42UXNAINhYhBn2wbs2IMijIZ3ZwO1DfDU4cOgRBiUC5v/aayFRLnId+zYkehImb179/qL3+rzbZ0iT3YipI0bxgKdCEs+//zzvk3ojXdqsy15HteId4bKVtd4mreLbPhaPl7g0qVLr2klz5BCPhhg0sKFC/0NhfGfMGFCyzy/ZmTDEH5wDM+16Y3u7Of5ZmgMrOFhe2HCsyqeF5M6wcx0O3HixDXG2XRM+0a3uI+HjwY479zI2Pmi7cz+5Xj1rk2uQT70Iepzflrl5de77tPa2ql9M/r7/aEIv73y0ENuzurV/plg0eOHMx4xdJ//wQ+SsCUTSHhOSLjTwnzxTNGix8kqZ48VzCnIKpe2v/erX3V/OXIk0c1mqtpsTPR+59Ahd/
DBB6+Z9Yn3yrM78mibzfa03w+yj/Amk43Ythm1eHhMcrF6Yd00HdP2cfN73333+bGJft3p9DGt51ccOYMyRs2MUvGaKtkMAQZdwuZwtxusZuS1si43G9zc7du3L5mQ1Er5Y00WN9DMGrZJY2OtfWXbgxe0bNkyfwPXbX28bJuK1KM/hE5EkTqtKpPq+bVKuOSIQCsI4MFg+LhQMITdkMwLkuHrhrNx4+pgnnxfX1+lDJ9FqK6X4aPHyPNr4LqR59cArDYUFf82QO2wSG5g7Fk4E9vssUGH1dDhREDGT31ABERABESgegQU9qzeOVeLRUAERKDyBGT8Kt8FBEAEREAEqkdAxq9651wtFgEREIHKE5Dxq3wXEAAREAERqB4BGb/qnXO1WAREQAQqT0DGr/JdQABEoHsI2Bp09pYS08z28yaReG06K5P1bW8ridfYyyof77e178q8xcRkmf5xuyw/79v0t7er5JXttjzaa2+NCXWz38nam5343V+Y+NE/a0u2880vMn4hcW2LgAh0JYG8tenarTCvAONF1qxdxzJKN1LCcNuivmX0toV6y9TNq8OLK1hhByPI7z3jxOIFvKqQVxa2K8n4tYus5IqACJQmEK6oUFpIF1Zspl0TZ8/uwhbVV4l1/m4aN65+wagEb0/iDTCxVxgVK/3vzaVrqqIIiIAItJiAeXhlxBJi4yXMpFumT3ef/+EPa5Yd+vD8eR+Cu3D8uJs8d27Ny6sJif76sccy6+bpQ1jyv7/5TcfLsUmxbPY10y5Wbrj/hRdSVSCc+puvfMXRJhKL6v7bd7/rWEg33M/LrUkxlyxm4X7qGdfer33NsYI8qQgzXgpuK8v7Sg38ISTKC/15sTsrmvB/K5OMXytpSpYIiMB1IUB47+YJE/zacijA4H1y+3ZvCEyhE9/+dmIQCeeNfv/7fiCn7onNm31YkxAngzpLIoUrO5iMtO+3hob84resa9fpRBtYcWLB0FDtoceP9/to22ubN/sVGDDAYcpjZkYLTnicsQFrllmoR942K5JcunTJsb4podBWJoU9W0lTskRABK4LAYzWjC9+MTn25HvvdR9dvuw9INvJEjyUI7EeHcsA4TldOHbML+tjeZP6+hyhukujo1a17rfJqluwDQXeHR726xg2KroIsyyZrWCWJTvcj7eXtp5nWKbstjy/suRUTwREoGsIxKFHFCP8mJVYxNUmr1w5dcqvV2ehPasze2DANnO/8YqQYaFFVjiPPaVcAU1kfmbtWh9yZR1DEivOF11NvVFmoZrNMgtl5W1j/FjbknUOW73Uk4xfHnnliYAI3BAETv5zVuDi0VG/urmF+7KU5/nfzbfemkzEaNZg8RyMjz2D47idMID2PJBngrT5N8uX+xBnEQPYKLOYZbPMYnlp//NTBya92GK/aWXK7lPYsyw51RMBEegqArfceac3fHg0PO/7MGPtR/JPDQ66afPn+/KEQP+wbZs3Hs02iFmNhEwbTSz1hJfTzMzGcVOnunF33FFzaPbBISuEW48Zz/vSwqqtZFajcPQPxo9nflOmTIlymv9Xxq95hpIgAiLQZgJMQuFH5oQW3zl0yB188MGaH7v3PPqoOzM05MsQAiTkaWFNU43ZnMggH8NnnhleEs8DkUl+/GN2Js+wj3yOjQ72w20M6dEnn0zqIRuDYrLt2PW+zbMhvFc02Q/nTWf0YvJL6PUxyeWeDRv8TFbKhS8IKMJsRn+/V4d2Ud9+aF+PWdE21Cs3Ojrqn/nNnDmzXtGG87WYbcPIVEEEREAEWkuAN5osW7asUqu5QxDPbt26df4nDfEzvby8VtCX59cKipIhAiIgAiUIMMCvWbPG9fX1Vc7w1cPF212Y7BIbxXr1iubL+BUlpXIiIAIi0GICPOfbvn2794DaNci3WOWWiLN3e/Kas927d18jE0/4ypUrjre8tCsp7NkuspIrAiIgAiLQtQTk+XXtqZFiIiACIiAC7SIg49cuspIrAiIgAiLQtQRk/Lr21EgxERABERCBdhGQ8WsXWckVAREQARHoWgIyfl17aqSYCIiACIhAuwjkGj+bjsrvUPg9ipIIiIAIiIAIjAUCucZvLDRQbRABERABERCBmIB+5xcT0f8iIAIiIAJjnkCm57d//37/lnHeQMAbx+NkIVHy+cShUd5Obnl8I49kr/MJ315OXniMWLbVNR2yZFu+vkVABERABEQgj0Cm8VuyZIk3VHv27Emtv2vXLv8yUowZH17Rg5EjYbx27tzpRkZGfB75yCuSKLtx48ZE9tmzZ93g4GCy1EczsoscX2VEQAREQATGPoFM41ek6cPDw964pZW9ePGiO3bsWFpW7r4zZ864iRMnukWLFvlyvPttYGDAHT58OKlXVnYiQBsiIAIiIAKVJlDa+NkLRzFOeHxhGJN9W7ZscVu3bvV5/f393hssQvr8+fO+nslF9sqVK5OqzchOhGhDBERABESg0gRKGz+Mkr2NnPAmxjA0gHPmzHFHjx71niGe24oVKwobwFWrVvmyFlLle/369cmJakZ2IkQbIiACIiAClSVQ2viFxKZOneqmTZsW7qrZ7unpqfmff2zFYgxm6Nn19vb6vAMHDlxTJ21Hmuy0ctonAiIgAiIgAkYg1fjhaTF700KOmzZt8ts2IzOejTl9+vSalXjj2Zh4hYRBLZSJbAuJMpllx44dpo8vs3fvXj/JhePbx7zKPNmJEG2IgAiIgAiIQA4B/c4vB46yREAEREAExiaBVM9vbDZVrRIBERABERCBfxCQ8VNPEAEREAERqBwBGb/KnXI1WAREQAREQMZPfUAEREAERKByBGT8KnfK1WAREAEREAEZP/UBERABERCByhGQ8avcKVeDRUAEREAEZPzUB0RABERABCpHQMavcqdcDRYBERABEZDxUx8QAREQARGoHAEZv8qdcjVYBERABETg/wG7rOUEPa5DnAAAAABJRU5ErkJggg==) ###Code ###Output _____no_output_____ ###Markdown Getting data from other source---Pandas provide the tools to get historic data from many remote data source [link text](https://pydata.github.io/pandas-datareader/remote_data.html)Some of them required the key, some are not. 
You can see the usage in the given link.![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAUEAAAEECAYAAAC2m4buAAAgAElEQVR4Ae2dC7yVU/rHn1DTXamkJLlVE5FGyKT8yYiQQsqYSTeDQpRKGCJRIZkM/+mCmaEaUUz+E2LURCVzSgxdRLpRRCpdx/T/fNeZZ/eeffbl3Wdf3nef8zyfz+5997o8a63fu3vO86z1rt8qt2vXrv1iYggYAoZAGUXgoDI6bhu2IWAIGAIOATOC9kMwBAyBMo2AGcEy/fht8IaAIWBG0H4DhoAhUKYRMCNYph+/Dd4QMAQO8QvBrl27/Ba1coaAIWAI5A0C5gnmzaOyjhoChkA2EPDtCWrjlSpV0lu7GgKGgCGQ9wiYJ5j3j9AGYAgYAukgkDMjuGDBArnpppvE5hbTeVxW1xAwBDKNwEHt27eX1atXZ1pv6PSZEQ7dI7EOGQKhQOCgWrVqhaIj1glDwBAwBIJAwHc4PGbMGHnhhReK9JHQlhAXLwv59ttv5dJLLxUWT/hMnTq1SHm+PP/88wnzi1X4b8LKlSvljDPOiNRFP23RJkIftF3KUR7Reueee65MnDhRDjvsMFfOW9cVtH8MAUOgTCLg2wg2atRI1qxZExckDOI999wjV199tZv327BhgzN4aiCpiBHavHmzy//ggw/kueeeixiruIpFXPnf/e53cuutt7r7t956S04++WR5+OGHnVHD0I0cOVLQST8ee+wxGTRokDOQjRs3lkWLFgl1+vTp49Io88orr7i6idq1PEPAECj9CPg2gg0bNoyg8eCDDzovD2Py/fffCyH1unXrpFq1atKpUydXDo8Lg/j2229H6mGEBgwY4L4fddRRgmEtKCiI5Me7QTcGuGXLlq5IkyZNpG7durJlyxb3HR2//OUvBYOHtGjRQo4++mhZsWKF+27/GAKGgCEQDwHfRhBDt2PHDsHDQ3bu3Omuhx56qNSuXdsZpLFjx0bCTULTnj17xmvXhaT169ePm+/NQH+5cuUiBlONG8YQ+fzzz11bGg5jgPE6TQwBQ8AQSIaA75elMSzbt293huiYY45xRhBjhPeH8UHw9EaPHh35nqhxvMiNGzfKOeeck6iYy0M/nh1GVQ0r4S19Unn66aelW7du+tWuhoAhYAj4QsC3J6jamNMjLK1cubILddUI4pV98cUX8vLLL2vRhNelS5e68urNUVgXMaLfJ6QswiIIxpNP69atI/oxpHihuhgSyfDc4MkSUhNamxgChoAhoAgc9OGHH8oll1wiyd4XxOghGCTm8zCEM2fOFLxCBK/smWeeKbL6iwcXvTBCOdKZV6S815tzimL8wxyftkFd/ahuDCKLIaecckokL3r1l/lC5g21THR+jGYtyRAwBMoAAuX8njGC94VggHItGDterfGG2qSlYkhz3WdrzxAwBPIDgZTD4SCGRZgdLaw6M08YhFGO7ot9NwQMgfxFIC+MoPe1Gw2FWVTxeob5+wis54aAIRAkAimHw0F21to2BAwBQyDTCOSFJ5jpQZs+Q8AQMAQUAd/vCZZk7o1tbCaGgCFgCIQZAd9GsKSDOPHEE0ta1eoZAoaAIZB1BCwczjrE1oAhYAiEGYGse4KpDH7utI/k9UlLZP/+/dKh78+kbVfzIlPBz8oaAoZA6giEwgh+WvClvDxukVSsWl5uHH+RG8XMcQtl8aurpNMtZ8jxLeulPjKrYQgYAoaADwQCDYd37dgrU0bMk8lD3pC2V50o/Z7oKEc2ruU+3JNGHmUoG3ZhV80tt9wiCxcuDHtXZdq0aY78gr3fo0aNSqm/1E21TkoNWGFDIIcIBGoEMXC7tu+Ru2d0k1YXnVBs2KSRR5nf9/u/YvmZSpiz/HupNvB9efIfmzKl0rceDCaGU7cl+q6YZsGrrrrKsQKVNsqxfPpDlOYjtOoZQiBQI7h6yVdCKDzzsYXy3Vc7ig2JNPIos2FVIYFqsUKWYAgYAoZAGggEagTpN57eYfWqyhM3viqvTVriwl5CX+5JI48yiWTz9n3ys1EfOW8Oj04/eHhIdP6A6YV7kTW984RVrtzgmetc3Ua/XSofbiwkjU3ULnnQe3Xu3NmFlocffrhMnjy5SBU8PUJO/WiovGrVKjnrrLPk/PPPd3WoSxl06bkp6tVo3VQ9Rm/IS1u06UdoX8N66tF+rLYJibVvOi70J+o3fYrWFZ0WDzN0U5ZPrLZJ02cArto3yqsk0k0Zr97o+tHjoqxJ/iMQuBGsVLWCXNC7pfT7fUf59svtcn/nqTKiyzR3Txp5lIknO/f+R26c9oUcU6uibHqwpbw7sJnUqnKIjL7sKGnf9FBnAC/8/Yoi+TOXfedC38OrlZd/DjlJZvQtDMWps/2R02TNfS2kef1C6rB47ZLOf4rhw4c7MlcIZ+Fa7NWrV6QKxmT58uUu7CT/jTfecP/JSD/hhBPk3XffdWnUoS5lZsyYEaEXgzi2f//+Lp18hLNR/AgGr2rVqpG2MTzjx4/3HXavXbvW9XXWrFmub9Ftjxgxwh2PQJ8JqadMmRLRnajfULBBiLF+/frIMOB5xNjyQn4izLQC5L0czRDd9pAhQyLPAKzJ50PojyTTjYHkQ//0Wd51112R+o8//rjAho5O8tm/7jWw2j+75hcCh8DInC3RM0H86K95RFXpfldb5/lRHuPnR3bs+VE+37Jb+pxVRypXOEjqVivvjKDWXbZhp6zcvFtGdTrK5R9Xu6Kc1rCqvLliu/Q4o7COlk31qjti4CaMJXAlXnvttZEsOA1r1KjhjiLww6PYsWPHSF0MBIYi0WFXkcIizshiaFV4FrNnz3aGys/uH/o5bNiwiEGObttrHKJ1J+o3fYL/kXNhuMcwYUz69u3ruuoHs0Rt63hjXZPpnjt3rpx55plFxswfKv7Y8aGfHCaGgGH37t2d8ef5+8E0Vp8sLXgEDqpZs6Zk65OL4VX9ycHOy1uxabdrTo3eCXUquu+rvi5MJ+QlTK57R4G89snWXHTNteENr1I9/ElDZg3L8ID8SnTohqHaujW9cWME0JtMkvW7Xbt2zgtGF2zgeFfePwrpYJasb4l0413iCWKY6RsGUD1UDvVavHixo2/T50HIbZL/CBxSp06d/B+FiExa8LX7MJjereu4UJh7NYaEvITHuRRCJQ2v+E/Of66BAwf66gJlBw8e7ObPNJxDn19PkNANIWzDS8EwcSxpOoKxSubx+Ok3HvG2bdtcSIz3hVFUSQcz1RHvmkw3f6TmzJnjDB06mKbwevkcBcH0gNdgx2vL0vMHgUM4eyNbsmfPnoSq2RHySI+ZCV+I1hep4+0eWf3Nbnl/7Q43rxfLyJ18ZGVpfHhFeWr+13LWsdVcSBzdqbrVC0No9Saj8+N9Bzud3yK0w/CwMEKYpMJRpWo4JkyYUOwY0GgdWk+v/MdEMGLjxo2LHGmq+YmuarTwapgPLKknqG0/+uijiZorkpeo3xiRpk2bRs6E4cgDryTDzFs2+h6s
GbeGttH58XSDEfOazCUSEkdLgwYNpHr16sIzZO7RpPQgcBAPNlufZDBdNuBMuaD3qTJ1xLxiL0Tri9TkUYaysYQFjMtOrika7urKsK4As/jxzK+OdYaSUFjzve8EomPI+fWcJ0m+39VhDB8Gj1CTEIlwivkqFcIlFhh05bdKlSriPViKctE6dHUYQ8HpebrKiVfIOc5+pUuXLu7QK/pF+5zTwjwfwn94FkrII8RmkYN7QkUVDKaOiysGMJZx0PJ69dtv+vfAAw84zPSPBDr8YKZtxbsyv4gHzpj44AEm000feJaKt9bVlWzyH3roITcvqHle3fH6YunhR6Dcpk2b9merm5wH7IdFBoM3b9pHMnfqR1KrfnXXHVaK2THS9qqTEq4O8xoMBtAb7mLgRr3xpfz1+sa+VnmzNf581athOwsjGOmyIPxhGDp0qDOEauw1jXlBnZIoC1iUtTEeUrFi4QJCkAPX12RO79hYZoxd4LrSa1R7YcU4mejCh7dcqmGtt67dl00EMHh47V7hNR4WQ7zTG958uy8dCBxSvnz50IwEo9drVGorbjecXVcwevrCM4PhPcF0vUC8od69e7uJ8lgAEfYGNTdEeBdvpbh58+by7LPPlhkPLtazKUkaYTzPk3DYK/HmCL1l7D6/ESi3d+/erIXDvAvmJxzObwit94aAIZDPCBxy0EGBbxrJZ/ys74aAIZDnCJgRzPMHaN03BAyB9BAwNzA9/Ky2IWAI5DkCh5QrVy40QzB6/dA8CuuIIVBmEDB6/TLzqG2ghoAhEAuBg++99957Y2VkIm3Dhg1ut0I8XbwkPX30O/LaxALp0Kel2xVSvVZl4cM7g+V/crBMuX+ubF77vTtnpHyFg+OpCkU675oNGjTIEVKwzSrfhe1yl1xyiXzzzTfSpk2bfB+O9d8QiIlAoHOCRq8vbnuXbs2K+YQs0RAwBLKKQKDhMPT6FauUdxT6eILRO0Sg1589scDR6+/+YV9WgTDlxRFgyxx0UiaGQGlGIFBPEGCNXj839PpKzADmhO3seIFkQI8G8OZTxsu7pwQE3v8I3nwlFFCqKnTj3bLn9tVXX3VXr7fLbhxtl7pe4gZvG3ZvCOQCgcCNoO4bNnr9zNLrY+CmTp3qqL6gg4eRhqMAMIAIabfddpuMHj3acQ5CMcUWMRW2kFHGy4qjeejmE4+GHsov9tt26tTJUe//8Y9/jPAHooMjAiZNmuT0o0P1qX67GgK5RCBwI6iDVXr9dt1OcuwxUO1Hh8da1ntVev3zmhRyBcaj17++TSGVvpden/NJ0pF06PX9tAtNvbK4QOUUTXGfSAd8ehgy9sQi7IlVIlOtBz0W+kuiW2nota6XdRoyUuUIxPhqH7RdjhzQNK7owhiaGAJBIBAaI1jSwRu9fnHk8PYwSl5uvFSp/YtrPZCSiIb+QKn4d3h+GkJzhc/QxBAICoFAF0YyOWij1y+OZjwGFA2Ji9fwl5KMhj6RFl67IQz39s3mBBMhZnnZRiBQT1Dp9aHQjyfkQcHvh16f4zL5PHZFISU9Or30+vHC30zQ69OW0ut7xxKPzl3LeOn1Nc17jaap9+bFu9cQFePCIkQmBQOqNPTMGfJhDpA2/QpM13qsA16heYJ+kbNy2UAgUCNo9PrZo9eHCZn5OIyohp7eFdpEPybv6i0GCu5CdLD6i7FLREOfSC95zEFy5KZS92NQ6ZeJIRAUAuX279+fNT7BRYsW+eITNHr9oB5/6u3iCRoNfeq4WY3wIhCKOUF9Tcbo9cP7Q9GeYQSNhl7RsGtpQCAUnmC6QHKyHAsjKkavn116febxjIZef212zXcESoURzPeHYP03BAyB4BAIdGEkuGFby4aAIWAIFCJgRtB+CYaAIVCmETAjWKYfvw3eEDAEUlod/u677+SFF16QWbNmyfLly+Xf//63HHXUUY5w85prrpEmTZpIOqfXGb2+/SANAUMg1wj4WhjhVcK33npLhg0bJl9/fWAVNrqzbIwfOHCgVKlSxWX5fU+QXSEvj1skFauWl8tuOdPVnTluoezesU863XKGY5WObsu+GwKGgCGQCQR8hcPvvfeeo43HALIL4e9//7t8+umn8tlnn8nSpUsdH1ydOnXkmWeeEZhJ8BD9CC9JTxkxT2CYbnvVidLviY5yZONa7sM9aeRRhrJhF96hY/cDr5CERby7P3TXR6765iVKgAGHfcNBCv3xu2smyH5a27lFIKkR3Lt3r7z00kvy/fffy5VXXikcScJWLA17q1ev7tLHjh0rhx56qPzlL38RpZhKNhSj188+vf6ECRMcVZXu82U7XS4E48veZYgSaBuGaqUFy0X71oYh4BeBpEbwhx9+iPwFP+ecc6RChQoxdbdo0ULOPvtsoTxhsB+BXp9QeOZjCwUq/WghjTzKbFi1JTrbvidBAM8USq127dolKZn57C1btkiNGjWkcePGmVduGg2BDCJwUKbILCtXriyQgN58881y3HHH+e6i0etnj14/1hY3fTAausciV4AoYfz48Y4CHxp8KPIp56W8oozWjRXqYgS3bt2qzUWu2i6hKfrQEV0/URhNu3y0LvUp7xVv32Llw7itfaesSdlG4CBojRJJxYoVBTooBJr0zZs3xy1+wQUXyIABA4SrX9F9w0avnzl6febeMCzK+6fkqt5zRFjo6t+/vwtV9ZlCe6/y/PPPOzp+nv3rr78u77zzjnsjgDAX/VWrVnV1CXWZZ8NoYuDUANHmnDlzIiw2XgNKG+RDzkp9aPiZckHQ/dBDD0lBQYHLY4558ODBRSjBYLXRuhMnTnTUXrSN0L73WAH0w1ytMnnyZPcbJp1QnbKMyaTsInBQMh448rt27erm+whzmVNiYcTv4odfaI1evzhSJaXXZ+6NOTi8/Pbt20fm5WbMmBGhtU+mG8Okc3hQZ3l/J6RTXwVaLKj7MUT8PtTA0DZ94DtU/17h7BKdnyRcV3p+jB/tadvQ9GOIV65cGanurettG2OGUfMeKxCp9N8bqP+JVhBCdUJ2vFaTsotA0jlBoIH/7eGHHxZWgPlR9+7dW1q1auX+Qs+fP192794dGIJGr18y6NVb1LAQ78qvaEirdTFEsULfRPq885R4akrMumbNmgh/IfqJVPDe/AjGjNe5lLDVTx0rYwj4MoLlypWT8847z80N9evXz3mFrBZPnz5dfv3rXzsjSTiENxCUwCJTbeD70nnCKunduo60b3qo68oJdSq664y+JzjWaWWfnt7neKlcwdfwSzwkQjPmq9Qb4soL5X4Er4YwkFATT4oPoV8mJF3dMGgjhNH0C+8NjypTwjh1zHr1hrTx2sH41axZM162pRsCMRFIyQrUrl3bvQy9YMECNz94xRVXRAwicze8QrNs2bKYDcVKNHp9cV4LxnH9+vWxIHJzamTgueEtZVJKQt2v7devX9+FyHiF/AFM1RNUPdFXPETGyXhTFU6ua9q0aWR+MtX6Vr5sIpCSEVSIWCxp06aNO7MWg/jkk0+6/6z8cG+//Xbn+WjZRFej188evX4i3DEWvPSuCyZ4nFdffXW
iKkXyunTpIi+//LJbYSVc5fWoTHmCeHz8QVX6fUJi74JOkY7E+MJ8IAaafmm4Hr16HKOaJZVhBHxtm/ODDwbw+uuvl88//1zuu+8+YS+x321zRq/vB2ErYwgYAtlAICmBwmuvvSaffPKJNGjQwL3KUL58+Zj9OOaYY6Rt27bOCH788ceyb9++mOViJeprMkavHwsdSzMEDIFsIpDUCMIcw0Q4K8S88hAv7Dn44IPj7ibxOwBek+k16ny/xV25G86uKys27XYLIlrR6PWzS6+vONvVECgNCCQ1gsz31K1bVxYvXuxefr388suF1eJo+eqrr9y7aaTzjlc8jzG6Xia+c86w96zhTOhk3oz36sIovF+n79iFsX/WJ0MgnxBIujDCKx3QYyFQad19990u5NWXpQl7eTGXnSL/+te/nAE899xz8wkD66shYAiUYQR8LYxg8GCHuf/++2XPnj1x4cIDHDNmjJx88smujN+FkbgKLcMQMAQMgSwj4MsIah+YH+TViL/97W/ufUAMIvRZbG269NJLpUOHDgKRgooZQUXCroaAIRBWBFIygqkOIlUjaPT6qSJs5Q0BQyBdBJIujKTbgJ/6Xnr9G8df5KpAr7/41VVGr+8HQCtjCBgCJUYg6cJIiTX7qGj0+j5ASrMI+4TZcaG7J7LBn8eODKOtT/NBWfXAEAjUCBq9fuml1w/sF20NGwIpIhCoETR6/RSfVorFITcIil4/xa5acUMgMAQCNYKM2uj1g6HXh+mZ0JiPhspeooFkYbS3HkQMJoZAviIQuBHUfcNGr59ben1+sBCpQmKqXIVTpkxx7NB4kMOHD3dMM+RB9QVjsxpJrl4Ke2jqTQyBfEUgcCOowBm9viJx4JqMAv9AyaJ3vLSejF6fGl6qeS9NPdyGeIe8+4ko9dbcuXOdkcRYJqKwL9ob+2YIhBuB0BjBksJk9PolRU7cYUx6dgiG8+mnn3YGD5p6iE29nHyp0O+XvEdW0xDIPQKheE8wE8OGXp8PEo9eXyn3M9GeHx3MmxE6Ek7iTTHPpvuwk9X3UuArWQL6CF9zIXiJnPqmRlLbJFQ2MQRKEwKBeoJGrx8svX68HzKnsK1du1a8R3BqWYwizM2Exghkurfddptm29UQyDsEAjWCRq8fDL1+sl8pXuukSZPc4oeuHHPVhZG+ffu6e9Kg5ocOn3sTQyAfEQjF3mGj18/Hn4712RAoHQiEYk5QX5Mxev3S8aOyURgC+YRAKDzBdAEbMP2LyKIIuoxe3+j10/1NWf2yg0CpMIJl53HZSA0BQyDTCAS6MJLpwZg+Q8AQMARSRSDrc4I//vhjqn2y8oaAIWAI5AwB8wRzBrU1ZAgYAmFEIOueYCqD/scLH8sbk5fK/v375YI+LaXNFT9NpbqVNQQMAUMgZQRCYQThFXzl8fekYtXycv3jHdwgXn58kaPXv/Tm0+W4U49IeWBWwRAwBAwBPwgEGg7zkvS0kfPlmTvelLO7NpMbfneh1D/hMPfhnjTyKEPZsMvu3bvl1ltvjeysCHt/s9U/jmetUaOG+4wePTpbzZheQyAjCARqBDFwu7bvkWHTr5TTLjy+2IBII48yT900u1h+phLeXLFNagxeIk/N35wplb71sBUNw4kBLS3StWtX2bp1q/zhD38oLUOycZRiBAI1gp8t3SQaCn/31Y5iMJNGmEyZjZ9+WyzfEgwBQ8AQSBeBQI0gncfTO6xeVXmy/9/k9clLXdhL6Ms9aeRRJpFs3r5PTn/4E+fN4dHpBw8Pic6/7aV1RdIvn7TafR/6ygZX97jhH8pHX/qjjILy6vLLL3eh3xFHHOE4+Zyy//6Dp6ehIVclIYB9pU2bNu7Aenj8qEs+utCJaHit9VP1GAlFta5eCVURb7/Ji9at/dN60WFtIt2ugQT/RLetfUpQxbIMgawhELgRZN/w+T1byA3jL5Rvv9wuI694QR68crq7J408ysSTnXv/I/1fWCeNDvuJbBxxisy/tanbNvfQpUfKeU2qOwN48f9+WiT/5Q+3utD38Grl5b1BP5UXex/n1FNn6+hTZfU9zeWkepXiNRlJx0jdf//9At8f4d9XX30lPXv2jOTzn33FihUuj/zZs2fLmDFjnAGCxHT+/PkujTrUpcyLL77ouAdR8tZbb8mNN94Y0U1aLHqrSIOeG4ztokWL5LPPPov0a9iwYUKoGqvfVH388cedBvo9dOhQefjhh13b6ECXGqtEuj1diHkb3Ta6lXcxZgVLzAkCf5v9lvzyVzdK398MlLnzFuSkzbA0ErgRVCCg1+9259nS9qoT3YII96Qlkx17fpQ13+6RcxtXlcoVDpLDqx7ijKDW+3DjLlm5ebf85ue1Xf6xtX4iPzuqiry1codgQNORpUuXuupKQx+tC0qqHj16RJLh6cOzgrnZj1x00UWCsUQqVqworVu39k2qOm/ePDnjjDOcQdW6X375pTOA9Bu+wPbt20d0Y2wx2BjAOXPmSMOGDaVFixYun3HcfvvtsmDBAlc/kW5XIcE/69atK0bdzx8RdJoEh8Cfn5sugwfdJD17dBfuy5KExgiWFHTo9fECV27e41So0TuhTkX3fdXXhQsOhLyEyfXv+kBeX/59SZtLuZ43bDz22GOdofGrJDokve666/xWlUaNGjnvDaOG94UBw4hiEBGMXOXKlePqq1evXqRsdKFkuqPLe7/zBwCPU8N//iikMi6vLrvPHALvvbdEfv7zVnLeeW1l3j8Wyt69+zKnPOSaQvGeYCYwmrzwG+GD9DqztguFuVdjSMhLeJxLIXzUkFTp9fGo/IiGpDfffLMLYamDPr/0+hi5N998UzC8CCG312PFE9y5c2fE0GGcCMdV1GtUo0l5lWS6tVy8K3158MEHI23HK2fpuUOA7a0HH3yw++zbu08qVCifu8YDbilQT5AdIWN7vuJWf+PhwMowZeLtHvlsyx7557of3Lwe83l8Hu1yVERd8/qVpPHhFeV/3/kmbvhbt3p5F0KrNxmpnOSmVq1a7vwQQjwED4dFDq94Pa6JEycW8wSjdXjrck99BK9Q5+yiy0R/x/Njno05SAwbn7Fjx0aMDmE5QtiLaHnCUoz1qaeeKosXLxYN9zHI6POedRJPt1OY4J9E1P0JqlmWIZA1BAI1gp1uOUN+0auFTHvgH8VeiNYXqcmjDGVjCQsYnZrXEA13dWVYV4BZ/Jj8y0bOUBIKa773nUB03H5eXedJku93dZj5um7dukmrVq3cXB9hIosPKsy54UFp6Fe1alVp0qSJZrtrtA5dHcYYYXQ6dOjgdLNQcfXVVxepG+8L3pu3rq7w6gowuidMmOAMG3n0j/CXRROEPmHMBw0a5NrGm0TfmWee6QxpIt0YVNrRMHfkyJHuXleXo9vWvumqebwxWXpuEbj1tt/K7YPvkw8++Jd89NFy+fiTlbntQA5byzqfYNOmTZMOB4M3/4WPZd60f8lh9QrPquAdQXaMtLmyWcLVYV6DwQB6w10M3Jg3N8nL1x3va5U3aQfzrACG6I477ogYLrqvacwLqrErybBUjxrFTO
ouSX+sTmYQOPb402XL1yucslp1mshTTz4s+/btk7fffkdWrf5c/vPjf+TCDudKu3atXZlmPy2MJjLTerBaQjEnqK/JsEPk5XGLHCLXPniur9VhXfjwwphqWOutWxrumevzzuExJkJ2QlyMVzqSTd3p9MvqZhaBK6+4xCm86eY7ZMO6D+Sbb76TCzt2E16lQS74RTvpd2OvzDYakLZQeILpjp3QVxdF0AW9frpeIPNgnKrG4kIsIezlpLUghAWSeCuqJ510kgtlWegglPYK83iEtOkKoWu2dKfbN6tfMgSiPUH1Chsdd5qsWf2+U4qHuHbNEtm2bbucd/7lsuCdV0vWWMhqlQojGDJMrTuGQN4h4McIRhvEzz59L+/GGavDgS6MxPmqRmEAAB8sSURBVOqQpRkChkCwCJT3vB5z+OG1g+1MDlrP+pwg7x6ZGAKGQPgQ4IXoWO8DLnn/wBTQnNdeiHT86UnjIvfclJb/2+YJFnms9sUQKP0IvPba3+WaX/eTZs3PllGjf1dsd0i9enUjIFSvXvi2Bgn/c87PI+ml6SZURnDutI/kzl/8SYad/0eZ95d/lSacbSyGQGgQ+NNz0+WWm6+TlZ8skH//+B9p0qx0Gje/gGc9HPbTkU8LvnSvxkCvf+P4i1yVmeMWOnp9XpI+vmU9P2qsjCFgCPhAgG1x5ctXELy8Bx+4Uy684Dy5ddDdPmqWziKBeoK8JD1lxDyZPOQNxx7T74mOcmTjWu7DPYwy5FGGsmGXXbt2yS233BLhDAx7f61/ZROBa665Qn5zw0DZvLlwr33btmfK32ZNKZtgiEigRhADB3X+3TO6SauLCimjvE+CNPIo8/t+/+fNyuj9nOXfS7WB78uT/9iUUb1+lPHOHYYTA2piCOQCgUsu/oVc3a2zXNvr5sh8YCqrwPEWVHLR92y0EagRhByBUHjmYwslHr0+eZTZsMofB182QDKdhkBpQ+CWm/vKic0ay7A7H0h5aHiQdeqUnldnAjWCoI+nB4X+Eze+Kq9NWhKh1+eeNPIok0igz//ZqI+cN4dHpx88PCQ6f8D0L4qkd56wyn0fPHOdq9vot0vlw407XVqyf9hZ0rlzZ0cUevjhh8vkyZOLVMHTq1atWuSjRAGwwpx11lly/vnnuzrUpRy60IloeK31/XqM1Kfs+PHjnU7IEPhOe7SLePuN/mjd0fmwyHj75C2vY9SxuYL2T+gRuOe3gxyd2p/+fOA1GD+dXrDwfWl1WiHhrp/yYS8TuBFk3/AFvVtKv993dJT693eeKiO6THP3pJGXjF7/xmlfyDG1KsqmB1vKuwObuW1zoy87Sto3PdQZwAt/v6JI/sxl37nQF4aZfw45SWb0LQzFqbP9kdNkzX0tpHn9+ISj+lAxUsOHD3dMMtu3b5fNmzdLr14H9lNiSJYvXy7k8XnjjTdk1KhREXr9d99916VRh7qUmTFjRhF6/f79+7t08hG/9PrsHf7hhx+cfjgJu3fvLp06dZKCggJnXKP7jW6l6ooe1xdffCFTp051c52VKlWSe+65x+1Npi+MkTExtkxsyVNs7ZobBH51zZUy5I4RsmLFp74bnDjpz8K8YmmRwI2gAgmVfve72kq7bie5BRHu/dLrf75lt5zXpJqjz69brZAbUPUu27DT0etf36aOyz+udkU5rWFVeXPF9rj8glo32fWDDz5wRbxkpd460EZde+21kaRU6fU7duwYodfH+ODJ+SVVhaKqS5curm2M7CmnnBLpB/3GSOKFIujG2GKwMWrr1693HqSOi3FAGTZ37lxXnu9QY7EX+fnnn3fGzwxgBN68umne/Kdy59Cb5brrB7k9wck6P+np5+WnTY83TzAZULnMh14fL3DFpkIafTV6yiitLDOEvITJde8okNc+OcCgnO2+4iVpOHv00UcXI1VN1L6GzFq/T58+iYqnlAdZK8YvlkC+MG7cONEQnfaj24ZzEBIFCCYgmjDJXwR69LhK2p59hrT7n8scf2CskcA8/eCox2XoHSPkmmsSn/4Yq36Y00LxnmAmAJq04Gvhg/RuXceFwtyrMSTkJTzOpegpaoSTeE94WQMHDvTVBcrCUsPcm9Jfoc+vJ5isETxBwl41hNH0+niPDz30UCQ/Wh/zf4TzeIuE1onKRte17+FDYMjgm6RRo4Zy4cVXS49fd5VOl3Zw7xF+8/W38uFHH8v0l2bJqS1Okvffe01qHVYzfANIo0eBhsNtu54oj/SY6VZ/442BlWHKUDaWrP5mt7y/doeb12M+j89jVxwdKXrykZUdvf5T87+OG/4qvb56k5HKSW6UGp/wEWFOLXphxOtxwebMiW5eidbhzeMe7xHBK8Q7y4QovT7zeAjGcMqUKS7kxVgno8DXeUDmGQmTEb9zla6w/RNKBK7q2kmWLXlT6h1xuNx772jp2/dWGfPIeFm3boOMvP8Oue/ewaXOAPIgAvUELxtwpnw07wuZOmKeHNeynvBdhZejeT1mdcGXLv2ktgcMm5bhygLGZSfXFF3h1Ty8QYwhix/P/OpYueSplS4U1nwWQW44u3CPJDqGnF9PWB3Gm4SP8K/XN066OEJIiCFo2bKlU8sZInfddZc24ebcWFAgrEQ4XCgWvb5XB5T8kyZNcp4jBkbn7UiHXp/FjnQFQ0cbvXv3joS59Fs9zlj5tInRxEBSjzlAnQek/9pP1ZFuH61+MAhwNEOvnt3dJ5ge5L7VrPMJnnhibA/OO1QM3rxpH8ncqR9JrfqFJ8JxEDs7RtpedVLC1WFeg8EAesNdXnoe9caXvgyZtx92bwgYAmUPgUA9QYVbX5M5vWNjmTF2gUvuNaq9r9VhXfhQXVxTDWu9de3eEDAEyhYCofAE04Wcl591UQRdfsPZRO0y70XYp8dSRpclfBwyZEh0ck6+s0ASvVqrDTdv3lyeffbZyKs1mm5XQ8AQiI1AqTCCsYdmqYaAIWAIJEcg0NXh5N2zEoaAIWAIZBcBM4LZxde0GwKGQMgRMCMY8gdk3TMEDIHsIhCK1WEdIvT6r09aIvv375cOfX8W9wVpLW9XQ8AQMATSRSAURtDo9dN9jFbfEDAESopAoOGw0euX9LGFvx6vGMGN6OUdDH+vrYdlEYFAjaDR64vj6DNDkbn/euyDBk8jeM0cpqVdU6DhMPT6FauUd3uEO/RpWWyHCJT7sycWOIKF3T/sK+3PolSNj/3HEMSaGAJhRyBQTxBwjF4/fPT6Xm9K+RC91PzsWOGjefANej0v8pQDkTJeIUxWTw2dlIv2hL31Y+mJzte2aUuPOIDQQetSXoWyms5V65LPPX1ZtmyZI7AlP7r/qseupQeBwI2g7hs2ev3w0OvrzxtD0qhRI0fvDzX/Sy+9pFlu257mwZ4DFRfGE4FJhqMCSI8lcBliXGbNmiVwLfJdWbqhDEMXxwCgg+2JcBtyRABCPozW1COfj7LZsI1RjziA8UbzldkGAxzvuAPtJ1RoHB9A3+jDggULIueyaBm7li4EAjeCCqfR6ysSB65B0etrD7z0Wu3atZONGzdGDJ03Dyqxbdu2RfK0f
rwr1P/Q8xMy88GIYdQQDA8cig0aNHDfaVcJYFUfnIwrV67Ur76vtJXsuAP2Xmvf6AN9gXDWpPQiEBojWFKIjV6/ZMh5yV7jacAAqWCoIHVVJmpNz/QVo7N48WJ3zgm6OdeEtjFgCByOjz76qOMvJFyNDqWT9QcPVMNh2oomuW3VqlXEADNWxqyeZjLdlp+fCOS9EVTYYZHhDBG4BePR6yvzNNfpfY53By9p/WxcmYtinklDN67RpKrx2iV0U3p9DevihZfxdCRKj/auoun1E9XNZh5M2wjepc7ZRZ9hglFSTOrXry9Dhw715YWm8zyyOWbTHSwCgRpBo9cXCSO9fpA/SeYd8e7UyHmPII3VL+YlvYL3hmHUk/G8edx7PeBYxx1El7fvpR+BQI0gdPoX9D7V0etPGTHPHbyukOuL1FDvU8ZLva9luHrp9fXQda56wLrS63MOCSfNaRnYp1WUXl+9Sb+Hr3vp9fFa+A/JXJkKCwt4XHpqW5UqVYp5gtE69PB1wj+l10c3XiH0+pkQdEOvD/U/uukfhkMXENJpQ1eW0Qvn4YgRI1wbfldZOSaUEJT6+lFM6Ff0yjBjYCHDG6bjOXpXgXV12M/zSGfsVjc/EQgFn6DR6+fnjycbvcZY8sfEa5DVgAZFYpuNcZrO8CAQqCeoMOhrMrf/qYvUPKKK+wz6Y2e5oHfLhOeLUN/o9RXF/L/iRbIC7RXmRvHqosNebxm7NwTSQSAUnmA6A6Cu0esfQDDf6fV5D7BHjx7y4YcfRgbFgpDXM4xk2I0hkAEESoURzAAOpsIQMATKKAKhCIfLKPY2bEPAEAgBAmYEQ/AQrAuGgCEQHAJmBIPD3lo2BAyBECAQKJVW9PiNXj8aEftuCBgC2UYgFEbQ6PWz/ZhNvyFgCMRDINBwWHeFwDDd9qoTpd8THeXIxrXch3vSyIveTRJvMEGn624J3mszOYAALzvrrg1S+a67QbzpB2rYnSGQOwQCNYJGr3+AyFO5+HL36INriZ0f7A32bjEMrjfWcllHIFAjCL0+ofDMxxYKVPrRQhp5lNmwyjjdovGx74aAIZA+AoEaQbpv9PqZp9fXn0U02YBy78UK2ymre3Spz3Y1iAs0bPXmUVbLa753CkD1ax4kCiaGQFgRCNwI6r5ho9fPLL0+RgmGFeUyhG4+FXnllVcc0wxhKzrQ5zV0MMSwn5f8aHr9xx9/3LHSKB2Whb2pIG9lc41A4EZQB2z0+orEgWtJ6fXxxDing7k3ZWQ+oNXfHTT0WpcrRKYYQ5V49Prs/eUcj2giVK1nV0MgbAiExgiWFBij1y8pconr4fVpOMvVQtrEeFlu/iIQivcEMwEfhKh8kHj0+u2bHpqJpnzrYN4MY4IHhTfFPNvAgQN91ffS6yuDCvrWrFnjq346hfDmbrvtNiGE1vM1vHOC6ei2uoZA2BAI1BM0ev3s0OtHU8yrUYv+8Wl4i6Fmjs8rsE3reR/k+/UEqbN169bIaXAYbr91ve3bvSGQKwQCNYJGr194elr37t0jBwsplTyeYzr0+l6Keaj577zzzshvCiPZv3//CI0984csZqhA+d+6detIn8hnZdmP0G/mIvXwczxX7wFR3lVnjCPGl3AbY2liCASBQCj4BI1eP/uPHm8OY/bQQw8VOY8j+y1bC4ZAuBEIxZygviZzesfGMmPsAodYr1HthRXjZGL0+skQsnxDwBBIhEAojKB2EKPXa9T5+tXX9Yaz68qKTbvdecNaoVaVQ+Sv1zd2J9FpWqpXwrbevXvLnDlzYlblFZGgDv4hdIyew9NO5ju9vo7DroZArhAIRTicq8FaO4aAIWAIRCMQ6MJIdGfsuyFgCBgCuUbAjGCuEbf2DAFDIFQImBEM1eOwzhgChkCuEQjVwojR6+f68Vt7hoAhEAojaPT69kM0BAyBoBAINBw2ev2gHnvidnk9qGfPnsJ2u0TCfuKzzjoraTmvDr+6vXVKeq+8hrwobmIIxEMgUCNo9Pplk14/3o/R0g2BIBAI1AgavX4QjzxzbfKy+LvvvivsNTYxBPIVgUCNIKAZvX526PUJASE9WLZsmQtZISnw0mF5iQzIU+p97w955cqVkbrefMJkwmDqKeGD1iMEZTcN7Ss9f3QZysbTTR47YtCtn+hwlnFoXnQ47h0XTDiTJ0/WrtnVEIiJQOBGUPcNG71+Zun1edoYgHvuuUdmzZolBQUFsmDBAjd/h6EaPny4Y6mBAn/z5s3ux+FlkoEOC/YX6mo+lPsInh8eIDpr1Kjh0rz/oBM+wtGjR7u6DRs2dNyEWsarGzqvtWvXygcffOCyMXjRxwKgS+cnMZAbN250emkH4wxLDsYv1rh69eqlzdrVEIiJQOBGUHtl9PqKxIFrSen1VQP7iDFE0Fs1aNBAjj76aNmyZYszOBge6K4QpdaCFh9jgmDctC75eFwYPgyNH3n00UedsdS6XjJYr276ptT96IbpBgox0hHyOnXq5AwufcNAQj2GXuTSSy8VjCyepRpS0kwMAb8IhMYI+u1wdDmj149G5MD3Vq1aOeNHCkZj3LhxEaZoDIcakgM14t9hQHMlidrCgCrZa676Y+2UbgRC8Z5gJiA2ev3UUMQTxPNSQ4iHSJgaTwhb69evHykfr1wm0mlLaf3pI+EvJ9sh9JG+6mIM+YzFxBAoKQKBeoJGr58dev1kP4bGjRu7InoMZ6ww1KtDw9B27dp5kzN+j0Em7Cbk1bCcEFdDdw2dCZnpM8IY8GpPOeUU5yFiQNevX+/ymOO0hZGMP6ZSpzBQI2j0+tmj10/0S8WYTJo0yRkbVllZRcXL0wOdqIvH1bJlS7cKS3jK6zDqnenqLfnTp093c42xVoAT9SFeHn1gTpA26ZsusOgcIf2gr/SZfAwmiz8YULxD71EFeI925nE8pC1dEQgFn6DR6+vjsKshYAjkGoFQzAnqazJGr5/rx2/tGQKGQCg8wXQfw4DpX0TOHEaX0es/G1k4SBdbq28IlHYESoURLO0PycZnCBgC2UMg0IWR7A3LNBsChoAh4A8BM4L+cLJShoAhUEoRMCNYSh+sDcsQMAT8IRCK1WHtqtHrKxJ2NQQMgVwhEAojaPT6uXrc1o4hYAhEIxDo6jAvSc98bKF8OHeNdL61tbS6qCg55+L/WyUzxi6Q5u0aCbtLeJ8wzMJWrqFDh7pdC7q7Isz9tb4ZAoaASKBzgkavHyy9PuSkbIEzMQTKMgKBGkGj1y/LPz0buyEQDgQCNYJAYPT62aHXB1slOlAqeqWp1/QRI0ZInz59IlT1Xvp9L4U+9b156Oa76o2muGdaAMZnzedqHmc4/sNbL4ojELgR1H3DRq+fWXp9jNjs2bMFailo6PnoPCVMLXyHYQUKfc2HoQWBxgrKetihyUMHBlQNGdd4FPfUh8IKphfVa0wuxf/jWUp4EAjcCCoURq+vSBy4pkuvv2LFCkc7f0CjvzsvRx81oLHSk+Uwfoko7jG+0PT37dvXX2NWyhAIGIHQGMGS4mD0+rGRg1sPT45zRAhHvafFxa5RNDURi7RR3BfFyr7l
NwJ5bwQVfuj1qw18XzpPWCW9W9eR9k0PdVkn1KnorjP6niDbHzkt8pne53ipXCG7wydsJIzUkJRrkyZNtMsJrxqSYrw0rCR0TUUIf7UuRo3Xd5SROZkePD5vWfquohT3+p1yRnGvaNg13xDIrhVIgobR6+eOXl/P6PA+EtJinSAHY/TixYsjp7cpvT6szRhTjGsiinuMJKe/IfwhYAHGxBAIKwKBGkGj188evT7Gx7s666Wh1x+jHk2pVPW6Akwo/eyzzzpqe3RAdQ/lvS6sJKK41/lDDcM5atMWRhRxu4YRgUB3jCggRq+vSJTOK8YVr9N7hknpHKmNKh8RCMXeYX1Nxuj18/EnZH02BPIbgVAYQYWQ12R6jTpfv/q63nB2XVmxabdbENEKRq9v9Pr6W7CrIZAMgVCEw8k6afmGgCFgCGQLgUAXRrI1KNNrCBgChoBfBMwI+kXKyhkChkCpRMCMYKl8rDYoQ8AQ8ItAqBZGjF7f72OzcoaAIZApBEJhBI1eP1OP0/QYAoZAqggEGg7zkvSUEfMEhum2V50o/Z7oKEc2ruU+3JNGHmUoG3ZRHj3l7Qt7f9kO17lz57jkCsopqDtJwj4e658hUBIEAjWCRq+fXXp9jHGq7DEl+RFZHUMgnxEINByGXr9ilfLusKUOfVoKL0t75buvdsjsiQVCuLz7h33eLLvPAALs850xY0ZcTewhhmDBxBAozQgE6gkCrNHrZ55eX8NYSAwmT54sSpBA6EsIjHgJFmKFu6QpAQNlvcJ3Pt4y3ikADbO1PlcvBb/2T/Njte9tz+4NgWwiELgR1H3DRq+fOXp99eBgiO7Vq5ds3lxct1Lsx+MohCkGLsJ4DDCcTQIpAmXQ4aXWmjBhgmOcIa+goECaN2/uCF7pFwYyEXV/Nn/sptsQiIVA4EZQO2X0+orEgWu69PoHNGX+DuOorDDwD27bts2RsGLk8ArbtWvnGm3QoIG0atXKEcuSkIi6P/O9NI2GQHIEQmMEk3c1dgmj14+NS1CplSpVkoYNG8rcuXNdF9avX+8MIIZSJRF1v5axqyGQKwQCXRjJ5CCh1+eDxKPXV8r9TLabSBfzZnhFUNOzCIGXNHDgwERVInkaNrK6qx4X+iApDbNgBDFysEkrozThMqGwilL3UxbxUvdrGbsaArlCIFBP0Oj1s0uvX6tWLWdg8MZyJXraHIZNzzdRI04fElH356qP1o4h4EUgUCNo9PrZo9fnIeN9cS4IhoeVWF0d1pe6SWOBA4+Ne12lxQulLGnk6QHteKLJhDabNm3qKPmprx+tS34i6v5k+i3fEMg0AqHgEzR6/Uw/1uD04QmOHDlSHnnkETcFQE9Iu/3222XMmDFFwuLgemktGwIHEAjFnKC+JmP0+gceTL7ebdmyRThtziu8JlOuXDkhPDcxBMKGQCiMoIJi9PqKROIroSUhaizhnTzCTe9CRKxy2UrjRDo+nFCn0r59e5k0aVLEM9R0uxoCYUAgFOFwGICwPhgChkDZRCDQhZGyCbmN2hAwBMKEQNbD4XW7i5IihGnw1hdDwBAwBMwTtN+AIWAIlGkEsu4JpoJuwYtLZOGzi0T275fWPVvLqV1apFLdyhoChoAhkDICoTCC65aul7lPzJOfVK0gVz56uRvE20/MlX/97WNp16+tHNWiQcoDswqGgCFgCPhBINBweM+OPfL6qDfkr3f/VU69ooVcOfYKqXN8HffhnjTyKEPZsMvu3bvk3kHXS8F774S9qwn7t/XbLTLouu7y+acrEpazTEOgNCAQqBF85e6/yu4du6X3lF5y4gXNiuFJGnmUeeHWF4vlZyph/qpvpdmdb8ufF+Ruj632HYOJ4cSAmhgChkDuEQjUCK7/YIOs/28ovG3TtmKjJ40wmTJfry5kiClWyBIMAUPAEEgDgUCNIP3G06t+RDV5YcB0WfDsIhf2EvpyTxp5lEkkW3bslYsfe895c3h0+sHDQ6Lzh7+8skj6dc8sc99HzvrU1f35A+/Iiq92uLRk/xA6Xte1gzSrU05aHlVZ/vLs/xapgqdHnn40VCbU7HxOC7mmYxtXh7qUQRc6EQ2vta5fj1H7pG2hKzrNj+7PVy13faT96LaffOT+yJgYhzd01rZS7XcR4OyLIZAjBAI3gj+p+hM5s8eZcuVjV8i2r76XSd0ny6Srn3b3pJFHmXiya++PcueLK6RBzUryz3vOlhk3nSY1K5eXYRcfL21OOMwZwB4TlxbJf/2jr13oW6tqBZk14HT5w7UnO/XU+fiBc+SdO38uTaIOfYrVPobksZF3yqVdfyUff71fCtbtlK49fhMpijFYveJjl0f+n1+dL089fL8zSMcc30RmvL3UpVGHupT5w19mS43DCvfYvvv316XH9bdGdKN4zqyXIvrj3VD/1DN+Lov+8VakyGefLnf3xx7f1F2T6d629TuZ+vST8vRLb8q7K76RjWvXyMfLClzdWdOfk01fboj0uXf/2+XBYbe4ccXChEpPP/Gwq2v/GAJhQyBwI6iAVK9bXS4Y8gtpecWp0vLyFu6etGSyc++Psv67XdKmcU2pVOFgqV21gtSsUj5S7ZMvd8hnX++Ua8460uU3PKySNG9QXeav/E4woOmIGoX2F3eJqQZjdOWv+kbyMEDVa9SU7779JpKW6ObcCzsJxhKpWLGSnHbm2bLui88SVYnkdejU1RlgDDGCQcRYq4FNppt+3jFynCuvRhVDiL5X/vInp4s+IYy/fsNGgqEFE8q1ObeDy6MMhpw/BtoXl2H/GAIhQSA0RrCkeFSucLDz8lZv3ulUqNFrVLuy+77mm8J0Ql7C5J8N/4fMW1loGEraZir1vGHjWU1qy+qVn/iuriGzhpWDb7jGd916DRpK1eqHOsOE8cFzU8OEknR0YyBrHlY7bl8wiBUrF+Ift5BlGAIhQSAU7wlmAotp720UPshVp9d3oTD3agwJeQmPcymEjUsWvePCSbwpjNGIof19dYGyhJiEmhdf8UtXB31+PUH1HDUkrlvvyIgXmK5uQmW82WOk0EvdvXOn8/50YHiCpKmnSFnqmBgCYUQgUE+QHSF/7vu88LJ0PCGPMvF2j6z9dpd8uH6bm9djPo/PPZ0aR9T9tF5VObZOZfnzuxvihr91qlVw84jqTUYqJ7nBG9qw9nP5cv1aV5J5r+iFEa9XNOXp3xfzBKN1RDdJfQTPbdL4MdHZCb/j+RGGPjfhd0J4HC0l0a2hMSEx83/I/Ldmu3C42cktReccSUMoo+GzhuIuw/4xBEKCQKBG8Jz+7aR1jzPk9VGvF3shWl+kJo8ylI0lLGD84qQ6ouGurgzrCjCLH490a+YMJaGw5nvfCUTHDece7TxJ8v2uDjNf16nrr6Vj66ZupfSoo4+Vm4beF+kmRgivSFd+K1epKsc1/mkkn5toHbo6jMFgDo/VY8JhvMLLuvUoUjfZF3Qc16SZVDu0hhAeq6Sr+4aBdwuepY4LIzdg2APO80P36Keec4aPflOGsurNah/sagiEBYGs8wlWP+6MpGPF4C15cYkUTF8i1Y841JXnHUE
WSE69/NSEq8O8BoMB9Ia7GLgn3/pCJvc+xdcqb9IO5nEB5iQxzmaE8vghWtezikAo5gT1NZlmHZrJ2+MLz6u9dMTF4md1WBc+vCilGtZ665ame94TZE6ye88bS9OwbCyGQEYRCIUnmO6ICH11UQRdvCeYrhfI4sHg638p8//+WszuEfYSFgYhLJDEWylucuIp0ufmIXL7b64W7h+dOC3ymk0QfbU2DYGwI5B1Ixh2AKx/hoAhULYRCHRhpGxDb6M3BAyBMCBgRjAMT8H6YAgYAoEhkNVwOLBRWcOGgCFgCPhEwDxBn0BZMUPAECidCJgRLJ3P1UZlCBgCPhEwI+gTKCtmCBgCpRMBM4Kl87naqAwBQ8AnAv8Pv66ckghHXyUAAAAASUVORK5CYII=)What you have to do is just add the code to the correct providerThe list of datasource is also [shown](https://github.com/wilsonfreitas/awesome-quantdata-sources) Getting Data from other resources We can get the data from any web services as wellSee the example of getting the [Bank Of Thailand example](https://apiportal.bot.or.th/bot/public/) ###Code ###Output _____no_output_____ ###Markdown Hands on Web API Create the data frame for "Deposit Interest Rates for Individuals of Commercial Banks (Percent per annum)" ###Code ###Output _____no_output_____
_doc/notebooks/python/gil_example.ipynb
###Markdown Le GILLe GIL ou [Global Interpreter Lock](https://en.wikipedia.org/wiki/Global_interpreter_lock) est un verrou unique auquel l'interpréteur Python fait appel constamment pour protéger tous les objets qu'il manipule contre des accès concurrentiels. ###Code from jyquickhelper import add_notebook_menu add_notebook_menu() ###Output _____no_output_____ ###Markdown Deux listes en parallèlleOn mesure le temps nécessaire pour créer deux liste et comparer ce temps avec celui que cela prendrait en parallèle. ###Code def create_list(n): res = [] for i in range(n): res.append(i) return res %timeit create_list(100000) ###Output 10.4 ms ± 1.87 ms per loop (mean ± std. dev. of 7 runs, 100 loops each) ###Markdown En parallèle avec le module [concurrent.futures](https://docs.python.org/3/library/concurrent.futures.html) et deux appels à la même fonction. ###Code from concurrent.futures import ThreadPoolExecutor def run2(nb): with ThreadPoolExecutor(max_workers=2) as executor: for res in executor.map(create_list, [nb, nb+1]): pass %timeit run2(100000) ###Output 54.7 ms ± 4.94 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) ###Markdown C'est plus long que si les calculs étaient lancés les uns après les autres. Ce temps est perdu à synchroniser les deux threads bien que les deux boucles n'aient rien à échanger. Chaque thread passe son temps à attendre que l'autre ait terminé de mettre à jour sa liste et le *GIL* impose que ces mises à jour aient lieu une après l'autre. Un autre scénarioAu lieu de mettre à jour une liste, on va lancer un thread qui ne fait rien qu'attendre. Donc le *GIL* n'est pas impliqué. ###Code import time def attendre(t=0.009): time.sleep(t) return None %timeit attendre() def run2(t): with ThreadPoolExecutor(max_workers=2) as executor: for res in executor.map(attendre, [t, t+0.001]): pass %timeit run2(0.009) ###Output 12.6 ms ± 43.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
code/NN_zoo.ipynb
###Markdown Neural Network Zoo VisualizerThis excellent publication of 2016, [The Neural Network Zoo](http://www.asimovinstitute.org/neural-network-zoo/) by the Asimov Institute inspire this notebook to visualize some NN. ###Code drawd3('perceptron') drawd3('feed_forward') drawd3('radial_basis_network') ###Output Radial Basis Network
Kaggle Course/Course Codes/exercise-loops-and-list-comprehensions.ipynb
###Markdown **This notebook is an exercise in the [Python](https://www.kaggle.com/learn/python) course. You can reference the tutorial at [this link](https://www.kaggle.com/colinmorris/loops-and-list-comprehensions).**--- With all you've learned, you can start writing much more interesting programs. See if you can solve the problems below.As always, run the setup code below before working on the questions. ###Code from learntools.core import binder; binder.bind(globals()) from learntools.python.ex5 import * print('Setup complete.') ###Output _____no_output_____ ###Markdown 1.Have you ever felt debugging involved a bit of luck? The following program has a bug. Try to identify the bug and fix it. ###Code def has_lucky_number(nums): """Return whether the given list of numbers is lucky. A lucky list contains at least one number divisible by 7. """ for num in nums: if num % 7 == 0: return True else: return False ###Output _____no_output_____ ###Markdown Try to identify the bug and fix it in the cell below: ###Code def has_lucky_number(nums): """Return whether the given list of numbers is lucky. A lucky list contains at least one number divisible by 7. """ for num in nums: if num % 7 == 0: return True return False # Check your answer q1.check() #q1.hint() #q1.solution() ###Output _____no_output_____ ###Markdown 2.Look at the Python expression below. What do you think we'll get when we run it? When you've made your prediction, uncomment the code and run the cell to see if you were right. ###Code #[1, 2, 3, 4] > 2 ###Output _____no_output_____ ###Markdown R and Python have some libraries (like numpy and pandas) compare each element of the list to 2 (i.e. do an 'element-wise' comparison) and give us a list of booleans like `[False, False, True, True]`. Implement a function that reproduces this behaviour, returning a list of booleans corresponding to whether the corresponding element is greater than n. ###Code def elementwise_greater_than(L, thresh): """Return a list with the same length as L, where the value at index i is True if L[i] is greater than thresh, and False otherwise. >>> elementwise_greater_than([1, 2, 3, 4], 2) [False, False, True, True] """ L_list = [] for i in L: L_list.append(i > thresh) return L_list # Check your answer q2.check() #q2.solution() ###Output _____no_output_____ ###Markdown 3.Complete the body of the function below according to its docstring. ###Code def menu_is_boring(meals): """Given a list of meals served over some period of time, return True if the same meal has ever been served two days in a row, and False otherwise. """ for i in range(len(meals) - 1): if meals[i] == meals[i+1]: return True return False # Check your answer q3.check() #q3.hint() #q3.solution() ###Output _____no_output_____ ###Markdown 4. 🌶️Next to the Blackjack table, the Python Challenge Casino has a slot machine. You can get a result from the slot machine by calling `play_slot_machine()`. The number it returns is your winnings in dollars. Usually it returns 0. But sometimes you'll get lucky and get a big payday. Try running it below: ###Code play_slot_machine() ###Output _____no_output_____ ###Markdown By the way, did we mention that each play costs $1? Don't worry, we'll send you the bill later.On average, how much money can you expect to gain (or lose) every time you play the machine? The casino keeps it a secret, but you can estimate the average value of each pull using a technique called the **Monte Carlo method**. 
To estimate the average outcome, we simulate the scenario many times, and return the average result.Complete the following function to calculate the average value per play of the slot machine. ###Code def estimate_average_slot_payout(n_runs): """Run the slot machine n_runs times and return the average net profit per run. Example calls (note that return value is nondeterministic!): >>> estimate_average_slot_payout(1) -1 >>> estimate_average_slot_payout(1) 0.5 """ payouts = [play_slot_machine()-1 for i in range(n_runs)] avg_payout = sum(payouts) / n_runs return avg_payout estimate_average_slot_payout(10000000) ###Output _____no_output_____ ###Markdown When you think you know the expected value per spin, run the code cell below to view the solution and get credit for answering the question. ###Code # Check your answer (Run this code cell to receive credit!) q4.solution() ###Output _____no_output_____
ipynb/sandbox/.ipynb_checkpoints/procrustes-checkpoint.ipynb
###Markdown Mapping fractions between gradient communities in order to perform procrustes ###Code %%R otu.tbl.file1 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/0/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq' otu.tbl.file2 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/100/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq' physeq1 = readRDS(otu.tbl.file1) physeq2 = readRDS(otu.tbl.file2) %%R ord1 = ordinate(physeq1, method='NMDS', distance='bray') ord2 = ordinate(physeq2, method='NMDS', distance='bray') ord1 %>% scores %>% head %>% print ord2 %>% scores %>% head %>% print %%R get.fracs = function(ord){ fracs = gsub('.+__', '', rownames(ord %>% scores)) %>% as.data.frame() colnames(fracs) = c('fractions') fracs = fracs %>% separate(fractions, c('start','end'), sep='-', convert=T) %>% mutate(start = start * 1000, end = end * 1000) return(fracs) } ord1.f = get.fracs(ord1) ord2.f = get.fracs(ord2) %%R library(IRanges) %%R ord1.r = IRanges(start=ord1.f$start, end=ord1.f$end) ord2.r = IRanges(start=ord2.f$start, end=ord2.f$end) %%R ov = findOverlaps(ord1.r, ord2.r, select='first') ov %%R ov = findOverlaps(ord1.r, ord2.r) ov ###Output _____no_output_____ ###Markdown Calculating centroid of binned fraction samples * centroid of all 20 replicates for fraction samples that fall into the BD-range bin* trying oriellipse() function from vegan package ###Code %%R otu.tbl.file1 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/0/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq' otu.tbl.file2 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/100/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq' physeq1 = readRDS(otu.tbl.file1) physeq2 = readRDS(otu.tbl.file2) %%R ord1 = ordinate(physeq1, method='NMDS', distance='bray') ord2 = ordinate(physeq2, method='NMDS', distance='bray') %%R grps = as.character(rep(seq(1,nrow(ord1$points) / 2), 2)) grps = append(grps, '2') plot(ord1, type = "p", display='sites') elps = ordiellipse(ord1, grps, kind="se", conf=0.95, lwd=2, col="blue") elps = elps %>% summary %>% t %>% as.data.frame elps %%R ggplot(elps, aes(NMDS1, NMDS2)) + geom_point() %%R get.ellipse = function(ord){ grps = as.character(rep(seq(1,nrow(ord$points) / 2), 2)) grps = append(grps, '2') plot(ord, type = "p", display='sites') elps = ordiellipse(ord, grps, kind="se", conf=0.95, lwd=2, col="blue") elps = elps %>% summary %>% t %>% as.data.frame return(elps) } get.ellipse(ord1) %%R mid = function(x, y){ (x + y)/2 } get.BD.range = function(tbl){ tbl = as.data.frame(tbl) tbl$lib = gsub('__.+', '', rownames(tbl)) %>% as.character tbl$BD.start = gsub('.+__([0-9.]+)-.+', '\\1', rownames(tbl)) %>% as.numeric tbl$BD.end = gsub('.+-', '', rownames(tbl)) %>% as.numeric tbl$BD.mid = mapply(mid, tbl$BD.start, tbl$BD.end) return(tbl) } ord.BD = get.BD.range(ord1 %>% scores) ord.BD %>% head %%R # making fixed BD-range & binning by BD.mid BD.range = seq(1.6, 1.9, 0.004) BD.range ###Output _____no_output_____
SetupVM.ipynb
###Markdown Setup Hacking VM- install virtualization tool - VirtualBox (http://www.virtualbox.org) - VMWare (https://www.vmware.com)- download pre-built virtual images of Kali Linux (x86): https://www.offensive-security.com/kali-linux-vm-vmware-virtualbox-image-download/ Install multilib for gcc and g++ - to be able to compile for 32 bit (x86) machine if using x64 (64 bit machine) ###Code %%bash # check machine architecture uname -a %%bash # do this only if you have x64 Linux VM apt install -y gcc-multilib g++-multilib ###Output _____no_output_____ ###Markdown Setup Hacking VM- install virtualization tool - VirtualBox (http://www.virtualbox.org) - VMWare (https://www.vmware.com) - either one is fine; depending on your system VMWare may not be free! - download pre-built virtual images of Kali Linux: https://www.offensive-security.com/kali-linux-vm-vmware-virtualbox-image-download/- can download either 64-bit or 32-bit (64-bit is recommended)- if you can provide more than 4 GB of memory (>=50% of total memory your system has) to your virtual machine, you should download 64-bit ###Code %%bash # check machine architecture uname -a ! echo kali | sudo -S apt update ###Output Hit:1 http://dl.google.com/linux/chrome/deb stable InRelease  Get:2 https://packages.microsoft.com/repos/vscode stable InRelease [3,958 B]  Get:4 https://packages.microsoft.com/repos/vscode stable/main amd64 Packages [215 kB][33m Get:3 http://kali.download/kali kali-rolling InRelease [30.5 kB] Get:5 http://kali.download/kali kali-rolling/main amd64 Packages [17.0 MB] Get:6 http://kali.download/kali kali-rolling/contrib amd64 Packages [104 kB] Get:7 http://kali.download/kali kali-rolling/non-free amd64 Packages [200 kB] Fetched 17.5 MB in 4s (4,090 kB/s)m  Reading package lists... Done Building dependency tree Reading state information... Done 921 packages can be upgraded. Run 'apt list --upgradable' to see them. ###Markdown Compilers and Libraries required- GCC for compiling C code- most system programs are written in C language- G++ for compiling C++ code- C++ is built on C and C libraries can be readily used in C++- we'll rely heavily on C++ with essential C libraries and functions for writing vulnerable programs- both compilers and necessary build tools can be installed by installing **build-essential** package- https://packages.ubuntu.com/xenial/build-essential- multilib library to compile x86 programs in x64-bit OS- we'll use C++ code with the mix of some C code especially to demonstrate various vulnerabilities ###Code %%bash echo kali | sudo -S apt install -y curl build-essential ccache gdb ###Output Reading package lists... Building dependency tree... Reading state information... build-essential is already the newest version (12.8). The following packages were automatically installed and are no longer required: libcdio18 libmpdec2 libprotobuf22 libx264-155 libx264-159 Use 'sudo apt autoremove' to remove them. The following additional packages will be installed: libcurl4 libdebuginfod1 libdw1 libelf1 libnsl2 libpython3.9 libpython3.9-minimal libpython3.9-stdlib Suggested packages: distcc | icecc gdb-doc gdbserver The following NEW packages will be installed: libdebuginfod1 libnsl2 libpython3.9 libpython3.9-minimal libpython3.9-stdlib The following packages will be upgraded: ccache curl gdb libcurl4 libdw1 libelf1 6 upgraded, 5 newly installed, 0 to remove and 906 not upgraded. Need to get 9,201 kB of archives. After this operation, 20.2 MB of additional disk space will be used. 
Get:1 http://kali.download/kali kali-rolling/main amd64 ccache amd64 4.1-1 [420 kB] Get:2 http://kali.download/kali kali-rolling/main amd64 curl amd64 7.72.0-1 [264 kB] Get:3 http://kali.download/kali kali-rolling/main amd64 libcurl4 amd64 7.72.0-1 [336 kB] Get:4 http://kali.download/kali kali-rolling/main amd64 libdw1 amd64 0.182-1 [234 kB] Get:5 http://kali.download/kali kali-rolling/main amd64 libelf1 amd64 0.182-1 [166 kB] Get:6 http://kali.download/kali kali-rolling/main amd64 libdebuginfod1 amd64 0.182-1 [25.4 kB] Get:7 http://kali.download/kali kali-rolling/main amd64 libpython3.9-minimal amd64 3.9.0-5 [797 kB] Get:8 http://kali.download/kali kali-rolling/main amd64 libnsl2 amd64 1.3.0-2 [39.5 kB] Get:9 http://kali.download/kali kali-rolling/main amd64 libpython3.9-stdlib amd64 3.9.0-5 [1,750 kB] Get:10 http://kali.download/kali kali-rolling/main amd64 libpython3.9 amd64 3.9.0-5 [1,688 kB] Get:11 http://kali.download/kali kali-rolling/main amd64 gdb amd64 10.1-1+b1 [3,481 kB] Fetched 9,201 kB in 3s (3,065 kB/s) (Reading database ... 263493 files and directories currently installed.) Preparing to unpack .../00-ccache_4.1-1_amd64.deb ... Unpacking ccache (4.1-1) over (3.7.11-1) ... Preparing to unpack .../01-curl_7.72.0-1_amd64.deb ... Unpacking curl (7.72.0-1) over (7.68.0-1+b1) ... Preparing to unpack .../02-libcurl4_7.72.0-1_amd64.deb ... Unpacking libcurl4:amd64 (7.72.0-1) over (7.68.0-1+b1) ... Preparing to unpack .../03-libdw1_0.182-1_amd64.deb ... Unpacking libdw1:amd64 (0.182-1) over (0.180-1+b1) ... Preparing to unpack .../04-libelf1_0.182-1_amd64.deb ... Unpacking libelf1:amd64 (0.182-1) over (0.180-1+b1) ... Selecting previously unselected package libdebuginfod1:amd64. Preparing to unpack .../05-libdebuginfod1_0.182-1_amd64.deb ... Unpacking libdebuginfod1:amd64 (0.182-1) ... Selecting previously unselected package libpython3.9-minimal:amd64. Preparing to unpack .../06-libpython3.9-minimal_3.9.0-5_amd64.deb ... Unpacking libpython3.9-minimal:amd64 (3.9.0-5) ... Selecting previously unselected package libnsl2:amd64. Preparing to unpack .../07-libnsl2_1.3.0-2_amd64.deb ... Unpacking libnsl2:amd64 (1.3.0-2) ... Selecting previously unselected package libpython3.9-stdlib:amd64. Preparing to unpack .../08-libpython3.9-stdlib_3.9.0-5_amd64.deb ... Unpacking libpython3.9-stdlib:amd64 (3.9.0-5) ... Selecting previously unselected package libpython3.9:amd64. Preparing to unpack .../09-libpython3.9_3.9.0-5_amd64.deb ... Unpacking libpython3.9:amd64 (3.9.0-5) ... Preparing to unpack .../10-gdb_10.1-1+b1_amd64.deb ... Unpacking gdb (10.1-1+b1) over (9.2-1) ... Setting up libpython3.9-minimal:amd64 (3.9.0-5) ... Setting up ccache (4.1-1) ... Updating symlinks in /usr/lib/ccache ... Setting up libnsl2:amd64 (1.3.0-2) ... Setting up libcurl4:amd64 (7.72.0-1) ... Setting up curl (7.72.0-1) ... Setting up libelf1:amd64 (0.182-1) ... Setting up libpython3.9-stdlib:amd64 (3.9.0-5) ... Setting up libdw1:amd64 (0.182-1) ... Setting up libdebuginfod1:amd64 (0.182-1) ... Setting up libpython3.9:amd64 (3.9.0-5) ... Setting up gdb (10.1-1+b1) ... Processing triggers for libc-bin (2.31-2) ... Processing triggers for man-db (2.9.3-2) ... Processing triggers for kali-menu (2020.3.2) ... ###Markdown Install multilib for gcc and g++ - to be able to compile for 32 bit (x86) machine if using x64 (64 bit machine) ###Code %%bash # do this only if you have x64 Linux VM echo kali | sudo -S apt install -y gcc-multilib g++-multilib ###Output Reading package lists... Building dependency tree... 
Reading state information... g++-multilib is already the newest version (4:10.2.0-1). gcc-multilib is already the newest version (4:10.2.0-1). The following packages were automatically installed and are no longer required: libcdio18 libmpdec2 libprotobuf22 libx264-155 libx264-159 Use 'sudo apt autoremove' to remove them. 0 upgraded, 0 newly installed, 0 to remove and 906 not upgraded. ###Markdown Compiling with gcc/g++ as 32-bit program in 64-bit system- use -m32 switch with gcc and g++```bashgcc -m32 -o output input.cg++ -m32 -o output input.cpp``` Install and configure PEDA- PEDA: Python Exploit Development Assistance for GDB- https://github.com/longld/peda ###Code %%bash dirPath=~/peda if [ -d "$dirPath" ] # if peda directory exists in user's home directory then echo "Directory $dirPath exists... Peda was already downloaded!"; exit 0; else git clone https://github.com/longld/peda.git $dirPath if [ $? == 0 ]; # if the last command successfuly executed then echo "source $dirPath/peda.py" >> ~/.gdbinit; echo "Finished downloading peda and configuring gdb!"; exit 0; else echo "failed cloning peda" exit 1; fi fi ###Output Finished downloading peda and configuring gdb! ###Markdown Setup Hacking VM- install virtualization tool - VirtualBox (http://www.virtualbox.org) - VMWare (https://www.vmware.com) - either one is fine; depending on your system VMWare may not be free! - download pre-built virtual images of Kali Linux: https://www.offensive-security.com/kali-linux-vm-vmware-virtualbox-image-download/- can download either 64-bit or 32-bit (64-bit is recommended)- if you can provide more than 4 GB of memory (>=50% of total memory your system has) to your virtual machine, you should download 64-bit ###Code %%bash # check machine architecture uname -a ! echo kali | sudo -S apt update ###Output Hit:1 http://dl.google.com/linux/chrome/deb stable InRelease  Get:2 https://packages.microsoft.com/repos/vscode stable InRelease [3,958 B]  Get:4 https://packages.microsoft.com/repos/vscode stable/main amd64 Packages [215 kB][33m Get:3 http://kali.download/kali kali-rolling InRelease [30.5 kB] Get:5 http://kali.download/kali kali-rolling/main amd64 Packages [17.0 MB] Get:6 http://kali.download/kali kali-rolling/contrib amd64 Packages [104 kB] Get:7 http://kali.download/kali kali-rolling/non-free amd64 Packages [200 kB] Fetched 17.5 MB in 4s (4,090 kB/s)m  Reading package lists... Done Building dependency tree Reading state information... Done 921 packages can be upgraded. Run 'apt list --upgradable' to see them. ###Markdown Compilers and Libraries required- GCC for compiling C code- most system programs are written in C language- G++ for compiling C++ code- C++ is built on C and C libraries can be readily used in C++- we'll rely heavily on C++ with essential C libraries and functions for writing vulnerable programs- both compilers and necessary build tools can be installed by installing **build-essential** package- https://packages.ubuntu.com/xenial/build-essential- multilib library to compile x86 programs in x64-bit OS- we'll use C++ code with the mix of some C code especially to demonstrate various vulnerabilities ###Code %%bash echo kali | sudo -S apt install -y curl build-essential ccache gdb ###Output Reading package lists... Building dependency tree... Reading state information... build-essential is already the newest version (12.8). 
The following packages were automatically installed and are no longer required: libcdio18 libmpdec2 libprotobuf22 libx264-155 libx264-159 Use 'sudo apt autoremove' to remove them. The following additional packages will be installed: libcurl4 libdebuginfod1 libdw1 libelf1 libnsl2 libpython3.9 libpython3.9-minimal libpython3.9-stdlib Suggested packages: distcc | icecc gdb-doc gdbserver The following NEW packages will be installed: libdebuginfod1 libnsl2 libpython3.9 libpython3.9-minimal libpython3.9-stdlib The following packages will be upgraded: ccache curl gdb libcurl4 libdw1 libelf1 6 upgraded, 5 newly installed, 0 to remove and 906 not upgraded. Need to get 9,201 kB of archives. After this operation, 20.2 MB of additional disk space will be used. Get:1 http://kali.download/kali kali-rolling/main amd64 ccache amd64 4.1-1 [420 kB] Get:2 http://kali.download/kali kali-rolling/main amd64 curl amd64 7.72.0-1 [264 kB] Get:3 http://kali.download/kali kali-rolling/main amd64 libcurl4 amd64 7.72.0-1 [336 kB] Get:4 http://kali.download/kali kali-rolling/main amd64 libdw1 amd64 0.182-1 [234 kB] Get:5 http://kali.download/kali kali-rolling/main amd64 libelf1 amd64 0.182-1 [166 kB] Get:6 http://kali.download/kali kali-rolling/main amd64 libdebuginfod1 amd64 0.182-1 [25.4 kB] Get:7 http://kali.download/kali kali-rolling/main amd64 libpython3.9-minimal amd64 3.9.0-5 [797 kB] Get:8 http://kali.download/kali kali-rolling/main amd64 libnsl2 amd64 1.3.0-2 [39.5 kB] Get:9 http://kali.download/kali kali-rolling/main amd64 libpython3.9-stdlib amd64 3.9.0-5 [1,750 kB] Get:10 http://kali.download/kali kali-rolling/main amd64 libpython3.9 amd64 3.9.0-5 [1,688 kB] Get:11 http://kali.download/kali kali-rolling/main amd64 gdb amd64 10.1-1+b1 [3,481 kB] Fetched 9,201 kB in 3s (3,065 kB/s) (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 263493 files and directories currently installed.) Preparing to unpack .../00-ccache_4.1-1_amd64.deb ... Unpacking ccache (4.1-1) over (3.7.11-1) ... Preparing to unpack .../01-curl_7.72.0-1_amd64.deb ... Unpacking curl (7.72.0-1) over (7.68.0-1+b1) ... Preparing to unpack .../02-libcurl4_7.72.0-1_amd64.deb ... Unpacking libcurl4:amd64 (7.72.0-1) over (7.68.0-1+b1) ... Preparing to unpack .../03-libdw1_0.182-1_amd64.deb ... Unpacking libdw1:amd64 (0.182-1) over (0.180-1+b1) ... Preparing to unpack .../04-libelf1_0.182-1_amd64.deb ... Unpacking libelf1:amd64 (0.182-1) over (0.180-1+b1) ... Selecting previously unselected package libdebuginfod1:amd64. Preparing to unpack .../05-libdebuginfod1_0.182-1_amd64.deb ... Unpacking libdebuginfod1:amd64 (0.182-1) ... Selecting previously unselected package libpython3.9-minimal:amd64. Preparing to unpack .../06-libpython3.9-minimal_3.9.0-5_amd64.deb ... Unpacking libpython3.9-minimal:amd64 (3.9.0-5) ... Selecting previously unselected package libnsl2:amd64. Preparing to unpack .../07-libnsl2_1.3.0-2_amd64.deb ... Unpacking libnsl2:amd64 (1.3.0-2) ... Selecting previously unselected package libpython3.9-stdlib:amd64. 
Preparing to unpack .../08-libpython3.9-stdlib_3.9.0-5_amd64.deb ... Unpacking libpython3.9-stdlib:amd64 (3.9.0-5) ... Selecting previously unselected package libpython3.9:amd64. Preparing to unpack .../09-libpython3.9_3.9.0-5_amd64.deb ... Unpacking libpython3.9:amd64 (3.9.0-5) ... Preparing to unpack .../10-gdb_10.1-1+b1_amd64.deb ... Unpacking gdb (10.1-1+b1) over (9.2-1) ... Setting up libpython3.9-minimal:amd64 (3.9.0-5) ... Setting up ccache (4.1-1) ... Updating symlinks in /usr/lib/ccache ... Setting up libnsl2:amd64 (1.3.0-2) ... Setting up libcurl4:amd64 (7.72.0-1) ... Setting up curl (7.72.0-1) ... Setting up libelf1:amd64 (0.182-1) ... Setting up libpython3.9-stdlib:amd64 (3.9.0-5) ... Setting up libdw1:amd64 (0.182-1) ... Setting up libdebuginfod1:amd64 (0.182-1) ... Setting up libpython3.9:amd64 (3.9.0-5) ... Setting up gdb (10.1-1+b1) ... Processing triggers for libc-bin (2.31-2) ... Processing triggers for man-db (2.9.3-2) ... Processing triggers for kali-menu (2020.3.2) ... ###Markdown Install multilib for gcc and g++ - to be able to compile for 32 bit (x86) machine if using x64 (64 bit machine) ###Code %%bash # do this only if you have x64 Linux VM echo kali | sudo -S apt install -y gcc-multilib g++-multilib ###Output Reading package lists... Building dependency tree... Reading state information... g++-multilib is already the newest version (4:10.2.0-1). gcc-multilib is already the newest version (4:10.2.0-1). The following packages were automatically installed and are no longer required: libcdio18 libmpdec2 libprotobuf22 libx264-155 libx264-159 Use 'sudo apt autoremove' to remove them. 0 upgraded, 0 newly installed, 0 to remove and 906 not upgraded. ###Markdown Compiling with gcc/g++ as 32-bit program in 64-bit system- use -m32 switch with gcc and g++```bashgcc -m32 -o output input.cg++ -m32 -o output input.cpp``` Install and configure PEDA- PEDA: Python Exploit Development Assistance for GDB- https://github.com/longld/peda ###Code %%bash dirPath=~/peda if [ -d "$dirPath" ] then echo "Directory $dirPath exists... Peda was already downloaded!"; exit 0; else git clone https://github.com/longld/peda.git ~/peda; echo "source ~/peda/peda.py" >> ~/.gdbinit; exit 0; fi ###Output Directory /home/kali/peda exists... Peda was already downloaded!
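###Markdown As a quick sanity check of the multilib setup (a supplementary sketch, not part of the original notes), we can compile a trivial C file with the `-m32` switch from Python and confirm the resulting binary is 32-bit. The file names `hello.c` and `hello32` are just examples. ###Code
import subprocess

# Write a trivial C program to disk.
with open('hello.c', 'w') as f:
    f.write('#include <stdio.h>\nint main(void) { printf("hello\\n"); return 0; }\n')

# Compile it as a 32-bit program (requires the gcc-multilib package installed above).
subprocess.check_call(['gcc', '-m32', '-o', 'hello32', 'hello.c'])

# The file utility should report a 32-bit ELF executable.
print(subprocess.check_output(['file', 'hello32']).decode())
###Output _____no_output_____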
notebooks/txt-clf.ipynb
###Markdown Sentiment classification ###Code import torch from perceiver.data import TextPreprocessor from perceiver.model import LitTextClassifier imdb_preproc = TextPreprocessor(tokenizer_path='../.cache/imdb-tokenizer-10003.json') # Download model checkpoints !wget -nc -O logs.zip https://martin-krasser.com/perceiver/logs-update-2.zip !unzip -qo logs.zip ckpt_path = 'logs/seq_clf/version_1/checkpoints/epoch=021-val_loss=0.253.ckpt' model = LitTextClassifier.load_from_checkpoint(ckpt_path, clf_ckpt=None).model model.eval(); text_batch = [ "I've seen this movie yesterday and it was really boring", "I can recommend this movie to all fantasy movie lovers" ] with torch.no_grad(): logits = model(*imdb_preproc.preprocess_batch(text_batch)) preds = logits.argmax(dim=1) for text, pred in zip(text_batch, preds): print(f'{text} (positive = {pred == 1})') ###Output I've seen this movie yesterday and it was really boring (positive = False) I can recommend this movie to all fantasy movie lovers (positive = True) ###Markdown Sentiment classification ###Code !pip install perceiver-io[text]==0.3.0 # Download trained tokenizer !wget https://raw.githubusercontent.com/krasserm/perceiver-io/main/.cache/imdb-tokenizer-10003.json # Download checkpoints !wget -nc -O logs.zip https://martin-krasser.com/perceiver/logs-update-4.zip !unzip -qo logs.zip import torch from perceiver.data.text import TextPreprocessor from perceiver.model.text.classifier import LitTextClassifier imdb_preproc = TextPreprocessor(tokenizer_path='imdb-tokenizer-10003.json') ckpt_path = 'logs/seq_clf/version_1/checkpoints/epoch=017-val_loss=0.254.ckpt' model = LitTextClassifier.load_from_checkpoint(ckpt_path, clf_ckpt=None).model model.eval(); text_batch = [ "I've seen this movie yesterday and it was really boring", "I can recommend this movie to all fantasy movie lovers" ] with torch.no_grad(): logits = model(*imdb_preproc.preprocess_batch(text_batch)) preds = logits.argmax(dim=1) for text, pred in zip(text_batch, preds): print(f'{text} (positive = {pred == 1})') ###Output I've seen this movie yesterday and it was really boring (positive = False) I can recommend this movie to all fantasy movie lovers (positive = True)
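###Markdown If you would rather see scores than hard labels, a small follow-up (not part of the original notebook) is to turn the logits from the cell above into class probabilities with a softmax; it assumes the `logits` tensor and `text_batch` list defined earlier are still in scope. ###Code
with torch.no_grad():
    # Convert raw logits into per-class probabilities.
    probs = torch.softmax(logits, dim=1)

for text, p in zip(text_batch, probs):
    # p[1] is the probability assigned to the positive (recommended) class.
    print(f'{text} (p_positive = {p[1]:.3f})')
###Output _____no_output_____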
_site/notes/notes_ipynb/lecture22-evaluation-tools.ipynb
###Markdown Lecture 22: Tools for Diagnosing Model Performance Applied Machine Learning__Volodymyr Kuleshov__Cornell Tech Practical Considerations When Applying Machine LearningSuppose you trained an image classifier with 80% accuracy. What's next? * Add more data?* Train the algorithm for longer?* Use a bigger model?* Add regularization?* Add new features? We will next learn how to prioritize these decisions when applying ML. Part 1: Learning CurvesLearning curves are a common and useful tool for performing bias/variance analysis in a deeper way.This section is mostly based on [materials](https://www.deeplearning.ai/machine-learning-yearning/) from an e-book by Andrew Ng. Review: Overfitting (Variance)Overfitting is one of the most common failure modes of machine learning.* A very expressive model (a high degree polynomial) fits the training dataset perfectly.* The model also makes wildly incorrect prediction outside this dataset, and doesn't generalize.Models that overfit are said to be __high variance__. Review: Underfitting (Bias)Underfitting is another common problem in machine learning.* The model is too simple to fit the data well (e.g., approximating a high degree polynomial with linear regression).* As a result, the model is not accurate on training data and is not accurate on new data.Because the model cannot fit the data, we say it's __high bias__. Learning CurvesLearning curves show performance as a function of __training set size__.Learning curves are defined for __fixed hyperparameters__. Observe that dev set error decreases as we give the model more data. Visualizing Ideal PerformanceIt is often very useful to have a target upper bound on performance (e.g., human accuracy); it can also be visualized on the learning curve.Extrapolating the red curve suggests how much additional data we need. In the example below, the dev error has plateaued and we know that adding more data will not be useful. Learning Curves for the Training SetWe can further augment this plot with training set performance.The blue curve shows training error as a function of training set size. A few observations can be made here:* Training error is normally less that dev set error: the training set is easier to fit.* Training error *increases* with training set size because our model overfits small datasets. Diagnosing High BiasLearning curves can reveal when we have a bias problem.Here, the model can't fit larger datasets, hence it's underfitting. In practice, in can be hard to visually assess if the dev error has plateaued. Adding the training error makes this easier.Here, adding data can no longer help: the blue error can only increase and thus dev error cannot decrease. Relationship to Bias/Variance AnalysisBias/variance analysis correspond to looking at the very last point on the learning curves.Looking at the entire curve ensures a more reliable diagnosis. Diagnosing High VarianceThe following plot shows we have high variance.Training error is small (near optimal), but dev set error is large. We can address this by adding more data. In this plot, we have both high variance and high bias.The training error significantly exceeds desired performance, and the dev set error is even higher. Practical ConsiderationsIn practice, the following tricks are useful.* When data is small, performance estimates are noisy, and curves are non-smooth.* The solution is to average over multiple models trained on random subsets of $m$ points, for each $m$.* If classes are imbalanced, choose metrics that account for this. 
Learning Curves: An Example To further illustrate the idea of learning curves, consider the following example. We will use the `sklearn` digits dataset, a downscaled version of MNIST. ###Code # Example from https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html from sklearn.datasets import load_digits # Keep the full dataset object so that digits.images is available for plotting below digits = load_digits() X, y = digits.data, digits.target ###Output _____no_output_____ ###Markdown We can visualize these digits as follows: ###Code from matplotlib import pyplot as plt plt.figure(figsize=(8,16)) _, axes = plt.subplots(2, 5) images_and_labels = list(zip(digits.images, digits.target)) for ax, (image, label) in zip(axes.flatten(), images_and_labels[:10]): ax.set_axis_off() ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') ax.set_title('Digit %i' % label) ###Output _____no_output_____ ###Markdown This is boilerplate code for visualizing learning curves and it's not essential to understand this example. ###Code import numpy as np from sklearn.model_selection import learning_curve def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None, n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)): """Generate learning curves for an algorithm.""" if axes is None: _, axes = plt.subplots(1, 3, figsize=(20, 5)) axes[0].set_title(title) if ylim is not None: axes[0].set_ylim(*ylim) axes[0].set_xlabel("Training examples") axes[0].set_ylabel("Score") train_sizes, train_scores, test_scores, fit_times, _ = \ learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, return_times=True) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) fit_times_mean = np.mean(fit_times, axis=1) fit_times_std = np.std(fit_times, axis=1) # Plot learning curve axes[0].grid() axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Accuracy") axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g", label="Dev Set Accuracy") axes[0].legend(loc="best") return plt ###Output _____no_output_____ ###Markdown We visualize learning curves for two algorithms:* Gaussian Naive Bayes model, a version of Naive Bayes for continuous inputs $x$* A support vector machine with a radial basis function (RBF) kernel ###Code from sklearn.model_selection import ShuffleSplit from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC fig, axes = plt.subplots(1, 2, figsize=(10, 5)) # This is a technical detail, but we will obtain dev set performance via # cross-validation rather than via a dev set. # Cross validation is a technique that emulates a separate dev set with small data. # We also use 100 iterations to get smoother mean test and train curves, # each time with 20% data randomly selected as a validation set. 
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0) title = "Learning Curves (Naive Bayes)" plot_learning_curve(GaussianNB(), title, X, y, axes=[axes[0]], ylim=(0.7, 1.01), cv=cv, n_jobs=4) cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0) title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)" plot_learning_curve(SVC(gamma=0.001), title, X, y, axes=[axes[1]], ylim=(0.7, 1.01),cv=cv, n_jobs=4) ###Output _____no_output_____ ###Markdown We can draw a few takeways:* The Gaussian model is very simple for this task: performance saturates after ~1,400 examples.* The SVM is much more expressive: it keeps improving in dev set performance as we give it more data. Limitations of Learning CurvesThe main limitations of learning curves include:1. Computational time needed to learn the curves.2. Learning curves can be noisy and require human intuition to read. Part 2: Loss CurvesAnother way to understand the performance of the model is to visualize its objective as we train the model.This section is based on [materials](https://cs231n.github.io/neural-networks-3/) by Andrej Karpathy. Review: Model Development WorkflowThe machine learning development workflow has three steps:1. __Training:__ Try a new model and fit it on the training set. 2. __Model Selection__: Estimate performance on the development set using metrics. Based on results, try a new model idea in step 1. 3. __Evaluation__: Finally, estimate real-world performance on test set. Loss CurvesMany algorithms minimize a loss function using an iterative optimization procedure like gradient descent. Loss curves plot the __training objective__ as a function of the number of __training steps__ on training or development datasets. Diagnosing Bias and VarianceLoss curves provide another way to diagnose bias and variance. A few observations can be made here:* As we train the model for more epochs, training accuracy improves.* When we are *not overfitting* (green), validation accuracy tracks training accuracy. * When we *overfit* (blue), validation and training accuracies have a large gap, and validation accuracy eventually even decreases. OvertrainingA failure mode of some machine learning algorithms is *overtraining*.Model performance worsens after some number of training steps. The solution is to train for less or preferrably to regularize the model. A closely related problem is __undertraining__: not training the model for long enough.This can be diagnosed via a learning curve that shows that dev set performance is still on an improving trajectory. Diagnosing Optimization IssuesLoss curves also enable diagnosing optimization problems.Here, we show training set accuracy for different learning rates. Each line is a loss curve with a different learning rate (LR).* The blue training is still decreasing at the end: the LR is too low.* The green line plateaus too soon and too high indicating a high LR.* The yellow curve explodes: the LR was soo high that parameters took a step into a very bad direction.The red loss curve is not too fast and not too slow. Pros and Cons of Loss CurvesAdvantages of using loss curves include the following.1. Producing loss curves doesn't require extra computation.2. Loss curves can detect optimization problems and overtraining.Loss curves don't diagnose the utility of adding more data; when bias/variance diagnosis is ambiguous, use learning curves. Part 3: Validation CurvesValidation curves help us understand the effects of different hyper-parameters. 
Review: Model Development Workflow The machine learning development workflow has three steps:1. __Training:__ Try a new model and fit it on the training set. 2. __Model Selection__: Estimate performance on the development set using metrics. Based on results, try a new model idea in step 1. 3. __Evaluation__: Finally, estimate real-world performance on test set. Validation Curves ML models normally have hyper-parameters, e.g. L2 regularization strength, neural net layer size, number of K-Means clusters, etc. Validation curves plot __model performance__ as a function of __hyper-parameter values__ on training or development datasets. Validation Curve: An Example Consider the following example, in which we train an SVM on the digits dataset. Recall the digits dataset introduced earlier in this lecture. ###Code from matplotlib import pyplot as plt plt.figure(figsize=(8,16)) _, axes = plt.subplots(2, 5) images_and_labels = list(zip(digits.images, digits.target)) for ax, (image, label) in zip(axes.flatten(), images_and_labels[:10]): ax.set_axis_off() ax.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') ax.set_title('Digit %i' % label) ###Output _____no_output_____ ###Markdown We can train an SVM with an RBF kernel for different values of bandwidth $\gamma$ using the `validation_curve` function. ###Code from sklearn.model_selection import validation_curve # https://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html param_range = np.logspace(-6, -1, 5) train_scores, test_scores = validation_curve( SVC(), X, y, param_name="gamma", param_range=param_range, scoring="accuracy", n_jobs=1) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) ###Output _____no_output_____ ###Markdown We visualize this as follows. ###Code plt.title("Validation Curve with SVM") plt.xlabel(r"$\gamma$") plt.ylabel("Score") plt.ylim(0.0, 1.1) lw = 2 plt.semilogx(param_range, train_scores_mean, label="Training accuracy", color="darkorange", lw=lw) plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color="darkorange", lw=lw) plt.semilogx(param_range, test_scores_mean, label="Validation accuracy", color="navy", lw=lw) plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color="navy", lw=lw) plt.legend(loc="best") ###Output _____no_output_____
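###Markdown A natural follow-up, once the validation curve has been computed, is to read off the hyper-parameter value with the best dev set performance. The short sketch below is supplementary (not part of the original lecture) and only reuses the `param_range`, `train_scores_mean` and `test_scores_mean` arrays computed above. ###Code
# Pick the gamma with the highest mean validation accuracy.
best_idx = np.argmax(test_scores_mean)
best_gamma = param_range[best_idx]
print("Best gamma on the validation folds: %g" % best_gamma)
print("Mean validation accuracy at that gamma: %.3f" % test_scores_mean[best_idx])
print("Mean training accuracy at that gamma:   %.3f" % train_scores_mean[best_idx])
###Output _____no_output_____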
milestone_3/.ipynb_checkpoints/milestone_3-checkpoint (1).ipynb
###Markdown Milestone 3: Traditional statistical and machine learning methods, due Wednesday, April 19, 2017Think about how you would address the genre prediction problem with traditional statistical or machine learning methods. This includes everything you learned about modeling in this course before the deep learning part. Implement your ideas and compare different classifiers. Report your results and discuss what challenges you faced and how you overcame them. What works and what does not? If there are parts that do not work as expected, make sure to discuss briefly what you think is the cause and how you would address this if you would have more time and resources. You do not necessarily need to use the movie posters for this step, but even without a background in computer vision, there are very simple features you can extract from the posters to help guide a traditional machine learning model. Think about the PCA lecture for example, or how to use clustering to extract color information. In addition to considering the movie posters it would be worthwhile to have a look at the metadata that IMDb provides. You could use Spark and the [ML library](https://spark.apache.org/docs/latest/ml-features.htmlword2vec) to build your model features from the data. This may be especially beneficial if you use additional data, e.g., in text form.You also need to think about how you are going to evaluate your classifier. Which metrics or scores will you report to show how good the performance is?The notebook to submit this week should at least include:- Detailed description and implementation of two different models- Description of your performance metrics- Careful performance evaluations for both models- Visualizations of the metrics for performance evaluation- Discussion of the differences between the models, their strengths, weaknesses, etc. - Discussion of the performances you achieved, and how you might be able to improve them in the future Preliminary Peer AssessmentIt is important to provide positive feedback to people who truly worked hard for the good of the team and to also make suggestions to those you perceived not to be working as effectively on team tasks. We ask you to provide an honest assessment of the contributions of the members of your team, including yourself. The feedback you provide should reflect your judgment of each team member’s:- Preparation – were they prepared during team meetings?- Contribution – did they contribute productively to the team discussion and work?- Respect for others’ ideas – did they encourage others to contribute their ideas?- Flexibility – were they flexible when disagreements occurred?Your teammate’s assessment of your contributions and the accuracy of your self-assessment will be considered as part of your overall project score.Preliminary Peer Assessment: [https://goo.gl/forms/WOYC7pwRCSU0yV3l1](https://goo.gl/forms/WOYC7pwRCSU0yV3l1) Questions to answer: - **What are we predicting exactly?**So, we are trying to predict movie genres. However, we have that each movie has multiple genres. This leads to the question of how we can predict multiple classifiers for the same object. This more general question is called a multilabel clasification problem. We will explore some of our specifications for this problem below. One of the best and most standard solution to do multilable classification is called "one vs. rest" classifiers. These classifiers create n models for each of the n labels. 
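###Markdown To make that concrete, the following is a minimal, self-contained sketch of the one vs. rest setup using `sklearn`'s `OneVsRestClassifier` (the random features and labels here are placeholders for illustration only, not our movie data). ###Code
# Illustrative sketch of one-vs-rest multilabel classification (hypothetical data).
import numpy as np
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import hamming_loss

X_demo = np.random.rand(200, 5)              # 200 examples, 5 numeric features
Y_demo = np.random.randint(0, 2, (200, 17))  # 17 binary genre indicators per example

ovr = OneVsRestClassifier(LogisticRegression())  # fits one classifier per label column
ovr.fit(X_demo, Y_demo)
print(hamming_loss(Y_demo, ovr.predict(X_demo)))
###Output _____no_output_____ ###Markdown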
One of the advantages of this model is its interpretability and, for our cases, its ease. We can easily create a pipeline that then does these predictions for us. For an implementation of one vs. all, look at scikit learn: http://scikit-learn.org/dev/modules/generated/sklearn.multiclass.OneVsRestClassifier.html#sklearn.multiclass.OneVsRestClassifier We will likely be using this in our early attempts at classification. - **What does it mean to be successful? What is our metric for success?** *adapted from http://people.oregonstate.edu/~sorowerm/pdf/Qual-Multilabel-Shahed-CompleteVersion.pdf* Here are a few options for our measure of accuracy: Exact Match Ratio The exact match ratio only considers a correct answer for our multilabel data if it is exactly correct (e.g. if there are three classes, we only classify this as correct if we correctly identify all three classes.) Accuracy Accuracy is a simple way of measuring "goodness of prediction." It is defined as follows $$ \frac{1}{n} \sum_{i=1}^{n} \frac{|Y_i\cap Z_i|}{|Y_i \cup Z_i|}$$ where $|Y_i \cap Z_i|$ is the number of correctly predicted labels and $|Y_i \cup Z_i|$ is the total number of unique labels for that instance. So, if for example we predicted [romance, action] and the true labels were [romance, comedy, horror], this would receive an accuracy of 1/4 because there was one correct prediction and 4 unique labels. Hamming Loss The final and most common form of error for multilabel predictions is hamming loss. Hamming loss takes into account both the prediction error (an incorrect label is predicted) and the missing error (a relevant label is NOT predicted). It is defined as follows: $$ \text{HammingLoss, HL} = \frac{1}{kn} \sum_{i=1}^{n} \sum_{l=1}^{k} [I(l \in Z_i \wedge l \notin Y_i) + I(l \notin Z_i \wedge l \in Y_i)]$$ *For this project, we will use the hamming loss, which is defined above.* There is a convenient function in `sklearn` to calculate hamming loss: `sklearn.metrics.hamming_loss` - What is our first modeling approach? Why? - What is our second modeling approach? Why? ###Code ''' An example of hamming loss. We have true labels: [0, 1] [1, 1] And predicted labels: [0, 0] [0, 0] Hamming loss is .75 ''' import numpy as np from sklearn.metrics import hamming_loss hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2))) ###Output _____no_output_____ ###Markdown Data Collection & Cleaning Decision for dropping Here we choose to drop the missing data instead of imputing it, because the missing values are non-numerical and averaging or taking means does not make sense in this scenario ###Code import pandas as pd train = pd.read_csv("../data/train.csv") # drop a rogue column train.drop("Unnamed: 0", axis = 1, inplace = True) train = train.dropna(axis=0).copy() print "Dataframe shape:", train.shape train.head(1) # check for null values train.isnull().any() ###Output _____no_output_____ ###Markdown Model 1: Random Forest Some thoughts: * Random forests don't accept strings, so we'll need to vectorize all of the string variables or exclude them entirely. 
###Code train.columns string_cols = ["director", "lead actors", "overview", "title"] string_matrix = train[string_cols] # Set up helper cleaner function import re def cleaner(cell): line = cell.replace('[u', '').replace(']', '').replace(',', '').replace("u'", '').replace("'", '') line = re.sub("(^|\W)\d+($|\W)", " ", line) return line string_matrix['lead actors'] = string_matrix['lead actors'].apply(cleaner) # trim trailing and leading spaces string_matrix = string_matrix.apply(lambda col: col.str.strip()) from sklearn.feature_extraction.text import CountVectorizer import scipy.sparse as sp vect = CountVectorizer(ngram_range=(1, 3)) vect_df = sp.hstack(string_matrix.apply(lambda col: vect.fit_transform(col))) # def _coo_to_sparse_series(A, dense_index=False): # """ Convert a scipy.sparse.coo_matrix to a SparseSeries. # Use the defaults given in the SparseSeries constructor. """ # s = pd.Series(A.data, pd.MultiIndex.from_arrays((A.row, A.col))) # s = s.sort_index() # s = s.to_sparse() # TODO: specify kind? # # ... # return s # _coo_to_sparse_series(vect_df) # disabled: the helper above is commented out labels = train.columns[:17] features = train.columns[17:] # X = train[features] X = train[["popularity", "vote_average", "vote_count"]] # y holds the 17 binary genre columns we are trying to predict y = train[labels] genre_ids_df = pd.read_csv("../data/genre_ids.csv") genre_ids_df.drop("Unnamed: 0", axis = 1, inplace = True) for label in labels: print genre_ids_df[genre_ids_df["id"] == int(label)]["genre"].item() ###Output Music Romance Family War Adventure Fantasy Animation Drama Horror Action Comedy History Western Thriller Crime Science Fiction Mystery ###Markdown Currently, our label matrix has 17 columns, meaning that each row has 17 different labels associated with it. This is a big problem because there are 2^17 different possible combinations for each row, and, unless we have a ton of data, we likely won't see more than 1 or 2 instances of a given row from the label matrix. This will make it difficult for our classifier to learn patterns. We should probably combine similar genres to make this prediction task more tenable. How should we do this combination? Evaluating the Random Forest using KFold CV ###Code from sklearn.model_selection import KFold from sklearn.ensemble import RandomForestClassifier from sklearn.multioutput import MultiOutputClassifier h_losses = [] for train_ind, test_ind in KFold(n_splits = 5).split(X): X_train, X_test = X.iloc[train_ind], X.iloc[test_ind] y_train, y_test = y.iloc[train_ind], y.iloc[test_ind] forest = RandomForestClassifier(n_estimators=100, random_state=109) # instantiate the classifier (n_jobs = -1 tells it) # to fit using all CPUs multi_target_forest = MultiOutputClassifier(forest, n_jobs=-1) # fit the multi-target random forest fitted_forest = multi_target_forest.fit(X_train, y_train) # predict the label matrix preds = fitted_forest.predict(X_test) h_losses.append(hamming_loss(y_test, preds)) print np.average(h_losses) ###Output _____no_output_____
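###Markdown One extra check that can make the hamming loss easier to interpret (a supplementary sketch, not part of the original milestone): compare the forest against a trivial baseline that predicts no genres at all, using the predictions from the last fold of the loop above. ###Code
# Baseline comparison: an all-zeros prediction ("no genres") on the last fold.
baseline_preds = np.zeros_like(preds)
print("Random forest hamming loss (last fold): %.4f" % hamming_loss(y_test, preds))
print("All-zeros baseline hamming loss:        %.4f" % hamming_loss(y_test, baseline_preds))
###Output _____no_output_____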
ingest_data/04_Ingest_data_with_EMR.ipynb
###Markdown Ingest Data with EMRThis notebook demonstrates how to read the data from the EMR cluster.We are going to use the data we load into S3 in the previous notebook [011_Ingest_tabular_data.ipynb](011_Ingest_tabular_data_v1.ipynb).Amazon EMR is the industry-leading cloud big data platform for processing vast amounts of data using open source tools such as Apache Spark, Apache Hive, Apache HBase, Apache Flink, Apache Hudi, and Presto. With EMR you can run Petabyte-scale analysis at less than half of the cost of traditional on-premises solutions and over 3x faster than standard Apache Spark. Set up NotebookFirst, we are going to make sure we have the EMR Cluster set up and the connection between EMR and Sagemaker Notebook set up correctly. You can follow the [documentation](https://aws.amazon.com/blogs/machine-learning/build-amazon-sagemaker-notebooks-backed-by-spark-in-amazon-emr/) and [procedure](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-lifecycle-config-emr.html) to set up this notebook. Once you are done with setting up, restart the kernel and run the following command to check if you set up the EMR and Sagemaker connection correctly. ###Code %%info %%local import sagemaker from sklearn.datasets import * import pandas as pd sagemaker_session = sagemaker.Session() s3 = sagemaker_session.boto_session.resource("s3") bucket = sagemaker_session.default_bucket() # replace with your own bucket name if you have one prefix = "data/tabular/boston_house" filename = "boston_house.csv" ###Output _____no_output_____ ###Markdown Download data from online resources and write data to S3 ###Code %%local # helper functions to upload data to s3 def write_to_s3(filename, bucket, prefix): # put one file in a separate folder. This is helpful if you read and prepare data with Athena filename_key = filename.split(".")[0] key = "{}/{}/{}".format(prefix, filename_key, filename) return s3.Bucket(bucket).upload_file(filename, key) def upload_to_s3(bucket, prefix, filename): url = "s3://{}/{}/{}".format(bucket, prefix, filename) print("Writing to {}".format(url)) write_to_s3(filename, bucket, prefix) %%local tabular_data = load_boston() tabular_data_full = pd.DataFrame(tabular_data.data, columns=tabular_data.feature_names) tabular_data_full["target"] = pd.DataFrame(tabular_data.target) tabular_data_full.to_csv("boston_house.csv", index=False) upload_to_s3(bucket, "data/tabular", filename) %%local data_s3_path = "s3://{}/{}/{}".format(bucket, prefix, filename) print("this is path to your s3 files: " + data_s3_path) ###Output _____no_output_____ ###Markdown Copy the S3 bucket file pathThe S3 bucket file path is required to read the data on EMR Spark. Copy and paste the path string shown above into the next cell. ###Code ### replace this path string with your path shown in last step data_s3_path = "s3://sagemaker-us-east-2-060356833389/data/tabular/boston_house/boston_house.csv" ###Output _____no_output_____ ###Markdown Read the data in EMR spark ClusterOnce we have a path to our data in S3, we can use `spark s3 select` to read data with the following command. You can specify a data format, schema is not necessary but recommended, and in options you can specify `compression`, `delimiter`, `header`, etc. For more details, please see [documentation on using S3 select with Spark](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark-s3select.html). 
###Code # EMR cell schema = " CRIM double, ZN double, INDUS double,\ CHAS double, NOX double, RM double, AGE double, DIS double, RAD double, TAX double, PTRATIO double, \ B double, LSTAT double, target double" df = spark.read.format("csv").schema(schema).options(header="true").load(data_s3_path) df.show(5) ###Output _____no_output_____ ###Markdown Ingest Data with EMRThis notebook demonstrates how to read the data from the EMR cluster.We are going to use the data we load into S3 in the previous notebook [011_Ingest_tabular_data.ipynb](011_Ingest_tabular_data_v1.ipynb).Amazon EMR is the industry-leading cloud big data platform for processing vast amounts of data using open source tools such as Apache Spark, Apache Hive, Apache HBase, Apache Flink, Apache Hudi, and Presto. With EMR you can run Petabyte-scale analysis at less than half of the cost of traditional on-premises solutions and over 3x faster than standard Apache Spark. Set up NotebookFirst, we are going to make sure we have the EMR Cluster set up and the connection between EMR and Sagemaker Notebook set up correctly. You can follow the [documentation](https://aws.amazon.com/blogs/machine-learning/build-amazon-sagemaker-notebooks-backed-by-spark-in-amazon-emr/) and [procedure](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-lifecycle-config-emr.html) to set up this notebook. Once you are done with setting up, restart the kernel and run the following command to check if you set up the EMR and Sagemaker connection correctly. ###Code %%info %pip install -qU 'sagemaker>=2.15.0' 'scikit-learn' %%local import sagemaker from sklearn.datasets import * import pandas as pd sagemaker_session = sagemaker.Session() s3 = sagemaker_session.boto_session.resource('s3') bucket = sagemaker_session.default_bucket() #replace with your own bucket name if you have one prefix = 'data/tabular/boston_house' filename = 'boston_house.csv' ###Output _____no_output_____ ###Markdown Download data from online resources and write data to S3 ###Code %%local #helper functions to upload data to s3 def write_to_s3(filename, bucket, prefix): #put one file in a separate folder. This is helpful if you read and prepare data with Athena filename_key = filename.split('.')[0] key = "{}/{}/{}".format(prefix,filename_key,filename) return s3.Bucket(bucket).upload_file(filename,key) def upload_to_s3(bucket, prefix, filename): url = 's3://{}/{}/{}'.format(bucket, prefix, filename) print('Writing to {}'.format(url)) write_to_s3(filename, bucket, prefix) %%local tabular_data = load_boston() tabular_data_full = pd.DataFrame(tabular_data.data, columns=tabular_data.feature_names) tabular_data_full['target'] = pd.DataFrame(tabular_data.target) tabular_data_full.to_csv('boston_house.csv', index = False) upload_to_s3(bucket, 'data/tabular', filename) %%local data_s3_path = 's3://{}/{}/{}'.format(bucket, prefix, filename) print ('this is path to your s3 files: '+data_s3_path) ###Output _____no_output_____ ###Markdown Copy the S3 bucket file pathThe S3 bucket file path is required to read the data on EMR Spark. Copy and paste the path string shown above into the next cell. ###Code ### replace this path string with your path shown in last step data_s3_path = 's3://sagemaker-us-east-2-060356833389/data/tabular/boston_house/boston_house.csv' ###Output _____no_output_____ ###Markdown Read the data in EMR spark ClusterOnce we have a path to our data in S3, we can use `spark s3 select` to read data with the following command. 
You can specify a data format, schema is not necessary but recommended, and in options you can specify `compression`, `delimiter`, `header`, etc. For more details, please see [documentation on using S3 select with Spark](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark-s3select.html). ###Code # EMR cell schema = ' CRIM double, ZN double, INDUS double,\ CHAS double, NOX double, RM double, AGE double, DIS double, RAD double, TAX double, PTRATIO double, \ B double, LSTAT double, target double' df = spark.read.format('csv').schema(schema).options(header='true').load(data_s3_path) df.show(5) ###Output _____no_output_____ ###Markdown Ingest Data with EMRThis notebook demonstrates how to read the data from the EMR cluster.We are going to use the data we load into S3 in the previous notebook [011_Ingest_tabular_data.ipynb](011_Ingest_tabular_data_v1.ipynb).Amazon EMR is the industry-leading cloud big data platform for processing vast amounts of data using open source tools such as Apache Spark, Apache Hive, Apache HBase, Apache Flink, Apache Hudi, and Presto. With EMR you can run Petabyte-scale analysis at less than half of the cost of traditional on-premises solutions and over 3x faster than standard Apache Spark. Set up NotebookFirst, we are going to make sure we have the EMR Cluster set up and the connection between EMR and Sagemaker Notebook set up correctly. You can follow the [documentation](https://aws.amazon.com/blogs/machine-learning/build-amazon-sagemaker-notebooks-backed-by-spark-in-amazon-emr/) and [procedure](https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-lifecycle-config-emr.html) to set up this notebook. Once you are done with setting up, restart the kernel and run the following command to check if you set up the EMR and Sagemaker connection correctly. ###Code %%info %%local import sagemaker from sklearn.datasets import * import pandas as pd sagemaker_session = sagemaker.Session() s3 = sagemaker_session.boto_session.resource('s3') bucket = sagemaker_session.default_bucket() #replace with your own bucket name if you have one prefix = 'data/tabular/boston_house' filename = 'boston_house.csv' ###Output _____no_output_____ ###Markdown Download data from online resources and write data to S3 ###Code %%local #helper functions to upload data to s3 def write_to_s3(filename, bucket, prefix): #put one file in a separate folder. This is helpful if you read and prepare data with Athena filename_key = filename.split('.')[0] key = "{}/{}/{}".format(prefix,filename_key,filename) return s3.Bucket(bucket).upload_file(filename,key) def upload_to_s3(bucket, prefix, filename): url = 's3://{}/{}/{}'.format(bucket, prefix, filename) print('Writing to {}'.format(url)) write_to_s3(filename, bucket, prefix) %%local tabular_data = load_boston() tabular_data_full = pd.DataFrame(tabular_data.data, columns=tabular_data.feature_names) tabular_data_full['target'] = pd.DataFrame(tabular_data.target) tabular_data_full.to_csv('boston_house.csv', index = False) upload_to_s3(bucket, 'data/tabular', filename) %%local data_s3_path = 's3://{}/{}/{}'.format(bucket, prefix, filename) print ('this is path to your s3 files: '+data_s3_path) ###Output _____no_output_____ ###Markdown Copy the S3 bucket file pathThe S3 bucket file path is required to read the data on EMR Spark. Copy and paste the path string shown above into the next cell. 
###Code ### replace this path string with your path shown in last step data_s3_path = 's3://sagemaker-us-east-2-060356833389/data/tabular/boston_house/boston_house.csv' ###Output _____no_output_____ ###Markdown Read the data in EMR spark ClusterOnce we have a path to our data in S3, we can use `spark s3 select` to read data with the following command. You can specify a data format, schema is not necessary but recommended, and in options you can specify `compression`, `delimiter`, `header`, etc. For more details, please see [documentation on using S3 select with Spark](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark-s3select.html). ###Code # EMR cell schema = ' CRIM double, ZN double, INDUS double,\ CHAS double, NOX double, RM double, AGE double, DIS double, RAD double, TAX double, PTRATIO double, \ B double, LSTAT double, target double' df = spark.read.format('csv').schema(schema).options(header='true').load(data_s3_path) df.show(5) ###Output _____no_output_____
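###Markdown As a quick check that the schema above was applied correctly, you could run a couple of small Spark operations on the EMR side (a supplementary sketch, not part of the original notebook; the column names follow the Boston housing schema defined above). ###Code
# EMR cell: sanity checks on the DataFrame loaded above.
# Row count and summary statistics for the target column.
print(df.count())
df.describe("target").show()

# Average house value grouped by the Charles River indicator column.
df.groupBy("CHAS").avg("target").show(5)
###Output _____no_output_____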
Web-Scraping_Automation_with_Selenium.ipynb
###Markdown Web Scraping Automation with Selenium Why automate web browsing? There are two main purposes for automating web browsing:1. Testing website functionality, which is usually done by web developers to automate testing, reducing cost and time while also providing a means of round-the-clock testing. It also makes cross-browser proofing easier.2. Botting processes, where a piece of software executes commands or performs routine tasks without user intervention. This can be applied to any repetitive task online, such as ordering take-out every day from a website, filling out forms, logging in, etc. **Selenium** is a Python package that provides a simple API to write functional/acceptance tests using Selenium WebDriver. Through the Selenium Python API, people can access all functionalities of Selenium WebDriver in an intuitive way. Resource: https://selenium-python.readthedocs.io/ Additionally, Selenium interfaces with the internet by using a web driver, which is a browser that Selenium can use to automate web processes. The two we recommend using are **Chrome Driver**, which runs Google Chrome, or **Gecko Driver**, which runs Firefox. You can select whichever browser you feel more comfortable with, and use a package installer to download either one of the drivers. FYI, we use Chrome Driver in the demonstration. Chromedriver: https://chromedriver.chromium.org Geckodriver: https://github.com/mozilla/geckodriver Part I - Basic Browser Interaction In this section, we demonstrate basic browser interaction with Selenium and Google Chrome Driver. It covers how to open a browser and the basics of how Selenium interacts with the web. To test our code, we use this website, [Selenium Easy](https://www.seleniumeasy.com/test/basic-first-form-demo.html), where we can practice implementing some of the basic Selenium functions. We use the **Simple Form Demo** as our example for testing the code. You can access the page by selecting **All Examples** >> **Input Form** >> **Simple Form Demo** under the Menu List on the left side of the screen. ###Code # Import the dependencies from selenium import webdriver # Initialize the webdriver driver = webdriver.Chrome() # Open up the URL driver.get('https://www.seleniumeasy.com/test/basic-first-form-demo.html') ###Output _____no_output_____ ###Markdown Let's interact with this page There are a couple of fields we can interact with on this page. The first is the message board, or single input field. The second is the two input fields. Let's just try writing the classic "Hello World" line in the single input field. To get a message to display, we need to first type a string, then we need to click the "Show Message" button. Translating this to Python, the first step is to tell the program what element we want to interact with. The easiest way to do this is to use the inspector tool. Once we find the element in the HTML document, go ahead and right click, copy, and copy the xpath. **xpath** is element specific, which allows Selenium to find the HTML item. ###Code # Find element by xpath messageField = driver.find_element_by_xpath('//*[@id="sum1"]') # Pass a string to the message field using the send keys method messageField.send_keys('Hello World') ###Output _____no_output_____ ###Markdown Now that we have found the message field and typed the string into it, we can follow the same logic to identify the "Show Message" button in the HTML document and copy its xpath. 
###Code # Click the Show Message button showMessageButton = driver.find_element_by_xpath('//*[@id="get-input"]/button') # Click on the button using click() showMessageButton.click() ###Output _____no_output_____ ###Markdown Next up, let's try interacting with the two input fields. As in the first process, we can send keys to the two input fields and then click the "Get Total" button. ###Code # Input the first number to the first field additionField1 = driver.find_element_by_xpath('//*[@id="sum1"]') additionField1.send_keys('10') # Input the second number to the second field additionField2 = driver.find_element_by_xpath('//*[@id="sum2"]') additionField2.send_keys('20') # Click the "Get Total" button getTotalButton = driver.find_element_by_xpath('//*[@id="gettotal"]/button') getTotalButton.click() ###Output _____no_output_____ ###Markdown Part II - Handling Drag and Drop Most actions performed with Selenium can be accomplished with a single function. For a more challenging web driver action, we explore using Selenium for a drag-and-drop action. As we know, this action consists of three basic steps. First, an object or text must be **selected**. Then it must be **dragged** to the desired position and finally **dropped** into place. To accomplish this task, we need a **source element** that we want to drag and a **destination element** we want to drop it onto. To demo our code, we use this [dhtmlgoodies webpage](http://www.dhtmlgoodies.com/scripts/drag-drop-custom/demo-drag-drop-3.html), which will act as a practice ground for our script. ###Code # Import the dependency from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains # Create a web driver driver = webdriver.Chrome() # Maximize the window driver.maximize_window() # Open up the URL driver.get('http://www.dhtmlgoodies.com/scripts/drag-drop-custom/demo-drag-drop-3.html') # Specify the source and destination element source = driver.find_element_by_xpath('//*[@id="box3"]') dest = driver.find_element_by_xpath('//*[@id="box103"]') # Use Action Chains to drag and drop actions = ActionChains(driver) actions.drag_and_drop(source, dest).perform() ###Output _____no_output_____ ###Markdown Part III - Selenium Implicit Wait Functions The problem with scraping modern day websites is that many of them use asynchronous techniques, like Ajax, to load their webpages. This method allows web servers to update parts of their webpage without reloading the entire thing, so that they are able to create fast-loading and dynamic webpages. However, this becomes a problem when the Selenium web driver tries to locate a web element before it's loaded, which raises an exception and breaks our script. This transitions us into why we need **wait functions**. **Wait functions** add crucial time intervals in between actions performed, allowing the web driver to wait until an element is loaded before it interacts with it. Selenium offers two types of wait: **explicit** and **implicit**. **Explicit wait**: when paired with a condition, the program waits until that condition is satisfied before executing. **Implicit wait**: poll the DOM for a certain amount of time until the element is available. Implicit Wait Function To practice implementing wait functions, we are going to explore [Google Earth](https://www.google.com/earth/), which uses Ajax to load its elements. Navigating to their website, you can see that when we initially load the webpage, the top banner appears slightly after the rest of the page is loaded in. 
Let's say we want to click the "Launch Earth" button. If we grabbed the element the normal way at this point, we would get an exception, as the element doesn't exist when the webpage first loads in. We are going to work around that by using an implicit wait. ###Code # Import Selenium Web Driver from selenium import webdriver # Assigning the URL url = 'https://www.google.com/earth/' # Initialize the webdriver driver = webdriver.Chrome() # Create the implicit wait (5 seconds) for the driver driver.implicitly_wait(5) # Open up the URL driver.get(url) # Find the button element launchEarthButton = driver.find_element_by_xpath('/html/body/header/div/nav[1]/ul[2]/li[2]/a') # Create a click action on the button launchEarthButton.click() ###Output _____no_output_____ ###Markdown Part IV - Selenium Explicit Wait Functions One problem with an **implicit wait** is that the same wait time is applied to every action performed with the driver. Also, a slow internet connection may delay page loading, which may cause an exception in the code. The better solution is to create an **explicit wait** tied to a condition. In our example, since our goal is to click on the "Launch Earth" button on the page, we can set the condition to wait until the element becomes clickable. ###Code # Import the dependencies from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC # Assigning the URL url = 'https://www.google.com/earth/' # Initialize the webdriver driver = webdriver.Chrome() # Open up the URL driver.get(url) # Create the wait function for the web driver # This function will throw an exception after 10 seconds # if the condition is not satisfied wait = WebDriverWait(driver, 10) ###Output _____no_output_____ ###Markdown Next we set the condition for the **explicit wait**. The condition is created with the expected conditions module to make our program wait until the "Launch Earth in Chrome" button becomes clickable. Let's go ahead and grab the XPath of the button and implement this idea; our condition, in this case, is element_to_be_clickable. ###Code # Creating condition for the button action launchEarthButton = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/header/div/nav[1]/ul[2]/li[2]/a'))) # Add the click function to the button launchEarthButton.click() ###Output _____no_output_____
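###Markdown One small extension of the explicit-wait idea above (a supplementary sketch, not part of the original demo): the same `WebDriverWait` object works with other expected conditions, and it is good practice to release the browser in a `finally` block so it is closed even if the wait times out. ###Code
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
try:
    driver.get('https://www.google.com/earth/')
    wait = WebDriverWait(driver, 10)
    # Wait until the page body is present before doing anything else.
    body = wait.until(EC.presence_of_element_located((By.TAG_NAME, 'body')))
    print(body.tag_name)
finally:
    # Always close the browser, even if the wait raised a TimeoutException.
    driver.quit()
###Output _____no_output_____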
notebooks/sedflux3d_and_child.ipynb
###Markdown Sedflux3D + CHILD* Link to this notebook: https://github.com/csdms/pymt/blob/master/notebooks/sedflux3d_and_child.ipynb* Install command: `$ conda install notebook pymt_sedflux pymt_child` ###Code # Some magic to make plots appear within the notebook %matplotlib inline import numpy as np # In case we need to use numpy import pymt.models child = pymt.models.Child() sedflux = pymt.models.Sedflux3D() child_in, child_dir = child.setup( "_child", grid_node_spacing=500.0, grid_x_size=40000.0, grid_y_size=20000.0, run_duration=1e6, ) sedflux_in, sedflux_dir = sedflux.setup( "_sedflux", river_bed_load_flux=0.0, river_suspended_load_concentration_0=0.1, river_suspended_load_concentration_1=0.1, run_duration=1e6 * 365.0, ) child.initialize(child_in, dir=child_dir) sedflux.initialize(sedflux_in, dir=sedflux_dir) gid = child.var["land_surface__elevation"].grid x, y = child.get_grid_x(gid), child.get_grid_y(gid) z = child.get_value("land_surface__elevation") x_shore = 20000.0 z[np.where(x > x_shore)] += 100.0 z[np.where(x <= x_shore)] -= 100.0 child.set_value("land_surface__elevation", z) sedflux.set_value( "bedrock_surface__elevation", mapfrom=("land_surface__elevation", child) ) child.quick_plot( "land_surface__elevation", edgecolors="k", vmin=-200, vmax=200, cmap="BrBG_r" ) sedflux.quick_plot("bedrock_surface__elevation", vmin=-200, vmax=200, cmap="BrBG_r") sedflux.set_value("channel_exit_water_flow__speed", 1.2) sedflux.set_value("channel_exit_x-section__mean_of_width", 400.) sedflux.set_value("channel_exit_x-section__mean_of_depth", 4.) now = child.time times = np.arange(now, now + 1000, 1.0) sedflux.update() child.update() for t in times: child.update_until(t, units="years") sedflux.set_value("channel_water_sediment~bedload__mass_flow_rate", mapfrom=child) sedflux.update_until(t, units="years") z = child.get_value("land_surface__elevation") child.set_value( "land_surface__elevation", mapfrom=("land-or-seabed_sediment_surface__elevation", sedflux), nomap=np.where(z > 0.0), ) child.quick_plot( "land_surface__elevation", edgecolors="k", vmin=-200, vmax=200, cmap="BrBG_r" ) child.quick_plot( "land_surface__elevation", edgecolors="k", vmin=-200, vmax=200, cmap="BrBG_r" ) ###Output _____no_output_____
content/Chapter_18/04_Chi_Squared_Distributions.ipynb
###Markdown Chi-Squared Distributions Let $Z$ be a standard normal random variable and let $V = Z^2$. By the change of variable formula for densities, we found the density of $V$ to be$$f_V(v) ~ = ~ \frac{1}{\sqrt{2\pi}} v^{-\frac{1}{2}} e^{-\frac{1}{2} v}, ~~~~ v > 0$$That's the gamma $(1/2, 1/2)$ density. It is also called the *chi-squared density with 1 degree of freedom,* which we will abbreviate to chi-squared (1). From Chi-Squared $(1)$ to Chi-Squared $(n)$ When we were establishing the properties of the standard normal density, we discovered that if $Z_1$ and $Z_2$ are independent standard normal then $Z_1^2 + Z_2^2$ has the exponential $(1/2)$ distribution. We saw this by comparing two different settings in which the Rayleigh distribution arises. But that wasn't a particularly illuminating reason for why $Z_1^2 + Z_2^2$ should be exponential. But now we know that the sum of independent gamma variables with the same rate is also gamma; the shape parameter adds up and the rate remains the same. Therefore $Z_1^2 + Z_2^2$ is a gamma $(1, 1/2)$ variable. That's the same distribution as exponential $(1/2)$, as you showed in exercises. This explains why the sum of squares of two i.i.d. standard normal variables has the exponential $(1/2)$ distribution.Now let $Z_1, Z_2, \ldots, Z_n$ be i.i.d. standard normal variables. Then $Z_1^2, Z_2^2, \ldots, Z_n^2$ are i.i.d. chi-squared $(1)$ variables. That is, each of them has the gamma $(1/2, 1/2)$ distribution. By induction, $Z_1^2 + Z_2^2 + \cdots + Z_n^2$ has the gamma $(n/2, 1/2)$ distribution. This is called the *chi-squared distribution with $n$ degrees of freedom,* which we will abbreviate to chi-squared $(n)$. Chi-Squared with $n$ Degrees of Freedom For a positive integer $n$, the random variable $X$ has the *chi-squared distribution with $n$ degrees of freedom* if the distribution of $X$ is gamma $(n/2, 1/2)$. That is, $X$ has density$$f_X(x) ~ = ~ \frac{\frac{1}{2}^{\frac{n}{2}}}{\Gamma(\frac{n}{2})} x^{\frac{n}{2} - 1} e^{-\frac{1}{2}x}, ~~~~ x > 0$$Here are the graphs of the chi-squared densities for degrees of freedom 2 through 5. ###Code # NO CODE x = np.arange(0, 14, 0.01) y2 = stats.chi2.pdf(x, 2) y3 = stats.chi2.pdf(x, 3) y4 = stats.chi2.pdf(x, 4) y5 = stats.chi2.pdf(x, 5) plt.plot(x, y2, lw=2, label='2 df') plt.plot(x, y3, lw=2, label='3 df') plt.plot(x, y4, lw=2, label='4 df') plt.plot(x, y5, lw=2, label='5 df') plt.legend() plt.xlabel('$v$') plt.title('Chi-Squared $(n)$ Densities for $n = 2, 3, 4, 5$'); ###Output _____no_output_____
examples/end_to_end_demo_with_multiple_geos.ipynb
###Markdown : Run in Google Colab View source on GitHub End to End Demo with Multiple Geos Welcome to the end to end demo of LightweightMMM, with multiple geos. This is a very simple demo in which we showcase the basic usage and functionalities of the library. Disclaimer: This notebook skips all exploratory data analysis and preprocessing (besides scaling) and assumes the user will do or has done it prior to engaging this point of the demo.This notebook uses dummy data and therefore the numbers and results might not be representative of what one might get on a real dataset. This notebook is nearly identical to the simple_end_to_end_demo notebook, but uses multiple geos to showcase that functionality. For running chains in parallel you can set the number of CPUs at the begining of the program like the following:`numpyro.set_host_device_count(n)` ###Code # First would be to install lightweight_mmm !pip install --upgrade git+https://github.com/google/lightweight_mmm.git # Import jax.numpy and any other library we might need. import jax.numpy as jnp import numpyro # Import the relevant modules of the library from lightweight_mmm import lightweight_mmm from lightweight_mmm import optimize_media from lightweight_mmm import plot from lightweight_mmm import preprocessing from lightweight_mmm import utils ###Output _____no_output_____ ###Markdown Organising the data for modelling ###Code SEED = 105 N_CHAINS = 2 data_size = 104 + 13 n_media_channels = 3 n_extra_features = 1 n_geos = 2 media_data, extra_features, target, costs = utils.simulate_dummy_data( data_size=data_size, n_media_channels=n_media_channels, n_extra_features=n_extra_features, geos=n_geos) ###Output _____no_output_____ ###Markdown We can then split the dataset into train and test. Lets leave only the last 13 weeks for testing in this case. Note that this train/test split looks exactly the same as the notebook without multiple geos. ###Code # Split and scale data. split_point = data_size - 13 # Media data media_data_train = media_data[:split_point, ...] media_data_test = media_data[split_point:, ...] # Extra features extra_features_train = extra_features[:split_point, ...] extra_features_test = extra_features[split_point:, ...] # Target target_train = target[:split_point] ###Output _____no_output_____ ###Markdown Scaling is essential for many modelling problems and this one is no exception.We provide the class `CustomScaler` which behaves accordingly with `sklearn`scalers.In most cases you will need 3 or 4 scalers. One scaler for the media data, onefor the target and one for costs. Optionally if you are adding extra featuresthose might need an extra scaler. **It is very important that you save and"carry with you" those scalers throughout your MMM journey as LighweightMMM willallow you to re-insert these scalers at different points to ensure everything isalways in the correct scale and results. If some results don't make sense, itmight be a scaling problem.** A few more details on CustomScaler usage:This scaler can be used in two fashions for both the multiplication and divisionoperation. - By specifying a value to use for the scaling operation. - By specifying an operation used at column level to calculate the value for theactual scaling operation.Eg. if one wants to scale the dataset by multiply by 100 you can directly passmultiply_by=100. Value can also be an array of an appropriate shape by whichto divide or multiply the data. 
But if you want to multiply by the mean value of eachcolumn, then you can pass multiply_operation=jnp.mean (or any other operationdesired).Operation parameters have the upper hand in the cases where both values andoperations are passed, values will be ignored in this case.Consult the full class documentation if you still need to know more. ###Code media_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean) extra_features_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean) target_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean) cost_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean) media_data_train = media_scaler.fit_transform(media_data_train) extra_features_train = extra_features_scaler.fit_transform(extra_features_train) target_train = target_scaler.fit_transform(target_train) costs = cost_scaler.fit_transform(costs) ###Output _____no_output_____ ###Markdown Training the model The currently available models are the following: - hill_adstock - adstock - carryover ###Code mmm = lightweight_mmm.LightweightMMM(model_name="carryover") ###Output _____no_output_____ ###Markdown Training the model will require the following mandatory parameters: - media- total_costs (one value per channel) - targetWe can optionally also pass the following: - extra_features: Other variables to add to the model. - degrees_seasonality: Number of degrees to use for seasonality. Default is 3. - seasonality_frequency: Frequency of the time period used. Default is 52 as in 52 weeks per year. - media_names: Names of the media channels passed. - number_warmup: Number of warm up samples. Default is 1000. - number_samples: Number of samples during sampling. Default is 1000. - number_chains: Number of chains to sample. Default is 2. ###Code number_warmup=2000 number_samples=2000 # For replicability in terms of random number generation in sampling # reuse the same seed for different trainings. mmm.fit( media=media_data_train, total_costs=costs, target=target_train, extra_features=extra_features_train, number_warmup=number_warmup, number_samples=number_samples, number_chains=N_CHAINS, seed=SEED) ###Output sample: 100%|██████████| 4000/4000 [02:39<00:00, 25.01it/s, 255 steps of size 2.09e-02. acc. prob=0.90] sample: 100%|██████████| 4000/4000 [02:42<00:00, 24.63it/s, 255 steps of size 2.05e-02. acc. 
prob=0.85] ###Markdown You can check the summary of your trace by printing a summary: ###Code mmm.print_summary() ###Output mean std median 5.0% 95.0% n_eff r_hat ad_effect_retention_rate[0] 0.52 0.30 0.52 0.10 1.00 3158.95 1.00 ad_effect_retention_rate[1] 0.48 0.30 0.48 0.00 0.90 1951.37 1.00 ad_effect_retention_rate[2] 0.45 0.11 0.46 0.29 0.64 1386.07 1.00 beta_extra_features[0,0] -0.31 0.04 -0.31 -0.38 -0.24 2589.72 1.00 beta_extra_features[0,1] -0.30 0.04 -0.30 -0.37 -0.23 1982.51 1.00 beta_media[0,0] 0.01 0.01 0.00 0.00 0.02 3119.69 1.00 beta_media[0,1] 0.01 0.01 0.00 0.00 0.02 3567.91 1.00 beta_media[1,0] 0.02 0.02 0.01 0.00 0.04 2449.12 1.00 beta_media[1,1] 0.02 0.02 0.01 0.00 0.04 1814.20 1.00 beta_media[2,0] 0.93 0.14 0.92 0.69 1.13 564.24 1.00 beta_media[2,1] 0.91 0.13 0.90 0.72 1.12 1229.09 1.00 beta_seasonality[0] 0.16 0.13 0.11 0.01 0.34 199.83 1.01 beta_seasonality[1] 0.17 0.13 0.13 0.01 0.38 323.94 1.01 beta_trend[0] -0.00 0.00 -0.00 -0.01 0.00 2775.24 1.00 beta_trend[1] -0.00 0.00 -0.00 -0.01 0.00 2777.99 1.00 channel_beta_media[0,0] 0.04 0.09 0.01 0.00 0.10 2311.02 1.00 channel_beta_media[1,0] 0.07 0.16 0.02 0.00 0.16 2578.05 1.00 channel_beta_media[2,0] 1.07 0.44 0.98 0.41 1.73 460.32 1.01 expo_trend 0.08 0.09 0.06 0.00 0.19 2187.41 1.00 exponent[0] 0.89 0.10 0.92 0.76 1.00 3274.78 1.00 exponent[1] 0.89 0.10 0.92 0.76 1.00 3918.94 1.00 exponent[2] 0.95 0.05 0.97 0.88 1.00 1321.00 1.00 gamma_seasonality[0,0] -0.03 1.01 -0.07 -1.62 1.65 3372.07 1.00 gamma_seasonality[0,1] -0.02 0.96 -0.00 -1.59 1.54 473.38 1.01 gamma_seasonality[1,0] -0.48 0.41 -0.35 -1.04 -0.06 637.40 1.00 gamma_seasonality[1,1] -0.25 0.23 -0.17 -0.57 -0.01 712.92 1.00 intercept[0] 0.40 0.25 0.39 0.00 0.74 107.87 1.02 intercept[1] 0.41 0.24 0.40 0.00 0.76 141.62 1.02 peak_effect_delay[0] 1.71 1.31 1.48 0.00 3.55 2667.13 1.00 peak_effect_delay[1] 1.26 1.15 0.93 0.00 2.87 3149.00 1.00 peak_effect_delay[2] 0.48 0.13 0.50 0.28 0.69 676.13 1.00 sigma[0] 0.09 0.01 0.09 0.08 0.10 2832.65 1.00 sigma[1] 0.08 0.01 0.08 0.07 0.09 2268.03 1.00 Number of divergences: 217 ###Markdown We can visualise the posterior distributions of the media effects, one for each channel-geo combination. ###Code plot.plot_media_channel_posteriors(media_mix_model=mmm) ###Output ###Markdown One can also check your model's fit to the training data, one plot per geo. ###Code # Here is another example where we can pass the target scaler if you want the plot to be in the "not scaled scale" plot.plot_model_fit(mmm, target_scaler=target_scaler) ###Output _____no_output_____ ###Markdown If one wants to run predictions on unseen data they can rely on the `predict`method: ###Code # We have to scale the test media data if we have not done so before. new_predictions = mmm.predict(media=media_scaler.transform(media_data_test), extra_features=extra_features_scaler.transform(extra_features_test), seed=SEED) new_predictions.shape plot.plot_out_of_sample_model_fit(out_of_sample_predictions=new_predictions, out_of_sample_target=target_scaler.transform(target[split_point:])) ###Output _____no_output_____ ###Markdown Media insights ###Code media_effect, roi_hat = mmm.get_posterior_metrics(target_scaler=target_scaler, cost_scaler=cost_scaler) ###Output _____no_output_____ ###Markdown We can quickly visualise the estimated media effects with their respectivecredibility intervals. 
###Code plot.plot_bars_media_metrics(metric=media_effect, metric_name="Media Effect") plot.plot_bars_media_metrics(metric=roi_hat, metric_name="ROI hat") ###Output _____no_output_____ ###Markdown Another vital question we can solve with MMMs is how each media channel behavesindividually as we invest more in it.For that we can plot the curve response of all media channels with the followingfunction: ###Code plot.plot_response_curves( media_mix_model=mmm, target_scaler=target_scaler, seed=SEED) ###Output _____no_output_____ ###Markdown Optimization The optimization is meant to solve the budget allocation questions for you. Note that the optimization is done at a national level, not a geo level, since we typically use MMMs to inform channel-level decisions at a high level.First you need to provide for how long you want to optimize your budget (eg. 15weeks in this case). The optimization values will be bounded by +- 20% of the max and min historicvalues used for training. Which means the optimization won't recommend tocompletely change your strategy but how to make some budget re-allocation.You can change that percentage with the following parameters: -bounds_lower_pct - bounds_upper_pctWhich can hold 1 value for all channels or 1 value per channel. Prices are the average price you would expect for the media units of eachchannel. If your data is already a money unit (eg. $) your prices should be anarray of 1s. ###Code prices = jnp.ones(mmm.n_media_channels) ###Output _____no_output_____ ###Markdown The budget is how much one would like to allocate throughtout the total of`n_time_periods`. Make sure this amount is inline with the historic spend orotherwise some conditions/bounds in the optimization might not be met. ###Code n_time_periods = 10 budget = jnp.sum(jnp.dot(prices, media_data.mean(axis=0)))* n_time_periods # Run optimization with the parameters of choice. solution = optimize_media.find_optimal_budgets( n_time_periods=n_time_periods, media_mix_model=mmm, extra_features=extra_features_scaler.transform(extra_features_test)[:n_time_periods], budget=budget, prices=prices, media_scaler=media_scaler, target_scaler=target_scaler, seed=SEED) # Obtain the optimal weekly allocation. optimal_buget_allocation = prices * solution.x optimal_buget_allocation # Both values should be very close in order to compare KPI budget, optimal_buget_allocation.sum() ###Output _____no_output_____ ###Markdown We can double check the budget constraint was met: ###Code # Both numbers should be almost equal budget, jnp.sum(solution.x * prices) ###Output _____no_output_____ ###Markdown Saving the model to disk ###Code # We can use the utilities for saving models to disk. file_path = "media_mix_model.pkl" utils.save_model(media_mix_model=mmm, file_path=file_path) # Once saved one can load the models. loaded_mmm = utils.load_model(file_path=file_path) loaded_mmm.trace["beta_media"].shape # Example of accessing any of the model values. ###Output _____no_output_____
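###Markdown As an optional sanity check (a small sketch that reuses only the calls already shown above, not part of the original demo), we can ask the reloaded model for the same out-of-sample predictions as before. Since both objects carry the same posterior samples and we pass the same seed, the two prediction arrays are expected to agree up to numerical tolerance. ###Code
# Sketch: the reloaded model should reproduce the earlier out-of-sample predictions.
reloaded_predictions = loaded_mmm.predict(
    media=media_scaler.transform(media_data_test),
    extra_features=extra_features_scaler.transform(extra_features_test),
    seed=SEED)
jnp.allclose(new_predictions, reloaded_predictions)
###Output _____no_output_____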
chapter_computer-vision/demo_ssd.ipynb
###Markdown 1. Predict with pre-trained SSD models==========================================This article shows how to play with pre-trained SSD models with only a fewlines of code.First let's import some necessary libraries: ###Code from gluoncv import model_zoo, data, utils from matplotlib import pyplot as plt ###Output _____no_output_____ ###Markdown Load a pretrained model-------------------------Let's get an SSD model trained with 512x512 images on Pascal VOCdataset with ResNet-50 V1 as the base model. By specifying``pretrained=True``, it will automatically download the model from the modelzoo if necessary. For more pretrained models, please refer to:doc:`../../model_zoo/index`. ###Code net = model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True) ###Output _____no_output_____ ###Markdown Pre-process an image--------------------Next we download an image, and pre-process with preset data transforms. Here wespecify that we resize the short edge of the image to 512 px. But you canfeed an arbitrarily sized image.You can provide a list of image file names, such as ``[im_fname1, im_fname2,...]`` to :py:func:`gluoncv.data.transforms.presets.ssd.load_test` if youwant to load multiple image together.This function returns two results. The first is a NDArray with shape`(batch_size, RGB_channels, height, width)`. It can be fed into themodel directly. The second one contains the images in numpy format toeasy to be plotted. Since we only loaded a single image, the first dimensionof `x` is 1. ###Code im_fname = utils.download('https://github.com/dmlc/web-data/blob/master/' + 'gluoncv/detection/street_small.jpg?raw=true', path='street_small.jpg') x, img = data.transforms.presets.ssd.load_test(im_fname, short=512) print('Shape of pre-processed image:', x.shape) ###Output _____no_output_____ ###Markdown Inference and display---------------------The forward function will return all detected bounding boxes, and thecorresponding predicted class IDs and confidence scores. Their shapes are`(batch_size, num_bboxes, 1)`, `(batch_size, num_bboxes, 1)`, and`(batch_size, num_bboxes, 4)`, respectively.We can use :py:func:`gluoncv.utils.viz.plot_bbox` to visualize theresults. We slice the results for the first image and feed them into `plot_bbox`: ###Code class_IDs, scores, bounding_boxs = net(x) ax = utils.viz.plot_bbox(img, bounding_boxs[0], scores[0], class_IDs[0], class_names=net.classes) plt.show() ###Output _____no_output_____
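###Markdown As a small follow-up (a sketch, not part of the original tutorial), the raw outputs can also be inspected programmatically instead of only visually. Slots without a detection are padded with negative values, so keeping only the entries whose score exceeds a confidence threshold (0.5 here, an arbitrary choice) lists roughly the boxes that `plot_bbox` draws above. ###Code
# Sketch: print the class name and confidence of each detection above a threshold.
import numpy as np

thresh = 0.5  # arbitrary confidence cutoff
ids = class_IDs[0].asnumpy().ravel()
confs = scores[0].asnumpy().ravel()
for cls_id, conf in zip(ids, confs):
    if conf >= thresh:
        print(net.classes[int(cls_id)], round(float(conf), 3))
###Output _____no_output_____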
docs/transform/custom_transformers.ipynb
###Markdown Custom Transformers View on QuantumAI Run in Google Colab View source on GitHub Download notebook The [Transformers](/cirq/transformers) page introduced what a transformer is, what transformers are available in Cirq, and how to create a simple one as a composite of others. This page covers the details necessary for creating more nuanced custom transformers, including `cirq.TransformerContext`, primitives and decompositions. Setup ###Code try: import cirq except ImportError: print("installing cirq...") !pip install --quiet cirq import cirq print("installed cirq.") ###Output _____no_output_____ ###Markdown `cirq.TRANSFORMER` API and `@cirq.transformer` decoratorAny callable that satisfies the `cirq.TRANSFORMER` contract, i.e. takes a `cirq.AbstractCircuit` and `cirq.TransformerContext` and returns a transformed `cirq.AbstractCircuit`, is a valid transformer in Cirq. You can create a custom transformer by simply decorating a class/method, that satisfies the above contract, with `@cirq.transformer` decorator. ###Code @cirq.transformer def reverse_circuit(circuit, *, context=None): """Transformer to reverse the input circuit.""" return circuit[::-1] @cirq.transformer class SubstituteGate: """Transformer to substitute `source` gates with `target` in the input circuit.""" def __init__(self, source, target): self._source = source self._target = target def __call__(self, circuit, *, context=None): batch_replace = [] for i, op in circuit.findall_operations(lambda op: op.gate == self._source): batch_replace.append((i, op, self._target.on(*op.qubits))) transformed_circuit = circuit.unfreeze(copy=True) transformed_circuit.batch_replace(batch_replace) return transformed_circuit # Build your circuit q = cirq.NamedQubit("q") circuit = cirq.Circuit( cirq.X(q), cirq.CircuitOperation(cirq.FrozenCircuit(cirq.X(q), cirq.Y(q))), cirq.Z(q) ) # Transform and compare the circuits. substitute_gate = SubstituteGate(cirq.X, cirq.S) print("Original Circuit:", circuit, "\n", sep="\n") print("Reversed Circuit:", reverse_circuit(circuit), "\n", sep="\n") print("Substituted Circuit:", substitute_gate(circuit), sep="\n") ###Output _____no_output_____ ###Markdown `cirq.TransformerContext` to store common configurable options`cirq.TransformerContext` is a dataclass that stores common configurable options for all transformers. All cirq transformers should accept the transformer context as an optional keyword argument. The `@cirq.transformer` decorator can inspect the `cirq.TransformerContext` argument and automatically append useful functionality, like support for automated logging and recursively running the transformer on nested sub-circuits. `cirq.TransformerLogger` and support for automated loggingThe `cirq.TransformerLogger` class is used to log the actions of a transformer on an input circuit. `@cirq.transformer` decorator automatically adds support for logging the initial and final circuits for each transfomer step. ###Code # Note that you want to log the steps. context = cirq.TransformerContext(logger=cirq.TransformerLogger()) # Transform the circuit. transformed_circuit = reverse_circuit(circuit, context=context) transformed_circuit = substitute_gate(transformed_circuit, context=context) # Show the steps. context.logger.show() ###Output _____no_output_____ ###Markdown Neither of the custom transformers, `reverse_circuit` or `substitute_gate`, had any explicit support for a logger present in the `context` argument, but the decorator was able to use it anyways. 
If your custom transformer calls another transformer as part of it, then that transformer should log its behavior as long as you pass the `context` object to it. All Cirq-provided transformers do this. ###Code @cirq.transformer def reverse_and_substitute(circuit, context=None): reversed_circuit = reverse_circuit(circuit, context=context) reversed_and_substituted_circuit = substitute_gate(reversed_circuit, context=context) return reversed_and_substituted_circuit # Note that you want to log the steps. context = cirq.TransformerContext(logger=cirq.TransformerLogger()) # Transform the circuit. transformed_circuit = reverse_and_substitute(circuit, context=context) # Show the steps. context.logger.show() ###Output _____no_output_____ ###Markdown Note: Each transformer that is run on the circuit is indexed globally, over all transformers run through the logger. However, when one transformer (`reverse_and_substitute`) runs other transformers as a subprocess (`reverse_circuit` and `SubstituteGate`), the constitutent transformers are indented to show this relationship. Support for `deep=True`You can call `@cirq.transformer(add_deep_support=True)` to automatically add the functionality of recursively running the custom transformer on circuits wrapped inside `cirq.CircuitOperation`. The recursive execution behavior of the transformer can then be controlled by setting `deep=True` in the transformer context. ###Code @cirq.transformer(add_deep_support=True) def reverse_circuit_deep(circuit, *, context=None): """Transformer to reverse the input circuit.""" return circuit[::-1] @cirq.transformer(add_deep_support=True) class SubstituteGateDeep(SubstituteGate): """Transformer to substitute `source` gates with `target` in the input circuit.""" pass # Note that you want to transform the CircuitOperations. context = cirq.TransformerContext(deep=True) # Transform and compare the circuits. substitute_gate_deep = SubstituteGateDeep(cirq.X, cirq.S) print("Original Circuit:", circuit, "\n", sep="\n") print( "Reversed Circuit with deep=True:", reverse_circuit_deep(circuit, context=context), "\n", sep="\n", ) print( "Substituted Circuit with deep=True:", substitute_gate_deep(circuit, context=context), sep="\n" ) ###Output _____no_output_____ ###Markdown Transformer Primitives and DecompositionsIf you need to perform more fundamental changes than just running other transformers in sequence (like `SubstituteGate` did with `cirq.Circuit.batch_replace`), Cirq provides circuit compilation primitives and gate decomposition utilities for doing so. Moment preserving transformer primitivesCirq's transformer primitives are useful abstractions to implement common transformer patterns, while preserving the moment structure of input circuit. 
Some of the notable transformer primitives are:

- **`cirq.map_operations`**: Applies local transformations on operations, by calling `map_func(op)` for each `op`.
- **`cirq.map_moments`**: Applies local transformations on moments, by calling `map_func(m)` for each moment `m`.
- **`cirq.merge_operations`**: Merges connected components of operations by iteratively calling `merge_func(op1, op2)` for every pair of mergeable operations `op1` and `op2`.
- **`cirq.merge_moments`**: Merges adjacent moments, from left to right, by iteratively calling `merge_func(m1, m2)` for adjacent moments `m1` and `m2`.

An important property of these primitives is that they support the common configurable options present in `cirq.TransformerContext`, such as `tags_to_ignore` and `deep`, as demonstrated in the example below. Note: Primitives support both the `deep` argument and the `tags_to_ignore` argument, like many transformers, so you can easily pass those values in from a `cirq.TransformerContext`, if one is available. ###Code
@cirq.transformer
def substitute_gate_using_primitives(circuit, *, context=None, source=cirq.X, target=cirq.S):
    """Transformer to substitute `source` gates with `target` in the input circuit.

    The transformer is implemented using the `cirq.map_operations` primitive and hence
    has built-in support for
    1. Recursively running the transformer on sub-circuits if `context.deep is True`.
    2. Ignoring operations tagged with any of `context.tags_to_ignore`.
    """
    return cirq.map_operations(
        circuit,
        map_func=lambda op, _: target.on(*op.qubits) if op.gate == source else op,
        deep=context.deep if context else False,
        tags_to_ignore=context.tags_to_ignore if context else (),
    )


# Build your circuit from x_y_x components.
x_y_x = [cirq.X(q), cirq.Y(q), cirq.X(q).with_tags("ignore")]
circuit = cirq.Circuit(x_y_x, cirq.CircuitOperation(cirq.FrozenCircuit(x_y_x)), x_y_x)

# Note that you want to transform the CircuitOperations and ignore tagged operations.
context = cirq.TransformerContext(deep=True, tags_to_ignore=("ignore",))

# Compare the before and after circuits.
print("Original Circuit:", circuit, "\n", sep="\n")
print(
    "Substituted Circuit:",
    substitute_gate_using_primitives(circuit, context=context),
    "\n",
    sep="\n",
)
###Output _____no_output_____ ###Markdown Analytical Gate Decompositions

Gate decomposition is the process of implementing / decomposing a given unitary `U` using only gates that belong to a specific target gateset. Cirq provides analytical decomposition methods, often based on [KAK Decomposition](https://arxiv.org/abs/quant-ph/0507171), to decompose one-, two-, and three-qubit unitary matrices into specific target gatesets. Some notable decompositions are:

* **`cirq.single_qubit_matrix_to_pauli_rotations`**: Decomposes a single-qubit matrix to ZPow/XPow/YPow rotations.
* **`cirq.single_qubit_matrix_to_phased_x_z`**: Decomposes a single-qubit matrix to a PhasedX and Z gate.
* **`cirq.two_qubit_matrix_to_sqrt_iswap_operations`**: Decomposes any two-qubit unitary matrix into ZPow/XPow/YPow/sqrt-iSWAP gates.
* **`cirq.two_qubit_matrix_to_cz_operations`**: Decomposes any two-qubit unitary matrix into ZPow/XPow/YPow/CZ gates.
* **`cirq.three_qubit_matrix_to_operations`**: Decomposes any three-qubit unitary matrix into CZ/CNOT and single-qubit rotations.

You can use these analytical decomposition methods to build transformers which can rewrite a given circuit using only gates from the target gateset. This example again uses the transformer primitives to support recursive execution and `ignore` tagging. 
###Code @cirq.transformer def convert_to_cz_target(circuit, *, context=None, atol=1e-8, allow_partial_czs=True): """Transformer to rewrite the given circuit using CZs + 1-qubit rotations. Note that the transformer decomposes only operations on <= 2-qubits and is presented as an illustration of using transformer primitives + analytical decomposition methods. """ def map_func(op: cirq.Operation, _) -> cirq.OP_TREE: if not (cirq.has_unitary(op) and cirq.num_qubits(op) <= 2): return op matrix = cirq.unitary(op) qubits = op.qubits if cirq.num_qubits(op) == 1: g = cirq.single_qubit_matrix_to_phxz(matrix) return g.on(*qubits) if g else [] return cirq.two_qubit_matrix_to_cz_operations( *qubits, matrix, allow_partial_czs=allow_partial_czs, atol=atol ) return cirq.map_operations_and_unroll( circuit, map_func, deep=context.deep if context else False, tags_to_ignore=context.tags_to_ignore if context else (), ) # Build the circuit from three versions of the same random component component = cirq.testing.random_circuit(qubits=3, n_moments=2, op_density=0.8, random_state=1234) component_operation = cirq.CircuitOperation(cirq.FrozenCircuit(component)) # A normal component, a CircuitOperation version, and a ignore-tagged CircuitOperation version circuit = cirq.Circuit(component, component_operation, component_operation.with_tags('ignore')) # Note that you want to transform the CircuitOperations, ignore tagged operations, and log the steps. context = cirq.TransformerContext( deep=True, tags_to_ignore=("ignore",), logger=cirq.TransformerLogger() ) # Run your transformer. converted_circuit = convert_to_cz_target(circuit, context=context) # Ensure that the resulting circuit is equivalent. cirq.testing.assert_circuits_with_terminal_measurements_are_equivalent(circuit, converted_circuit, atol=1e-6) # Show the steps executed. context.logger.show() ###Output _____no_output_____
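###Markdown The examples above focused on the mapping primitives and the analytical decompositions. As a final illustration, here is a minimal sketch of the `cirq.merge_operations` primitive listed earlier; the `merge_z_rotations` helper is written just for this example. It combines adjacent Z-power rotations acting on the same qubit and returns `None` for every pair it does not want to merge, which tells the primitive to leave that pair untouched. ###Code
def merge_z_rotations(op1: cirq.Operation, op2: cirq.Operation):
    """Merge two adjacent Z-power rotations into one; return None to skip merging."""
    if isinstance(op1.gate, cirq.ZPowGate) and isinstance(op2.gate, cirq.ZPowGate):
        return cirq.Z(op1.qubits[0]) ** (op1.gate.exponent + op2.gate.exponent)
    return None


q = cirq.NamedQubit("q")
circuit_with_zs = cirq.Circuit(cirq.Z(q) ** 0.25, cirq.X(q), cirq.Z(q) ** 0.5, cirq.Z(q) ** 0.25)
print(cirq.merge_operations(circuit_with_zs, merge_z_rotations))
###Output _____no_output_____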
ipynb/Bike Speed versus Grade.ipynb
###Markdown Peter Norvig, Oct 2017Revised Jan 2020 Bicycling: Speed, Grade, VAM, Hill-index, EddingtonLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded a bunch of my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). I parse the file into `rides`, a listof `Ride` structures. ###Code import matplotlib.pyplot as plt import collections import numpy as np import re Ride = collections.namedtuple('Ride', 'miles, hours, feet, date, title') def parse_rides(lines: str) -> [Ride]: """Parse lines from a Strava log file into a list of `Ride`. Tab separated fields: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" return [Ride(number(mi), parse_hours(time), number(ft), date, title) for line in lines if line.startswith('Ride') for _, date, title, time, mi, ft in [line.strip().split('\t')]] def number(string) -> float: return float(re.sub(r'[^0-9. ]', '', string)) def parse_hours(time: str) -> float: if time.count(':') < 2: time = '0:' + time hour, min, sec = map(int, time.split(':')) return hour + min/60 + sec/3600 rides = parse_rides(open('bikerides25.tsv')) ###Output _____no_output_____ ###Markdown From the raw data I will derive three important arrays of numbers: - `miles`: array of lengths of each ride in miles- `hours`: array of durations of each ride in hours- `feet`: array of total climbing of each ride in feet I'll show a simple scatter plot visualization: ###Code def column(attr, rides): return np.array([getattr(r, attr) for r in rides]) miles = column('miles', rides) hours = column('hours', rides) feet = column('feet', rides) plt.scatter(feet/miles, miles/hours); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. Just eyeballing the scatter plot, it looks like some kind of downward sloping curve would be a better fit than a straight line, so I'll fit quadratic (degree two) and cubic (degree 3) polynomials to the data (for no good theoretical reason; just because those are two simple nonlinear functions and numpy has a solver for them). I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degrees=(2, 3)): """Plot X versus Y and a best fit curve to it, with some bells and whistles.""" plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') grid(); plt.ylabel(ylabel); plt.xlabel(xlabel) plt.scatter(X, Y, c='k') X1 = np.linspace(min(X), max(X), 100) for degree in degrees: F = poly_fit(X, Y, degree) plt.plot(X1, [F(x) for x in X1], '-', lw=2) plt.title(f'{len(X)} rides') def grid(): plt.minorticks_on(); plt.grid(which='minor', ls=':', alpha=0.7) def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(feet/miles, miles/hours) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But when the grade is steeper than 50 ft/mile, the speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and around 10 mph at 120 ft/mile. 
Note that 120 ft/mile is only 2.3% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 6.9% average grade on the up part. Estimating Time of a RideThis data can help me predict how long will it take to ride a route with a given distance and elevation. For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def estimate(dist, climb, F=poly_fit(feet/miles, miles/hours, 2)) -> float: """Given a distance in miles and total climb in feet, estimate time in minutes.""" return 60 * dist / F(climb / dist) f'Coast: {estimate(15.7, 344):.0f} min, Creek: {estimate(13.5, 735):.0f} min.' ###Output _____no_output_____ ###Markdown This predicts the shorter creek route would be about 6 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? To answer that I'll go back to Strava and retrieve data from individual **segments**, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. I picked some representative climbing segments ([`bikesegments.csv`](bikesegments.csv)) with the segment length in miles and climb in feet, along with several of my times on the segment. (Strava shows my times for up to 10 rides on each segment; I picked the first, middle, and last of the rides shown.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. Each line contains: 'title, miles, feet, time(s)'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: time = parse_hours(time) yield Ride(float(mi), time, int(ft), 'date?', title) segments = list(parse_segments(open('bikesegments.csv'))) miles2 = column('miles', segments) hours2 = column('hours', segments) feet2 = column('feet', segments) ###Output _____no_output_____ ###Markdown For the plot I will list grade in percent rather than feet per mile: ###Code pct2 = feet2 / miles2 * 100 / 5280 show(pct2, miles2 / hours2, xlabel='Segment Grade (percent)') ###Output _____no_output_____ ###Markdown So I can cruise at 15 to 17 mph on a 1% grade, but only about 8 mph at 5% grade, and around 5 mph on 8% grades. VAMThe average number of vertical feet (or meters) climbed per hour (regardless of horizontal distance travelled) is known as [VAM](https://en.wikipedia.org/wiki/VAM_%28bicycling%29), which stands for *velocità ascensionale media* (for native Campagnolo speakers) or *mean ascent velocity* (for SRAM) or 平均上昇率 (for Shimano). 
The theory is that for medium-steepish climbs, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (For flatish rides power is spent on wind and rolling resistance, and for super-steep rides power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM versus grade (in percent): ###Code show(pct2, feet2 / hours2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown I don't see the flattening effect; I see a wide spread of VAM numbers; at about 8% I range from 1500 to 3000 VAM. Champions can do 6000. Another way to look at it: at what speed am I getting the most VAM (implicitly assuming I'm giving a similar level of effort at all times, which is not really true)? The curve says 5 or 6 mph, but the data is pretty noisy. ###Code show(miles2 / hours2, feet2 / hours2, xlabel='Speed(mph)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown Hill-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications, I invented a new metric, the hill-index:> *Your **hill-index** is the maximum number**&nbsp;h** for which you can regularly climb an**&nbsp;h** percent grade at **&nbsp;h** miles per hour.*I think **my hill-index is 6**. (I know that [Old La Honda](https://graphics.stanford.edu/~lucasp/grade/oldlahonda.html) is a 7% grade, and I only [achieved 7.0 mph](https://www.strava.com/segments/9343401?filter=my_results&gender=all) on it once out of many tries). But let's draw a plot to confirm: ###Code points = range(2, 8) show(pct2, miles2 / hours2, xlabel='Grade (percent)') plt.plot(points, points, 'gD:', lw=2); ###Output _____no_output_____ ###Markdown The green dotted line is where grade = speed, the red and blue lines are best-fit polynomials, and both best-fit lines are above the diamond at 6%, but both are below the diamond for 7%, so my guess is confirmed: my hill-index is 6.Note that the original h-index for publications is based on cumulative totals, so it can never decrease, but the Hill-index is based on an average so it can (and sadly, often does) decrease as you grow older. Eddington NumberSpeaking of h-numbers, the physicist [Sir Arthur Eddington](https://en.wikipedia.org/wiki/Arthur_Eddington) (1882-1944) was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the maximum integer **e** such that you have bicycled at least**&nbsp;e** miles on at least**&nbsp;e** days.*Let's calculate mine: ###Code def Eddington(distances) -> int: """Eddington number: The maximum integer e such that you have bicycled a distance of at least e on at least e days.""" distances = sorted(distances, reverse=True) return max(e for e, d in enumerate(distances, 1) if d >= e) Eddington(miles) ###Output _____no_output_____ ###Markdown My **Eddington Number is 58**: I've done at least 58 miles on at least 58 days. This is a bit above [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number), but not as good as Eddington himself: he was 62 years old when he died with a number of **84**, and his available roads, bicycles, and navigation aids were not nearly as nice as mine, so good for him.How many rides would I need to improve my number from 58 to 60? It must be 2, right? 
**Wrong!** Here's the problem: ###Code [r for r in rides if 58 <= r.miles <= 60] ###Output _____no_output_____ ###Markdown These six rides all counted towards my Eddington number of 58, but don't count towards an Eddington number of 60, because they are all less than 60 miles long. So I would need to replace all of these rides with 60+ mile rides before I get an Eddington number of 60. The following chart tells me how many rides I have to go&mdash;what I call the **Eddington Gap**&mdash;to reach various Eddington numbers: ###Code def Egaps(distances, targets): """Print the number of days doing various target distances, and the Eddington gap.""" print('Dist Days Gap') print('---- ---- ---') for target in targets: days = sum(d >= target for d in distances) print(f'{target:4} {days:4} {target - days:3}') Egaps(miles, range(58, 71)) ###Output Dist Days Gap ---- ---- --- 58 60 -2 59 55 4 60 54 6 61 48 13 62 46 16 63 44 19 64 42 22 65 40 25 66 38 28 67 32 35 68 30 38 69 24 45 70 23 47 ###Markdown An easy way to get bigger Eddington numbers is to use kilometers rather than miles: ###Code kms = miles * 1.609344 Eddington(kms) Egaps(kms, range(81, 101)) ###Output Dist Days Gap ---- ---- --- 81 87 -6 82 79 3 83 78 5 84 73 11 85 72 13 86 71 15 87 69 18 88 68 20 89 67 22 90 66 24 91 64 27 92 63 29 93 60 33 94 58 36 95 55 40 96 54 42 97 51 46 98 48 50 99 46 53 100 46 54 ###Markdown My **metric Eddington Number is 81**.Here is my Eddington number progress over the years, in miles and kms, from when I first started tracking rides: ###Code def year(ride) -> int: return int(ride.date[-4:]) def Eprogress(years, rides=rides): "Print a table of Eddington numbers by year" print('Year E_ml E_km') print('---- ----- -----') for y in years: miles = np.array([r.miles for r in rides if year(r) <= y]) kms = 1.609344 * miles print(f'{y} {Eddington(miles):5} {Eddington(kms):5}') Eprogress(range(2014, 2021)) ###Output Year E_ml E_km ---- ----- ----- 2014 34 45 2015 41 60 2016 46 65 2017 50 72 2018 53 75 2019 54 78 2020 58 81 ###Markdown Peter Norvig, Oct 2017Data updated monthly Bicycling StatisticsDuring a pandemic, bicycling is a great way to (1) spend some time, (2) get some exercise, (3) stay far enough away from others to be safe, and (4) generate some data to analyze. This notebook does the analysis on three types of data:- **rides**: a random sampling of my 20+ mile rides, and all my 60+ mile rides (from [Strava](https://www.strava.com/athletes/575579)). - **segments**: short sections of a mile or so long, chosen to have a variety of steepness (from [Strava](https://www.strava.com/athletes/575579)). - **places**: percentages of the roads that I have ridden in each town, city, county, etc. (from [Wandrer.earth](https://wandrer.earth/athletes/3534/)). This notebook is mostly for my own benefit, but if you're a cyclist you're welcome to adapt it to your data, and if you're a data scientist, you might find it an interesting example of exploratory data analysis. Wandering PlacesThe website [**Wandrer.earth**](https://wandrer.earth) tracks the distinct roads a user has biked on. It provides a fun incentive to get out and explore new roads. The site is gamified in a way that the biggest rewards are for first reaching 25% of the road-miles in each place (city or neighborhood), and then 90%. (You get no credit for repeating a road you've already been on.)For the most part, your scores can only go up, as you cover more roads over time. 
But the [underlying map](https://www.openstreetmap.org/) is periodically updated, and sometimes a big city (like San Francisco) is split into neighborhoods (like Sunset, Mission, and Presidio); sometimes unincorporated areas (like Sequoia Tract) are introduced; sometimes boundaries change (e.g. Emerald Lake Hills doubled in road miles in the 2020-11 update, causing my percentage to drop). The wandrer.earth site does a great job of showing my current status in all the places I've been, but I wanted to track my progress month-by-month, so I created the charts below.The legend `"94% ( 52/ 56 mi) 2.7 to 99% Atherton"` means that I have ridden 52 of the 56 miles of roads (or 94%) in Atherton, and I need another 2.7 miles to reach the bonus scoring milestone of 99%. The companion notebook [**BikeCode.ipynb**](BikeCode.ipynb) has the implementation details. ###Code %run BikeCode.ipynb wandering(places) ###Output _____no_output_____ ###Markdown Eddington NumberThe physicist [Sir Arthur Eddington](https://en.wikipedia.org/wiki/Arthur_Eddington), a contemporary of Einstein, was a pre-Strava bicyclist who favored this metric:> *Your [**Eddington Number**](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the largest integer **e** such that you have cycled at least**&nbsp;e** miles on at least**&nbsp;e** days.*I'll look at the **miles** column across all my **rides** to compute my Eddington number: ###Code Ed_number(rides['miles']) ###Output _____no_output_____ ###Markdown **My Eddington Number is 63**: I've ridden at least 63 miles on at least 63 days (but not 64 miles on 64 days). My number is above [the mean for Strava](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number), but not nearly as good as Eddington himself: his number was **84** when he died at age 62, and his roads, bicycles, and navigation aids were not nearly as nice as mine, so bravo zulu to him. I don't think I will reach 84, but how many more rides do I need to reach 65? or 70? I'll call the difference the Eddington gap: ###Code {mi: Ed_gap(rides['miles'], mi) for mi in range(64, 71)} ###Output _____no_output_____ ###Markdown I need just 7 more rides to reach 65, but 40 to reach 70.Eddington was English, so he used miles, but we could also consider a **metric Eddington number**: ###Code Ed_number(rides['kms']) {km: Ed_gap(rides['kms'], km) for km in range(88, 101, 2)} ###Output _____no_output_____ ###Markdown My **metric Eddington Number is 87**, and my gap to 90 is 5 more rides, and to 100 is 36 more rides.Here are some properties of Eddington numbers:- Your Eddington number is monotonic: it can never decrease over time. - To improve from an Eddington number of *n* to *n* + 1 can take as few as 1 ride, or as many as *n* + 1 rides. + *Suppose I have done 49 rides, each of exactly 50 miles. My Eddington number is 49.* + *With one 50 mile ride, my Eddington number becomes 50.* + *I would then need 51 new 51+ mile rides to reach 51.*- Your metric Eddington number will always be greater than or equal to your imperial Eddington number.- Your metric Eddington number will never be more than 1.609344 times your imperial Eddington number.- Of two riders, it is possible that one has a higher metric number and the other a higher imperial number.Here is my Eddington number progress over the years, in miles and kms: ###Code Ed_progress() ###Output _____no_output_____ ###Markdown *Note:* the definition of Eddington Number seems precise, but what exactly does ***day*** mean? 
The New Oxford dictionary has three senses:1. a period of 24 hours; 2. a unit of time, reckoned from one midnight to the next;3. the part of a day when it is light. I originally assumed sense 2, but I wanted to accept sense 1 for [bikepacking](https://bikepacking.com/) trips where I rode to a camping site in the afternoon, pitched a tent for the night, and rode back home the next morning; what bikepackers call a [sub-24-hour overnight](https://oneofsevenproject.com/s24o-bikepacking-guide/) (S24O). And then COVID struck, the camping sites closed, so why not allow an S24O ride where I sleep in my own home? I realize Eddington had a lot more hardships than we have (World War I, the 1918 pandemic, and World War II), but I hope he would approve of this modest accomodation on my part. Hill-Index: Speed versus Grade on Short SegmentsThe Eddington number reminds me of the [**h-index**](https://en.wikipedia.org/wiki/H-index) metric for scientific publications. I invented another metric:> *Your **hill-index** is the maximum integer **h** where you can regularly climb an **h** percent grade at **h** miles per hour.*I'll plot grade versus speed for segments (not rides) with two best-fit curves: a blue quadratic and an orange cubic. I'll also superimpose a red dotted line where grade = speed. ###Code show('pct', 'mph', segments, 'Miles per hour versus segment grade in percent') plt.plot((2, 6, 7), (2, 6, 7), 'ro:'); ###Output _____no_output_____ ###Markdown Both best-fit curves are above the red circle at 6% and below the red circle for 7%, so **my hill-index is 6**. We also see that I can cruise at 15 to 17 mph on a 1% grade, but only about 8 mph at 5% grade, and around 5 mph on 8% grades. Speed versus Grade on Long RidesThe plot above tell me how fast I should expect to climb a particular hill, but what about average time on longer rides? Here's a plot of my speed versus steepness (measured in feet climbed per mile rather than in percent). ###Code show('fpm', 'mph', rides, 'Speed (miles per hour) versus Ride Grade (feet per mile)') ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort (and maybe the wind) than on the grade of the road. But when the grade is steeper than 50 ft/mile, my speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and around 10 mph at 120 ft/mile. Note that 120 ft/mile is only 2.3% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 6.9% average grade on the up part.I can use this to predict the time of a ride. 
For example, if I'm in La Honda and want to get to Pescadero, which is faster: the [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 361 ft climb), or the [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 853 ft climb)? We can estimate: ###Code f'Coast: {estimate(15.7, 361):.0f} min, Creek: {estimate(13.5, 853):.0f} min.' ###Output _____no_output_____ ###Markdown This predicts the shorter but steeper creek route would be about 6 minutes faster (whereas Google Maps predicts the creek route would be 80 minutes, 2 more than the coast route—I guess Google lacks confidence in my climbing ability). This is all good to know, but other factors (like the scenery and whether I want to stop at the San Gregorio store) are probably more important in making the choice. VAMClimbing speed measured by vertical ascent per hour is known as [VAM](https://en.wikipedia.org/wiki/VAM_%28bicycling%29), which stands for *velocità ascensionale media* (for native Campagnolo speakers) or *mean ascent velocity* (for SRAM) or 平均上昇率 (for Shimano). The theory is that for steepish climbs, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (For flatish rides power is spent on wind and rolling resistance, and for the steepest of rides, in my experience, power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM versus grade over short segments: ###Code show('pct', 'vam', segments, 'VAM (vertical feet per hour) versus segment grade in percent') ###Output _____no_output_____ ###Markdown I see a wide spread of VAM numbers; at 7% to 8% I range from 1500 to 3000 ft/hour. Champion cyclists sprint at over 6000 ft/hour, and can sustain [5400 ft/hour for 7 hours](https://www.strava.com/activities/4996833865]). As we can see in the segments listed below, I can sustain 3000 ft/hour for only a few minutes: ###Code top(segments, 'vam') ###Output _____no_output_____ ###Markdown On segments that are at least a kilometer long my VAM tops out at about 2700 ft/hour: ###Code top(segments[segments['kms'] > 1], 'vam') ###Output _____no_output_____ ###Markdown I can also look at VAM numbers for complete rides. I would expect the ride numbers to be half the segment numbers (or less) since most of my rides are circuits where I return to the start, and thus no more than half the ride is climbing. 
Sure enough, the best I can do is about 1300 ft/hour: ###Code top(rides, 'vam') ###Output _____no_output_____ ###Markdown Exploring the Data: `rides` and `segments`Some more poking around with the data: ###Code rides.describe() # Summary statistics for the rides segments.describe() # Summary statistics for the segments top(rides, 'mph') # Fastest rides top(segments, 'mph') # Fastest segments (there are no descent segments in the database) top(segments, 'feet') # Biggest climbs top(segments, 'pct') # Steepest climbs ###Output _____no_output_____ ###Markdown Peter Norvig, 23 Oct 2017Revised 2 June 2019 Bicycling: Speed versus Grade, VAM, Eddington Number, and H-indexLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded all my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). I parse the file into `rides`, a list of `Ride` structures. I filter out all the rides that have exactly 0 ft climbing (i.e. stationary bike rides) and that have `'MTB'` in the title (i.e. mountain bike rides that are not relevant to road bike riding). ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import re from collections import namedtuple Ride = namedtuple('Ride', 'miles, hours, feet, date, title') def parse_ride(line: str) -> Ride: """Parse a line from a Strava log file into a `Ride`. Line format is tab separated: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" _, date, title, time, mi, ft = line.strip().split('\t') return Ride(number(mi), hours(*time.split(':')[::-1]), number(ft), date, title) def number(string) -> float: return float(re.sub(r'[^0-9. ]', '', string)) def hours(sec, min, hour=0) -> float: return int(sec)/3600 + int(min)/60 + int(hour) rides = [parse_ride(line) for line in open('bikerides25.tsv') if 'MTB' not in line and ' 0 ft' not in line] ###Output _____no_output_____ ###Markdown I'll want to plot the data to visualize it, so I'll collect four interesting vectors of numbers: length of ride in miles, speed in miles per hour, grade in feet climbed per mile, and grade in percent: ###Code def vectors3(rides) -> tuple: "Return vectors of (miles, miles/hour, feet climbed per hour)" return ([r.miles for r in rides], [r.miles / r.hours for r in rides], [r.feet / r.miles for r in rides]) mls, mph, fpm = vectors3(rides) plt.scatter(fpm, mph); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. Just eyeballing the scatter plot, it looks like a curve would be a better fit than a straight line, so I'll fit a quadratic (degree two) polynomial to the data (for no good theoretical reason; just because that's the simplest nonlinear function I could think of). I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degree=2): "Plot X versus Y and a best fit curve to it, with some bells and whistles." 
plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') plt.ylabel(ylabel); plt.xlabel(xlabel) plt.minorticks_on(); plt.grid(True, which='major'); plt.grid(True, which='minor', alpha=0.4) plt.scatter(X, Y) X1 = sorted(set(X)) F = poly_fit(X, Y, degree) plt.plot(X1, [F(x) for x in X1], 'k:') def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(fpm, mph) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from around 50 ft/mile grade speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and under 9 mph at 130 ft/mile. Note that even 130 ft/mile is only 2.5% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 7.5% grade on the up part. Estimating DurationThis data can help me predict how long will it take to ride a route with a given distance and elevation. For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def duration(dist, climb, F=poly_fit(fpm, mph, 2)) -> float: "Given a distance in miles and total climb in feet, estimate time in minutes." return 60 * dist / F(climb / dist) duration(15.7, 344), duration(13.5, 735) ###Output _____no_output_____ ###Markdown This suggests the shorter creek route would be about 6 or 7 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? To answer that I'll go back to Strava and retrieve data from individual segments, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. I picked some representative segments, wrote down (in [`bikesegments.csv`](bikesegments.csv)) the segment length in miles and climb in feet, along with several of my times on the segment. (I picked my first, fifth, and tenth fastest times when available, or closest to that if I've done the segment less than 10 times.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. 
Each line contains: 'title, miles, feet, time,*'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: yield Ride(float(mi), hours(*time.split(':')[::-1]), int(ft), '?', title) segments = list(parse_segments(open('bikesegments.csv'))) mls2, mph2, fpm2 = vectors3(segments) ###Output _____no_output_____ ###Markdown Now I plot this data, this time using a third-degree polynomial, just because it seems to fit better, and listing grade in percent rather than feet per mile: ###Code pct2 = [f * 100 / 5280 for f in fpm2] show(pct2, mph2, xlabel='Grade (percent)', degree=3) ###Output _____no_output_____ ###Markdown So I can cruise at 17 mph on flat roads (sometimes only 12.5 mph and occassionally up to 21 mph); I'm down around 6 mph at 6% grade, and down around 5 mph on 8% grades. VAM [VAM](https://en.wikipedia.org/wiki/VAM_(bicycling)) stands for *velocità ascensionale media* in Italian, but since I don't have Campagnolo gear, I'll call it "mean ascent velocity": the speed in vertical feet climbed per hour (regardless of horizontal distance travelled). Legend has it that at grades between about 6% to 12%, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (I guess that below that grade, power is spent on wind and rolling resistance, and above it, power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM at various grades: ###Code vam2 = [r.feet / r.hours for r in segments] show(pct2, vam2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)', degree=3) ###Output _____no_output_____ ###Markdown Sure enough, the best fit cubic curve looks pretty flat between 6% and 12%, but there is a lot of variance. These numbers confirm that I'm an average club climber at about 2000 ft/hour; champion climbers can do 5,000 or even 6,000 feet per hour. Eddington NumberThe British physicist Sir Arthur Eddington (1882-1944) was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the maximum number**&nbsp;E** such that you have bicycled**&nbsp;E** or more miles on**&nbsp;E** or more days.*Let's calculate mine, and see how many rides I need to improve it. For various number of miles, I'll list the number of rides of at least that long that I've taken, and the *gap*&mdash;the number of additional rides I would need to reach that Eddington number. ###Code def eddington(distances, unit): """Print information on the number of rides and the Eddington gap.""" print(f'{unit} Days Gap') print(f'-- ---- ---') for E in range(1, 999): days = sum(d >= E for d in distances) gap = E - days if -2 <= gap <= 45: print(f'{E:2} {days:2} {gap:2}') eddington(mls, 'Mi') ###Output Mi Days Gap -- ---- --- 52 54 -2 53 52 1 54 49 5 55 47 8 56 46 10 57 43 14 58 40 18 59 35 24 60 34 26 61 28 33 62 26 36 63 24 39 64 22 42 65 21 44 ###Markdown So my **Eddington Number is 52**: I've done 52 miles or more on more than 52 days, but for 53 miles, only 52 days. This happens to be right around [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number). To increase 10% to 57 I would need 14 more 57-mile days. 
(Eddington's Eddington number was **77**.)If 52 seems too small a number, we can calculate the metric Eddington number (in kilometers): ###Code eddington([m * 1.609344 for m in mls], 'Km') ###Output Km Days Gap -- ---- --- 75 76 -1 76 75 1 77 74 3 78 72 6 79 71 8 80 70 10 81 67 14 82 59 23 83 58 25 84 53 31 85 52 33 86 51 35 87 49 38 88 48 40 89 47 42 90 46 44 ###Markdown My **metric Eddington Number is 75**, and to increase 10% to 82 I would need 23 more 82 km (51 mile) days, but to increase 5 more to 80 I would need just 10 such days. H-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications (and by Eddington's h-index-like number), I invented a new h-index, or hill-index:> *Your **h-index** is the maximum number**&nbsp;H** for which you can regularly climb an**&nbsp;H** percent grade at **&nbsp;H** miles per hour.*I think **my h-index is 6**, but let's draw a plot to confirm: ###Code show(pct2, mph2, xlabel='Grade (percent)', degree=3) plt.plot([6, 7], [6, 7], 'rD:'); ###Output _____no_output_____ ###Markdown Bike Speed versus GradeLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded data on my past rides from [Strava](https://www.strava.com/athletes/575579) and manipulated the `data` to create two lists: - `X`: the *grade* of each ride, in feet of ascent per mile. - `Y`: the *speed* of each (corresponding) ride, in miles per hour.I omit rides shorter than 30 miles, because on some short rides I was sprinting at an unsustainable speed, and on others I was slowed by city traffic. Here is the code to collect and plot the X:Y data: ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import csv def hours(sec): return float(sec) / 60 / 60 def feet(meters): return float(meters) * 100 / 2.54 / 12 def miles(meters): return feet(meters) / 5280 # Read data file with three fields: distance (m), climb (m), elapsed time (sec) X, Y = [], [] for (dist, climb, time) in csv.reader(open('dist-climb-time.csv')): if miles(dist) > 30: X.append(feet(climb) / miles(dist)) Y.append(miles(dist) / hours(time)) plt.plot(X, Y, 'o'); ###Output _____no_output_____ ###Markdown As expected, the speeds get slower as the grade gets steeper. The data has a lot of variance, and I can say that it looks more like a curve than a straight line, so I'll fit a cubic (degree two) polynomial to the data,and make the plot prettier: ###Code def poly(X, Y, n): "Best-fit degree-n polynomial for X, Y data." C = np.polyfit(X, Y, n)[::-1] # Array of coefficients, reversed return lambda x: sum(C[i] * x ** i for i in range(n + 1)) F = poly(X, Y, 2) # defines y = F(x); x in ft/mile, y in mph def show(X, Y, F=F): plt.rcParams["figure.figsize"] = (12, 8) plt.style.use('fivethirtyeight') plt.plot(X, Y, 'o') X1 = list(range(int(max(X)))) plt.plot(X1, [F(x) for x in X1], '-') plt.ylabel('Speed (mph)') plt.xlabel('Grade (feet/mile)') plt.minorticks_on() plt.grid(True, which='major') plt.grid(True, which='minor', alpha=0.4) plt.title('Average speed vs Grade on {} rides'.format(len(X))) show(X, Y) ###Output _____no_output_____ ###Markdown So, I average about 14 mph when the road is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from 60 ft/mile and up, speed falls off quickly at 1 mph for every 20 ft/mile, and by 120 ft/mile, I'm down around 10 mph, and 8.5mph at 140 ft/mile. 
Note that 140 ft/mile is only 2.7% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat-ish, then that's 8% grade on the up part.I can use the polynomial `F` to estimate the duration of a route: ###Code def duration(dist, climb, F=F): "Given a distance in miles and total climb in feet, return estimated time in minutes." return dist / F(climb / dist) * 60 ###Output _____no_output_____ ###Markdown For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1): ###Code coast = duration(15.7, 344) creek = duration(13.5, 735) coast - creek ###Output _____no_output_____ ###Markdown Peter Norvig, 23 Oct 2017Revised 2 June 2019 Bicycling: Speed versus Grade, Eddington Number, and H-indexLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded all my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). I parse the file into `rides`, a list of `Ride` structures. I filter out all the rides that have exactly 0 ft climbing (i.e. stationary bike rides) and that have `'MTB'` in the title (i.e. mountain bike rides that are not relevant to road bike riding). ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import re from collections import namedtuple Ride = namedtuple('Ride', 'miles, feet, hours, date, title') def parse_ride(line: str) -> Ride: """Parse a line from a Strava log file into a `Ride`. Line format is tab separated: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" _, date, title, time, mi, ft = line.strip().split('\t') return Ride(number(mi), number(ft), hours(*time.split(':')[::-1]), date, title) def number(string) -> float: return float(re.sub(r'[^0-9. ]', '', string)) def hours(sec, min, hour=0) -> float: return int(sec)/3600 + int(min)/60 + int(hour) rides = [parse_ride(line) for line in open('bikerides25.tsv') if 'MTB' not in line and ' 0 ft' not in line] ###Output _____no_output_____ ###Markdown I'll want to plot the data to visualize it, so I'll collect four interesting vectors of numbers: length of ride in miles, speed in miles per hour, grade in feet climbed per mile, and grade in percent: ###Code def vectors3(rides) -> tuple: "Return vectors of (miles, miles/hour, feet climbed per hour)" return ([r.miles for r in rides], [r.miles / r.hours for r in rides], [r.feet / r.miles for r in rides]) def percent(r: Ride) -> float: return r.feet / r.miles * (100/5280) mls, mph, fpm = vectors3(rides) plt.scatter(fpm, mph); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. 
Just eyeballing the scatter plot, it looks like a curve would be a better fit than a straight line, so I'll fit a quadratic (degree two) polynomial to the data (for no good theoretical reason; just because that's the simplest nonlinear function I could think of). I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degree=2): "Plot X versus Y and a best fit curve to it, with some bells and whistles." plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') plt.ylabel(ylabel); plt.xlabel(xlabel) plt.minorticks_on(); plt.grid(True, which='major'); plt.grid(True, which='minor', alpha=0.4) plt.scatter(X, Y) X1 = sorted(set(X)) F = poly_fit(X, Y, degree) plt.plot(X1, [F(x) for x in X1], 'k:') def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(fpm, mph) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from around 50 ft/mile grade speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and under 9 mph at 130 ft/mile. Note that even 130 ft/mile is only 2.5% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 7.5% grade on the up part. Estimating DurationThis data can help me predict how long will it take to ride a route with a given distance and elevation. For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def duration(dist, climb, F=poly_fit(fpm, mph, 2)) -> float: "Given a distance in miles and total climb in feet, estimate time in minutes." return 60 * dist / F(climb / dist) duration(15.7, 344), duration(13.5, 735) ###Output _____no_output_____ ###Markdown This suggests the shorter creek route would be about 6 or 7 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? To answer that I'll go back to Strava and retrieve data from individual segments, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. 
I picked some representative segments, wrote down (in [`bikesegments.csv`](bikesegments.csv)) the segment length in miles and climb in feet, along with several of my times on the segment. (I picked my first, fifth, and tenth fastest times when available, or closest to that if I've done the segment less than 10 times.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. Each line contains: 'title, miles, feet, time,*'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: yield Ride(float(mi), int(ft), hours(*time.split(':')[::-1]), '?', title) segments = list(parse_segments(open('bikesegments.csv'))) mls2, mph2, fpm2 = vectors3(segments) ###Output _____no_output_____ ###Markdown Now I plot this data, this time using a third-degree polynomial, just because it seems to fit better, and listing grade in percent rather than feet per mile: ###Code pct2 = [f * 100 / 5280 for f in fpm2] show(pct2, mph2, xlabel='Grade (percent)', degree=3) ###Output _____no_output_____ ###Markdown So I can cruise at 17 mph on flat roads (sometimes only 12.5 mph and occassionally up to 21 mph); I'm down around 6 mph at 6% grade, and down around 5 mph on 8% grades. VAM [VAM](https://en.wikipedia.org/wiki/VAM_(bicycling)) stands for *velocità ascensionale media* in Italian, but since I don't have Campagnolo gear, I'll call it "mean ascent velocity": the speed in vertical feet climbed per hour (regardless of horizontal distance travelled). Legend has it that at grades between about 6% to 12%, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (I guess that below that grade, power is spent on wind and rolling resistance, and above it, power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM at various grades: ###Code vam2 = [r.feet / r.hours for r in segments] show(pct2, vam2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)', degree=3) ###Output _____no_output_____ ###Markdown Sure enough, the best fit cubic curve looks pretty flat between 6% and 12%, but there is a lot of variance. These numbers confirm that I'm an average club climber; good climbers can do over 5,000 feet per hour. Eddington NumberThe British physicist Sir Arthur Eddington (1882-1944) was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the maximum number**&nbsp;E** such that you have bicycled**&nbsp;E** or more miles on**&nbsp;E** or more days.*Let's calculate mine, and see how many rides I need to take to improve it. For various number of miles, I'll list the number of rides of at least that long that I've taken, and the *gap*&mdash;the number of additional rides I would need to reach that Eddington number. ###Code def eddington(distances, unit): """Print information on the number of rides and the Eddington gap.""" print(f'{unit} Days Gap') print(f'-- ---- ---') for E in range(1, 999): days = sum(d >= E for d in distances) gap = E - days if -2 <= gap <= 45: print(f'{E:2} {days:2} {gap:2}') eddington(mls, 'Mi') ###Output Mi Days Gap -- ---- --- 52 54 -2 53 52 1 54 49 5 55 47 8 56 46 10 57 43 14 58 40 18 59 35 24 60 34 26 61 28 33 62 26 36 63 24 39 64 22 42 65 21 44 ###Markdown So my **Eddington Number is 52**: I've done 52 miles or more on more than 52 days, but for 53 miles, only 52 days. 
This happens to be right around [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number). To increase 10% to 57 I would need 14 more 57-mile days. (Eddington's Eddington number was **77**.)If 52 seems too small a number, we can calculate the metric Eddington number (in kilometers): ###Code eddington([m * 1.609344 for m in mls], 'Km') ###Output Km Days Gap -- ---- --- 75 76 -1 76 75 1 77 74 3 78 72 6 79 71 8 80 70 10 81 67 14 82 59 23 83 58 25 84 53 31 85 52 33 86 51 35 87 49 38 88 48 40 89 47 42 90 46 44 ###Markdown My **metric Eddington Number is 75**, and to increase 10% to 82 I would need 23 more 82 km (51 mile) days, but to increase 5 more to 80 I would need just 10 such days. H-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications (and by Eddington's h-index-like number), I invented a new h-index, or hill-index:> *Your **h-index** is the maximum number**&nbsp;H** for which you can regularly climb an**&nbsp;H** percent grade at **&nbsp;H** miles per hour.*I think **my h-index is 6**, but let's draw a plot and do a computation to confirm: ###Code show(pct2, mph2, xlabel='Grade (percent)', degree=3) plt.plot([6, 7, 8], [6, 7, 8], 'rD'); ###Output _____no_output_____ ###Markdown My guess is confirmed: the leftmost red diamond indicates 6 mph at 6% grade, and we can see that about half my segments on 6% grades are above 6 mph. The term *regularly* in my definition is imprecise, but I think that's close enough. On the other hand, the next diamond is 7 mph at 7%, and there are only 3 segments that are up-and-to-the-right of that, so I wouldn't count that as *regularly*. And I have never achieved 8 mph at 8%.The other piece of confirmation is fitting the curve as the function `speed` and seeing that my predicted speed at 6% grade is 6.3 mph: ###Code speed = poly_fit(pct2, mph2, 3) {pct: speed(pct) for pct in (6, 7)} ###Output _____no_output_____ ###Markdown Peter Norvig, 23 Oct, 2017 Bicycling: Speed versus Grade, Eddington Number, and H-indexLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded all my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). Here I parse the file into a set of `Ride` structures: ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import re from collections import namedtuple Ride = namedtuple('Ride', 'miles, feet, hours, date, title') def parse_ride(line: str) -> Ride: """Parse a line from a Strava log file into a `Ride`. Line format is: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" _, date, title, time, mi, ft = line.strip().split('\t') return Ride(number(mi), number(ft), hours(time), date, title) def number(string) -> float: return float(re.sub(r'[^0-9.]','', string)) def hours(time: str) -> float: """Convert '1:30:00' to 1.5""" hour, min, sec = map(int, time.split(':')) return hour + min / 60 + sec / 3600 rides = map(parse_ride, open('bikerides25.tsv')) ###Output _____no_output_____ ###Markdown A few of the rides were on stationary bikes and thus have 0 feet climbed; let's filter them out. 
A few others were mountain bike rides where I spent a lot of time off the bike pushing it uphill; I'll filter out rides < 8 mph: ###Code rides = [r for r in rides if r.feet > 0 and r.miles / r.hours > 8] ###Output _____no_output_____ ###Markdown Collect three vectors of numbers: miles per hour, feet climbed per mile, and miles per ride. Plot feet per mile versis miles per hour: ###Code mph = [r.miles / r.hours for r in rides] fpm = [r.feet / r.miles for r in rides] mls = [r.miles for r in rides] plt.scatter(fpm, mph); ###Output _____no_output_____ ###Markdown As expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. Just eyeballing the scatter plot, it looks like a curve would be a better fit than a straight line, so I'll fit a quadratic (degree two) polynomial to the data, and make the plot prettier: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)'): plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') plt.ylabel(ylabel); plt.xlabel(xlabel) plt.minorticks_on(); plt.grid(True, which='major'); plt.grid(True, which='minor', alpha=0.4) plt.scatter(X, Y) X1 = sorted(set(X)) F = quadratic_fit(X, Y) plt.plot(X1, [F(x) for x in X1], 'k-') def quadratic_fit(X, Y): """The quadratic function that best fits the X,Y vectors.""" a, b, c = np.polyfit(X, Y, 2) return lambda x: a * x ** 2 + b * x + c show(fpm, mph) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from around 50 ft/mile grade speed falls off quickly: down to 12mph at 80 ft/mile; under 11 mph at 100 ft/mile; and under 9 mph at 130 ft/mile, Note that 130 ft/mile is only 2.5% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flatish, then that's 7.5% grade on the up part. Estimating DurationFor a route with a given distance and elevation, how long will it take to ride it? ###Code def duration(dist, climb, F=quadratic_fit(fpm, mph)): "Given a distance in miles and total climb in feet, return estimated time in minutes." return 60 * dist / F(climb / dist) ###Output _____no_output_____ ###Markdown For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code duration(15.7, 344), duration(13.5, 735) ###Output _____no_output_____ ###Markdown This suggests the shorter creek route would be about 7 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. 
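For comparing more than two candidate routes at once, here is a small helper of my own layered on top of `duration` (a sketch, not part of the original notebook): ###Code
def rank_routes(routes: dict) -> list:
    """Sort routes, given as {name: (miles, climb_ft)}, by estimated minutes, fastest first."""
    return sorted((round(duration(mi, ft)), name) for name, (mi, ft) in routes.items())

rank_routes({'coast route': (15.7, 344), 'creek route': (13.5, 735)})
###Output _____no_output_____ ###Markdown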
Eddington NumberThe physicist Sir Arthur Eddington was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the largest integer**&nbsp;E** for which you have bicycled**&nbsp;E** or more miles on**&nbsp;E** different days.*Let's calculate mine, and see how many rides I need to take to improve it. For various number of miles, I'll list the number of rides of at least that long that I've taken, and the *gap*&mdash;the number of additional rides I would need to reach that Eddington number. ###Code def eddington(distances, unit): """Print information on the number of rides and the Eddington gap.""" print(f'{unit} Rides Gap') print(f'-- ----- ---') for E in range(1, 1000): num_rides = sum(d >= E for d in distances) gap = E - num_rides if -1 <= gap <= 45: print(f'{E:2} {num_rides:2} {gap:2}') eddington(mls, 'Mi') ###Output Mi Rides Gap -- ----- --- 52 53 -1 53 51 2 54 48 6 55 46 9 56 45 11 57 42 15 58 39 19 59 34 25 60 33 27 61 27 34 62 25 37 63 23 40 64 21 43 65 20 45 ###Markdown So my **Eddington Number is 52**: I've done 53 rides of 52 miles or more, but not 53 rides of 53 miles. We can also calculate the metric Eddington number: ###Code eddington([m * 1.60934 for m in mls], 'Km') ###Output Km Rides Gap -- ----- --- 75 75 0 76 74 2 77 73 4 78 71 7 79 70 9 80 69 11 81 66 15 82 58 24 83 57 26 84 52 32 85 51 34 86 50 36 87 48 39 88 47 41 89 46 43 90 45 45 ###Markdown Peter Norvig, Oct 2017Revised Jan 2020 Bicycling: Speed, Grade, VAM, Hill-index, EddingtonLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded a bunch of my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). I parse the file into `rides`, a listof `Ride` structures. ###Code import matplotlib.pyplot as plt import numpy as np import re from collections import namedtuple Ride = namedtuple('Ride', 'miles, hours, feet, date, title') def parse_rides(lines: str) -> [Ride]: """Parse lines from a Strava log file into a list of `Ride`. Tab separated fields: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" return [Ride(number(mi), parse_hours(time), number(ft), date, title) for line in lines if line.startswith('Ride') for _, date, title, time, mi, ft in [line.strip().split('\t')]] def number(string) -> float: return float(re.sub(r'[^0-9. ]', '', string)) def parse_hours(time: str) -> float: if time.count(':') < 2: time = '0:' + time sec, min, hour = time.split(':')[::-1] return int(sec)/3600 + int(min)/60 + int(hour) rides = parse_rides(open('bikerides25.tsv'))#{parse_ride(line) for line in open('bikerides25.tsv') if line.startswith('Ride')} ###Output _____no_output_____ ###Markdown From the raw data I will derive three important arrays of numbers: - `miles`: array of lengths of each ride in miles- `hours`: array of durations of each ride in hours- `feet`: array of total climbing of each ride in feet I'll show a simple scatter plot visualization: ###Code def column(attr, rides): return np.array([getattr(r, attr) for r in rides]) miles = column('miles', rides) hours = column('hours', rides) feet = column('feet', rides) plt.scatter(feet/miles, miles/hours); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. 
Just eyeballing the scatter plot, it looks like some kind of downward sloping curve would be a better fit than a straight line, so I'll fit quadratic (degree two) and cubic (degree 3) polynomials to the data (for no good theoretical reason; just because those are two simple nonlinear functions and numpy has a solver for them). I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degrees=(2, 3)): "Plot X versus Y and a best fit curve to it, with some bells and whistles." plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') grid(); plt.ylabel(ylabel); plt.xlabel(xlabel) plt.scatter(X, Y, c='k') X1 = np.linspace(min(X), max(X), 100) for degree in degrees: F = poly_fit(X, Y, degree) plt.plot(X1, [F(x) for x in X1], '-', lw=2) plt.title(f'{len(X)} rides') def grid(): plt.minorticks_on(); plt.grid(which='minor', ls=':', alpha=0.7) def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(feet/miles, miles/hours) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But when the grade is steeper than 50 ft/mile, the speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and around 10 mph at 120 ft/mile. Note that 120 ft/mile is only 2.3% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 6.9% average grade on the up part. Estimating Time of a RideThis data can help me predict how long will it take to ride a route with a given distance and elevation. For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def time(dist, climb, F=poly_fit(feet/miles, miles/hours, 2)) -> float: "Given a distance in miles and total climb in feet, estimate time in minutes." return 60 * dist / F(climb / dist) f'Coast: {time(15.7, 344):.1f} min, Creek: {time(13.5, 735):.1f} min.' ###Output _____no_output_____ ###Markdown This predicts the shorter creek route would be 6 or 7 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? 
To answer that I'll go back to Strava and retrieve data from individual segments, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. I picked some representative climbing segments ([`bikesegments.csv`](bikesegments.csv)) with the segment length in miles and climb in feet, along with several of my times on the segment. (Strava shows my times for up to 10 rides on each segment; I picked the first, middle, and last of the rides shown.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. Each line contains: 'title, miles, feet, time(s)'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: time = parse_hours(time) yield Ride(float(mi), time, int(ft), 'date?', title) segments = set(parse_segments(open('bikesegments.csv'))) miles2 = column('miles', segments) hours2 = column('hours', segments) feet2 = column('feet', segments) ###Output _____no_output_____ ###Markdown For the plot I will list grade in percent rather than feet per mile: ###Code pct2 = feet2 / miles2 * 100 / 5280 show(pct2, miles2 / hours2, xlabel='Segment Grade (percent)') ###Output _____no_output_____ ###Markdown So I can cruise at 15 to 17 mph on a 1% grade, but only about 8 mph at 5% grade, and around 5 mph on 8% grades. VAMThe average number of vertical feet (or meters) climbed per hour (regardless of horizontal distance travelled) is known as [VAM](https://en.wikipedia.org/wiki/VAM_%28bicycling%29), which stands for *velocità ascensionale media* (for native Campagnolo speakers) or *mean ascent velocity* (for SRAM) or 平均上昇率 (for Shimano). The theory is that for medium-steepish climbs, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (For flatish rides power is spent on wind and rolling resistance, and for super-steep rides power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM versus grades (in percent): ###Code show(pct2, feet2 / hours2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown I don't see the flattening effect; I see a wide spread of VAM numbers; at about 8% I range from 1500 to 3000 VAM. Champions can do 6000. Another way to look at it: at what speed am I getting the most VAM (implicitly assuming I'm giving a similar level of effort at all times, which is not really true)? The curve says 5 or 6 mph, but the data is pretty noisy. ###Code show(miles2 / hours2, feet2 / hours2, xlabel='Speed(mph)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown Hill-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications, I invented a new metric, the hill-index:> *Your **hill-index** is the maximum number**&nbsp;h** for which you can regularly climb an**&nbsp;h** percent grade at **&nbsp;h** miles per hour.*I think **my hill-index is 6**. (I know that [Old La Honda](https://graphics.stanford.edu/~lucasp/grade/oldlahonda.html) is a 7% grade, and I only [achieved 7.0 mph](https://www.strava.com/segments/9343401?filter=my_results&gender=all) on it once out of many tries). 
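One numeric way to check, before drawing anything: read "regularly" as "the best-fit speed curve at an h% grade is at least h mph" (that reading is my own assumption) and take the largest such h. A sketch using the `poly_fit` helper and segment vectors defined above: ###Code
def hill_index(grades_pct, speeds_mph, degree=3) -> int:
    """Largest integer h whose best-fit predicted speed at an h% grade is at least h mph."""
    F = poly_fit(grades_pct, speeds_mph, degree)
    return max(h for h in range(1, int(max(grades_pct)) + 1) if F(h) >= h)

hill_index(pct2, miles2 / hours2)   # expected to agree with the guess of 6
###Output _____no_output_____ ###Markdown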
But let's draw a plot to confirm: ###Code points = range(2, 8) show(pct2, miles2 / hours2, xlabel='Grade (percent)') plt.plot(points, points, 'gD:', lw=2); ###Output _____no_output_____ ###Markdown The green dotted line is where grade = speed, the red and blue lines are best-fit polynomials, and if I interpret "regularly" to mean "around half the time," then both best-fit lines are above the diamond at 6%, but both are below the diamond for 7%, so my guess is confirmed: my hill-index is 6.Note that the original h-index for publications is based on cumulative totals, so it can never decrease, but the Hill-index is based on an average so it can (and sadly, often does) decrease as you grow older. Eddington NumberSpeaking of h-numbers, the physicist Sir Arthur Eddington (1882-1944) was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the maximum integer**&nbsp;e** such that you have bicycled at least**&nbsp;e** miles on at least**&nbsp;e** days.*Let's calculate mine: ###Code def Eddington(distances) -> int: "Eddington number: the maximum integer e such that you have bicycled at least e on at least e days." distances = sorted(distances, reverse=True) return max(e for e, d in enumerate(distances, 1) if d >= e) Eddington(miles) ###Output _____no_output_____ ###Markdown My **Eddington Number is 57**: I've done at least 57 miles on at least 57 days. This is a bit above [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number). Eddington was 62 years old when he died with a number of **84**, and his available roads, bicycles, and navigation aids were not nearly as nice as mine, so good for him.How many more rides would I need to improve my number? Obviously, for anyone to improve from, say, 57 to 60 they need at least 3 more rides. But any rides that were, say, 58 or 59 miles count for 57 but wouldn't count for 60; thus more than 3 rides may be needed. Here is a table of the number of rides I would need&mdash;what I'll call the *gap*&mdash;for increasingly ambitious Eddington numbers: ###Code def Egaps(distances, targets): """Print the number of days doing various target distances, and the Eddington gap.""" print('Dist Days Gap') print('---- ---- ---') for target in targets: days = sum(d >= target for d in distances) print(f'{target:4} {days:4} {target - days:3}') Egaps(miles, range(57, 71)) ###Output Dist Days Gap ---- ---- --- 57 58 -1 58 55 3 59 50 9 60 49 11 61 43 18 62 41 21 63 39 24 64 37 27 65 35 30 66 33 33 67 28 39 68 26 42 69 21 48 70 20 50 ###Markdown I'd like to get my Eddington number to meet or beat my age. 
If I do two 65+ mile rides per month, I'll make it before I turn 65.An easy way to get bigger Eddington numbers is to use kilometers rather than miles: ###Code kms = miles * 1.609344 Eddington(kms) Egaps(kms, range(81, 101)) ###Output Dist Days Gap ---- ---- --- 81 82 -1 82 74 8 83 73 10 84 68 16 85 67 18 86 66 20 87 64 23 88 63 25 89 62 27 90 61 29 91 59 32 92 58 34 93 55 38 94 53 41 95 50 45 96 49 47 97 46 51 98 43 55 99 41 58 100 41 59 ###Markdown My **metric Eddington Number is 81**, and I should be able to get to 90 in about a year, and 100 in two or three years, assuming around two 100+ km rides per month.Here is my Eddington number progress over the years, in miles and kms: ###Code def year(ride) -> int: return int(ride.date[-4:]) def Eprogress(years, rides=rides): "Print a table of Eddington numbers by year" print('Year E_ml E_km') print('---- ----- -----') for y in years: miles = np.array([r.miles for r in rides if year(r) <= y]) print(f'{y} {Eddington(miles):5} {Eddington(miles * 1.609344):5}') Eprogress(range(2013, 2021)) ###Output Year E_ml E_km ---- ----- ----- 2013 23 27 2014 34 45 2015 41 60 2016 46 65 2017 50 72 2018 53 75 2019 54 78 2020 57 81 ###Markdown Peter Norvig, 23 Oct 2017Revised 2 June 2019 Bicycling: Speed versus Grade, VAM, Eddington Number, and H-indexLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded all my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). I parse the file into `rides`, a list of `Ride` structures. I filter out all the rides that have exactly 0 ft climbing (i.e. stationary bike rides) and that have `'MTB'` in the title (i.e. mountain bike rides that are not relevant to road bike riding). ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import re from collections import namedtuple Ride = namedtuple('Ride', 'miles, hours, feet, date, title') def parse_ride(line: str) -> Ride: """Parse a line from a Strava log file into a `Ride`. Line format is tab separated: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" _, date, title, time, mi, ft = line.strip().split('\t') return Ride(number(mi), hours(*time.split(':')[::-1]), number(ft), date, title) def number(string) -> float: return float(re.sub(r'[^0-9. ]', '', string)) def hours(sec, min, hour=0) -> float: return int(sec)/3600 + int(min)/60 + int(hour) rides = [parse_ride(line) for line in open('bikerides25.tsv') if 'MTB' not in line and ' 0 ft' not in line] ###Output _____no_output_____ ###Markdown I'll collect three interesting vectors of numbers: length of ride in miles, speed in miles per hour, grade in feet climbed per mile, and grade in percent. Then I'll start with a simple scatter plot visualization: ###Code def vectors3(rides) -> tuple: "Return vectors of (miles, miles/hour, feet climbed per hour)" return ([r.miles for r in rides], [r.miles / r.hours for r in rides], [r.feet / r.miles for r in rides]) mls, mph, fpm = vectors3(rides) plt.scatter(fpm, mph); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. Just eyeballing the scatter plot, it looks like a curve would be a better fit than a straight line, so I'll fit a quadratic (degree two) polynomial to the data (for no good theoretical reason; just because that's the simplest nonlinear function I could think of). 
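One convention worth spelling out before the fitting code: `np.polyfit` returns coefficients ordered from the highest power down, which is why the helper below reverses them with `[::-1]`. A toy check on exactly quadratic data: ###Code
xs = np.array([0.0, 1.0, 2.0, 3.0])
ys = xs ** 2 - 2 * xs + 3          # y = x^2 - 2x + 3
np.polyfit(xs, ys, 2)              # -> approximately [ 1., -2.,  3.] (highest power first)
###Output _____no_output_____ ###Markdown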
I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degree=2): "Plot X versus Y and a best fit curve to it, with some bells and whistles." plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') plt.ylabel(ylabel); plt.xlabel(xlabel) plt.scatter(X, Y) F = poly_fit(X, Y, degree) X1 = sorted(set(X)) plt.plot(X1, [F(x) for x in X1], 'k:') def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(fpm, mph) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from around 50 ft/mile grade speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and under 9 mph at 130 ft/mile. Note that even 130 ft/mile is only 2.5% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 7.5% grade on the up part. Estimating DurationThis data can help me predict how long will it take to ride a route with a given distance and elevation. For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def duration(dist, climb, F=poly_fit(fpm, mph, 2)) -> float: "Given a distance in miles and total climb in feet, estimate time in minutes." return 60 * dist / F(climb / dist) duration(15.7, 344), duration(13.5, 735) ###Output _____no_output_____ ###Markdown This suggests the shorter creek route would be about 6 or 7 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? To answer that I'll go back to Strava and retrieve data from individual segments, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. I picked some representative climbing segments ([`bikesegments.csv`](bikesegments.csv)) with the segment length in miles and climb in feet, along with several of my times on the segment. (I picked the first, midde, and last of the rides shown by Strava&mdash;the 10 fastest rides.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. 
Each line contains: 'title, miles, feet, time,*'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: yield Ride(float(mi), hours(*time.split(':')[::-1]), int(ft), '?', title) segments = list(parse_segments(open('bikesegments.csv'))) mls2, mph2, fpm2 = vectors3(segments) ###Output _____no_output_____ ###Markdown Now I plot this data, this time using a third-degree polynomial, just because it seems to fit better, and listing grade in percent rather than feet per mile: ###Code pct2 = [f * 100 / 5280 for f in fpm2] show(pct2, mph2, xlabel='Segment Grade (percent)', degree=3) ###Output _____no_output_____ ###Markdown So I can cruise at 17 mph on a 1% grade, but only a bit over 6 mph at 6% grade, and around 5 mph on 8% grades. VAMThe number of vertical feet climbed per hour (regardless of horizontal distance travelled) is known as [VAM](https://en.wikipedia.org/wiki/VAM_(bicycling)), which stands for *velocità ascensionale media* (when you have Campagnolo components) or *mean ascent velocity* (on SRAM) or 平均上昇率 (on Shimano). Legend has it that at grades between about 6% to 12%, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (I guess that below that grade, power is spent on wind and rolling resistance, and above it, power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM at various grades: ###Code vam2 = [r.feet / r.hours for r in segments] show(pct2, vam2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)', degree=3) ###Output _____no_output_____ ###Markdown I don't really get that flat effect; my numbers are widely spread out. This does confirm that I'm an average club climber at about 2000 ft/hour; champion climbers can do 5,000 or even 6,000 feet per hour. Eddington NumberThe British physicist Sir Arthur Eddington (1882-1944) was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the maximum number**&nbsp;E** such that you have bicycled**&nbsp;E** or more miles on**&nbsp;E** or more days.*Let's calculate mine, and see how many rides I need to improve it. For various number of miles, I'll list the number of rides of at least that long that I've taken, and the *gap*&mdash;the number of additional rides I would need to reach that Eddington number. ###Code def eddington(distances, unit='Mi', gaps=range(-1, 41)): """Print the number of days doing various distances, and the Eddington gap.""" print(f'{unit} Days Gap') print(f'-- ---- ---') for E in range(1, 200): days = sum(d >= E for d in distances) gap = E - days if gap in gaps: print(f'{E:2} {days:2} {gap:2}') eddington(mls, 'Mi') ###Output Mi Days Gap -- ---- --- 53 54 -1 54 51 3 55 49 6 56 48 8 57 45 12 58 42 16 59 37 22 60 36 24 61 30 31 62 28 34 63 26 37 64 24 40 ###Markdown So my **Eddington Number is 53**: I've done 53 miles or more on 54 days, but 54 miles I've done on only 51 days. This happens to be right around [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number). To increase that number by 5 to 58, obviously I would need at least 5 rides, but it turns out I would actually need 16 (because 11 rides were between 53 and 57 miles). 
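The 16 can be recomputed in one line from the same `mls` vector, which makes the arithmetic behind the gap column explicit: ###Code
days_58 = sum(d >= 58 for d in mls)
days_58, 58 - days_58   # -> (42, 16), matching the 58-mile row of the table above
###Output _____no_output_____ ###Markdown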
Eddington was about my age when he died with a number of **77**, and his available roads and bicycles were not nearly as nice as mine, so good for him.If 53 seems too small a number, we can use kilometers instead of miles: ###Code kms = [m * 5280 * 12 * 2.54 / 100 / 1000 for m in mls] eddington(kms, 'Km') ###Output Km Days Gap -- ---- --- 76 77 -1 77 76 1 78 74 4 79 73 6 80 72 8 81 69 12 82 61 21 83 60 23 84 55 29 85 54 31 86 53 33 87 51 36 88 50 38 89 49 40 ###Markdown My **metric Eddington Number is 76**, and to increase by 5 to 81 I would need 12 more rides. H-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications (and by Eddington's h-index-like number), I invented a new h-index, or hill-index:> *Your **h-index** is the maximum number**&nbsp;H** for which you can regularly climb an**&nbsp;H** percent grade at **&nbsp;H** miles per hour.*I think **my h-index is 6**, but let's draw a plot to confirm: ###Code show(pct2, mph2, xlabel='Grade (percent)', degree=3) plt.plot([6, 7], [6, 7], 'rD:'); ###Output _____no_output_____ ###Markdown Peter Norvig, Oct 2017Data updated monthly Bicycling StatisticsDuring a pandemic, bicycling is a great way to (1) spend some time, (2) get some exercise, (3) stay far enough away from others to be safe, and (4) generate some data to analyze. This notebook does the analysis and the companion notebook [**BikeCode.ipynb**](BikeCode.ipynb) has the implementation details. ###Code %run BikeCode.ipynb ###Output _____no_output_____ ###Markdown Rides and SegmentsI start by sampling some of my **rides** (over 20 miles) and **segments** (a mile or so) [from Strava](https://www.strava.com/athletes/575579). Later we'll see the unique **places** I've been, [from wandrer.earth](https://wandrer.earth/athletes/3534). ###Code rides segments ###Output _____no_output_____ ###Markdown We can get a feel for the data with these stats: ###Code rides.describe() segments.describe() ###Output _____no_output_____ ###Markdown Eddington NumberThe physicist [Sir Arthur Eddington](https://en.wikipedia.org/wiki/Arthur_Eddington), a contemporary of Einstein, was a pre-Strava bicyclist who favored this metric:> *Your [**Eddington Number**](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the largest integer **e** such that you have cycled at least**&nbsp;e** miles on at least**&nbsp;e** days.*I'll look at the **miles** column across all my **rides** to compute my Eddington number: ###Code Ed_number(miles) ###Output _____no_output_____ ###Markdown **My Eddington Number is 62**: I've ridden at least 62 miles on at least 62 days (but not 63 miles on 63 days). My number is above [the mean for Strava](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number), but not nearly as good as Eddington himself: he had a number of **84** when he died at age 62, and his roads, bicycles, and navigation aids were not nearly as nice as mine, so bravo zulu to him. I'm 64 years old; how many rides do I need to match my age? I call the number of needed rides the **Eddington gap**: ###Code Ed_gap(miles, 64) ###Output _____no_output_____ ###Markdown I need 6 more 64-mile-plus rides. 
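`Ed_gap` itself is defined in the companion BikeCode.ipynb, which isn't shown here; judging from the gap tables elsewhere in this analysis, it presumably amounts to something like this sketch: ###Code
def Ed_gap(distances, target: int) -> int:
    """Sketch: rides of at least `target` miles still needed to reach an Eddington number of `target`."""
    return target - sum(d >= target for d in distances)
###Output _____no_output_____ ###Markdown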
Here are the gaps to other numbers: ###Code {m: Ed_gap(miles, m) for m in range(63, 71)} ###Output _____no_output_____ ###Markdown Eddington was English, so he used miles, but we could convert to kilometers to get a **metric Eddington number**: ###Code Ed_number(kms(miles)) {k: Ed_gap(kms(miles), k) for k in range(86, 101, 2)} ###Output _____no_output_____ ###Markdown My **metric Eddington Number is 86**, and my gap to 100 is 37 rides.Here are some properties of Eddington numbers:- Your Eddington number is monotonic: it can never decrease over time. - To improve from an Eddington number of *n* to *n* + 1 can take as few as 1 ride, or as many as *n* + 1 rides. + *Suppose I have done 49 rides, each of exactly 50 miles. My Eddington number is 49.* + *With one 50 mile ride, my Eddington number becomes 50.* + *I would then need 51 new 51+ mile rides to reach 51.*- Your metric Eddington number will always be greater than or equal to your imperial Eddington number.- Your metric Eddington number will never be more than 1.609344 times your imperial Eddington number.- Of two riders, it is possible that one has a higher metric number and the other a higher imperial number.Here is my Eddington number progress over the years, in miles and kms: ###Code Ed_progress(range(2014, 2022)) ###Output _____no_output_____ ###Markdown *Note:* the definition of Eddington Number seems precise, but there is an ambiguity: what does ***day*** mean? The New Oxford dictionary mentions:- (a) a period of 24 hours; - (b) a unit of time, reckoned from one midnight to the next;- (c) the part of a day when it is light. I originally assumed (b), but I wanted to make an exception for [bikepacking](https://bikepacking.com/) trips where I rode to a camping site in the afternoon, pitched a tent for the night, and rode back home the next morning; what bikepackers call a [sub-24-hour overnight (S24O)](https://oneofsevenproject.com/s24o-bikepacking-guide/). And then COVID struck, the camping sites closed, and in response I further relaxed the definition to allow an S24O ride where I sleep in my own home. I realize Eddington had a lot more hardships than we have (World War I, the 1918 pandemic, and World War II), but I hope he would approve of this accomodation. Hill-Index: Speed versus GradeThe Eddington number is similar to the [**h-index**](https://en.wikipedia.org/wiki/H-index) metric for scientific publications. I invented another metric:> *Your **hill-index** is the maximum integer **h** where you can regularly climb an **h** percent grade at **h** miles per hour.*I'll plot grade versus speed for segments (not rides) with two best-fit curves: a blue quadratic and an orange cubic. I'll also superimpose a red dotted line where grade = speed. ###Code show(pct2, miles2 / hours2) plt.plot((2, 6, 7), (2, 6, 7), 'ro:'); ###Output _____no_output_____ ###Markdown Both best-fit curves are above the red circle at 6% and below the red circle for 7%, so **my hill-index is 6**. We also see that I can cruise at 15 to 17 mph on a 1% grade, but only about 8 mph at 5% grade, and around 5 mph on 8% grades. The plot above tells me how fast I should expect to climb a particular hill, but what about average time on longer rides? Here's a plot of all my rides; this time grade is listed in feet climbed per mile rather than in percent. 
###Code show(X=feet/miles, Y=miles/hours, xlabel='Ride Grade (feet/mile)') ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort (and maybe the wind) than on the grade of the road. But when the grade is steeper than 50 ft/mile, the speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and around 10 mph at 120 ft/mile. Note that 120 ft/mile is only 2.3% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 6.9% average grade on the up part.I can use this to predict the time of a ride. For example, if I'm in La Honda and want to get to Pescadero, which is faster: the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb)? We can estimate: ###Code f'Coast: {estimate(15.7, 344):.0f} min, Creek: {estimate(13.5, 735):.0f} min.' ###Output _____no_output_____ ###Markdown This predicts the shorter creek route would be about 8 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. VAMClimbing speed measured by vertical elevation ascended is known as [VAM](https://en.wikipedia.org/wiki/VAM_%28bicycling%29), which stands for *velocità ascensionale media* (for native Campagnolo speakers) or *mean ascent velocity* (for SRAM) or 平均上昇率 (for Shimano). The theory is that for steepish climbs, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (For flatish rides power is spent on wind and rolling resistance, and for the steepest of rides power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM versus grade: ###Code show(pct2, feet2 / hours2, ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown I don't see the flattening effect; I see a wide spread of VAM numbers; at about 8% I range from 1500 to 3000 VAM. Champions do over 6000. WanderingThe website [**wandrer.earth**](https://wandrer.earth) tracks the number of miles of distinct roads you have travelled on. It provides a fun way to get out and explore new roads. The site is gamified in a way that the biggest rewards are for first reaching 25% of the roads in each place, and then 90%. Note that the [underlying map](https://www.openstreetmap.org/) is periodically updated. Sometimes a big city (like "San Francisco") is split into neighborhoods (like "Sunset", "Mission", and "Presidio"); sometimes unincorporated areas (like "Ladera" and "Sequoia Tract") are introduced; sometimes boundaries change (e.g. "Emerald Lake Hills" doubled in size and road miles in the November 2020 update). 
These changes can cause my percentages to go either up or down. ###Code wandering(places) ###Output _____no_output_____ ###Markdown Peter Norvig, Oct 2017pandas Aug 2020Data updated monthly Bicycling StatisticsThis notebook investigates various measures and statistics about riding bicycles. Speed versus GradeLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded a bunch of my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). The columns are: the date; a title; the elapsed time of the ride in hours; the length of the ride in miles; and the total climbing in feet. I parse the file into the pandas dataframe `rides`: ###Code import matplotlib.pyplot as plt from IPython.core.display import HTML import numpy as np import pandas as pd def parse_hours(time: str) -> float: """Parse '4:30:00' to 4.5 hours.""" while time.count(':') < 2: time = '0:' + time return round(pd.Timedelta(time).seconds / 60 / 60, 3) def parse_int(num: str) -> int: """Parse a str as an int, ignoring commas.""" return int(num.replace(',', '')) rides = pd.read_table(open('bikerides25.tsv'), comment='#', converters={'hours': parse_hours, 'feet': parse_int}) rides ###Output _____no_output_____ ###Markdown We can get a feel for the data with the `describe` method, and with a scatter plot of the number of feet climbed per hour on the x-axis, versus the number of miles per hour on the y-axis: ###Code rides.describe() miles, hours, feet = [rides[attr] for attr in ('miles', 'hours', 'feet')] plt.scatter(feet/miles, miles/hours); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. I'll attempt to summarize the data by plotting three best-fit curves to the data: a straight line (degree 1 polynomial), a quadratic (degree 2) and a cubic (degree 3). I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degrees=(1, 2, 3)): """Plot X versus Y and a best fit curve to it, with some bells and whistles.""" plt.rcParams["figure.figsize"] = (9, 7) grid(); plt.ylabel(ylabel); plt.xlabel(xlabel) plt.scatter(X, Y, c='k', marker='+') X1 = np.linspace(min(X), max(X), 100) for degree in degrees: F = poly_fit(X, Y, degree) plt.plot(X1, [F(x) for x in X1], '-', lw=2) plt.title(f'{len(X)} data points') def grid(): plt.minorticks_on(); plt.grid(which='both', ls=':', alpha=0.7) def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(X=feet/miles, Y=miles/hours) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort (and maybe the wind) than on the grade of the road. But when the grade is steeper than 50 ft/mile, the speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and around 10 mph at 120 ft/mile. Note that 120 ft/mile is only 2.3% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 6.9% average grade on the up part. Estimating Time of a RideThis data can help me predict how long will it take to ride a route with a given distance and elevation. 
For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def estimate(dist, climb, F=poly_fit(feet/miles, miles/hours, 2)) -> float: """Given a distance in miles and total climb in feet, estimate time in minutes.""" return 60 * dist / F(climb / dist) f'Coast: {estimate(15.7, 344):.0f} min, Creek: {estimate(13.5, 735):.0f} min.' ###Output _____no_output_____ ###Markdown This predicts the shorter creek route would be 10% faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? To answer that I'll go back to Strava and retrieve data from individual **segments**, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. I picked some representative climbing segments ([`bikesegments.csv`](bikesegments.csv)) with the segment length in miles and climb in feet, along with several of my times on the segment. (Strava shows my times for up to 10 rides on each segment; I picked the fastest, middle, and slowest of the rides shown.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. Each line contains: 'segment_title, miles, feet_climb, time(s), ...'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: yield title, float(mi), parse_int(ft), parse_hours(time) segments = pd.DataFrame(parse_segments(open('bikesegments.csv')), columns=('title', 'miles', 'feet', 'hours')) segments ###Output _____no_output_____ ###Markdown For this plot I will list grade in percent rather than climbing feet per mile (a 1% grade is 52.8 feet per mile): ###Code miles2, hours2, feet2 = (segments[attr] for attr in ('miles', 'hours', 'feet')) pct2 = feet2 / miles2 * 100 / 5280 show(pct2, miles2 / hours2, xlabel='Segment Grade (percent)') ###Output _____no_output_____ ###Markdown So I can cruise at 15 to 17 mph on a 1% grade, but only about 8 mph at 5% grade, and around 5 mph on 8% grades. VAMThe average number of vertical feet (or meters) climbed per hour (regardless of horizontal distance travelled) is known as [VAM](https://en.wikipedia.org/wiki/VAM_%28bicycling%29), which stands for *velocità ascensionale media* (for native Campagnolo speakers) or *mean ascent velocity* (for SRAM) or 平均上昇率 (for Shimano). The theory is that for medium-steepish climbs, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. 
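A rough back-of-the-envelope check of that claim: if nearly all of the power goes into lifting, the required wattage is about mass × g × ascent rate. The 85 kg rider-plus-bike mass below is an assumption picked purely for illustration: ###Code
G = 9.81                  # gravitational acceleration, m/s^2
MASS_KG = 85              # rider + bike; an illustrative assumption, not a measurement
FT_PER_M = 3.28084

def climbing_watts(vam_ft_per_hour: float, mass_kg: float = MASS_KG) -> float:
    """Approximate steady power needed just to lift the mass at the given VAM,
    ignoring wind and rolling losses."""
    ascent_m_per_s = vam_ft_per_hour / FT_PER_M / 3600
    return mass_kg * G * ascent_m_per_s

{vam: round(climbing_watts(vam)) for vam in (1500, 2000, 3000, 6000)}   # roughly 106, 141, 212, 424 W
###Output _____no_output_____ ###Markdown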
(For flatish rides power is spent on wind and rolling resistance, and for super-steep rides power goes largely to cursing *sotto voce*, as they say in Italian.) Here's a plot of my VAM versus grade (in percent): ###Code show(pct2, feet2 / hours2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown I don't see the flattening effect; I see a wide spread of VAM numbers; at about 8% I range from 1500 to 3000 VAM. Champions can do 6000. Hill-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications, I invented a new metric, the hill-index:> *Your **hill-index** is the maximum number**&nbsp;h** for which you can regularly climb an**&nbsp;h** percent grade at **&nbsp;h** miles per hour.*I think **my hill-index is 6**. (I know that [Old La Honda](https://graphics.stanford.edu/~lucasp/grade/oldlahonda.html) is a 7% grade, and I only [achieved 7.0 mph](https://www.strava.com/segments/9343401?filter=my_results&gender=all) on it once out of many tries). But let's draw a plot to confirm: ###Code show(pct2, miles2 / hours2, xlabel='Grade (percent)') plt.plot(range(2, 8), range(2, 8), 'rD:', lw=2); ###Output _____no_output_____ ###Markdown The red dotted line is where grade = speed. All three best-fit curves are above the diamond at 6% and below the diamond for 7%, so my guess is confirmed: my hill-index is 6. Eddington NumberSpeaking of h-numbers, the physicist [Sir Arthur Eddington](https://en.wikipedia.org/wiki/Arthur_Eddington) (1882-1944), a contemporary of Einstein, was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the largest integer **e** such that you have bicycled at least**&nbsp;e** miles on at least**&nbsp;e** days.*Let's calculate mine: ###Code def E_number(distances) -> int: """Eddington number: The maximum integer e such that you have bicycled a distance of at least e on at least e days.""" distances = sorted(distances, reverse=True) return max(e for e, d in enumerate(distances, 1) if d >= e) E_number(miles) ###Output _____no_output_____ ###Markdown My **Eddington Number is 61**: I've done at least 61 miles on at least 61 days. This is a bit above [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number), but not as good as Eddington himself: he had a number of **84** when he died at age 62, and his roads, bicycles, and navigation aids were not nearly as nice as mine, so bravo for him.Here are some properties of Eddington numbers:- Your Eddington number is monotonic: it can never decrease over time. (This is also true of the academic h-index, but not my hill-index.)- To improve from an Eddington number of *n* to *n* + 1 can take as few as 1 ride, or as many as *n* + 1 rides. (Suppose I have done 49 rides, each of exactly 50 miles. 
Then my Eddington number is 49, and with one more 50-mile ride, my number becomes 50, but then I would need 51 rides of 51 miles each to achieve an Eddington number of 51.)- You could also compute a metric Eddington number, using kilometers rather than miles.- Your metric Eddington number will always be greater than or equal to your imperial Eddington number.- Your metric Eddington number will never be more than 1.609344 times your imperial Eddington number.- Of two riders, it is possible that one has a higher metric number and the other a higher imperial number.The following chart tells me how many rides I have to go&mdash;what I call the **Eddington Gap**&mdash;to reach various Eddington numbers: ###Code def E_gaps(distances, targets, unit='miles'): """Print the number of days doing various target distances, and the Eddington gap.""" rows = [(target, sum(distances > target), target - sum(distances > target)) for target in targets] return showdf(pd.DataFrame(rows, columns=[unit, 'days', 'gap'])) def showdf(df: pd.DataFrame) -> HTML: """Show a DataFrame as HTML, but without the index number.""" return HTML(df.to_html(index=False)) E_gaps(miles, range(61, 76)) ###Output _____no_output_____ ###Markdown An easy way to get bigger Eddington numbers is to use kilometers rather than miles: ###Code kms = 1.609344 * miles E_number(kms) E_gaps(kms, range(85, 101)) ###Output _____no_output_____ ###Markdown My **metric Eddington Number is 85**.Here is my Eddington number progress over the years, in miles and kms, from when I first started tracking rides: ###Code def year(ride) -> int: return int(ride.date[-4:]) def E_progress(years, rides=rides): """Print a table of Eddington numbers by year""" def row(y): miles = np.array([r.miles for r in rides.itertuples() if year(r) <= y]) kms = 1.609344 * miles E_ml, E_km = E_number(miles), E_number(kms) return [y, E_ml, E_km, round(E_km/E_ml, 2)] return showdf(pd.DataFrame(map(row, years), columns=['year', 'E_ml', 'E_km', 'ratio'])) E_progress(range(2014, 2021)) ###Output _____no_output_____ ###Markdown *Note:* the definition of Eddington Number seems to be mathematically precise, but there is one big ambiguity: what exactly does the word ***day*** mean? The New Oxford dictionary mentions (a) "a period of 24 hours"; (b) "a unit of time, reckoned from one midnight to the next"; and (c) "the part of a day when it is light". I originally assumed ***day*** meant "one midnight to the next," but I wanted to make an exception for bikepacking trips where I rode to a camping site in the afternoon, spent the night there, and rode back home the next morning; what bikepackers call a [sub-24-hour overnight (S24O)](https://oneofsevenproject.com/s24o-bikepacking-guide/). I thought that should count as a valid Eddington trip as long as it was completed in 24 hours. And then COVID struck, the camping sites closed, and in response I further relaxed the definition to allow an S24O ride where I sleep in my own home, as long as all the riding is within "a period of 24 hours." I realize Eddington had a lot more hardships than we have (the 1918 pandemic, World War I, and World War II), but I hope he would approve of this accomodation on my part. WanderingThe website [`wandrer.earth`](https://wandrer.earth) tracks the number of miles of distinct roads you have travelled on (taken from your Strava data). I realize I often repeat the same routes, so this gives me a chance to try something new. Below I plot the percentage of roads I've travelled in various places, across different months. 
###Code dates = '20/7', '20/8', '20/9', '20/10' # Year/Month data = { 'Earth': [ ('California', 361_433, 0.712, 0.811, 0.846, 0.867), ('USA', 6_270_601, 0.048, 0.052, 0.055, 0.05589), ('Earth', 37_010_550, 0.0081, 0.0089, 0.0091, 0.00936) ], 'Counties': [ # Miles 20/7, 20/8, 20/9, 20/10 ('Alameda', 5704, 3.3, 3.3, 3.3, 3.9), ('Marin', 2322, 6.7, 6.7, 6.7, 6.7), ('Napa', 1524, 5.1, 5.1, 5.1, 5.1), ('Sonoma', 4556, 5.1, 5.1, 5.1, 5.1), ('San Francisco', 1183, 4.5, 4.5, 4.5, 4.5), ('San Mateo', 3248, 20.1, 21.2, 22.9, 23.4), ('Santa Clara', 7412, 12.7, 13.6, 15.4, 15.6), ('Santa Cruz', 2767, 2.3, 2.3, 2.3, 2.3), ('Grant (OR)', 8049, 2.6, 2.6, 2.6, 2.6), ('Baker (OR)', 5170, 2.4, 2.4, 2.4, 2.4), ('New York (NY)', 6689, 0.4, 0.4, 0.4, 0.4), ], 'San Mateo County': [ ('Belmont', 98, 15.5, 17.3, 18.6, 18.6), ('East Palo Alto', 46, 74.4, 91.2, 91.9, 91.9), ('Emerald Lake Hills', 2, 0.0, 94.0, 94.3, 94.3), ('Foster City', 150, 9.1, 9.1, 9.1, 9.1), ('Kensington Square', 1, 86.9, 99.4, 100., 100.), ('Menlo Park', 139, 67.7, 76.8, 87.7, 90.7), ('North Fair Oaks', 27, 48.1, 90.4, 93.1, 93.8), ('Redwood City', 241, 34.0, 39.1, 46.0, 51.6), ('San Carlos', 99, 22.2, 26.0, 32.9, 32.9), ('San Mateo', 412, 11.1, 11.1, 11.1, 11.1), ('Woodside', 78, 51.9, 51.9, 52.3, 52.3), ], 'Santa Clara County': [ ('Cupertino', 172, 22.1, 23.9, 26.2, 26.2), ('Campbell', 120, 8.9, 10.1, 12.4, 12.4), ('Los Altos', 138, 40.2, 43.7, 72.4, 77.2), ('Los Altos Hills', 91, 48.4, 48.4, 49.0, 55.1), ('Los Gatos', 152, 7.5, 8.6, 8.8, 8.8), ('Monte Sereno', 21, 20.4, 20.4, 20.5, 20.5), ('Mountain View', 207, 53.0, 59.9, 63.0, 63.6), ('Palo Alto', 293, 63.0, 73.6, 85.4, 85.7), ('San Jose', 2545, 1.30, 1.36, 5.3, 5.3), ('Santa Clara', 348, 6.4, 6.4, 9.6, 9.6), ('Saratoga', 180, 14.5, 15.7, 17.4, 17.4), ('Sunnyvale', 361, 19.4, 19.9, 22.2, 22.2), ]} def wandrer(title, data=data, dates=dates): """Plot Wandrer.earth data.""" entries = sorted(data[title], key=lambda e: -e[-1]) months = [num_months(d) for d in dates] markers = 'osdhDHPX*><v^p|67' fig = plt.figure() ax = plt.subplot(111) for (place, miles, *pcts), m in zip(entries, markers): ax.plot(months, pcts, ':', marker=m, lw=1.5, label=f'{place}: {int(round(miles * pcts[-1] / 100)):,d}/{miles:,d} mi, {pcts[-1]}%') ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), shadow=True) plt.xticks(months, dates); grid() plt.ylabel('Percent of Area Ridden'); plt.xlabel('Year/Month') plt.title(title); plt.tight_layout(); plt.show() def num_months(date) -> int: """num_months("20/9") = 20 * 12 + 9 = 249""" year, month = date.split('/') return int(year) * 12 + int(month) for title in data: wandrer(title) ###Output _____no_output_____ ###Markdown Peter Norvig, 23 Oct 2017Revised 2 June 2019 Bicycling: Speed v. Grade, VAM, Eddington, Hill-indexLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded all my recorded [Strava](https://www.strava.com/athletes/575579) rides longer than 25 miles as [`bikerides25.tsv`](bikerides25.tsv). I parse the file into `rides`, a list of `Ride` structures. I filter out all the rides that have exactly 0 ft climbing (i.e. stationary bike rides) and that have `'MTB'` in the title (i.e. mountain bike rides that are not relevant to road bike riding). 
###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np import re from collections import namedtuple Ride = namedtuple('Ride', 'miles, hours, feet, date, title') def parse_ride(line: str) -> Ride: """Parse a line from a Strava log file into a `Ride`. Line format is tab separated: Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft""" _, date, title, time, mi, ft = line.strip().split('\t') return Ride(number(mi), hours(*time.split(':')[::-1]), number(ft), date, title) def number(string) -> float: return float(re.sub(r'[^0-9. ]', '', string)) def hours(sec, min, hour=0) -> float: return int(sec)/3600 + int(min)/60 + int(hour) rides = [parse_ride(line) for line in open('bikerides25.tsv') if 'MTB' not in line and ' 0 ft' not in line] ###Output _____no_output_____ ###Markdown From the raw data I will derive three important vectors of numbers: - `mls`: length of ride in miles- `mph`: speed in miles per hour- `fpm`: grade in feet climbed per mileI'll show a simple scatter plot visualization: ###Code def vectors3(rides) -> tuple: "Return vectors of (miles, miles/hour, feet/mile)" return ([r.miles for r in rides], [r.miles / r.hours for r in rides], [r.feet / r.miles for r in rides]) mls, mph, fpm = vectors3(rides) plt.scatter(fpm, mph); ###Output _____no_output_____ ###Markdown Making it PrettierAs expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. Just eyeballing the scatter plot, it looks like some kind of downward sloping curve would be a better fit than a straight line, so I'll fit quadratic (degree two) and cubic (degree 3) polynomials to the data (for no good theoretical reason; just because those are the two simplest nonlinear functions I could think of). I'll also make the plot prettier and bigger: ###Code def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', degrees=(2, 3)): "Plot X versus Y and a best fit curve to it, with some bells and whistles." plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') grid(); plt.ylabel(ylabel); plt.xlabel(xlabel) plt.scatter(X, Y, c='k') X1 = np.linspace(min(X), max(X), 100) for degree in degrees: F = poly_fit(X, Y, degree) plt.plot(X1, [F(x) for x in X1], '-', lw=2) plt.title(f'{len(X)} rides') def grid(): plt.minorticks_on(); plt.grid(which='minor', ls=':', alpha=0.7) def poly_fit(X, Y, degree) -> callable: """The polynomial function that best fits the X,Y vectors.""" coeffs = np.polyfit(X, Y, degree)[::-1] return lambda x: sum(c * x ** i for i, c in enumerate(coeffs)) show(fpm, mph) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from around 50 ft/mile grade speed falls off quickly: down to 12mph at 80 ft/mile; 11 mph at 100 ft/mile; and under 10 mph at 120 ft/mile. Note that even 120 ft/mile is only 2.3% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flat, then that's 6.8% average grade on the up part. Estimating DurationThis data can help me predict how long will it take to ride a route with a given distance and elevation. 
For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (15.7 miles, 344 ft climb), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1) (13.5 miles, 735 ft climb); which is faster? ###Code def duration(dist, climb, F=poly_fit(fpm, mph, 2)) -> float: "Given a distance in miles and total climb in feet, estimate time in minutes." return 60 * dist / F(climb / dist) duration(15.7, 344), duration(13.5, 735) ###Output _____no_output_____ ###Markdown This suggests the shorter creek route would be about 6 or 7 minutes faster. Good to know, but other factors (like the scenery) are probably more important in making the choice. SegmentsThe data I have so far helps me estimate the duration for a long ride over varying up-and-down terrain. But what if I just want to know how long it will take to climb the next hill? To answer that I'll go back to Strava and retrieve data from individual segments, some only a fraction of a mile long, some several miles long, but most with a relatively constant grade. I picked some representative climbing segments ([`bikesegments.csv`](bikesegments.csv)) with the segment length in miles and climb in feet, along with several of my times on the segment. (Strava shows my times for up to 10 rides on the segment; I picked the first, middle, and last of the rides shown.) ###Code def parse_segments(lines): """Parse a string of segments into Rides. Each line contains: 'title, miles, feet, time(s)'.""" for segment in lines: title, mi, ft, *times = segment.split(',') for time in times: time = hours(*time.split(':')[::-1]) yield Ride(float(mi), time, int(ft), 'date?', title) segments = set(parse_segments(open('bikesegments.csv'))) mls2, mph2, fpm2 = vectors3(segments) ###Output _____no_output_____ ###Markdown For the plot I will list grade in percent rather than feet per mile: ###Code pct2 = [f * 100 / 5280 for f in fpm2] show(pct2, mph2, xlabel='Segment Grade (percent)') ###Output _____no_output_____ ###Markdown So I can cruise at 17 mph on a 1% grade, but only a bit over 6 mph at 6% grade, and around 5 mph on 8% grades. VAMThe number of vertical feet (or meters) climbed per hour (regardless of horizontal distance travelled) is known as [VAM](https://en.wikipedia.org/wiki/VAM_%28bicycling%29), which stands for *velocità ascensionale media* (for native Campagnolo speakers) or *mean ascent velocity* (for SRAM) or 平均上昇率 (for Shimano). Legend has it that at grades between about 6% to 12%, most of your power is going into lifting against gravity, so your VAM should be about constant no matter what the grade in that range. (I guess that below that grade, power is spent on wind and rolling resistance, and above it, power goes largely to cursing *sotto voce*, as they say in Italian.) 
Here's a plot of VAM versus grades (in percent): ###Code vam2 = [r.feet / r.hours for r in segments] show(pct2, vam2, xlabel='Grade (percent)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown I don't see the flattening effect; I see a wide spread of VAM numbers; at about 8% I range from 1500 to 3000 VAM. Champions can do 6000. Another way to look at it: at what speed am I getting the most VAM (implicitly assuming I'm giving a similar level of effort at all times, which is not really true)? The curve says 5½ mph, but the data is pretty noisy. ###Code show(mph2, vam2, xlabel='Speed(mph)', ylabel='VAM (vertical feet per hour)') ###Output _____no_output_____ ###Markdown Hill-IndexInspired by the [h-index](https://en.wikipedia.org/wiki/H-index) for scientific publications, I invented a new metric, the hill-index:> *Your **hill-index** is the maximum number**&nbsp;h** for which you can regularly climb an**&nbsp;h** percent grade at **&nbsp;h** miles per hour.*I think **my hill-index is 6**, but let's draw a plot to confirm: ###Code points = [2, 6, 7] show(pct2, mph2, xlabel='Grade (percent)') plt.plot(points, points, 'gD-', lw=2); ###Output _____no_output_____ ###Markdown My guess is confirmed: the green line is where grade = speed, the red and blue lines are best-fit polynomials, and if I interpret "regularly" to mean "about half the time," then at 6% grade the best-fit line and 4/8 rides are above the diamond at (6,6), but not so at 7%. Note that the original publication h-index is based on cumulative totals, so it can never decrease, but the Hill-index is based on an average so it can (and sadly, often does) decrease as you grow older. Eddington NumberThe British physicist Sir Arthur Eddington (1882-1944) was a pre-Strava bicyclist who favored this metric:> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the maximum integer**&nbsp;e** such that you have bicycled at least**&nbsp;e** miles on at least**&nbsp;e** days.*Let's calculate mine: ###Code def Eddington(distances): "Eddington number: max integer e such that there are e or more distances >= e." def at_least(e): return sum(d >= e for d in distances) >= e return max(filter(at_least, range(len(distances) + 1))) Eddington(mls) ###Output _____no_output_____ ###Markdown My **Eddington Number is 54**: I've done at least 54 miles on at least 54 days. This is a bit above [the mean for Strava users](https://swinny.net/Cycling/-4687-Calculate-your-Eddington-Number). Eddington was about my age when he died with a number of **77**, and his available roads, bicycles, and navigation aids were not nearly as nice as mine, so good for him.If I wanted a bigger number, I could use kilometers: ###Code kms = [m * 5280 * 12 * 2.54 / 100000 for m in mls] Eddington(kms) ###Output _____no_output_____ ###Markdown My **metric Eddington Number is 77**.How many more rides would I need to improve my numbers? Obviously, for anyone to improve from 54 to 55 they need at least 1 more ride, but if some of their previous rides are between 54 and 55 miles, they won't count towards an Eddington number of 55, so more than one ride would be needed. 
Here is a table of the number of rides I would need to improve&mdash;what I'll call the *gap*&mdash;for increasingly ambitious numbers (measured in both miles and kilometers): ###Code def Egaps(distances, rows, unit='Mls'): """Print the number of days doing various distances, and the Eddington gap.""" print(f'{unit} Days Gap') print(f'--- ---- ---') for dist in rows: days = sum(d >= dist for d in distances) print(f'{dist:3} {days:3} {dist - days:2}') Egaps(mls, range(54, 67)) ###Output Mls Days Gap --- ---- --- 54 54 0 55 52 3 56 51 5 57 48 9 58 45 13 59 40 19 60 39 21 61 33 28 62 30 32 63 28 35 64 26 38 65 24 41 66 23 43 ###Markdown OK, if I do one 65 mile ride every 3 weeks, I could have my Eddington number match my age before I turn 65. ###Code Egaps(kms, range(77, 91), 'Kms') ###Output Kms Days Gap --- ---- --- 77 79 -2 78 77 1 79 76 3 80 75 5 81 72 9 82 64 18 83 63 20 84 58 26 85 57 28 86 56 30 87 54 33 88 53 35 89 52 37 90 51 39 ###Markdown Peter Norvig, 23 Oct, 2017 Bike Speed versus GradeLike most people, I bike slower when I'm going up a steep hill than on a flat road. But how much slower?To answer that, I downloaded all my recorded past rides longer than 20 miles as ([bikerides20.txt](bikerides20.txt)). (I started recording using [Strava](https://www.strava.com/athletes/575579) in mid-2013, but didn't use it across all rides until 2015). Here I parse the file: ###Code %matplotlib inline import matplotlib.pyplot as plt import numpy as np from collections import namedtuple line = 'Ride Thu, 8/9/2018 BRNW 4:58:07 68.41 mi 3,862 ft ' # Sample line Ride = namedtuple('Ride', 'miles, feet, kind, date, title, hours') def parse_ride(line) -> Ride: "Parse a line from a Strava datafile into a `Ride`." kind, date, title, time, mi, ft = line.strip().split('\t') times = [int(t) for t in reversed(time.split(':'))] miles = float(mi.split()[0]) feet = int(ft.split()[0].replace(',', '')) return Ride(miles, feet, kind, date, title, hours(*times)) def hours(sec, min, hour=0): return sec / 3600 + min / 60 + hour all_rides = {parse_ride(line) for line in open('bikerides20.txt')} ###Output _____no_output_____ ###Markdown I'm interested in my "serious" rides, so I'll focus on `rides` that are at least a marathon (26 miles) in length, and for those rides collect three vectors of numbers: miles per hour, feet climbed per mile, and total miles: ###Code rides = [r for r in all_rides if r.miles > 26] mph = [r.miles / r.hours for r in rides] fpm = [r.feet / r.miles for r in rides] mls = [r.miles for r in rides] plt.scatter(fpm, mph); ###Output _____no_output_____ ###Markdown As expected, there is a lot of variance, but overall speeds get slower as the grade gets steeper. Just eyeballing it, it looks like a curve would be a better fit than a straight line, so I'll fit a quadratic (degree two) polynomial to the data, and make the plot prettier: ###Code def poly(X, Y, n): "Best-fit degree-n polynomial for X, Y data." 
C = np.polyfit(X, Y, n)[::-1] # Array of coefficients, reversed return lambda x: sum(C[i] * x ** i for i in range(n + 1)) def show(X, Y, xlabel='Grade (feet/mile)', ylabel='Speed (mph)', fit=True): plt.rcParams["figure.figsize"] = (12, 10) plt.style.use('fivethirtyeight') plt.scatter(X, Y) X1 = list(range(int(min(X)), int(max(X)) + 5)) plt.ylabel(ylabel); plt.xlabel(xlabel) plt.minorticks_on() plt.grid(True, which='major') plt.grid(True, which='minor', alpha=0.4) if fit: F = poly(X, Y, 2) plt.plot(X1, [F(x) for x in X1], 'k-') show(fpm, mph) ###Output _____no_output_____ ###Markdown So, I average a little under 14 mph when the overall route is fairly flat, with a lot of variability from 12 to 16 mph, depending more on my level of effort than on the grade of the road. But from around 40 or 50 ft/mile, speed falls off quickly, and by 140 ft/mile, I'm down near 8 mph. Note that 140 ft/mile is only 2.7% grade, but if you figure a typical route is 1/3 up, 1/3 down, and 1/3 flatish, then that's 8% grade on the up part. Estimating DurationFor a route with a given distance and elevation, how long will it take to ride it? ###Code def duration(dist, climb, F=poly(fpm, mph, 2)): "Given a distance in miles and total climb in feet, return estimated time in minutes." return 60 * dist / F(climb / dist) ###Output _____no_output_____ ###Markdown For example, to get to Pescadero from La Honda, I could take the flatter [coast route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.4039496!2d37.3116594!3s0x808f062b7d7585e7:0x942480c22f110b74!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1), or the shorter [creek route](https://www.google.com/maps/dir/La+Honda,+California/Pescadero,+California/@37.2905834,-122.3896683,12z/data=!4m19!4m18!1m10!1m1!1s0x808faed4dc6265bd:0x51a109d3306a7219!2m2!1d-122.274227!2d37.3190255!3m4!1m2!1d-122.3658887!2d37.2538867!3s0x808f00acf265bd43:0xb7e2a0c9ee355c3a!1m5!1m1!1s0x808f00b4b613c4c1:0x43c609077878b77!2m2!1d-122.3830152!2d37.2551636!3e1); which is faster? ###Code duration(15.7, 344), duration(13.5, 735) ###Output _____no_output_____ ###Markdown This suggests the coast route takes 7 or 8 minutes longer. Good to know, but other factors (like the scenery) are probably more important in making the choice. Eddington Number> *Your [Eddington Number](https://www.triathlete.com/2011/04/training/measuring-bike-miles-eddington-number_301789) is the largest integer **e** for which you have bicycled **e** or more miles on **e** different days.*Let's calculate mine, and see how many rides I need to take to improve it. I'm pretty sure I've done at least 50 rides of 50 miles, so: ###Code def eddington(distances): return max(e for e, d in enumerate(sorted(distances, reverse=True), 1) if d >= e) eddington(mls) ###Output _____no_output_____ ###Markdown So **my Eddington Number is 52**: I've done 52 rides of 52 miles or more. 
Let's see a plot of how far I have to go to improve this number: ###Code def edplot(distances, X): M = [min(X), max(X)] plt.plot(M, M, 'r-') plt.title('Eddington Number = {}'.format(eddington(distances))) show(X, [sum(d > x for d in distances) for x in X], fit=False, xlabel='Ride Distance', ylabel='Number of Rides longer than distance') edplot(mls, range(46, 81)) ###Output _____no_output_____ ###Markdown Here the gap between the straight line and the dotted line is my Eddington gap: the lines cross at 52, so that's my Eddington Number; at 60 miles the gap is 28, so I need 28 more rides of 60+ miles to get an Eddington number of 60. Metric Eddington Number[Arthur Eddington](https://en.wikipedia.org/wiki/Arthur_Eddington) was born in 19th century England, so he used miles. You can get a bigger number using kilometers: ###Code edplot([m * 1.60934 for m in mls], range(70, 101)) ###Output _____no_output_____
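###Markdown
As a quick check on the Eddington-number properties discussed earlier (one extra ride can raise the number from n to n + 1, but it can also take as many as n + 1 new rides, and the metric number is at most 1.609344 times the imperial one), here is a small self-contained sketch. The `eddington` helper is restated locally under the same definition used above so the cell runs on its own; the ride distances are made-up toy data, not my Strava log.
###Code
def eddington(distances):
    """Max integer e such that at least e of the distances are >= e."""
    return max((e for e, d in enumerate(sorted(distances, reverse=True), 1) if d >= e),
               default=0)

rides = [50.0] * 49                            # toy data: 49 rides of exactly 50 miles
assert eddington(rides) == 49
rides.append(50.0)                             # a single extra 50-mile ride...
assert eddington(rides) == 50                  # ...raises the number from 49 to 50
assert eddington(rides + [51.0] * 50) == 50    # 50 longer rides are still not enough for 51
assert eddington(rides + [51.0] * 51) == 51    # it takes 51 rides of 51+ miles to reach 51

kms = [1.609344 * m for m in rides]            # the same rides measured in kilometers
assert eddington(rides) <= eddington(kms) <= 1.609344 * eddington(rides)
print(eddington(rides), eddington(kms))
###Output
 _____no_output_____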
catboost/tutorials/model_analysis/model_export_as_json_tutorial.ipynb
###Markdown CatBoost JSON model tutorial CatBoost supports exporting model to JSON format and loading model from it.This tutorial explains the structure of the JSON model with numeric features only. Download MSRank dataset ###Code import catboost from catboost import datasets import os import numpy as np train_df, _ = datasets.msrank() X, Y = train_df[train_df.columns[1:]], train_df[train_df.columns[0]] pool = catboost.Pool( data=X[:1000], # top 1000 documents are enough for this example label=Y[:1000], feature_names=list(X.columns) ) ###Output _____no_output_____ ###Markdown Now we will train a simple model with trees of depth 2 ###Code cls = catboost.CatBoostClassifier(depth=2, random_seed=0, iterations=10, verbose=False) cls.fit(pool) approx = cls.predict(X[0:3], prediction_type="RawFormulaVal") ###Output _____no_output_____ ###Markdown The next block save JSON model to file. ###Code cls.save_model( "model.json", format="json", # pool=pool # this parameter is required only for models with categorical features. ) ###Output _____no_output_____ ###Markdown The next block loads model from file as JSON and shows its keys.The model json contains __model_info__, __oblivious_trees__ and __features_info__. Model with categorical features will also conatain __ctrs__. ###Code import json model = json.load(open("model.json", "r")) model.keys() ###Output _____no_output_____ ###Markdown Model infomodel['model_info'] is analogue for [get_metadata()](https://tech.yandex.com/catboost/doc/dg/concepts/python-reference_catboost_metadata-docpage/) function.You can look on the training parameters the model was trained with or on catboost version that the model was trained with. ###Code print(model['model_info']['catboost_version_info']) ###Output Svn info: URL: svn+ssh://arcadia.yandex.ru/arc/trunk/arcadia Last Changed Rev: 5437739 Last Changed Author: dkvasov Last Changed Date: 2019-08-08T13:43:02.471284Z Other info: Build by: eermishkina Top src dir: /place/home/eermishkina/trunc/arcadia Top build dir: /home/eermishkina/.ya/build Hostname: su57.search.yandex.net Host information: Linux su57.search.yandex.net 4.4.88-42 #1 SMP Mon Sep 18 14:33:37 UTC 2017 x86_64 ###Markdown Features info Let's look now on the features_info value.It could contain $float\_features$, $categorical\_features$ and $ctrs$, which are lists of descriptions of some features. ###Code model['features_info'].keys() ###Output _____no_output_____ ###Markdown In our case (model without cateforical features) there are only one fields - $float\_features$. Every float feature is described in the following way:__flat_feature_index__ (int) - feature index in pool, zero-based indexation __feature_index__ (int) index among only float features, zero-based indexation. For example, in dataset that looks like \[float, categ, float\], the second float feature has indices $flat\_feature\_index = 2$ and $feature\_index = 1$. Because it is the 2 feature of all in 0 based indexation and 1 feature of numeric ones (here we exclude the caterorical feature).For a model without categorical features $feature\_index$ will be equal to $float\_feature\_index$ for every feature.__borders__ (list of all borders (or splits) used in the model for this particular float feature). Float feature values can be splits by $border$ value. 
All elements with feature value $> border$ go to the left subtree, and all elements with feature value ($<= border$) elements go to the right subtree.__has_nans__ (bool) This field shows if there were any nan values in the training dataset, which was used to train the model.__nan_value_treatment__ ('AsIs', 'AsTrue' or 'AsFalse') If the feature has had nan values in the training dataset, then there is an additional split that puts nans to the left and everything else to the right (if 'AsFalse') or an additional split that puts nans to the right and everything else to the left (if 'AsTrue').'AsIs' is internal default value for features without nan values. ###Code model['features_info']['float_features'][0] ###Output _____no_output_____ ###Markdown This shows that the first feature has a list of borders that were used in the model. And this feature had no nan values in train. Symmetric trees CatBoost uses so-called symmetric or oblivious trees. For each level of the tree CatBoost uses the same features to split learning instances into the left and the right partitions: on the first level tree is partitioned by first split into two parts, on the second level each subtree splits with second split and so on.In this case a tree of depth $k$ has exactly $2^k$ leaves and $k$ splits, each split on a subsequent layer.There are three types of splits: "FloatFeature", "OneHotFeature" and "OnlineCtr". A model without cateforical features contains only float feature slits. Now, let's look on how JSON model describes a single tree.A tree of depth $k$ is described by $2^k$ leaf_values, $2^k$ leaf_weights and $k$ splits. Let's take a look on the first tree of our model. ###Code def dump_json(item): print(json.dumps(item, indent=2)) dump_json(model['oblivious_trees'][0]) # first_tree ###Output { "leaf_values": [ 0.022173912547853145, 0.017826086558078085, 0.011304347573415556, -0.02565217333967221, -0.02565217333967221, 0.07652173742004059, -0.016956521360122625, -0.019130434355010134, -0.02130434734989765, -0.019130434355010148, 0.02407969585645712, 0.027495255552409406, 0.0035863376807432745, -0.026869069608165402, -0.028292219481478875, 0.07398772840759553, -0.004233128739738201, -0.012975459832675437, -0.0286196312621424, -0.02815950857304045 ], "splits": [ { "split_index": 4, "float_feature_index": 16, "border": 10.407758712768555, "split_type": "FloatFeature" }, { "split_index": 3, "float_feature_index": 13, "border": 3.5, "split_type": "FloatFeature" } ], "leaf_weights": [ 123, 54, 512, 311 ] } ###Markdown The list of "leaf_values" describes the values in leaves. This is a tree with 4 leaves. It has depth 2, which means it has two different splits. The first split is used to split all the objects into left and right.And the second split is used twice, to split the left objects into two parts, and to split the right objects into two parts.The indices in the list can be represented using base-2 numeral system in the following way: 00, 01, 10, 11. Leaf 00 is the leaf where the 0-th split and the 1-st split are equal to False. Leaf 01 contains the objects where 0-th split is equal to False and 1-st feature is equal to True. And so on.The next part of the tree description is called "leaf_weights". This list represents sum of weights of training samples, that are in this leaf. Leaf indexation in this list is the same as in "leaf_values".The last part is "splits", and it is description of the two splits that are used in the tree of depth two.Each of the descriptions contains several key-values. 
Firstly, it contains internal CatBoost parameter $split\_index$. This is the only parameter that is used by catboost when loading the model, all other parts of in "splits" are ignored (they are duplicated in a different place), and are present here only to do the model easier to understand. Let's first describe the other fields. Split type "FloatFeature" means that it is so called 'float split'. Float split condition $float\_feature[float\_feature\_index] > border$ (see above in [Features info](features_info)) is decripted with "float_feature_index" and "border" accordingly. This description should enable you to analyze the model.But if you will want to change the model, you will have to change "split_index" in a right way.To to do that let's explain, how this feature is built. Look one more time at features info: ###Code feature = model['features_info']['float_features'][0] feature ###Output _____no_output_____ ###Markdown Each float split determines by feature index and border value, hence feature description specifies len(feature['borders']) splits. List all float splits with first feature from model['features_info'] with border value from features $borders$ list, with second feature and so on. This is the order, in which splits are enumerated in model. Splits numbering begins with 0. Build split list in this order ###Code split_list = [] for float_feature in model['features_info']['float_features']: if not float_feature['borders']: continue for border in float_feature['borders']: split_list.append( { 'split_index': len(split_list), 'float_feature_index': float_feature['feature_index'], 'border_id': border, 'split_type': 'FloatFeature', 'flat_feature_index': float_feature['flat_feature_index'] } ) ###Output _____no_output_____ ###Markdown Ensure, that splits in first tree and corresponding splits from obtained above split_list are identical ###Code first_tree = model['oblivious_trees'][0] first_tree['splits'] split_indexes = [x['split_index'] for x in first_tree['splits']] [split_list[index] for index in split_indexes] ###Output _____no_output_____ ###Markdown Multiclassification The only difference between classification or regression vs multiclassification is the leaves count in each tree. Model contains leaf values for each class, so tree depth of $k$ has $2^k$ leafs and in json model are stored $2^k \cdot classes\_count$ leaf values in this order: first $2^k$ values for first class, second $2^k$ values for second and so on. Leaf weights count is $2^k$ as they are the same for all classes. Look at first tree of multiclass model trained on [Iris](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset. ###Code # Get Iris dataset from sklearn import datasets iris = datasets.load_iris() # Train the model cls_multilclass = catboost.CatBoostClassifier(loss_function='MultiClass', depth=2, random_seed=0, verbose=False) cls_multilclass.fit(iris.data, iris.target) # Save model cls_multilclass.save_model( "multiclass_model.json", format="json", # pool=pool # is required for model with cat_features to obtain applicable model ) multilclass_model = json.load(open("multiclass_model.json", "r")) multilclass_model['oblivious_trees'][0] ###Output _____no_output_____ ###Markdown Truncate modelModel can be modified and applied. 
Truncate and apply model ###Code trees = model['oblivious_trees'][:] approx # = cls.predict(X[0:3], prediction_type="RawFormulaVal") model['oblivious_trees'] = trees[0:5] # use only first 5 trees json.dump(model, open("head_model.json", "w")) # Save modified model cls.load_model("head_model.json", "json") # load model cls.predict(X[0:3], prediction_type="RawFormulaVal") # apply model model['oblivious_trees'] = trees[5:] # drop first 5 trees json.dump(model, open("tail_model.json", "w")) # Save modified model cls.load_model("tail_model.json", "json") # load model cls.predict(X[0:3], prediction_type="RawFormulaVal") # apply model ###Output _____no_output_____ ###Markdown CatBoost JSON model tutorial CatBoost supports exporting model to JSON format and loading model from it.This tutorial explains the structure of the JSON model with numeric features only. Download MSRank dataset ###Code import catboost from catboost import datasets import os import numpy as np train_df, _ = datasets.msrank_10k() X, Y = train_df[train_df.columns[1:]], train_df[train_df.columns[0]] pool = catboost.Pool( data=X[:1000], # top 1000 documents are enough for this example label=Y[:1000], feature_names=list(X.columns) ) ###Output _____no_output_____ ###Markdown Now we will train a simple model with trees of depth 2 ###Code cls = catboost.CatBoostClassifier(depth=2, random_seed=0, iterations=10, verbose=False) cls.fit(pool) approx = cls.predict(X[0:3], prediction_type="RawFormulaVal") ###Output _____no_output_____ ###Markdown The next block save JSON model to file. ###Code cls.save_model( "model.json", format="json", # pool=pool # this parameter is required only for models with categorical features. ) ###Output _____no_output_____ ###Markdown The next block loads model from file as JSON and shows its keys.The model json contains __model_info__, __oblivious_trees__ and __features_info__. Model with categorical features will also conatain __ctrs__. ###Code import json model = json.load(open("model.json", "r")) model.keys() ###Output _____no_output_____ ###Markdown Model infomodel['model_info'] is analogue for [get_metadata()](https://tech.yandex.com/catboost/doc/dg/concepts/python-reference_catboost_metadata-docpage/) function.You can look on the training parameters the model was trained with or on catboost version that the model was trained with. ###Code print(model['model_info']['catboost_version_info']) ###Output Svn info: URL: svn+ssh://arcadia.yandex.ru/arc/trunk/arcadia Last Changed Rev: 5437739 Last Changed Author: dkvasov Last Changed Date: 2019-08-08T13:43:02.471284Z Other info: Build by: eermishkina Top src dir: /place/home/eermishkina/trunc/arcadia Top build dir: /home/eermishkina/.ya/build Hostname: su57.search.yandex.net Host information: Linux su57.search.yandex.net 4.4.88-42 #1 SMP Mon Sep 18 14:33:37 UTC 2017 x86_64 ###Markdown Features info Let's look now on the features_info value.It could contain $float\_features$, $categorical\_features$ and $ctrs$, which are lists of descriptions of some features. ###Code model['features_info'].keys() ###Output _____no_output_____ ###Markdown In our case (model without cateforical features) there are only one fields - $float\_features$. Every float feature is described in the following way:__flat_feature_index__ (int) - feature index in pool, zero-based indexation __feature_index__ (int) index among only float features, zero-based indexation. 
For example, in dataset that looks like \[float, categ, float\], the second float feature has indices $flat\_feature\_index = 2$ and $feature\_index = 1$. Because it is the 2 feature of all in 0 based indexation and 1 feature of numeric ones (here we exclude the caterorical feature).For a model without categorical features $feature\_index$ will be equal to $float\_feature\_index$ for every feature.__borders__ (list of all borders (or splits) used in the model for this particular float feature). Float feature values can be splits by $border$ value. All elements with feature value $> border$ go to the left subtree, and all elements with feature value ($<= border$) elements go to the right subtree.__has_nans__ (bool) This field shows if there were any nan values in the training dataset, which was used to train the model.__nan_value_treatment__ ('AsIs', 'AsTrue' or 'AsFalse') If the feature has had nan values in the training dataset, then there is an additional split that puts nans to the left and everything else to the right (if 'AsFalse') or an additional split that puts nans to the right and everything else to the left (if 'AsTrue').'AsIs' is internal default value for features without nan values. ###Code model['features_info']['float_features'][0] ###Output _____no_output_____ ###Markdown This shows that the first feature has a list of borders that were used in the model. And this feature had no nan values in train. Symmetric trees CatBoost uses so-called symmetric or oblivious trees. For each level of the tree CatBoost uses the same features to split learning instances into the left and the right partitions: on the first level tree is partitioned by first split into two parts, on the second level each subtree splits with second split and so on.In this case a tree of depth $k$ has exactly $2^k$ leaves and $k$ splits, each split on a subsequent layer.There are three types of splits: "FloatFeature", "OneHotFeature" and "OnlineCtr". A model without cateforical features contains only float feature slits. Now, let's look on how JSON model describes a single tree.A tree of depth $k$ is described by $2^k$ leaf_values, $2^k$ leaf_weights and $k$ splits. Let's take a look on the first tree of our model. ###Code def dump_json(item): print(json.dumps(item, indent=2)) dump_json(model['oblivious_trees'][0]) # first_tree ###Output { "leaf_values": [ 0.022173912547853145, 0.017826086558078085, 0.011304347573415556, -0.02565217333967221, -0.02565217333967221, 0.07652173742004059, -0.016956521360122625, -0.019130434355010134, -0.02130434734989765, -0.019130434355010148, 0.02407969585645712, 0.027495255552409406, 0.0035863376807432745, -0.026869069608165402, -0.028292219481478875, 0.07398772840759553, -0.004233128739738201, -0.012975459832675437, -0.0286196312621424, -0.02815950857304045 ], "splits": [ { "split_index": 4, "float_feature_index": 16, "border": 10.407758712768555, "split_type": "FloatFeature" }, { "split_index": 3, "float_feature_index": 13, "border": 3.5, "split_type": "FloatFeature" } ], "leaf_weights": [ 123, 54, 512, 311 ] } ###Markdown The list of "leaf_values" describes the values in leaves. This is a tree with 4 leaves. It has depth 2, which means it has two different splits. The first split is used to split all the objects into left and right.And the second split is used twice, to split the left objects into two parts, and to split the right objects into two parts.The indices in the list can be represented using base-2 numeral system in the following way: 00, 01, 10, 11. 
Leaf 00 is the leaf where the 0-th split and the 1-st split are equal to False. Leaf 01 contains the objects where 0-th split is equal to False and 1-st feature is equal to True. And so on.The next part of the tree description is called "leaf_weights". This list represents sum of weights of training samples, that are in this leaf. Leaf indexation in this list is the same as in "leaf_values".The last part is "splits", and it is description of the two splits that are used in the tree of depth two.Each of the descriptions contains several key-values. Firstly, it contains internal CatBoost parameter $split\_index$. This is the only parameter that is used by catboost when loading the model, all other parts of in "splits" are ignored (they are duplicated in a different place), and are present here only to do the model easier to understand. Let's first describe the other fields. Split type "FloatFeature" means that it is so called 'float split'. Float split condition $float\_feature[float\_feature\_index] > border$ (see above in [Features info](features_info)) is decripted with "float_feature_index" and "border" accordingly. This description should enable you to analyze the model.But if you will want to change the model, you will have to change "split_index" in a right way.To to do that let's explain, how this feature is built. Look one more time at features info: ###Code feature = model['features_info']['float_features'][0] feature ###Output _____no_output_____ ###Markdown Each float split determines by feature index and border value, hence feature description specifies len(feature['borders']) splits. List all float splits with first feature from model['features_info'] with border value from features $borders$ list, with second feature and so on. This is the order, in which splits are enumerated in model. Splits numbering begins with 0. Build split list in this order ###Code split_list = [] for float_feature in model['features_info']['float_features']: if not float_feature['borders']: continue for border in float_feature['borders']: split_list.append( { 'split_index': len(split_list), 'float_feature_index': float_feature['feature_index'], 'border_id': border, 'split_type': 'FloatFeature', 'flat_feature_index': float_feature['flat_feature_index'] } ) ###Output _____no_output_____ ###Markdown Ensure, that splits in first tree and corresponding splits from obtained above split_list are identical ###Code first_tree = model['oblivious_trees'][0] first_tree['splits'] split_indexes = [x['split_index'] for x in first_tree['splits']] [split_list[index] for index in split_indexes] ###Output _____no_output_____ ###Markdown Multiclassification The only difference between classification or regression vs multiclassification is the leaves count in each tree. Model contains leaf values for each class, so tree depth of $k$ has $2^k$ leafs and in json model are stored $2^k \cdot classes\_count$ leaf values in this order: first $2^k$ values for first class, second $2^k$ values for second and so on. Leaf weights count is $2^k$ as they are the same for all classes. Look at first tree of multiclass model trained on [Iris](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset. 
###Code # Get Iris dataset from sklearn import datasets iris = datasets.load_iris() # Train the model cls_multilclass = catboost.CatBoostClassifier(loss_function='MultiClass', depth=2, random_seed=0, verbose=False) cls_multilclass.fit(iris.data, iris.target) # Save model cls_multilclass.save_model( "multiclass_model.json", format="json", # pool=pool # is required for model with cat_features to obtain applicable model ) multilclass_model = json.load(open("multiclass_model.json", "r")) multilclass_model['oblivious_trees'][0] ###Output _____no_output_____ ###Markdown Truncate modelModel can be modified and applied. Truncate and apply model ###Code trees = model['oblivious_trees'][:] approx # = cls.predict(X[0:3], prediction_type="RawFormulaVal") model['oblivious_trees'] = trees[0:5] # use only first 5 trees json.dump(model, open("head_model.json", "w")) # Save modified model cls.load_model("head_model.json", "json") # load model cls.predict(X[0:3], prediction_type="RawFormulaVal") # apply model model['oblivious_trees'] = trees[5:] # drop first 5 trees json.dump(model, open("tail_model.json", "w")) # Save modified model cls.load_model("tail_model.json", "json") # load model cls.predict(X[0:3], prediction_type="RawFormulaVal") # apply model ###Output _____no_output_____ ###Markdown CatBoost JSON model tutorial [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/catboost/tutorials/blob/master/model_analysis/model_export_as_json_tutorial.ipynb)CatBoost supports exporting model to JSON format and loading model from it.This tutorial explains the structure of the JSON model with numeric features only. Download MSRank dataset ###Code import catboost from catboost import datasets import os import numpy as np train_df, _ = datasets.msrank_10k() X, Y = train_df[train_df.columns[1:]], train_df[train_df.columns[0]] pool = catboost.Pool( data=X[:1000], # top 1000 documents are enough for this example label=Y[:1000], feature_names=list(X.columns) ) ###Output _____no_output_____ ###Markdown Now we will train a simple model with trees of depth 2 ###Code cls = catboost.CatBoostClassifier(depth=2, random_seed=0, iterations=10, verbose=False) cls.fit(pool) approx = cls.predict(X[0:3], prediction_type="RawFormulaVal") ###Output _____no_output_____ ###Markdown The next block save JSON model to file. ###Code cls.save_model( "model.json", format="json", # pool=pool # this parameter is required only for models with categorical features. ) ###Output _____no_output_____ ###Markdown The next block loads model from file as JSON and shows its keys.The model json contains __model_info__, __oblivious_trees__ and __features_info__. Model with categorical features will also conatain __ctrs__. ###Code import json model = json.load(open("model.json", "r")) model.keys() ###Output _____no_output_____ ###Markdown Model infomodel['model_info'] is analogue for [get_metadata()](https://tech.yandex.com/catboost/doc/dg/concepts/python-reference_catboost_metadata-docpage/) function.You can look on the training parameters the model was trained with or on catboost version that the model was trained with. 
###Code print(model['model_info']['catboost_version_info']) ###Output Svn info: URL: svn+ssh://arcadia.yandex.ru/arc/trunk/arcadia Last Changed Rev: 5437739 Last Changed Author: dkvasov Last Changed Date: 2019-08-08T13:43:02.471284Z Other info: Build by: eermishkina Top src dir: /place/home/eermishkina/trunc/arcadia Top build dir: /home/eermishkina/.ya/build Hostname: su57.search.yandex.net Host information: Linux su57.search.yandex.net 4.4.88-42 #1 SMP Mon Sep 18 14:33:37 UTC 2017 x86_64 ###Markdown Features info Let's look now on the features_info value.It could contain $float\_features$, $categorical\_features$ and $ctrs$, which are lists of descriptions of some features. ###Code model['features_info'].keys() ###Output _____no_output_____ ###Markdown In our case (model without cateforical features) there are only one fields - $float\_features$. Every float feature is described in the following way:__flat_feature_index__ (int) - feature index in pool, zero-based indexation __feature_index__ (int) index among only float features, zero-based indexation. For example, in dataset that looks like \[float, categ, float\], the second float feature has indices $flat\_feature\_index = 2$ and $feature\_index = 1$. Because it is the 2 feature of all in 0 based indexation and 1 feature of numeric ones (here we exclude the caterorical feature).For a model without categorical features $feature\_index$ will be equal to $float\_feature\_index$ for every feature.__borders__ (list of all borders (or splits) used in the model for this particular float feature). Float feature values can be splits by $border$ value. All elements with feature value $> border$ go to the left subtree, and all elements with feature value ($<= border$) elements go to the right subtree.__has_nans__ (bool) This field shows if there were any nan values in the training dataset, which was used to train the model.__nan_value_treatment__ ('AsIs', 'AsTrue' or 'AsFalse') If the feature has had nan values in the training dataset, then there is an additional split that puts nans to the left and everything else to the right (if 'AsFalse') or an additional split that puts nans to the right and everything else to the left (if 'AsTrue').'AsIs' is internal default value for features without nan values. ###Code model['features_info']['float_features'][0] ###Output _____no_output_____ ###Markdown This shows that the first feature has a list of borders that were used in the model. And this feature had no nan values in train. Symmetric trees CatBoost uses so-called symmetric or oblivious trees. For each level of the tree CatBoost uses the same features to split learning instances into the left and the right partitions: on the first level tree is partitioned by first split into two parts, on the second level each subtree splits with second split and so on.In this case a tree of depth $k$ has exactly $2^k$ leaves and $k$ splits, each split on a subsequent layer.There are three types of splits: "FloatFeature", "OneHotFeature" and "OnlineCtr". A model without cateforical features contains only float feature slits. Now, let's look on how JSON model describes a single tree.A tree of depth $k$ is described by $2^k$ leaf_values, $2^k$ leaf_weights and $k$ splits. Let's take a look on the first tree of our model. 
###Code def dump_json(item): print(json.dumps(item, indent=2)) dump_json(model['oblivious_trees'][0]) # first_tree ###Output { "leaf_values": [ 0.022173912547853145, 0.017826086558078085, 0.011304347573415556, -0.02565217333967221, -0.02565217333967221, 0.07652173742004059, -0.016956521360122625, -0.019130434355010134, -0.02130434734989765, -0.019130434355010148, 0.02407969585645712, 0.027495255552409406, 0.0035863376807432745, -0.026869069608165402, -0.028292219481478875, 0.07398772840759553, -0.004233128739738201, -0.012975459832675437, -0.0286196312621424, -0.02815950857304045 ], "splits": [ { "split_index": 4, "float_feature_index": 16, "border": 10.407758712768555, "split_type": "FloatFeature" }, { "split_index": 3, "float_feature_index": 13, "border": 3.5, "split_type": "FloatFeature" } ], "leaf_weights": [ 123, 54, 512, 311 ] } ###Markdown The list of "leaf_values" describes the values in leaves. This is a tree with 4 leaves. It has depth 2, which means it has two different splits. The first split is used to split all the objects into left and right.And the second split is used twice, to split the left objects into two parts, and to split the right objects into two parts.The indices in the list can be represented using base-2 numeral system in the following way: 00, 01, 10, 11. Leaf 00 is the leaf where the 0-th split and the 1-st split are equal to False. Leaf 01 contains the objects where 0-th split is equal to False and 1-st feature is equal to True. And so on.The next part of the tree description is called "leaf_weights". This list represents sum of weights of training samples, that are in this leaf. Leaf indexation in this list is the same as in "leaf_values".The last part is "splits", and it is description of the two splits that are used in the tree of depth two.Each of the descriptions contains several key-values. Firstly, it contains internal CatBoost parameter $split\_index$. This is the only parameter that is used by catboost when loading the model, all other parts of in "splits" are ignored (they are duplicated in a different place), and are present here only to do the model easier to understand. Let's first describe the other fields. Split type "FloatFeature" means that it is so called 'float split'. Float split condition $float\_feature[float\_feature\_index] > border$ (see above in [Features info](features_info)) is decripted with "float_feature_index" and "border" accordingly. This description should enable you to analyze the model.But if you will want to change the model, you will have to change "split_index" in a right way.To to do that let's explain, how this feature is built. Look one more time at features info: ###Code feature = model['features_info']['float_features'][0] feature ###Output _____no_output_____ ###Markdown Each float split determines by feature index and border value, hence feature description specifies len(feature['borders']) splits. List all float splits with first feature from model['features_info'] with border value from features $borders$ list, with second feature and so on. This is the order, in which splits are enumerated in model. Splits numbering begins with 0. 
Build split list in this order ###Code split_list = [] for float_feature in model['features_info']['float_features']: if not float_feature['borders']: continue for border in float_feature['borders']: split_list.append( { 'split_index': len(split_list), 'float_feature_index': float_feature['feature_index'], 'border_id': border, 'split_type': 'FloatFeature', 'flat_feature_index': float_feature['flat_feature_index'] } ) ###Output _____no_output_____ ###Markdown Ensure, that splits in first tree and corresponding splits from obtained above split_list are identical ###Code first_tree = model['oblivious_trees'][0] first_tree['splits'] split_indexes = [x['split_index'] for x in first_tree['splits']] [split_list[index] for index in split_indexes] ###Output _____no_output_____ ###Markdown Multiclassification The only difference between classification or regression vs multiclassification is the leaves count in each tree. Model contains leaf values for each class, so tree depth of $k$ has $2^k$ leafs and in json model are stored $2^k \cdot classes\_count$ leaf values in this order: first $2^k$ values for first class, second $2^k$ values for second and so on. Leaf weights count is $2^k$ as they are the same for all classes. Look at first tree of multiclass model trained on [Iris](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset. ###Code # Get Iris dataset from sklearn import datasets iris = datasets.load_iris() # Train the model cls_multilclass = catboost.CatBoostClassifier(loss_function='MultiClass', depth=2, random_seed=0, verbose=False) cls_multilclass.fit(iris.data, iris.target) # Save model cls_multilclass.save_model( "multiclass_model.json", format="json", # pool=pool # is required for model with cat_features to obtain applicable model ) multilclass_model = json.load(open("multiclass_model.json", "r")) multilclass_model['oblivious_trees'][0] ###Output _____no_output_____ ###Markdown Truncate modelModel can be modified and applied. Truncate and apply model ###Code trees = model['oblivious_trees'][:] approx # = cls.predict(X[0:3], prediction_type="RawFormulaVal") model['oblivious_trees'] = trees[0:5] # use only first 5 trees json.dump(model, open("head_model.json", "w")) # Save modified model cls.load_model("head_model.json", "json") # load model cls.predict(X[0:3], prediction_type="RawFormulaVal") # apply model model['oblivious_trees'] = trees[5:] # drop first 5 trees json.dump(model, open("tail_model.json", "w")) # Save modified model cls.load_model("tail_model.json", "json") # load model cls.predict(X[0:3], prediction_type="RawFormulaVal") # apply model ###Output _____no_output_____
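###Markdown
Beyond truncation, the documented structure is enough to summarize a model straight from its JSON. The sketch below is a hedged example, assuming the `model.json` file saved earlier in this tutorial is still on disk (it is re-loaded here because the in-memory `model` dict was modified by the truncation cells above). It relies only on the fields described in this tutorial: `oblivious_trees`, each tree's `splits`, `leaf_values` and `leaf_weights`, and the per-feature `borders`.
###Code
import json

full_model = json.load(open("model.json", "r"))  # the original, untruncated model

# count candidate float splits the same way split_list was built above
n_borders = sum(len(f['borders'])
                for f in full_model['features_info']['float_features']
                if f['borders'])
print("trees:", len(full_model['oblivious_trees']), "candidate float splits:", n_borders)

for i, tree in enumerate(full_model['oblivious_trees']):
    depth = len(tree['splits'])                    # one split per level of an oblivious tree
    leaves = 2 ** depth                            # 2^depth leaves
    assert len(tree['leaf_weights']) == leaves     # one weight per leaf
    assert len(tree['leaf_values']) % leaves == 0  # leaf values are stored per class
    classes = len(tree['leaf_values']) // leaves
    print(f"tree {i}: depth={depth}, leaves per class={leaves}, classes={classes}, "
          f"total training weight={sum(tree['leaf_weights'])}")
###Output
 _____no_output_____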
breadth-first search.ipynb
###Markdown Breadth-first search The algorithm determines whether a path from A to B exists and, if one does, finds the shortest such path (in number of edges). The shortest-path search relies on an abstract data structure called a graph. Graph Graphs are implemented with hash tables. A graph models a set of connections and consists of nodes and edges. The search walks through connections level by level: first-level connections are preferred to second-level ones, and so on. A queue is used to check connections in the order they were added. Queue An abstract data structure whose elements are stored according to the FIFO principle. Two operations are supported: - add (PUSH) - remove (POP). In Python, a double-ended queue is created with deque() from the collections module ###Code from collections import deque dq = deque('ghi') print(dq) dq.append('a') dq.append('b') dq.append('c') print(dq) dq.pop() print(dq) dq.pop() print(dq) dq.pop() print(dq) ###Output deque(['g', 'h', 'i', 'a', 'b']) deque(['g', 'h', 'i', 'a']) deque(['g', 'h', 'i']) ###Markdown Implementing the algorithm The algorithm keeps an array of already-checked elements so that no element is processed twice. The algorithm runs while: - the target element has not been found; - the queue is not empty; ###Code
def search(name):
    search_queue = deque()
    search_queue += graph[name]
    searched = []  # this list tracks the people who have already been checked
    while search_queue:
        person = search_queue.popleft()
        if person not in searched:  # a person is checked only if they have not been checked before
            if person_is_seller(person):
                print(f'{person} is a mango seller!')
                return True
            else:
                search_queue += graph[person]  # enqueue this person's connections, not the start node's
                searched.append(person)  # mark the person as checked
    return False
###Output _____no_output_____
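###Markdown The function above assumes a `graph` hash table and a `person_is_seller` check that are not defined in this notebook. A minimal sketch of what they could look like (the names and the toy "mango seller" rule are illustrative, not part of the original notebook): ###Code
# a toy graph: each key maps to the list of first-level connections
graph = {
    "you": ["alice", "bob", "claire"],
    "bob": ["anuj", "peggy"],
    "alice": ["peggy"],
    "claire": ["thom", "jonny"],
    "anuj": [], "peggy": [], "thom": [], "jonny": [],
}

def person_is_seller(person):
    # toy rule: a seller is anyone whose name ends with 'm'
    return person.endswith('m')

search("you")  # explores level by level and finds 'thom'
###Output _____no_output_____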
Stock-Market-Analysis/fs_demo.ipynb
###Markdown S&P 500 vs 10 Year Treasury Rate ###Code import pandas as pd tables = pd.read_html('http://www.multpl.com/s-p-500-dividend-yield/table', header=0) tables2 = pd.read_html('http://www.multpl.com/10-year-treasury-rate/table/by-year', header=0) table = tables[0] table2 = tables2[0] table['Date'] = pd.to_datetime(table['Date']) table['Year'] = table['Date'].dt.year table2['Date'] = pd.to_datetime(table2['Date']) table2['Year'] = table2['Date'].dt.year table.head(5) table2.head() import matplotlib.pyplot as plt t = table.as_matrix()[1:11,2] s = table.as_matrix()[1:11,1] s2 = table2.as_matrix()[2:12,1] plt.subplot(2, 1, 1) plt.plot(t, s) plt.ylabel('S&P 500 Dividend Yield') plt.title('S&P 500 vs 10 Year Treasury Rate') plt.grid(False) plt.subplot(2, 1, 2) plt.plot(t, s2, color='orange') plt.xlabel('Time period') plt.ylabel('10 Year Treasury Rate') plt.grid(False) plt.show() import matplotlib.pyplot as plt t = table.as_matrix()[1:11,2] s = table.as_matrix()[1:11,1] s2 = table2.as_matrix()[2:12,1] plt.plot(t, s, label='S&P 500 Rate') plt.plot(t, s2, label='10 Yr Treasury') plt.legend(loc='best') plt.xlabel('Time period') plt.ylabel('S&P 500 Dividend Yield') plt.title('S&P 500 vs 10 Year Treasury Rate') plt.grid(False) plt.grid(False) plt.show() ###Output _____no_output_____ ###Markdown Check: https://www.tradingview.com/chart/DGS10/p4kYTyBy-10-Yr-Treasury-Rates-vs-S-P-500/ GDP by Country ###Code import pandas as pd tables = pd.read_html('http://statisticstimes.com/economy/countries-by-projected-gdp.php', header=1) table = tables[1] table.head(5) df1[['Country[['a','d']] country = table.as_matrix()[1:,0] gdp = table.as_matrix()[1:,1] rank = table.as_matrix()[1:,3] import pandas_datareader.data as web import datetime start = datetime.datetime(2010, 1, 1) end = datetime.datetime(2013, 1, 27) f = web.DataReader('F', 'google', start, end) f.ix['2010-01-04'] from pandas_datareader import data import fix_yahoo_finance as yf yf.pdr_override() from datetime import date, timedelta yesterday = date.today() - timedelta(1) now = date.today() tomorrow = date.today() + timedelta(1) symbol = 'AMZN' start_date = now.strftime("%Y-%m-%d") end_date = tomorrow.strftime("%Y-%m-%d") df = data.get_data_yahoo(symbol, start_date, end_date2) df.head() from datetime import date, timedelta yesterday = date.today() - timedelta(1) now = date.today() print (yesterday.strftime("%Y-%m-%d %H:%M")) print (now.strftime("%Y-%m-%d %H:%M")) import pandas as pd tables = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies', header=1) snp = tables[0] snp from pandas_datareader import data import numpy as np import fix_yahoo_finance as yf yf.pdr_override() from datetime import date, timedelta now = date.today() tomorrow = date.today() + timedelta(1) start_date = now.strftime("%Y-%m-%d") end_date = tomorrow.strftime("%Y-%m-%d") first_n_stocks = 5 symbol_list = snp.as_matrix()[0:first_n_stocks,0] symbol_name = snp.as_matrix()[0:first_n_stocks,1] open_ = np.zeros(first_n_stocks) close_ = np.zeros(first_n_stocks) vol_ = np.zeros(first_n_stocks) for i in range(first_n_stocks): symbol = symbol_list[i] df = data.get_data_yahoo(symbol, start_date, end_date) open_[i] = df.as_matrix()[0:1,0] close_[i] = df.as_matrix()[0:1,3] vol_[i] = df.as_matrix()[0:1,5] print(open_) # bar chart of opening prices of first 5 S&P stocks import matplotlib.pyplot as plt import numpy as np plt.subplot(1, 2, 1) plt.bar(symbol_name,open_,color='green') plt.xticks(rotation=90) plt.title('Opening prices USD') plt.subplot(1, 2, 2) 
plt.bar(symbol_name,close_,color='red') plt.xticks(rotation=90) plt.title('Closing prices USD') plt.show() symbol_list = snp.as_matrix()[1:11,0] symbol_list snp_tickers = snp['MMM'] snp_tickers range(5) open2=np.zeros(5) open2 df ###Output _____no_output_____
test/TensorFlow.ipynb
###Markdown What's this TensorFlow business?You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, TensorFlow (or PyTorch, if you switch over to that notebook) What is it?TensorFlow is a system for executing computational graphs over Tensor objects, with native support for performing backpropogation for its Variables. In it, we work with Tensors which are n-dimensional arrays analogous to the numpy ndarray. Why?* Our code will now run on GPUs! Much faster training. Writing your own modules to run on GPUs is beyond the scope of this class, unfortunately.* We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand. * We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :) * We want you to be exposed to the sort of deep learning code you might run into in academia or industry. How will I learn TensorFlow?TensorFlow has many excellent tutorials available, including those from [Google themselves](https://www.tensorflow.org/get_started/get_started).Otherwise, this notebook will walk you through much of what you need to do to train models in TensorFlow. See the end of the notebook for some links to helpful tutorials if you want to learn more or need further clarification on topics that aren't fully explained here. Load Datasets ###Code import tensorflow as tf import numpy as np import math import timeit import matplotlib.pyplot as plt %matplotlib inline from cs231n.data_utils import load_CIFAR10 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the two-layer neural net classifier. These are the same steps as we used for the SVM, but condensed to a single function. """ # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Subsample the data mask = range(num_training, num_training + num_validation) X_val = X_train[mask] y_val = y_train[mask] mask = range(num_training) X_train = X_train[mask] y_train = y_train[mask] mask = range(num_test) X_test = X_test[mask] y_test = y_test[mask] # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_val -= mean_image X_test -= mean_image return X_train, y_train, X_val, y_val, X_test, y_test # Invoke the above function to get our data. X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) ###Output _____no_output_____ ###Markdown Example Model Some useful utilities. 
Remember that our image data is initially N x H x W x C, where:* N is the number of datapoints* H is the height of each image in pixels* W is the height of each image in pixels* C is the number of channels (usually 3: R, G, B)This is the right way to represent the data when we are doing something like a 2D convolution, which needs spatial understanding of where the pixels are relative to each other. When we input image data into fully connected affine layers, however, we want each data example to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. The example model itselfThe first step to training your own model is defining its architecture.Here's an example of a convolutional neural network defined in TensorFlow -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. In that example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Hinge loss function, and the Adam optimizer being used. Make sure you understand why the parameters of the Linear layer are 5408 and 10. TensorFlow DetailsIn TensorFlow, much like in our previous notebooks, we'll first specifically initialize our variables, and then our network model. ###Code # clear old variables tf.reset_default_graph() # setup input (e.g. the data that changes every batch) # The first dim is None, and gets sets automatically based on batch size fed in X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) def simple_model(X,y): # define our weights (e.g. init_two_layer_convnet) # setup variables Wconv1 = tf.get_variable("Wconv1", shape=[7, 7, 3, 32]) bconv1 = tf.get_variable("bconv1", shape=[32]) W1 = tf.get_variable("W1", shape=[5408, 10]) b1 = tf.get_variable("b1", shape=[10]) # define our graph (e.g. two_layer_convnet) a1 = tf.nn.conv2d(X, Wconv1, strides=[1,2,2,1], padding='VALID') + bconv1 h1 = tf.nn.relu(a1) h1_flat = tf.reshape(h1,[-1,5408]) y_out = tf.matmul(h1_flat,W1) + b1 return y_out y_out = simple_model(X,y) # define our loss total_loss = tf.losses.hinge_loss(tf.one_hot(y,10),logits=y_out) mean_loss = tf.reduce_mean(total_loss) # define our optimizer optimizer = tf.train.AdamOptimizer(5e-4) # select optimizer and set learning rate train_step = optimizer.minimize(mean_loss) ###Output _____no_output_____ ###Markdown TensorFlow supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). * Layers, Activations, Loss functions : https://www.tensorflow.org/api_guides/python/nn* Optimizers: https://www.tensorflow.org/api_guides/python/trainOptimizers* BatchNorm: https://www.tensorflow.org/api_docs/python/tf/contrib/layers/batch_norm Training the model on one epochWhile we have defined a graph of operations above, in order to execute TensorFlow Graphs, by feeding them input data and computing the results, we first need to create a `tf.Session` object. A session encapsulates the control and state of the TensorFlow runtime. 
For more information, see the TensorFlow [Getting started](https://www.tensorflow.org/get_started/get_started) guide.Optionally we can also specify a device context such as `/cpu:0` or `/gpu:0`. For documentation on this behavior see [this TensorFlow guide](https://www.tensorflow.org/tutorials/using_gpu)You should see a validation loss of around 0.4 to 0.6 and an accuracy of 0.30 to 0.35 below ###Code def run_model(session, predict, loss_val, Xd, yd, epochs=1, batch_size=64, print_every=100, training=None, plot_losses=False): # have tensorflow compute accuracy correct_prediction = tf.equal(tf.argmax(predict,1), y) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # shuffle indicies train_indicies = np.arange(Xd.shape[0]) np.random.shuffle(train_indicies) training_now = training is not None # setting up variables we want to compute (and optimizing) # if we have a training function, add that to things we compute variables = [mean_loss,correct_prediction,accuracy] if training_now: variables[-1] = training # counter iter_cnt = 0 for e in range(epochs): # keep track of losses and accuracy correct = 0 losses = [] # make sure we iterate over the dataset once for i in range(int(math.ceil(Xd.shape[0]/batch_size))): # generate indicies for the batch start_idx = (i*batch_size)%X_train.shape[0] idx = train_indicies[start_idx:start_idx+batch_size] # create a feed dictionary for this batch feed_dict = {X: Xd[idx,:], y: yd[idx], is_training: training_now } # get batch size actual_batch_size = yd[i:i+batch_size].shape[0] # have tensorflow compute loss and correct predictions # and (if given) perform a training step loss, corr, _ = session.run(variables,feed_dict=feed_dict) # aggregate performance stats losses.append(loss*actual_batch_size) correct += np.sum(corr) # print every now and then if training_now and (iter_cnt % print_every) == 0: print("Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}"\ .format(iter_cnt,loss,np.sum(corr)/actual_batch_size)) iter_cnt += 1 total_correct = correct/Xd.shape[0] total_loss = np.sum(losses)/Xd.shape[0] print("Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}"\ .format(total_loss,total_correct,e+1)) if plot_losses: plt.plot(losses) plt.grid(True) plt.title('Epoch {} Loss'.format(e+1)) plt.xlabel('minibatch number') plt.ylabel('minibatch loss') plt.show() return total_loss,total_correct with tf.Session() as sess: with tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0" sess.run(tf.global_variables_initializer()) print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step,True) print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) ###Output _____no_output_____ ###Markdown Training a specific modelIn this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the TensorFlow documentation and configuring your own model. Using the code provided above as guidance, and using the following TensorFlow documentation, specify a model with the following architecture:* 7x7 Convolutional Layer with 32 filters and stride of 1* ReLU Activation Layer* Spatial Batch Normalization Layer (trainable parameters, with scale and centering)* 2x2 Max Pooling layer with a stride of 2* Affine layer with 1024 output units* ReLU Activation Layer* Affine layer from 1024 input units to 10 outputs ###Code # clear old variables tf.reset_default_graph() # define our input (e.g. 
the data that changes every batch) # The first dim is None, and gets sets automatically based on batch size fed in X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) # define model def complex_model(X,y,is_training): pass pass y_out = complex_model(X,y,is_training) ###Output _____no_output_____ ###Markdown To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 64 x 10, since our batches have size 64 and the output of the final affine layer should be 10, corresponding to our 10 classes): ###Code # Now we're going to feed a random batch into the model # and make sure the output is the right size x = np.random.randn(64, 32, 32,3) with tf.Session() as sess: with tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0" tf.global_variables_initializer().run() ans = sess.run(y_out,feed_dict={X:x,is_training:True}) %timeit sess.run(y_out,feed_dict={X:x,is_training:True}) print(ans.shape) print(np.array_equal(ans.shape, np.array([64, 10]))) ###Output _____no_output_____ ###Markdown You should see the following from the run above `(64, 10)``True` GPU!Now, we're going to try and start the model under the GPU device, the rest of the code stays unchanged and all our variables and operations will be computed using accelerated code paths. However, if there is no GPU, we get a Python exception and have to rebuild our graph. On a dual-core CPU, you might see around 50-80ms/batch running the above, while the Google Cloud GPUs (run below) should be around 2-5ms/batch. ###Code try: with tf.Session() as sess: with tf.device("/gpu:0") as dev: #"/cpu:0" or "/gpu:0" tf.global_variables_initializer().run() ans = sess.run(y_out,feed_dict={X:x,is_training:True}) %timeit sess.run(y_out,feed_dict={X:x,is_training:True}) except tf.errors.InvalidArgumentError: print("no gpu found, please use Google Cloud if you want GPU acceleration") # rebuild the graph # trying to start a GPU throws an exception # and also trashes the original graph tf.reset_default_graph() X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) y_out = complex_model(X,y,is_training) ###Output _____no_output_____ ###Markdown You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use GPU devices. However, with TensorFlow, the default device is a GPU if one is available, and a CPU otherwise, so we can skip the device specification from now on. Train the model.Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the complex_model you created provided above).Make sure you understand how each TensorFlow function used below corresponds to what you implemented in your custom neural network implementation.First, set up an **RMSprop optimizer** (using a 1e-3 learning rate) and a **cross-entropy loss** function. 
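One possible way to set these up, as a rough sketch rather than the intended solution (it assumes `y_out` is produced by your `complex_model` above), uses `tf.nn.sparse_softmax_cross_entropy_with_logits` and `tf.train.RMSPropOptimizer`: ###Code
# a sketch: cross-entropy loss and RMSProp with a 1e-3 learning rate
total_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=y_out)
mean_loss = tf.reduce_mean(total_loss)
optimizer = tf.train.RMSPropOptimizer(1e-3)
###Output _____no_output_____ ###Markdown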
See the TensorFlow documentation for more information* Layers, Activations, Loss functions : https://www.tensorflow.org/api_guides/python/nn* Optimizers: https://www.tensorflow.org/api_guides/python/trainOptimizers ###Code # Inputs # y_out: is what your model computes # y: is your TensorFlow variable with label information # Outputs # mean_loss: a TensorFlow variable (scalar) with numerical loss # optimizer: a TensorFlow optimizer # This should be ~3 lines of code! mean_loss = None optimizer = None pass train_step = optimizer.minimize(mean_loss) ###Output _____no_output_____ ###Markdown Train the modelBelow we'll create a session and train the model over one epoch. You should see a loss of 3.0 - 5.0 and an accuracy of 0.2 to 0.3. There will be some variation due to random seeds and differences in initialization ###Code sess = tf.Session() sess.run(tf.global_variables_initializer()) print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step) ###Output _____no_output_____ ###Markdown Check the accuracy of the model.Let's see the train and test code in action -- feel free to use these methods when evaluating the models you develop below. You should see a loss of 1.5 to 2.0 with an accuracy of 0.3 to 0.4. ###Code print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) ###Output _____no_output_____ ###Markdown Train a _great_ model on CIFAR-10!Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves ** >= 70% accuracy on the validation set** of CIFAR-10. You can use the `run_model` function from above. Things you should try:- **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient- **Number of filters**: Above we used 32 filters. Do more or fewer do better?- **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?- **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?- **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include: - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM] - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]- **Use TensorFlow Scope**: Use TensorFlow scope and/or [tf.layers](https://www.tensorflow.org/api_docs/python/tf/layers) to make it easier to write deeper networks. See [this tutorial](https://www.tensorflow.org/tutorials/layers) for making how to use `tf.layers`. - **Use Learning Rate Decay**: [As the notes point out](http://cs231n.github.io/neural-networks-3/anneal), decaying the learning rate might help the model converge. Feel free to decay every epoch, when loss doesn't change over an entire epoch, or any other heuristic you find appropriate. See the [Tensorflow documentation](https://www.tensorflow.org/versions/master/api_guides/python/trainDecaying_the_learning_rate) for learning rate decay.- **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter), which is then reshaped into a (Filter) vector. 
This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).- **Regularization**: Add l2 weight regularization, or perhaps use [Dropout as in the TensorFlow MNIST tutorial](https://www.tensorflow.org/get_started/mnist/pros) Tips for trainingFor each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind:- If the parameters are working well, you should see improvement within a few hundred iterations- Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.- Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.- You should use the validation set for hyperparameter search, and we'll save the test set for evaluating your architecture on the best parameters as selected by the validation set. Going above and beyondIf you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try for extra credit.- Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta.- Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.- Model ensembles- Data augmentation- New Architectures - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output. - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together. - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below. What we expectAt the very least, you should be able to train a ConvNet that gets at **>= 70% accuracy on the validation set**. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches.You should use the space below to experiment and train your network. The final cell in this notebook should contain the training and validation set accuracies for your final trained network.Have fun and happy training! 
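As a concrete starting point for the cell below, here is a minimal sketch of a [conv-relu-pool] -> [affine] -> [softmax] style network written with `tf.layers` (the layer sizes are arbitrary choices for illustration, not a recommended architecture): ###Code
# a sketch of a small conv-relu-pool -> affine network using tf.layers
def sketch_model(X, y, is_training):
    conv1 = tf.layers.conv2d(X, filters=32, kernel_size=3, padding='same', activation=tf.nn.relu)
    # note: with batch_normalization you also need to run tf.GraphKeys.UPDATE_OPS alongside the train step
    bn1 = tf.layers.batch_normalization(conv1, training=is_training)
    pool1 = tf.layers.max_pooling2d(bn1, pool_size=2, strides=2)  # 32x32x32 -> 16x16x32
    flat = tf.reshape(pool1, [-1, 16 * 16 * 32])
    fc1 = tf.layers.dense(flat, 1024, activation=tf.nn.relu)
    return tf.layers.dense(fc1, 10)  # class scores
###Output _____no_output_____ ###Markdown Swapping a definition like `sketch_model` into the cell below, or extending it with more conv blocks, is one way to start the hyperparameter search described above.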
###Code # Feel free to play with this cell def my_model(X,y,is_training): pass pass tf.reset_default_graph() X = tf.placeholder(tf.float32, [None, 32, 32, 3]) y = tf.placeholder(tf.int64, [None]) is_training = tf.placeholder(tf.bool) y_out = my_model(X,y,is_training) mean_loss = None optimizer = None train_step = optimizer.minimize(mean_loss) pass # Feel free to play with this cell # This default code creates a session # and trains your model for 10 epochs # then prints the validation set accuracy sess = tf.Session() sess.run(tf.global_variables_initializer()) print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,10,64,100,train_step,True) print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) # Test your model here, and make sure # the output of this cell is the accuracy # of your best model on the training and val sets # We're looking for >= 70% accuracy on Validation print('Training') run_model(sess,y_out,mean_loss,X_train,y_train,1,64) print('Validation') run_model(sess,y_out,mean_loss,X_val,y_val,1,64) ###Output _____no_output_____ ###Markdown Describe what you did hereIn this cell you should also write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network _Tell us here_ Test Set - Do this only onceNow that we've gotten a result that we're happy with, we test our final model on the test set. This would be the score we would achieve on a competition. Think about how this compares to your validation set accuracy. ###Code print('Test') run_model(sess,y_out,mean_loss,X_test,y_test,1,64) ###Output _____no_output_____
DATASETcovid.ipynb
###Markdown ###Code # import libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df = pd.read_csv(r'covid.csv') df # Let's convert male to 0 and female to 1: df['SESSO'].replace(to_replace=['M','F'], value=[0,1],inplace=True) df.head() df['Linfociti'] = pd.to_numeric(df['Linfociti'], errors='coerce') data= pd.DataFrame(df) data.to_csv("covid.csv", index=False, header=False) df= pd.read_csv('covid.csv') df ####### 1. KNN IMPUTER ######################## from fancyimpute import KNN df1= pd.read_csv('covid.csv') df1 knn_imputer = KNN(10) # use the 10 nearest rows which have a feature to fill in each df1= knn_imputer.fit_transform(df1) df1 data = pd.DataFrame(df1) data.to_csv("KNN_dataset.csv") df1 = pd.read_csv(r'KNN_dataset.csv') df1 ########## 2. Iterative Imputer as MICE IMPUTER ############# from fancyimpute import IterativeImputer dataset= pd.read_csv('covid.csv') mice_impute = IterativeImputer() df2 = mice_impute.fit_transform(dataset) df2 data = pd.DataFrame(df2) data.to_csv("MICE_dataset.csv") df2= pd.read_csv('MICE_dataset.csv') df2 ############ 3. MULTIPLE IMPUTER ############## !pip install autoimpute from autoimpute.imputations import MultipleImputer multiImputer= MultipleImputer() df3= multiImputer.fit_transform(dataset) df3 data = pd.DataFrame(df3) data.to_csv("MULTI_dataset.csv") ##################### 4. NuclearNormMinimization ###### # matrix completion using convex optimization to find a low-rank solution # that still matches the observed values. Slow! from fancyimpute import KNN, NuclearNormMinimization, SoftImpute, BiScaler df4= pd.read_csv(r'covid.csv') df4 = NuclearNormMinimization().fit_transform(df4) df4 data= pd.DataFrame(df4) data.to_csv('Nuclear_dataset.csv',index=False,header = False) df4= pd.read_csv('Nuclear_dataset.csv') df4 ########## 4b. Nuclear with the dataset variable ##### df4 = NuclearNormMinimization().fit_transform(dataset) df4 data=pd.DataFrame(df4) data.to_csv('Nuclear_datasetprevvariable.csv') df4= pd.read_csv('Nuclear_datasetprevvariable.csv') df4 ############## 5. SOFT IMPUTE ######################### from fancyimpute import SoftImpute, BiScaler #X_incomplete_normalized = BiScaler().fit_transform(dataset) df5 = SoftImpute().fit_transform(dataset) df5 data= pd.DataFrame(df5) data.to_csv('Soft_dataset.csv',index=False,header=False) df5= pd.read_csv(r'Soft_dataset.csv') df5 ######### 6. ###Output _____no_output_____
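###Markdown As a quick sanity check on the imputed tables written above (a sketch; it assumes the CSV files produced in the cells above and only uses pandas), one can confirm that no missing values remain and compare summary statistics across methods: ###Code
import pandas as pd

# compare a few of the imputed tables produced above
for fname in ["KNN_dataset.csv", "MICE_dataset.csv", "Soft_dataset.csv"]:
    imputed = pd.read_csv(fname)
    # every imputation method should leave no missing values behind
    print(fname, "remaining missing values:", imputed.isna().sum().sum())
    print(imputed.describe().loc[["mean", "std"]].round(2))
###Output _____no_output_____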
notebooks/completeness_and_contamination.ipynb
###Markdown This demonstrates all the steps in my candidate selection before conducting visual inspection ###Code import numpy as np import splat import wisps.data_analysis as wispd from wisps.data_analysis import selection_criteria as sel_crt import shapey import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from scipy import stats import wisps import matplotlib as mpl from tqdm import tqdm import random import matplotlib.pyplot as plt import matplotlib as mpl %matplotlib inline #par = plt.hist2d(np.arange(20), np.arange(1000), norm=mpl.colors.LogNorm(), cmap=mpl.cm.gray) #some functions def get_indices(x): if x is None : return pd.Series({}) else: return pd.concat([pd.Series(x.indices), pd.Series(x.mags), pd.Series(x.snr)]) def get_spt(x): if x is None: return np.nan else: return x.spectral_type[0] #change f-test definition def f_test_fx(x, df1, df2): return stats.f.cdf(x, df1, df2) def box_parameters(idx, spt_range): bs=idx.shapes b=[x for x in bs if x.shape_name==spt_range][0] print ('{} {} m: {} b: {} s:{}, comp : {}, cont: {}'.format(spt_range, idx, round(b.coeffs[0], 2), round(b.coeffs[1], 2), round(b.scatter, 2), round(idx.completeness[spt_range], 2), round(idx.contamination[spt_range], 3))) cands=pd.read_pickle(wisps.LIBRARIES+'/new_real_ucds.pkl') #use the same columns for all data sets alldata=wisps.get_big_file() spex=wisps.Annotator.reformat_table(wisps.datasets['spex']) cands['line_chi']=cands.spectra.apply(lambda x : x.line_chi) cands['spex_chi']=cands.spectra.apply(lambda x: x.spex_chi) cands['f_test']=cands.spectra.apply(lambda x: x.f_test) spex_df=wisps.Annotator.reformat_table(wisps.datasets['spex']).reset_index(drop=True) manj=wisps.Annotator.reformat_table(wisps.datasets['manjavacas']).reset_index(drop=True) schn=wisps.Annotator.reformat_table(wisps.datasets['schneider']).reset_index(drop=True) ydwarfs=(manj[manj['spt'].apply(wisps.make_spt_number)>38].append(schn)).reset_index(drop=True) spex_df['spt']=np.vstack(spex_df['spt'].values)[:,0] manj['spt']=np.vstack(manj['spt'].values)[:,0] schn['spt']=np.vstack(schn['spt'].values)[:,0] cands.grism_id=cands.grism_id.apply(lambda x: x.lower()) cands['spt']=np.vstack(cands['spt'].values) #add x values spex['x']=spex.spex_chi/spex.line_chi alldata['x']=alldata.spex_chi/alldata.line_chi cands['x']=cands.spex_chi/cands.line_chi spex['f_test']=f_test_fx(spex.x.values, spex.dof.values-1, spex.dof.values-2) alldata['f_test']=f_test_fx(alldata.x.values, alldata.nG141.values-1, alldata.nG141.values-2) alldata=alldata.sort_values('x') spex=spex.sort_values('x') cands=cands.sort_values('x') alldata['datalabel']='alldata' spex['datalabel']='spex' cands['datalabel']='ucds' combined_ftest_df=pd.concat([cands, spex, alldata[(alldata.snr1>=3.) & (alldata.mstar_flag !=0)]]) #stats.f.cdf(.85564068, 108-1, 108+2) #list(spex[['x', 'dof']][spex.f_test.values >0.2].values) spex=spex[np.vstack(spex.spt.values)[:,0] >=17.] dt=alldata[(alldata.f_test<.005) & (alldata.snr1>=3.) 
& (alldata.mstar_flag !=0)].reset_index(drop=True) dt['spt']=(dt['spt']).apply(wisps.make_spt_number).apply(float) dt=wisps.Annotator.reformat_table(dt).reset_index(drop=True) len(dt) len(spex[spex.f_test.values < 0.005])/len(spex) # #wisps.Annotator.reformat_table(wisps.datasets['subd']) #get criteria ##only run this if new data crts=sel_crt.save_criteria(conts=dt) crts=sel_crt.crts_from_file() contamns=pd.DataFrame([ x.contamination for x in crts.values()]) compls=pd.DataFrame([ x.completeness for x in crts.values()]) contamns.index=[x for x in crts.keys()] compls.index=[x for x in crts.keys()] #%%capture ''' contamns.style.apply(lambda x: ["background-color: #7FDBFF" if (i >= 0 and (v < 0.1 and v > 0. )) else "" for i, v in enumerate(x)], axis = 1) ''' compls.describe() #crts def get_toplowest_contam(subtype): return {subtype: contamns.sort_values(k).index[0]} spex['spt']=np.vstack(spex.spt.values)[:,0] from tqdm import tqdm def multiplte_indices_selection(k): stat_dict={} indices= [crts[index_name] for index_name in to_use[k]] #make selections for each index separately cand_bools=[] spex_bools=[] trash_bools=[] for idx in indices: xkey=idx.xkey ykey=idx.ykey bx=[x for x in idx.shapes if x.shape_name==k][0] _, cbools=bx._select(np.array([cands[xkey].values,cands[ykey].values])) _, spbools=bx._select(np.array([spex[xkey].values,spex[ykey].values])) _, trbools=bx._select(np.array([dt[xkey].values, dt[ykey].values])) cand_bools.append(cbools) spex_bools.append(spbools) trash_bools.append(trbools) cands_in_that_class_bool=cands.spt.apply(lambda x: wisps.is_in_that_classification(x, k)) spex_in_that_class_bool=spex.spt.apply(lambda x: wisps.is_in_that_classification(x, k)) cand_bools.append(cands_in_that_class_bool) spex_bools.append(spex_in_that_class_bool) cands_selected=cands[np.logical_and.reduce(cand_bools, axis=0)] spexs_selected=spex[np.logical_and.reduce(spex_bools, axis=0)] print (' {} selected {} out of {} UCDS'.format(k, len( cands_selected), len(cands[cands_in_that_class_bool]))) print ('overall completeness {}'.format( len(spexs_selected)/len(spex[spex_in_that_class_bool]))) print ('total contaminants {}'.format(len(dt[np.logical_and.reduce(trash_bools)]))) print ('-------------------------------------------') #for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs']: # multiplte_indices_selection(k) contamns.idxmin(axis=0) from collections import OrderedDict ordered=[(k, contamns.idxmin(axis=0)[k]) for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']] to_use= [ (y, x) for x, y in ordered] to_use import pickle #save the random forest output_file=wisps.OUTPUT_FILES+'/best_indices_to_use.pkl' with open(output_file, 'wb') as file: pickle.dump(to_use,file) fp={} cands=cands[cands.grism_id.isin(dt.grism_id)] #RENAME BOXES box_renames={'M7-L0':'M7-M9', 'L0-L5': 'L0-L4', 'L5-T0': 'L5-T0', 'T0-T5': 'T0-T4', 'T5-T9': 'T5-T9', 'Y dwarfs':'Y dwarfs', 'subdwarfs':'subdwarfs'} def plot_index_box(index_name, box_name, ax): #get the index and the box idx=crts[index_name] bx=[x for x in idx.shapes if x.shape_name==box_name][0] xkey=idx.xkey ykey=idx.ykey to_use_df=spex_df if box_name.lower()=='y dwarfs': to_use_df=ydwarfs if box_name.lower()=='subdwarfs': to_use_df=wisps.Annotator.reformat_table(idx.subdwarfs) to_use_df['spt']=17 print ('templates selected {}'.format(len(to_use_df))) print (bx.xrange, bx.yrange) xlim=[ bx.xrange[0]-abs(np.ptp(bx.xrange)), bx.xrange[1]+abs(np.ptp(bx.xrange))] ylim=[ bx.yrange[0]-abs(np.ptp(bx.yrange)), 
bx.yrange[1]+abs(np.ptp(bx.yrange))] #remove nans from background bckgrd= dt[[xkey, ykey]].replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna() # ax.scatter(bckgrd[xkey], bckgrd[ykey], s=1, c='#111111', label='Background') bckgrd=bckgrd[(bckgrd[xkey].between(xlim[0], xlim[1])) & (bckgrd[ykey].between(ylim[0], ylim[1]))] h=ax.hist2d(bckgrd[xkey].apply(float).values, bckgrd[ykey].apply(float).values, \ cmap='binary', norm=mpl.colors.LogNorm( vmin=10, vmax=1000), bins=15) cands_slctd, cands_bools=bx._select(np.array([cands[xkey].values,cands[ykey].values])) trash_slctd, trsh_bools=bx._select(np.array([dt[xkey].values, dt[ykey].values])) #simul_slctd, simul_bools=bx._select(np.array([simulated_data[xkey].values, simulated_data[ykey].values])) print ('real selected', len(cands_slctd[0]), len((cands))) cands_in_that_class_bool=(cands).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name)) spexs_slctd_in_that_class_bool= (to_use_df).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name)) #simulated_in_that_class_bool=(simulated_data[simul_bools]).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name)) if box_name.lower()=='subdwarfs': spexs_slctd_in_that_class_bool=np.ones(len(to_use_df), dtype=bool) cands_in_that_class=np.array([cands_slctd[0], \ cands_slctd[1]]) #simulated_in_that_class= np.array([simul_slctd[0][simulated_in_that_class_bool], simul_slctd[1][simulated_in_that_class_bool]]) spexs_slctd_in_that_class=np.array([to_use_df[xkey][spexs_slctd_in_that_class_bool], to_use_df[ykey][spexs_slctd_in_that_class_bool]]) #ax.scatter( simulated_in_that_class[0], simulated_in_that_class[1], facecolors='none', s=10, # edgecolors='#001f3f', label='simulated') ax.scatter(spexs_slctd_in_that_class[0], spexs_slctd_in_that_class[1], facecolors='none',\ edgecolors='#0074D9', label='Templates', s=50.) #ax.scatter(cands[xkey], cands[ykey], marker='x', facecolors='#FF851B', s=40., alpha=0.5) ax.scatter( cands_in_that_class[0], cands_in_that_class[1], marker ='+', s=150., alpha=1., facecolors='#FF851B', label='Discovered UCDs') ax.scatter(cands[xkey].values, cands[ykey].values, marker='+', s=150., alpha=0.3, facecolors='#FF851B') bx.color='None' bx.alpha=1. bx.linewidth=3 bx.linestyle='-' bx.edgecolor='#0074D9' bx.plot(ax=ax, only_shape=True, highlight=False) #cb = plt.colorbar(h[3], ax=ax, orientation='horizontal') #cb.set_label('Counts in bin', fontsize=16) plt.tight_layout() ax.set_xlabel(r'$'+str(idx.name.split(' ')[0])+'$', fontsize=14) ax.set_ylabel(r'$'+str(idx.name.split(' ')[1])+'$', fontsize=14) ax.set_title(box_renames[box_name], fontsize=18) xbuffer=np.nanstd(to_use_df[[xkey,ykey]]) ax.minorticks_on() if (trash_slctd.shape[1])==0: fprate=0.0 else: fprate=(trash_slctd.shape[1]- cands_slctd.shape[1])/trash_slctd.shape[1] if box_name.lower()=='subdwarfs': fprate=1. 
fp[box_name]= fprate ax.set_xlim(xlim) ax.set_ylim(ylim) plt.tight_layout() #print (' {} background selected {}'.format(box_name, len(bx.select( bckgrd)))) return {str(box_name): bx} to_use ###Output _____no_output_____ ###Markdown cands ###Code idx=crts[to_use[1][0]] len(idx.subdwarfs), import matplotlib fig, ax=plt.subplots(nrows=3, ncols=3, figsize=(12, 14)) bxs=[] for idx, k in enumerate(to_use): print (idx, k) b=plot_index_box( k[0], k[1], np.concatenate(ax)[idx]) bxs.append(b) plt.tight_layout() cax = fig.add_axes([0.5, 0.1, .3, 0.03]) norm= mpl.colors.LogNorm( vmin=10, vmax=1000) mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='binary')# vmin=10, vmax=5000) cbar=plt.colorbar(mp, cax=cax, orientation='horizontal') cbar.ax.set_xlabel(r'Number of Contaminants', fontsize=18) fig.delaxes(np.concatenate(ax)[-1]) fig.delaxes(np.concatenate(ax)[-2]) np.concatenate(ax)[-4].set_title(r'$\geq$ T9 ', fontsize=18) #subdindx_index_crt=crts['H_2O-1/J-Cont H_2O-2/H_2O-1'] #subdrfs=wisps.Annotator.reformat_table(dummy_index_crt.subdwarfs) #tpls=wisps.Annotator.reformat_table(spex_df[spex_df.metallicity_class.isna()]) #a=np.concatenate(ax)[-1] #tpls=tpls[tpls.spt>16] #a.scatter(dt[subdindx_index_crt.xkey], dt[subdindx_index_crt.ykey], s=1., c='#111111', alpha=0.1) #a.scatter(tpls[subdindx_index_crt.xkey], tpls[subdindx_index_crt.ykey], marker='+', facecolors='#0074D9', label='SpeX', s=5.) #a.scatter(subdrfs[subdindx_index_crt.xkey], subdrfs[subdindx_index_crt.ykey], marker='+', facecolors='#2ECC40', label='SpeX', s=30.) #a.set_xlim([0., 1.35]) #a.set_ylim([0., 1.25]) #a.set_title('subdwarfs', fontsize=18) #a.set_xlabel(r'$'+str(subdindx_index_crt.name.split(' ')[0])+'$', fontsize=15) #a.set_ylabel(r'$'+str(subdindx_index_crt.name.split(' ')[1])+'$', fontsize=15) np.concatenate(ax)[-3].legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig(wisps.OUTPUT_FIGURES+'/index_index_plots.pdf', bbox_inches='tight', rasterized=True, dpi=150) to_use #.grism_id.to_csv('/users/caganze/desktop/true_brown_dwarfs.csv') bx_dict={} for b in bxs: bx_dict.update(b) #invert to use inv_to_use = {v: k for k, v in to_use} ncandidates=[] for spt_range in bx_dict.keys(): idx_name=inv_to_use[spt_range] idx=crts[idx_name] s, bools=(bx_dict[spt_range])._select(np.array([dt[idx.xkey].values, dt[idx.ykey].values])) ncandidates.append(dt[bools]) candsss=(pd.concat(ncandidates).drop_duplicates(subset='grism_id')) cands.grism_id=cands.grism_id.apply(lambda x: x.lower().strip()) good_indices=[crts[x] for x in inv_to_use.values()] len(candsss), len(candsss[candsss.grism_id.isin(cands.grism_id.apply(lambda x: x.lower().strip())) & (candsss.spt.apply(wisps.make_spt_number)>16)]) len(candsss.drop_duplicates('grism_id'))/len(alldata) len(candsss[candsss.grism_id.isin(cands.grism_id) & (candsss.spt.apply(wisps.make_spt_number).between(35, 40))]) len(candsss), len(dt), len(alldata[alldata.mstar_flag !=0]) len(dt)/len(alldata) candsss.to_pickle(wisps.OUTPUT_FILES+'/selected_by_indices.pkl') #print out table def round_tuple(tpl, n=2): return round(tpl[0], n), round(tpl[1],n) for index, k in to_use: spt_range=k sindex=crts[index] bs=sindex.shapes bs=[x for x in bs if x.shape_name==spt_range] bx=bs[0] print (" {} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\ ".format(spt_range,sindex.xkey, sindex.ykey, round_tuple(bx.vertices[0]), round_tuple(bx.vertices[1]) , round_tuple(bx.vertices[2]), round_tuple(bx.vertices[3]), round(sindex.completeness[spt_range], 2), round(sindex.contamination[spt_range], 7), round(fp[spt_range],6))) 
len(candsss), len(dt) #ghjk stars= alldata[alldata.mstar_flag !=0] cands_dff=(cands[np.logical_and(cands['snr1'] >=3., cands['spt'] >=17)]).sort_values('spt') spex_df=spex_df.sort_values('spt') star_snr=stars[['snr1', 'snr2', 'snr3', 'snr4']].apply(np.log10).dropna() star_snr=(star_snr[star_snr.snr1.between(-1, 4) & star_snr.snr3.between(-1, 4) & star_snr.snr4.between(-1, 4)]).reset_index(drop=True) fig, (ax, ax1)=plt.subplots(ncols=2, figsize=(12, 6)) h=ax.hist2d(star_snr['snr1'], star_snr['snr3'], cmap='binary',\ bins=10, label='Point Sources', norm=mpl.colors.LogNorm( vmin=10, vmax=1000)) #ax.scatter(star_snr['snr1'], star_snr['snr3'], c='#111111', s=1, alpha=0.1) cb = plt.colorbar(h[3], ax=ax, orientation='horizontal') cb.set_label('Counts in bin', fontsize=16) plt.tight_layout() #ax.scatter(star_snr['snr1'], star_snr['snr4'], s=1., c='k', alpha=0.1, # label='3D-HST or WISP') ax.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr3'].apply(np.log10), s=10, c=spex_df.spt, cmap='coolwarm', marker='o', alpha=0.1, vmin=15, vmax=40) ax.scatter(spex_df['snr1'].apply(np.log10)[0], spex_df['snr3'].apply(np.log10)[0], s=10, c=spex_df.spt[0], cmap='coolwarm', label='Templates', marker='o', alpha=1., vmin=15, vmax=40) ax.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr3'].apply(np.log10), c=cands_dff['spt'], s=40, marker='*', cmap='coolwarm', label='UCDs' , vmin=15, vmax=40) ax.set_xlim([-0.5, 4]) ax.set_ylim([-0.5, 4]) ax.set_xlabel('Log J-SNR', fontsize=18) ax.set_ylabel('Log H-SNR', fontsize=18) ax.legend(fontsize=18, loc='upper left') ax.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--') ax.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--') #ax1.scatter(stars['snr1'].apply(np.log10), stars['snr4'].apply(np.log10), s=1., c='k', alpha=0.1, # label='3D-HST or WISP') #ax1.scatter(star_snr['snr1'], star_snr['snr4'], c='#111111', s=1, alpha=0.1) h1=ax1.hist2d(star_snr['snr1'], star_snr['snr4'], cmap='binary', bins=10, label='Point Sources', \ norm=mpl.colors.LogNorm( vmin=10, vmax=1000)) mp=ax1.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr4'].apply(np.log10), s=10, c=spex_df.spt, cmap='coolwarm', label='Templates', marker='o', alpha=0.1, vmin=15, vmax=40) ax1.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr4'].apply(np.log10), c=cands_dff['spt'], s=40, marker='*', cmap='coolwarm', label='UCDs', vmin=15, vmax=40) ax1.set_xlim([-0.5, 4]) ax1.set_ylim([-0.5, 4]) ax1.set_xlabel(' Log J-SNR', fontsize=18) ax1.set_ylabel('Log MEDIAN-SNR', fontsize=18) #ax.legend(fontsize=18) ax1.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--') ax1.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--') cb1 = plt.colorbar(h1[3], ax=ax1, orientation='horizontal') cb1.set_label('Counts in bin', fontsize=16) #plt.tight_layout() import matplotlib cax = fig.add_axes([1.01, 0.21, .03, 0.7]) norm= matplotlib.colors.Normalize(vmin=15,vmax=40) mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='coolwarm') cbar=plt.colorbar(mp, cax=cax, orientation='vertical') cbar.ax.set_ylabel(r'Spectral Type', fontsize=18) ax.minorticks_on() ax1.minorticks_on() cbar.ax.set_yticks([ 17, 20, 25, 30, 35, 40]) cbar.ax.set_yticklabels(['M5', 'L0', 'L5', 'T0', 'T5', 'Y0']) plt.tight_layout() plt.savefig(wisps.OUTPUT_FIGURES+'/snr_cutplots.pdf', \ bbox_inches='tight',rasterized=True, dpi=100) #import wisps big=wisps.get_big_file() bigsnr=big[big.snr1>=3.] 
# fig, ax=plt.subplots(figsize=(10, 6)) h=ax.hist(big.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linestyle=':', label='All', log=True, linewidth=3) h=ax.hist(stars.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, label='Point Sources', linestyle='--', log=True) h=ax.hist(stars[stars.snr1>3].snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, label='Selected', log=True) #h=ax.hist(bigsnr.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, log=True) ax.minorticks_on() plt.xlabel('Log SNR') plt.ylabel('Number') plt.legend() plt.savefig(wisps.OUTPUT_FIGURES+'/snr_distribution.pdf', bbox_inches='tight', facecolor='white', transparent=False) #s3=wisps.Source(filename='goodss-01-G141_47749') #s4=wisps.Source(filename='goodss-01-G141_45524') bools=np.logical_and(stars.snr1.between(3, 1000), stars.f_test.between(1e-3, 1)) #s4._best_fit_line ###Output _____no_output_____ ###Markdown fig, ax=plt.subplots(figsize=(8, 8))plt.plot(s4.wave, s4.flux, color='111111', label='Flux')plt.plot(s4.wave, s4.noise, '39CCCC', label='Noise')std=splat.getStandard(s4.spectral_type[0])std.normalize(range=[1.2, 1.5])chi, scale=splat.compareSpectra(s4.splat_spectrum, std, comprange=[[1.2, 1.5]], statistic='chisqr', scale=True) std.scale(scale)plt.plot(std.wave, std.flux, color='y', label='Best fit template')plt.plot( s4._best_fit_line[0], color='FF4136', label='Best fit line')plt.xlim([1.1, 1.7])plt.ylim([0, 0.1])plt.xlabel('Wavelength (micron)')plt.ylabel('Normalized Flux')plt.legend()plt.savefig(wisps.OUTPUT_FIGURES+'/example_line_fit.pdf', bbox_inches='tight', facecolor='white', transparent=False) ###Code compls.keys() fig, ax=plt.subplots(figsize=(8,6)) #for k in ['L0-L5', 'L5-T0', 'M7-L0', 'T0-T5', 'T5-T9','subdwarfs']: ax.scatter(compls['M7-L0'].values, contamns['M7-L0'].values, facecolors='none', edgecolors='#0074D9', label='M7-L0') ax.scatter(compls['L0-L5'].values, contamns['L0-L5'].values, marker='^', facecolors='none',\ edgecolors='#FF851B', label='L0-L5') ax.scatter(compls['L5-T0'].values, contamns['L5-T0'].values, marker='s', facecolors='none', edgecolors='#2ECC40', label='L5-T0') ax.scatter(compls['T0-T5'].values, contamns['T0-T5'].values, marker='$...$', facecolors='none', edgecolors='#FF4136', label='T0-T5') ax.scatter(compls['T5-T9'].values, contamns['T5-T9'].values, marker='X', facecolors='none', edgecolors='#111111', label='T5-T9') #h=plt.hist(contams[k].values, bins='auto', histtype='step', # label='All', log=True, linewidth=3) ax.set_xlabel('Completeness') ax.set_ylabel('Contamination') plt.legend() ax.set_yscale('log') plt.savefig(wisps.OUTPUT_FIGURES+'/completeness_contam.pdf', bbox_inches='tight', facecolor='white', transparent=False) compl_contam_table=pd.DataFrame(columns=contamns.columns, index=contamns.index) for k in compl_contam_table.columns: for idx in compl_contam_table.index: compl_contam_table.loc[idx, k]=(round(compls.loc[idx, k], 2), \ round(contamns.loc[idx, k], 3)) (compl_contam_table[['M7-L0', 'L0-L5', 'T0-T5',\ 'T5-T9', 'Y dwarfs', 'subdwarfs']]).to_latex() len(dt)/len(alldata) sds_nn=pd.read_csv(wisps.LIBRARIES+'/subdarfs_nn_preds.csv') len(sds_nn) sds_nn_df=alldata[alldata.grism_id.str.lower().isin(sds_nn.grism_id.str.lower())] len(sds_nn_df[sds_nn_df.mstar_flag==1]) pd.read_table('/users/caganze/research/wisps/libraries/candidates_missed.tex') ###Output _____no_output_____ ###Markdown This demonstrates all the steps in my candidate selection before 
conducting visual inspection ###Code import numpy as np import splat import wisps.data_analysis as wispd from wisps.data_analysis import selection_criteria as sel_crt import shapey import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from scipy import stats import wisps import matplotlib as mpl from tqdm import tqdm import random import matplotlib.pyplot as plt %matplotlib inline #some functions def get_indices(x): if x is None : return pd.Series({}) else: return pd.concat([pd.Series(x.indices), pd.Series(x.mags), pd.Series(x.snr)]) def get_spt(x): if x is None: return np.nan else: return x.spectral_type[0] #change f-test definition def f_test_fx(x, df1, df2): return stats.f.cdf(x, df1, df2) def box_parameters(idx, spt_range): bs=idx.shapes b=[x for x in bs if x.shape_name==spt_range][0] print ('{} {} m: {} b: {} s:{}, comp : {}, cont: {}'.format(spt_range, idx, round(b.coeffs[0], 2), round(b.coeffs[1], 2), round(b.scatter, 2), round(idx.completeness[spt_range], 2), round(idx.contamination[spt_range], 3))) cands=pd.read_pickle(wisps.LIBRARIES+'/new_real_ucds.pkl') #use the same columns for all data sets alldata=wisps.get_big_file() spex=wisps.Annotator.reformat_table(wisps.datasets['spex']) cands['line_chi']=cands.spectra.apply(lambda x : x.line_chi) cands['spex_chi']=cands.spectra.apply(lambda x: x.spex_chi) cands['f_test']=cands.spectra.apply(lambda x: x.f_test) spex_df=wisps.Annotator.reformat_table(wisps.datasets['spex']).reset_index(drop=True) manj=wisps.Annotator.reformat_table(wisps.datasets['manjavacas']).reset_index(drop=True) schn=wisps.Annotator.reformat_table(wisps.datasets['schneider']).reset_index(drop=True) ydwarfs=(manj[manj['spt'].apply(wisps.make_spt_number)>38].append(schn)).reset_index(drop=True) spex_df['spt']=np.vstack(spex_df['spt'].values)[:,0] manj['spt']=np.vstack(manj['spt'].values)[:,0] schn['spt']=np.vstack(schn['spt'].values)[:,0] cands.grism_id=cands.grism_id.apply(lambda x: x.lower()) cands['spt']=np.vstack(cands['spt'].values) #add x values spex['x']=spex.spex_chi/spex.line_chi alldata['x']=alldata.spex_chi/alldata.line_chi cands['x']=cands.spex_chi/cands.line_chi spex['f_test']=f_test_fx(spex.x.values, spex.dof.values-1, spex.dof.values-2) alldata['f_test']=f_test_fx(alldata.x.values, alldata.nG141.values-1, alldata.nG141.values-2) alldata=alldata.sort_values('x') spex=spex.sort_values('x') cands=cands.sort_values('x') alldata['datalabel']='alldata' spex['datalabel']='spex' cands['datalabel']='ucds' combined_ftest_df=pd.concat([cands, spex, alldata[(alldata.snr1>=3.) & (alldata.mstar_flag !=0)]]) #stats.f.cdf(.85564068, 108-1, 108+2) #list(spex[['x', 'dof']][spex.f_test.values >0.2].values) len(spex[np.logical_and(spex.f_test.values > 0.9, np.vstack(spex.spt.values)[:,0] >=17.)])/len(spex) len(spex[np.logical_and(spex.f_test.values < 0.02, np.vstack(spex.spt.values)[:,0] >=17.)])/len(spex) len(cands[np.logical_and(cands.f_test.values > 0.9, np.vstack(cands.spt.values)[:,0] >=17.)])/len(cands) len(cands[np.logical_and(cands.f_test.values < 0.02, np.vstack(cands.spt.values)[:,0] >=17.)])/len(cands) #star_ids=alldata[alldata['class_star'] !=0] #stars=wisps.Annotator.reformat_table(star_ids).reset_index(drop=True) #cy=stars[stars.grism_id.isin(cx.grism_id)] plt.plot(cands.x[cands.x<1.], '.') dt=alldata[(alldata.f_test<0.02) & (alldata.snr1>=3.) 
& (alldata.mstar_flag !=0)].reset_index(drop=True) dt['spt']=(dt['spt']).apply(wisps.make_spt_number).apply(float) dt=wisps.Annotator.reformat_table(dt).reset_index(drop=True) len(alldata[(alldata.f_test<0.02) & (alldata.snr1>=3.) & (alldata.mstar_flag !=0)]) wisps.datasets.keys() s= #wisps.Annotator.reformat_table(wisps.datasets['subd']) #get criteria ##only run this if new data gbhio=sel_crt.save_criteria(conts=dt) crts=sel_crt.crts_from_file() contamns=pd.DataFrame([ x.contamination for x in crts.values()]) compls=pd.DataFrame([ x.completeness for x in crts.values()]) contamns.index=[x for x in crts.keys()] compls.index=[x for x in crts.keys()] %%capture ''' contamns.style.apply(lambda x: ["background-color: #7FDBFF" if (i >= 0 and (v < 0.1 and v > 0. )) else "" for i, v in enumerate(x)], axis = 1) ''' def get_toplowest_contam(subtype, n): top=contamns.sort_values('L5-T0')[:n] return {subtype: [x for x in top.index]} ordered={} for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']: ordered.update(get_toplowest_contam(k, 6)) to_use= ordered spex['spt']=np.vstack(spex.spt.values)[:,0] from tqdm import tqdm def multiplte_indices_selection(k): stat_dict={} indices= [crts[index_name] for index_name in to_use[k]] #make selections for each index separately cand_bools=[] spex_bools=[] trash_bools=[] for idx in indices: xkey=idx.xkey ykey=idx.ykey bx=[x for x in idx.shapes if x.shape_name==k][0] _, cbools=bx._select(np.array([cands[xkey].values,cands[ykey].values])) _, spbools=bx._select(np.array([spex[xkey].values,spex[ykey].values])) _, trbools=bx._select(np.array([dt[xkey].values, dt[ykey].values])) cand_bools.append(cbools) spex_bools.append(spbools) trash_bools.append(trbools) cands_in_that_class_bool=cands.spt.apply(lambda x: wisps.is_in_that_classification(x, k)) spex_in_that_class_bool=spex.spt.apply(lambda x: wisps.is_in_that_classification(x, k)) cand_bools.append(cands_in_that_class_bool) spex_bools.append(spex_in_that_class_bool) cands_selected=cands[np.logical_and.reduce(cand_bools, axis=0)] spexs_selected=spex[np.logical_and.reduce(spex_bools, axis=0)] print (' {} selected {} out of {} UCDS'.format(k, len( cands_selected), len(cands[cands_in_that_class_bool]))) print ('overall completeness {}'.format( len(spexs_selected)/len(spex[spex_in_that_class_bool]))) print ('total contaminants {}'.format(len(dt[np.logical_and.reduce(trash_bools)]))) print ('-------------------------------------------') #for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs']: # multiplte_indices_selection(k) contamns.idxmin(axis=0) from collections import OrderedDict ordered=[(k, contamns.idxmin(axis=0)[k]) for k in ['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-T9', 'Y dwarfs', 'subdwarfs']] to_use= [ (y, x) for x, y in ordered] to_use import pickle #save the random forest output_file=wisps.OUTPUT_FILES+'/best_indices_to_use.pkl' with open(output_file, 'wb') as file: pickle.dump(to_use,file) fp={} cands=cands[cands.grism_id.isin(dt.grism_id)] def plot_index_box(index_name, box_name, ax): #get the index and the box idx=crts[index_name] bx=[x for x in idx.shapes if x.shape_name==box_name][0] xkey=idx.xkey ykey=idx.ykey to_use_df=spex_df if box_name.lower()=='y dwarfs': to_use_df=ydwarfs if box_name.lower()=='subdwarfs': to_use_df=wisps.Annotator.reformat_table(idx.subdwarfs) to_use_df['spt']=17 xlim=[ bx.xrange[0]-.5*abs(np.ptp(bx.xrange)), bx.xrange[1]+.5*abs(np.ptp(bx.xrange))] ylim=[ bx.yrange[0]-.5*abs(np.ptp(bx.yrange)), bx.yrange[1]+.5*abs(np.ptp(bx.yrange))] if 
box_name.upper()=='T5-T9': print ('changin scale') print (bx.xrange[1]) xlim=[ bx.xrange[0]-0.2*abs(np.ptp(bx.xrange)), np.round(bx.xrange[1]+0.2*abs(np.ptp(bx.xrange)))] #remove nans from background bckgrd= dt[[xkey, ykey]].replace(-np.inf, np.nan).replace(np.inf, np.nan).dropna() # ax.scatter(bckgrd[xkey], bckgrd[ykey], s=1, c='#111111', label='Background') bckgrd=bckgrd[(bckgrd[xkey].between(xlim[0], xlim[1])) & (bckgrd[ykey].between(ylim[0], ylim[1]))] h=ax.hist2d(bckgrd[xkey].apply(float).values, bckgrd[ykey].apply(float).values, \ cmap='gist_yarg', vmin=50, vmax=1000) cands_slctd, cands_bools=bx._select(np.array([cands[xkey].values,cands[ykey].values])) trash_slctd, trsh_bools=bx._select(np.array([dt[xkey].values, dt[ykey].values])) #simul_slctd, simul_bools=bx._select(np.array([simulated_data[xkey].values, simulated_data[ykey].values])) print (len(cands_slctd[0]), len((cands))) cands_in_that_class_bool=(cands).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name)) spexs_slctd_in_that_class_bool= (to_use_df).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name)) #simulated_in_that_class_bool=(simulated_data[simul_bools]).spt.apply(lambda x: wisps.is_in_that_classification(x, box_name)) if box_name.lower()=='subdwarfs': spexs_slctd_in_that_class_bool=np.ones(len(to_use_df), dtype=bool) cands_in_that_class=np.array([cands_slctd[0], \ cands_slctd[1]]) #simulated_in_that_class= np.array([simul_slctd[0][simulated_in_that_class_bool], simul_slctd[1][simulated_in_that_class_bool]]) spexs_slctd_in_that_class=np.array([to_use_df[xkey][spexs_slctd_in_that_class_bool], to_use_df[ykey][spexs_slctd_in_that_class_bool]]) #ax.scatter( simulated_in_that_class[0], simulated_in_that_class[1], facecolors='none', s=10, # edgecolors='#001f3f', label='simulated') ax.scatter(spexs_slctd_in_that_class[0], spexs_slctd_in_that_class[1], facecolors='none',\ edgecolors='#0074D9', label='Templates', s=50.) #ax.scatter(cands[xkey], cands[ykey], marker='x', facecolors='#FF851B', s=40., alpha=0.5) ax.scatter( cands_in_that_class[0], cands_in_that_class[1], marker ='+', s=150., alpha=1., facecolors='#FF851B', label='Discovered UCDs') ax.scatter(cands[xkey].values, cands[ykey].values, marker='+', s=150., alpha=0.3, facecolors='#FF851B') bx.color='None' bx.alpha=1. bx.linewidth=3 bx.linestyle='-' bx.edgecolor='#0074D9' bx.plot(ax=ax, only_shape=True, highlight=False) #cb = plt.colorbar(h[3], ax=ax, orientation='horizontal') #cb.set_label('Counts in bin', fontsize=16) plt.tight_layout() ax.set_xlabel(r'$'+str(idx.name.split(' ')[0])+'$', fontsize=14) ax.set_ylabel(r'$'+str(idx.name.split(' ')[1])+'$', fontsize=14) ax.set_title(box_name, fontsize=18) xbuffer=np.nanstd(to_use_df[[xkey,ykey]]) ax.minorticks_on() if (trash_slctd.shape[1])==0: fprate=0.0 else: fprate=(trash_slctd.shape[1]- cands_slctd.shape[1])/trash_slctd.shape[1] if box_name.lower()=='subdwarfs': fprate=1. 
fp[box_name]= fprate ax.set_xlim(xlim) ax.set_ylim(ylim) plt.tight_layout() print (' {} selected {}'.format(box_name, len(bx.select( bckgrd)))) return {str(box_name): bx} to_use ###Output _____no_output_____ ###Markdown cands ###Code idx=crts[to_use[1][0]] import matplotlib fig, ax=plt.subplots(nrows=3, ncols=3, figsize=(12, 14)) bxs=[] for idx, k in enumerate(to_use): print (idx, k) b=plot_index_box( k[0], k[1], np.concatenate(ax)[idx]) bxs.append(b) plt.tight_layout() cax = fig.add_axes([0.5, 0.1, .3, 0.03]) norm= matplotlib.colors.Normalize(vmin=50,vmax=1000) mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='gist_yarg')# vmin=10, vmax=5000) cbar=plt.colorbar(mp, cax=cax, orientation='horizontal') cbar.ax.set_xlabel(r'Number of Contaminants', fontsize=18) fig.delaxes(np.concatenate(ax)[-1]) fig.delaxes(np.concatenate(ax)[-2]) np.concatenate(ax)[-4].set_title(r'$\geq$ T9 ', fontsize=18) #subdindx_index_crt=crts['H_2O-1/J-Cont H_2O-2/H_2O-1'] #subdrfs=wisps.Annotator.reformat_table(dummy_index_crt.subdwarfs) #tpls=wisps.Annotator.reformat_table(spex_df[spex_df.metallicity_class.isna()]) #a=np.concatenate(ax)[-1] #tpls=tpls[tpls.spt>16] #a.scatter(dt[subdindx_index_crt.xkey], dt[subdindx_index_crt.ykey], s=1., c='#111111', alpha=0.1) #a.scatter(tpls[subdindx_index_crt.xkey], tpls[subdindx_index_crt.ykey], marker='+', facecolors='#0074D9', label='SpeX', s=5.) #a.scatter(subdrfs[subdindx_index_crt.xkey], subdrfs[subdindx_index_crt.ykey], marker='+', facecolors='#2ECC40', label='SpeX', s=30.) #a.set_xlim([0., 1.35]) #a.set_ylim([0., 1.25]) #a.set_title('subdwarfs', fontsize=18) #a.set_xlabel(r'$'+str(subdindx_index_crt.name.split(' ')[0])+'$', fontsize=15) #a.set_ylabel(r'$'+str(subdindx_index_crt.name.split(' ')[1])+'$', fontsize=15) np.concatenate(ax)[-3].legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig(wisps.OUTPUT_FIGURES+'/index_index_plots.pdf', bbox_inches='tight', rasterized=True, dpi=150) #.grism_id.to_csv('/users/caganze/desktop/true_brown_dwarfs.csv') bx_dict={} for b in bxs: bx_dict.update(b) #invert to use inv_to_use = {v: k for k, v in to_use} ncandidates=[] for spt_range in bx_dict.keys(): idx_name=inv_to_use[spt_range] idx=crts[idx_name] s, bools=(bx_dict[spt_range])._select(np.array([dt[idx.xkey].values, dt[idx.ykey].values])) ncandidates.append(dt[bools]) candsss=(pd.concat(ncandidates).drop_duplicates(subset='grism_id')) cands.grism_id=cands.grism_id.apply(lambda x: x.lower().strip()) good_indices=[crts[x] for x in inv_to_use.values()] len(candsss), len(candsss[candsss.grism_id.isin(cands.grism_id.apply(lambda x: x.lower().strip())) & (candsss.spt.apply(wisps.make_spt_number)>16)]) len(candsss.drop_duplicates('grism_id'))/len(alldata) len(candsss[candsss.grism_id.isin(cands.grism_id) & (candsss.spt.apply(wisps.make_spt_number).between(35, 40))]) len(candsss), len(dt), len(alldata[alldata.mstar_flag !=0]) len(dt)/len(alldata) candsss.to_pickle(wisps.OUTPUT_FILES+'/selected_by_indices.pkl') #print out table def round_tuple(tpl, n=2): return round(tpl[0], n), round(tpl[1],n) for index, k in to_use: spt_range=k sindex=crts[index] bs=sindex.shapes bs=[x for x in bs if x.shape_name==spt_range] bx=bs[0] print (" {} & {} & {} & {} & {} & {} & {} & {} & {} & {} \\\ ".format(spt_range,sindex.xkey, sindex.ykey, round_tuple(bx.vertices[0]), round_tuple(bx.vertices[1]) , round_tuple(bx.vertices[2]), round_tuple(bx.vertices[3]), round(sindex.completeness[spt_range], 2), round(sindex.contamination[spt_range], 7), round(fp[spt_range],6))) len(candsss) #ghjk stars= 
alldata[alldata.mstar_flag !=0] cands_dff=(cands[np.logical_and(cands['snr1'] >=3., cands['spt'] >=17)]).sort_values('spt') spex_df=spex_df.sort_values('spt') star_snr=stars[['snr1', 'snr2', 'snr3', 'snr4']].apply(np.log10).dropna() star_snr=(star_snr[star_snr.snr1.between(-1, 4) & star_snr.snr3.between(-1, 4) & star_snr.snr4.between(-1, 4)]).reset_index(drop=True) fig, (ax, ax1)=plt.subplots(ncols=2, figsize=(12, 6)) h=ax.hist2d(star_snr['snr1'], star_snr['snr3'], cmap='gist_yarg', bins=10, label='Point Sources') #ax.scatter(star_snr['snr1'], star_snr['snr3'], c='#111111', s=1, alpha=0.1) cb = plt.colorbar(h[3], ax=ax, orientation='horizontal') cb.set_label('Counts in bin', fontsize=16) plt.tight_layout() #ax.scatter(star_snr['snr1'], star_snr['snr4'], s=1., c='k', alpha=0.1, # label='3D-HST or WISP') ax.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr3'].apply(np.log10), s=10, c=spex_df.spt, cmap='coolwarm', marker='o', alpha=0.1, vmin=15, vmax=40) ax.scatter(spex_df['snr1'].apply(np.log10)[0], spex_df['snr3'].apply(np.log10)[0], s=10, c=spex_df.spt[0], cmap='coolwarm', label='Templates', marker='o', alpha=1., vmin=15, vmax=40) ax.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr3'].apply(np.log10), c=cands_dff['spt'], s=40, marker='*', cmap='coolwarm', label='UCDs' , vmin=15, vmax=40) ax.set_xlim([-0.5, 4]) ax.set_ylim([-0.5, 4]) ax.set_xlabel('Log J-SNR', fontsize=18) ax.set_ylabel('Log H-SNR', fontsize=18) ax.legend(fontsize=18, loc='upper left') ax.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--') ax.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--') #ax1.scatter(stars['snr1'].apply(np.log10), stars['snr4'].apply(np.log10), s=1., c='k', alpha=0.1, # label='3D-HST or WISP') #ax1.scatter(star_snr['snr1'], star_snr['snr4'], c='#111111', s=1, alpha=0.1) h1=ax1.hist2d(star_snr['snr1'], star_snr['snr4'], cmap='gist_yarg', bins=10, label='Point Sources') mp=ax1.scatter(spex_df['snr1'].apply(np.log10), spex_df['snr4'].apply(np.log10), s=10, c=spex_df.spt, cmap='coolwarm', label='Templates', marker='o', alpha=0.1, vmin=15, vmax=40) ax1.scatter(cands_dff['snr1'].apply(np.log10), cands_dff['snr4'].apply(np.log10), c=cands_dff['spt'], s=40, marker='*', cmap='coolwarm', label='UCDs', vmin=15, vmax=40) ax1.set_xlim([-0.5, 4]) ax1.set_ylim([-0.5, 4]) ax1.set_xlabel(' Log J-SNR', fontsize=18) ax1.set_ylabel('Log MEDIAN-SNR', fontsize=18) #ax.legend(fontsize=18) ax1.axhline(np.log10(3), c='k', xmin=np.log10(3)-0.2, linestyle='--') ax1.axvline(np.log10(3), c='k', ymin=np.log10(3)-0.2, linestyle='--') cb1 = plt.colorbar(h1[3], ax=ax1, orientation='horizontal') cb1.set_label('Counts in bin', fontsize=16) #plt.tight_layout() import matplotlib cax = fig.add_axes([1.01, 0.21, .03, 0.7]) norm= matplotlib.colors.Normalize(vmin=15,vmax=40) mp=matplotlib.cm.ScalarMappable(norm=norm, cmap='coolwarm') cbar=plt.colorbar(mp, cax=cax, orientation='vertical') cbar.ax.set_ylabel(r'Spectral Type', fontsize=18) ax.minorticks_on() ax1.minorticks_on() cbar.ax.set_yticks([ 17, 20, 25, 30, 35, 40]) cbar.ax.set_yticklabels(['M5', 'L0', 'L5', 'T0', 'T5', 'Y0']) plt.tight_layout() plt.savefig(wisps.OUTPUT_FIGURES+'/snr_cutplots.pdf', \ bbox_inches='tight',rasterized=True, dpi=100) #import wisps big=wisps.get_big_file() bigsnr=big[big.snr1>=3.] 
# fig, ax=plt.subplots(figsize=(10, 6)) h=ax.hist(big.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linestyle=':', label='All', log=True, linewidth=3) h=ax.hist(stars.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, label='Point Sources', linestyle='--', log=True) h=ax.hist(stars[stars.snr1>3].snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, label='Selected', log=True) #h=ax.hist(bigsnr.snr1.apply(np.log10).values, range=[-3, 4], bins=32, histtype='step', linewidth=3, log=True) ax.minorticks_on() plt.xlabel('Log SNR') plt.ylabel('Number') plt.legend() plt.savefig(wisps.OUTPUT_FIGURES+'/snr_distribution.pdf', bbox_inches='tight', facecolor='white', transparent=False) #s3=wisps.Source(filename='goodss-01-G141_47749') #s4=wisps.Source(filename='goodss-01-G141_45524') bools=np.logical_and(stars.snr1.between(3, 1000), stars.f_test.between(1e-3, 1)) #s4._best_fit_line ###Output _____no_output_____ ###Markdown fig, ax=plt.subplots(figsize=(8, 8))plt.plot(s4.wave, s4.flux, color='111111', label='Flux')plt.plot(s4.wave, s4.noise, '39CCCC', label='Noise')std=splat.getStandard(s4.spectral_type[0])std.normalize(range=[1.2, 1.5])chi, scale=splat.compareSpectra(s4.splat_spectrum, std, comprange=[[1.2, 1.5]], statistic='chisqr', scale=True) std.scale(scale)plt.plot(std.wave, std.flux, color='y', label='Best fit template')plt.plot( s4._best_fit_line[0], color='FF4136', label='Best fit line')plt.xlim([1.1, 1.7])plt.ylim([0, 0.1])plt.xlabel('Wavelength (micron)')plt.ylabel('Normalized Flux')plt.legend()plt.savefig(wisps.OUTPUT_FIGURES+'/example_line_fit.pdf', bbox_inches='tight', facecolor='white', transparent=False) ###Code compls.keys() fig, ax=plt.subplots(figsize=(8,6)) #for k in ['L0-L5', 'L5-T0', 'M7-L0', 'T0-T5', 'T5-T9','subdwarfs']: ax.scatter(compls['M7-L0'].values, contamns['M7-L0'].values, facecolors='none', edgecolors='#0074D9', label='M7-L0') ax.scatter(compls['L0-L5'].values, contamns['L0-L5'].values, marker='^', facecolors='none',\ edgecolors='#FF851B', label='L0-L5') ax.scatter(compls['L5-T0'].values, contamns['L5-T0'].values, marker='s', facecolors='none', edgecolors='#2ECC40', label='L5-T0') ax.scatter(compls['T0-T5'].values, contamns['T0-T5'].values, marker='$...$', facecolors='none', edgecolors='#FF4136', label='T0-T5') ax.scatter(compls['T5-T9'].values, contamns['T5-T9'].values, marker='X', facecolors='none', edgecolors='#111111', label='T5-T9') #h=plt.hist(contams[k].values, bins='auto', histtype='step', # label='All', log=True, linewidth=3) ax.set_xlabel('Completeness') ax.set_ylabel('Contamination') plt.legend() ax.set_yscale('log') plt.savefig(wisps.OUTPUT_FIGURES+'/completeness_contam.pdf', bbox_inches='tight', facecolor='white', transparent=False) compl_contam_table=pd.DataFrame(columns=contamns.columns, index=contamns.index) for k in compl_contam_table.columns: for idx in compl_contam_table.index: compl_contam_table.loc[idx, k]=(round(compls.loc[idx, k], 2), \ round(contamns.loc[idx, k], 3)) (compl_contam_table[['M7-L0', 'L0-L5', 'T0-T5',\ 'T5-T9', 'Y dwarfs', 'subdwarfs']]).to_latex() s=wisps.Source(filename='goodss-11-g141_39408') fig, ax=plt.subplots() mask=np.logical_and(s.wave >1.1, s.wave<1.7) plt.plot(s.wave[mask], s.flux[mask]) ax.set(ylim=[-1., 30.]) ###Output _____no_output_____
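###Markdown
For reference, the completeness and contamination numbers used throughout this notebook reduce to two ratios over boolean selection masks. The sketch below spells that out on a tiny made-up mask (the arrays are illustrative, not WISP data), assuming the convention used above: completeness is the fraction of genuine UCDs of the targeted class that a selection box recovers, and contamination is the fraction of selected sources that are not genuine UCDs.
###Code
import numpy as np

# Hypothetical masks: which sources a selection box keeps,
# and which sources are genuine UCDs of the targeted class.
selected    = np.array([True, True, True, True, False, False])
is_true_ucd = np.array([True, True, False, False, True, False])

# Completeness: fraction of true UCDs that the box recovers.
completeness = np.sum(selected & is_true_ucd) / np.sum(is_true_ucd)

# Contamination: fraction of selected sources that are not true UCDs.
contamination = np.sum(selected & ~is_true_ucd) / np.sum(selected)

print(completeness, contamination)  # 0.666... and 0.5 for this toy mask
###Output
_____no_output_____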
src/python/small.ipynb
###Markdown COMP305 -> 2-median problem on Optimal Placement of 2 Hospitals Imports ###Code import time import heapq import numpy as np from collections import defaultdict from collections import Counter from random import choice from random import randint ###Output _____no_output_____ ###Markdown Data Read ###Code with open("tests/test1_new.txt") as f: test1 = f.read().splitlines() with open("tests/test2_new.txt") as f: test2 = f.read().splitlines() ###Output _____no_output_____ ###Markdown Change the test to be run below ###Code lines = test2 ###Output _____no_output_____ ###Markdown txt -> Graph ###Code number_of_vertices = int(lines[0]) number_of_edges = int(lines[1]) vertices = lines[2:2+number_of_vertices] edges = lines[2+number_of_vertices:] ids_and_populations = [tuple(map(int, vertices[i].split(" "))) for i in range(len(vertices))] populations = dict(sorted(dict(ids_and_populations).items())) #redundant sort mydict = lambda: defaultdict(lambda: defaultdict()) G = mydict() for i in range(len(edges)): source, target, weight = map(int, edges[i].split(" ")) G[source][target] = weight G[target][source] = weight ###Output _____no_output_____ ###Markdown Our Binary Heap and Priority Queue Implementation ###Code class PriorityQueue: """Priority Queue implemented with binary heap for efficiency""" def __init__(self): """Constructor for the binary heap""" self.size = 0 self.capacity = 0 self.array = [] def is_empty(self): """Returns true if queue is empty false otherwise""" return self.size == 0 def left_child(self, index): """Returns the left child of the node given as an index""" return (2 * index) + 1 def right_child(self, index): """Returns the right child of the node given as an index""" return (2 * index) + 2 def parent(self, index): """Returns the parent of the node given as an index""" return (index - 1) // 2 def hasleftchild(self, index): """Checks if the indexed element has a left child""" return self.left_child(index) < len(self.array) def hasrightchild(self, index): """Checks if the indexed element has a right child""" return self.right_child(index) < len(self.array) def pop(self): """Returns the smallest element from the list and removes the element. Asserts a error if it is called on a empty list """ if self.is_empty(): assert "Empty List" min_el = self.array[0] self.array[0] = self.array[len(self.array) - 1] del self.array[len(self.array) - 1] self.downheap(0) return min_el def view(self): """Returns the smallest element from the list without removing it. 
Asserts a error if it is called on a empty list """ if self.is_empty(): assert "Empty List" return self.array[0] def push(self, element): """Adds the given element to the queue""" self.array.append(element) self.upheap(len(self.array) - 1) def upheap(self, index): """Corrects the heap property top down if it is broken""" while index > 0: parent = self.parent(index) if self.array[parent] == self.array[index] or self.array[index] > self.array[parent]: break temp = self.array[parent] self.array[parent] = self.array[index] self.array[index] = temp index = parent def __str__(self): """Prints the queue""" string = "[ " for k in range(0, len(self.array)): if k % 20 == 0 and k != 0: string += "\n" string += self.array[k].__str__() + " " string += "]\n" return string def downheap(self, index): """Corrects the heap property bottom up if it is broken""" while self.hasrightchild(index) or self.hasleftchild(index): small = self.left_child(index) if self.hasrightchild(index): rc = self.right_child(index) if self.array[rc] < self.array[small]: small = rc if self.array[small] == self.array[index] or self.array[small] > self.array[index]: break temp = self.array[small] self.array[small] = self.array[index] self.array[index] = temp index = small if __name__ == '__main__': q = PriorityQueue() ###Output _____no_output_____ ###Markdown Heuristic Algorithms' Utilities ###Code def select_neighbors(G, sub_graph, current_node, k): if k == 0: return sub_graph for j in G[current_node].items(): sub_graph[current_node][j[0]] = j[1] sub_graph[j[0]][current_node] = j[1] sub_graph = select_neighbors(G, sub_graph, j[0], k - 1) return sub_graph def merge_graph(dict1, dict2): for key, value in dict2.items(): for subkey, subvalue in value.items(): dict1[key][subkey] = subvalue def dijkstra(G, populations, source): costs = dict() for key in G: costs[key] = np.inf costs[source] = 0 pq = PriorityQueue() for node in G: pq.push((node, costs[node])) while len(pq.array) != 0: current_node, current_node_distance = pq.pop() for neighbor_node in G[current_node]: weight = G[current_node][neighbor_node] distance = current_node_distance + weight if distance < costs[neighbor_node]: costs[neighbor_node] = distance pq.push((neighbor_node, distance)) sorted_costs_lst=list(dict(sorted(costs.items())).values()) populations_values_lst = list(dict(sorted(populations.items())).values()) return np.sum(np.array(sorted_costs_lst) * np.array(populations_values_lst)) def dijkstra_q_impl(G, populations, source): costs = dict() for key in G: costs[key] = np.inf costs[source] = 0 pq = [] for node in G: pq.append((node, costs[node])) while len(pq) != 0: current_node, current_node_distance = pq.pop(0) for neighbor_node in G[current_node]: weight = G[current_node][neighbor_node] distance = current_node_distance + weight if distance < costs[neighbor_node]: costs[neighbor_node] = distance pq.append((neighbor_node, distance)) sorted_costs_lst=list(dict(sorted(costs.items())).values()) populations_values_lst = list(dict(sorted(populations.items())).values()) return np.sum(np.array(sorted_costs_lst) * np.array(populations_values_lst)) def random_start(G): res = [choice(list(G.keys())), choice(list(G.keys()))] if res[0] == res [1]: return random_start(G) print(f"Random start: {res}") return res #//2 * O((V+E)*logV) = O(E*logV) // def allocation_cost(G, population_dict, i,j): return [dijkstra(G,population_dict, i),dijkstra(G,population_dict, j)] # V times Dijkstra def sub_graph_apsp(G, dijkstra_func): population_dict = dict(sorted([(k, populations[k]) for k in 
G.keys()])) selected_vertex = choice(list(G.keys())) selected_cost = dijkstra_func(G,population_dict, selected_vertex) for node in G.keys(): if node is not selected_vertex: this_cost = dijkstra_func(G, population_dict, node) if this_cost < selected_cost: selected_cost = this_cost selected_vertex = node return selected_vertex, selected_cost def algorithm_sub_graph_apsp(G, starting_node, k, hop_list, dijkstra_func): sub_graph = lambda: defaultdict(lambda: defaultdict()) sub_graph = sub_graph() sub_graph = select_neighbors(G, sub_graph, current_node=starting_node, k=k) next_node, cost = sub_graph_apsp(sub_graph, dijkstra_func) if len(hop_list) > 0 and next_node == hop_list[-1][0]: return next_node, cost hop_list.append((next_node, cost)) return algorithm_sub_graph_apsp(G, next_node, k, hop_list, dijkstra_func) def regional_interchange(G,current_node,k): #k-th neighbor subgraph sub_graph = lambda: defaultdict(lambda: defaultdict()) sub_graph = sub_graph() select_neighbors(G, sub_graph, current_node, k) return sub_graph_apsp(sub_graph,dijkstra_func=dijkstra_q_impl)[0] from multiprocessing import Process res_queue = [] # global queue for threads to enqueue def Teitz_Bart_Spawn_helper(G, k): global res_queue population_dict = dict(sorted([(k, populations[k]) for k in G.keys()])) selected_vertices = random_start(G) selected_costs = allocation_cost(G,population_dict, selected_vertices[0],selected_vertices[1]) counter=0 for not_selected in G.keys(): counter+=1 k-=1; #print(k) if k==-1: res_queue.append((selected_vertices,selected_costs)) break if not_selected not in selected_vertices: bigger = max(selected_costs) this_cost = dijkstra(G,population_dict, not_selected) if this_cost < bigger: bigger_index = selected_costs.index(bigger) selected_costs[bigger_index] = this_cost selected_vertices[bigger_index] = not_selected res_queue.append((selected_vertices,selected_costs))#is this necessary return min(res_queue, key=lambda x:x[1]) ###Output _____no_output_____ ###Markdown Greedy Heuristic Algorithms ###Code # 2*O(V)*O(E*logV) = O(E*V*logV) # def Greedy_Heuristic_Add_Drop(G, dijkstra_func): population_dict = dict(sorted([(k, populations[k]) for k in G.keys()])) selected_vertices = random_start(G) selected_costs = allocation_cost(G,population_dict, selected_vertices[0],selected_vertices[1]) for not_selected in G.keys(): if not_selected not in selected_vertices: bigger = max(selected_costs) this_cost = dijkstra_func(G,population_dict, not_selected) if this_cost < bigger: bigger_index = selected_costs.index(bigger) selected_costs[bigger_index] = this_cost selected_vertices[bigger_index] = not_selected return (selected_vertices,selected_costs) # n: how many random bootstrap positions to spawn # k: how many iterations to run per spawned per Greedy_Heuristic_Add_Drop def Greedy_Heuristic_Add_Drop_Spawn(G, n, k): global res_queue for i in range(n): print('spawning thread '+str(i)) Process(target=Teitz_Bart_Spawn_helper(G, k)).start() return min(res_queue, key=lambda x:x[1]) def Greedy_Heuristic_Subgraph_Expansion(G, k, dijkstra_func, bootstrap_cnt=10): nodes = [] costs = [] for i in range(bootstrap_cnt): node, cost = algorithm_sub_graph_apsp(G, choice(list(G.keys())), k, [], dijkstra_func=dijkstra_func) nodes.append(node) costs.append(cost) counter = Counter(nodes) most_commons = counter.most_common(2) target_nodes = (most_commons[0][0], most_commons[1][0]) sub_graph1 = lambda: defaultdict(lambda: defaultdict()) sub_graph1 = sub_graph1() sub_graph1 = select_neighbors(G, sub_graph1, target_nodes[0], k=k) 
sub_graph2 = lambda: defaultdict(lambda: defaultdict()) sub_graph2 = sub_graph2() sub_graph2 = select_neighbors(G, sub_graph2, target_nodes[1], k=k) merge_graph(sub_graph1, sub_graph2) points, costs = Greedy_Heuristic_Add_Drop(sub_graph1, dijkstra_func) if np.inf in costs: print("INF") sub_graph1 = lambda: defaultdict(lambda: defaultdict()) sub_graph1 = sub_graph1() sub_graph1 = select_neighbors(G, sub_graph1, current_node=points[0], k=k+1) sub_graph2 = lambda: defaultdict(lambda: defaultdict()) sub_graph2 = sub_graph2() sub_graph2 = select_neighbors(G, sub_graph2, current_node=points[1], k=k+1) merge_graph(sub_graph1, sub_graph2) points, costs = Greedy_Heuristic_Add_Drop(sub_graph1, dijkstra_func) if np.inf not in costs: return points, costs else: print("Graphs are disconnected. Total cost is inf") return points, costs return points, costs # Global/Regional Interchange Algorithm def Greedy_Heuristic_Local_Interchange(G,k): population_dict = dict(sorted([(k, populations[k]) for k in G.keys()])) selected_vertices = random_start(G) selected_costs = allocation_cost(G,population_dict, selected_vertices[0],selected_vertices[1]) for not_selected in G.keys(): if not_selected not in selected_vertices: bigger = max(selected_costs) this_cost = dijkstra(G,population_dict, not_selected) if this_cost < bigger: bigger_index = selected_costs.index(bigger) selected_costs[bigger_index] = this_cost temp = regional_interchange(G,not_selected,k) if temp != min(selected_vertices): selected_vertices[bigger_index] = temp # Regional Interchange # k-th neighbor -> 5. order neighbor subgraph # V^2*logV 1-hospital problem return(selected_vertices,selected_costs) ###Output _____no_output_____ ###Markdown Greedy Heuristic Runs ###Code start = time.time() res = Greedy_Heuristic_Add_Drop(G,dijkstra_func=dijkstra ) diff = time.time()-start print('\npick cities #'+ str(res[0]) +' with costs '+ str(res[1])) print('\ntotal time: '+ str(diff)+ ' sec') start = time.time() res = Greedy_Heuristic_Subgraph_Expansion(G, 5, bootstrap_cnt=10, dijkstra_func=dijkstra) #q for direct Queue based PQ impl (py's pop(0)) diff = time.time()-start print('\npick cities #'+ str(res[0]) +' with costs '+ str(res[1])) print('\ntotal time using our Binary-Heap PQ: '+ str(diff)+ ' sec') start = time.time() res = Greedy_Heuristic_Subgraph_Expansion(G, 5, bootstrap_cnt=10, dijkstra_func=dijkstra_q_impl) #q for direct Queue based PQ impl (py's pop(0)) diff = time.time()-start print('\npick cities #'+ str(res[0]) +' with costs '+ str(res[1])) print('\ntotal time using our direct-Queue-based PQ: '+ str(diff)+ ' sec') start = time.time() res = Greedy_Heuristic_Local_Interchange(G, 5) diff = time.time()-start print('\npick cities #'+ str(res[0]) +' with costs '+ str(res[1])) print('\ntotal time: '+ str(diff)+ ' sec') ###Output Random start: [144, 25] pick cities #[214, 71] with costs [13484179, 13478689] total time: 25.314204931259155 sec ###Markdown Dynamic Programming + Brute Force Combination Selection Algorithm A Dijkstra utility specifically for paths and our V^4 Algorithm ###Code def dijkstra_path(G, population_dict, source): costs = dict() for key in G: costs[key] = np.inf costs[source] = 0 pq = [] for node in G: heapq.heappush(pq, (node, costs[node])) while len(pq) != 0: current_node, current_node_distance = heapq.heappop(pq) for neighbor_node in G[current_node]: weight = G[current_node][neighbor_node] distance = current_node_distance + weight if distance < costs[neighbor_node]: costs[neighbor_node] = distance heapq.heappush(pq, (neighbor_node, 
distance)) sorted_costs_lst=list(dict(sorted(costs.items())).values()) sorted_populations_lst = list(dict(sorted(population_dict.items())).values()) return np.array(sorted_costs_lst) * np.array(sorted_populations_lst) #return list(dict(sorted(costs.items())).values()) # V4 because runs in V^4 def V4(G): APSP = np.zeros((number_of_vertices,number_of_vertices)) population_dict = dict(sorted([(k, populations[k]) for k in G.keys()])) for vertex in vertices: vertex= int(vertex.split()[0]) APSP[vertex] = [e for e in dijkstra_path(G, population_dict,vertex)] global glob res = {} n = len(APSP) temp_arr = APSP.copy() count=0 count2=0 for first in range(n): for second in range(first+1,n): if first==second: continue count+=1 temp_arr = APSP.copy() for row in temp_arr: if row[first]<row[second]: row[second]=0 else: row[first]=0 to_be_summed = temp_arr[:,[first,second]] summed = sum(sum(to_be_summed)) res[(first,second)]=summed ret=min(res, key=res.get) return ret, res[ret], res start = time.time() res = V4(G) diff = time.time()-start print('\npick cities #'+ str(res[0]) +' with costs '+ str(res[1])) print('\ntotal time: '+ str(diff)+ ' sec') ###Output pick cities #(205, 215) with costs 121635.0 total time: 21.395344257354736 sec ###Markdown Cutting The Graph and Running Floyd-Warshall s Floyd-Warshall Utilities ###Code import readfile def floydwarshall_helper(graph, root, root_index, weight): dist = [ [0]*len(graph) for i in range(len(graph))] marked = [ [0]*len(graph) for i in range(len(graph))] for x in range(len(graph)): if graph[x][root_index] == float('inf'): continue graph[x][root_index] = 0 for x in range(len(graph)): if graph[root_index][x] == float('inf'): continue graph[root_index][x] = 0 for x in range(len(graph)): for y in range(len(graph)): dist[x][y] = graph[x][y] for x in range(len(graph)): for y in range(len(graph)): marked[x][y] = 0 for z in range(len(graph)): for x in range(len(graph)): for y in range(len(graph)): if dist[x][z] == float('inf') or dist[z][y] == float('inf'): continue if dist[x][z] + dist[z][y] < dist[x][y]: dist[x][y] = dist[x][z] + dist[z][y] if z == root_index: marked[x][y] = 1 marked[y][x] = 1 marked[x][z] = 1 marked[z][x] = 1 marked[z][z] = 1 if marked[x][z] == 1 or marked[z][y] == 1: marked[x][y] = 1 value = [0 for i in range(len(graph))] value[root_index] = float('inf') for x in range(len(graph)): for y in range(len(graph)): if dist[x][y] == float('inf'): continue if value[x] == float('inf'): continue if marked[x][y] == 1: continue value[x] += dist[x][y] * weight[x][1] min1 = float('inf') min2 = float('inf') min2_index = 0 min1_index = 0 for x in range(len(graph)): if value[x] <= min2: if value[x] <= min1: min2 = min1 min2_index = min1_index min1 = value[x] min1_index = x else: min2 = value[x] min2_index = x print("Selected min 1 is : ") print(min1_index) print("\n") print("Selected min 2 is : ") print(min2_index) print("\n") def floydwarshall(graph, weight): dist = [ [0]*len(graph) for i in range(len(graph))] marked = [ [0]*len(graph) for i in range(len(graph))] root = float('inf') root_index = 0 for x in range(len(graph)): for y in range(len(graph)): dist[x][y] = graph[x][y] marked[x][y] = 0 for z in range(len(graph)): for x in range(len(graph)): for y in range(len(graph)): if dist[x][z] == float('inf') or dist[z][y] == float('inf'): continue if dist[x][z] + dist[z][y] < dist[x][y]: dist[x][y] = dist[x][z] + dist[z][y] value = [0 for i in range(len(graph))] root = float('inf') for x in range(len(graph)): for y in range(len(graph)): if dist[x][y] == 
float('inf'): continue value[x] += dist[x][y] * weight[y][1] for x in range(len(graph)): if value[x] <= root: root = value[x] root_index = x print("Root value for the graph is ") print(root_index) print("\n") floydwarshall_helper(graph, root, root_index, weight) if __name__ == '__main__': graph, weight = readfile.readfile("tests/test2_new.txt") start = time.time() floydwarshall(graph, weight) diff = time.time()-start print('\ntotal time: '+ str(diff)+ ' sec') ###Output Root value for the graph is 71 Selected min 1 is : 233 Selected min 2 is : 230 total time: 14.544553756713867 sec
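###Markdown
As a closing sanity check, the 2-median objective itself can be evaluated exhaustively on a tiny hand-made instance where the optimum is unambiguous, and the heuristics above can be compared against it. The sketch below is self-contained: the distance matrix and populations are made up and are not one of the test files used above. Every town is assigned to the closer of the two candidate hospitals and the population-weighted distances are summed.
###Code
from itertools import combinations

# Tiny made-up instance: 4 towns with an all-pairs shortest-path distance matrix and populations.
dist = [
    [0, 2, 9, 4],
    [2, 0, 6, 3],
    [9, 6, 0, 1],
    [4, 3, 1, 0],
]
population = [10, 1, 5, 2]

def two_median_cost(i, j):
    """Total population-weighted distance when hospitals are placed at towns i and j."""
    return sum(p * min(dist[v][i], dist[v][j]) for v, p in enumerate(population))

best_pair = min(combinations(range(len(population)), 2), key=lambda pair: two_median_cost(*pair))
print(best_pair, two_median_cost(*best_pair))  # (0, 2) with cost 4 for this toy instance
###Output
_____no_output_____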
try_scikit_learn.ipynb
###Markdown
[View in Colaboratory](https://colab.research.google.com/github/RXV06021/basicPy/blob/master/try_scikit_learn.ipynb)

Implementing and explaining the scikit-learn machine learning algorithm cheat sheet in Python
http://neuro-educator.com/mlcontentstalbe/
[scikit-learn machine learning algorithm cheat sheet](http://scikit-learn.org/stable/tutorial/machine_learning_map/)

[1] Implementing SGD for classification
http://neuro-educator.com/mlearn1/

* Algorithm used: SGD (stochastic gradient descent)
* Machine learning library used: scikit-learn

Following the [scikit-learn map](http://scikit-learn.org/stable/tutorial/machine_learning_map/):
START → more than 50 samples → predicting a category → labeled data → more than 100k samples → "SGD"

**■ Task**
Using the data for 178 wines, build a classifier that identifies which of three grape cultivars an unknown wine was made from, based on two variables: the wine's color and its amount of proline (a type of amino acid).

**■ Data used**
Labels: 0, 1, 2 (grape cultivar)
Samples: 178 (number of wines)
Variable 1: color of the wine
Variable 2: amount of proline contained in the wine

Step 0: Install the required libraries
###Code
! pip install mlxtend
###Output
Requirement already satisfied: mlxtend in /usr/local/lib/python3.6/dist-packages (0.12.0)
Requirement already satisfied: matplotlib>=1.5.1 in /usr/local/lib/python3.6/dist-packages (from mlxtend) (2.1.2)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from mlxtend) (39.2.0)
Requirement already satisfied: numpy>=1.10.4 in /usr/local/lib/python3.6/dist-packages (from mlxtend) (1.14.3)
Requirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from mlxtend) (0.19.1)
Requirement already satisfied: scipy>=0.17 in /usr/local/lib/python3.6/dist-packages (from mlxtend) (0.19.1)
Requirement already satisfied: pandas>=0.17.1 in /usr/local/lib/python3.6/dist-packages (from mlxtend) (0.22.0)
Requirement already satisfied: six>=1.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.5.1->mlxtend) (1.11.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.5.1->mlxtend) (2.5.3)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.5.1->mlxtend) (2.2.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.5.1->mlxtend) (0.10.0)
Requirement already satisfied: pytz in /usr/local/lib/python3.6/dist-packages (from matplotlib>=1.5.1->mlxtend) (2018.4)
###Markdown
Step 1: Import the libraries
###Code
# Step 1: import the libraries --------------------------------
import numpy as np                # numpy handles arrays and matrices
import pandas as pd               # pandas is the data analysis library
import matplotlib.pyplot as plt   # plotting library
from sklearn import linear_model, metrics, preprocessing, cross_validation  # machine learning library
from mlxtend.plotting import plot_decision_regions  # external library for plotting the learned decision regions
###Output
_____no_output_____
###Markdown
Step 2: Load the Wine dataset
The Wine data are read from https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data. Of these, we use the cultivar (column 0, values 1-3), the color (column 10) and the amount of proline (column 13).
[Details on the Wine data are here](https://archive.ics.uci.edu/ml/datasets/Wine)
###Code
# Step 2: load the Wine dataset --------------------------------
df_wine_all=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
# Use the cultivar (column 0, values 1-3), the color (column 10) and the proline content (column 13)
df_wine=df_wine_all[[0,10,13]]
df_wine.columns = [u'class', u'color', u'proline'] # name the columns
pd.DataFrame(df_wine).head() # run this line to see the first 5 rows of the data
df_wine.describe()
###Output
_____no_output_____
###Markdown
Step 3: Plots
###Code
# Step 3: plot the data ------------------------------------------------------
df_wine.plot(subplots=True, figsize=(9, 9));
plt.legend(loc='best')

# Step 3: scatter plot ----------------------------------------------------------------
x=df_wine["color"]
y=df_wine["proline"]
z=df_wine["class"]-1
plt.scatter(x,y,c=z)
plt.show()

# Step 3: scatter plot using pandas plot ----------------------------------------------
df1 = df_wine[df_wine['class']==1]
df2 = df_wine[df_wine['class']==2]
df3 = df_wine[df_wine['class']==3]
ax1 = df1.plot(kind='scatter', x='color', y='proline', color='red' )
ax2 = df2.plot(kind='scatter', x='color', y='proline', color='green' ,ax=ax1)
df3.plot( kind='scatter', x='color', y='proline', color='orange' ,ax=ax2)
###Output
_____no_output_____
###Markdown
**※ How to use matplotlib.pyplot.scatter**
https://pythondatascience.plavox.info/matplotlib/%E6%95%A3%E5%B8%83%E5%9B%B3
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.scatter.html
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplot.html

matplotlib.pyplot.scatter(x, y, s=20, c=None, marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, verts=None, edgecolors=None, hold=None, data=None, **kwargs)

**※ Main arguments of matplotlib.pyplot.scatter**
x, y: the data to plot
s: marker size (default: 20)
c: a color, or a sequence of values mapped to colors
marker: marker shape (default: 'o' = circle)
cmap: colormap; only used when c is an array of floats
norm: only used when c is an array of floats; a Normalize instance used for scaling
vmin, vmax: minimum and maximum used for normalization; if omitted, the data minimum and maximum are used; if a norm instance is given, vmin and vmax are ignored
alpha: transparency, a value between 0 (transparent) and 1 (opaque)
linewidths: line width
edgecolors: edge color

Step 4: Normalize the data
###Code
# Step 4: standardize the data -------------------------------------------------------
X=df_wine[["color","proline"]]
sc=preprocessing.StandardScaler()
sc.fit(X)
X_std=sc.transform(X)
plt.plot(X_std)

# Create plots with pre-defined labels.
fig, ax = plt.subplots()
ax.plot(X_std[:,0], 'r' , label='color')
ax.plot(X_std[:,1], 'b' , label='proline')
legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')

# Put a nicer background color on the legend.
legend.get_frame().set_facecolor('#00FFCC')
plt.show()
###Output
_____no_output_____
###Markdown
Step 5: Classify with machine learning
Here we build a classifier that uses SGD for linear separation.
###Code
# Step 5: classify with machine learning ---------------------------------------------
clf_result=linear_model.SGDClassifier(loss="hinge") #loss="hinge", loss="log"
###Output
_____no_output_____
###Markdown
Step 6: Use K-fold cross-validation to estimate how well the classifier actually performs
The data are split into K equal parts; one part is used as test data and the remaining K-1 parts as training data, and the accuracy is evaluated. Averaging over the K runs gives the mean accuracy and the standard deviation of the accuracy.
###Code
# Step 6: evaluate performance with K-fold cross-validation ---------------------
scores=cross_validation.cross_val_score(clf_result, X_std, z, cv=10)
print("Mean accuracy = ", scores.mean())
print("Std of accuracy = ", scores.std())

# Step 7: split into training and test data and run ------------------
X_train, X_test, train_label, test_label=cross_validation.train_test_split(X_std,z, test_size=0.1, random_state=0)
clf_result.fit(X_train, train_label)

# compute the accuracy
pre=clf_result.predict(X_test)
ac_score=metrics.accuracy_score(test_label,pre)
print("Accuracy = ",ac_score)

# plot the results
X_train_plot=np.vstack(X_train)
train_label_plot=np.hstack(train_label)
X_test_plot=np.vstack(X_test)
test_label_plot=np.hstack(test_label)
#plot_decision_regions(X_train_plot, train_label_plot, clf=clf_result, res=0.01) # plot the training data
plot_decision_regions(X_test_plot, test_label_plot, clf=clf_result, res=0.01, legend=2) # plot the test data

# Step 8: look at the prediction for an arbitrary data point ------------------
#predicted_label=clf_result.predict([1,-1])
#print("Label of this test point = ", predicted_label)

# Step 9: obtain the equations of the decision planes --------------------------------
print(clf_result.intercept_)
print(clf_result.coef_ )
# coef[0]*x + coef[1]*y + intercept = 0
###Output
[ -9.31998866 -10.91424585  -3.84711508]
[[ -9.69473758  21.8417196 ]
 [-22.46264536 -11.06530575]
 [ 15.74949076 -12.30251889]]
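###Markdown
Since Step 9 exposes `coef_` and `intercept_`, the classifier's decision can be reproduced by hand. The sketch below scores one hypothetical new wine (the raw values 3.0 for color and 1000.0 for proline are made up); the point has to go through the same `StandardScaler` fitted above, and because this `SGDClassifier` is one-vs-rest with classes 0, 1, 2, the class with the largest decision value wins.
###Code
import numpy as np

# A hypothetical new wine given as (color, proline) in the original units.
new_wine = np.array([[3.0, 1000.0]])

# Standardize with the scaler fitted in Step 4 so the point lives in the same space.
new_wine_std = sc.transform(new_wine)

# One-vs-rest decision values: one score per class, computed from coef_ and intercept_.
scores_by_hand = new_wine_std @ clf_result.coef_.T + clf_result.intercept_
print(scores_by_hand)
print("hand-computed class:", np.argmax(scores_by_hand))

# This should agree with the classifier's own prediction.
print("predict() says:", clf_result.predict(new_wine_std))
###Output
_____no_output_____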
code/rediscovery.ipynb
###Markdown Binding energies and rediscovery of zeolitesThis notebook reproduces Fig. S10 from the paper, highlighting classical synthesis routes for the MFI, IFR, and ISV zeolites. ###Code import itertools import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib import cm import seaborn as sns from scipy import stats df = pd.read_csv('../data/binding.csv', index_col=0) ###Output /home/dskoda/.conda/envs/htvs/lib/python3.7/site-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. import pandas.util.testing as tm ###Markdown Defining a few useful functionsBelow, we start defining a few useful functions and parameters to help us plot more eficiently. ###Code def get_literature_markers(in_literature): if in_literature == 1.0: return '^' return 'o' def mscatter(x, y, ax=None, m=None, **kw): import matplotlib.markers as mmarkers ax = ax or plt.gca() sc = ax.scatter(x, y, **kw) if (m is not None) and (len(m)==len(x)): paths = [] for marker in m: if isinstance(marker, mmarkers.MarkerStyle): marker_obj = marker else: marker_obj = mmarkers.MarkerStyle(marker) path = marker_obj.get_path().transformed( marker_obj.get_transform()) paths.append(path) sc.set_paths(paths) return sc grid_kws = {"width_ratios": (0.45, 0.45, .02), "hspace": .3} COLS_EXPORT = ['Zeolite', 'SMILES', 'Templating', 'SCScore', 'Volume (Angstrom3)', 'In literature?'] cmap = 'inferno_r' scatter_kws = { 'linewidths': 0.7, 'edgecolors': 'k', 's': 60, } def plot_osda_annot(ax, d, norm, osdas, color_option, cmap=cmap): for i, sp in osdas.items(): sp_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [sp_data[x]], [sp_data[y]], c=[sp_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (sp_data[x], sp_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ###Output _____no_output_____ ###Markdown Figure S10A: MFI ###Code osdas = { 'a': 'CCC[N+](CCC)(CCC)CCC', } SIZE = 350 LINEWIDTH = 2.5 color_option = 'Competition (OSDA)' zeolite = 'MFI' grid_kws = {"width_ratios": (0.45, 0.45, .02), "hspace": .3} fig, ax_fig = plt.subplots(1, 3, figsize=(10, 4), gridspec_kw=grid_kws) y = 'Templating' YLIM = [16, 20.00001] SCS_LIMS = [0.9, 3.0] VOL_LIMS = [100, 300] S = 80 SIZE = 350 LINEWIDTH = 2.5 d = df.loc[ (df['Zeolite'] == zeolite) & (~df['SMILES'].str.contains('O')) & (df['SMILES'].str.contains('+', regex=False)) & (df[y] > YLIM[0]) & (df[y] < YLIM[1]) & (df['SCScore'] > SCS_LIMS[0]) & (df['SCScore'] < SCS_LIMS[1]) & (df['Volume (Angstrom3)'] > VOL_LIMS[0]) & (df['Volume (Angstrom3)'] < VOL_LIMS[1]) ].sort_values('Templating', ascending=False) color_values = (d[color_option]).values.clip(min=-4.5, max=4.5) norm = mpl.colors.Normalize(vmin=-4.5, vmax=4.5) cmap = 'coolwarm_r' # color = cm.coolwarm_r(norm(color_values)) markers = d['In literature?'].apply(get_literature_markers).values.tolist() ax = ax_fig[0] x = 'SCScore' scat2 = mscatter( d[x], d[y], ax=ax, c=color_values, m=markers, norm=norm, s=S, linewidths=0.7, edgecolors='k', cmap=cmap, ) ax.set_xlabel(x) ax.set_ylabel(y) ax.set_xlim(SCS_LIMS) ax.set_ylim(YLIM) ax.set_yticks(np.arange(*YLIM)) for i, sp in osdas.items(): spiro_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [spiro_data[x]], [spiro_data[y]], c=[spiro_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (spiro_data[x], 
spiro_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ax = ax_fig[1] x = 'Volume (Angstrom3)' scat1 = mscatter( d[x], d[y], ax=ax, c=color_values, m=markers, s=S, norm=norm, linewidths=0.7, edgecolors='k', cmap=cmap, ) ax.set_xlabel(x) ax.set_xlim([125, 250]) ax.set_ylim(YLIM) ax.set_yticks(np.arange(*YLIM)) ax.set_yticklabels([]) for i, sp in osdas.items(): spiro_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [spiro_data[x]], [spiro_data[y]], c=[spiro_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (spiro_data[x], spiro_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ax = ax_fig[2] cbar = fig.colorbar(scat1, cax=ax) cbar.set_label(color_option) cbar.set_ticks(np.arange(-4.5, 4.6, 1.5)) plt.show() d[COLS_EXPORT].to_csv('../data/figS10/figS10A.csv') ###Output _____no_output_____ ###Markdown Figure S10B: IFR ###Code osdas = { 'b': 'c1ccc(C[N+]23CCC(CC2)CC3)cc1', } SIZE = 350 LINEWIDTH = 2.5 color_option = 'Competition (SiO2)' zeolite = 'IFR' grid_kws = {"width_ratios": (0.45, 0.45, .02), "hspace": .3} fig, ax_fig = plt.subplots(1, 3, figsize=(10, 4), gridspec_kw=grid_kws) y = 'Templating' YLIM = [15, 20.00001] SCS_LIMS = [0.9, 3.2] VOL_LIMS = [150, 250] S = 80 SIZE = 350 LINEWIDTH = 2.5 d = df.loc[ (df['Zeolite'] == zeolite) & (~df['SMILES'].str.contains('O')) & (df['SMILES'].str.contains('+', regex=False)) & (df[y] > YLIM[0]) & (df[y] < YLIM[1]) & (df['SCScore'] > SCS_LIMS[0]) & (df['SCScore'] < SCS_LIMS[1]) & (df['Volume (Angstrom3)'] > VOL_LIMS[0]) & (df['Volume (Angstrom3)'] < VOL_LIMS[1]) ].sort_values('Templating', ascending=False) color_values = (d[color_option]).values.clip(min=-4.5, max=4.5) norm = mpl.colors.Normalize(vmin=-4.5, vmax=4.5) cmap = 'coolwarm_r' # color = cm.coolwarm_r(norm(color_values)) markers = d['In literature?'].apply(get_literature_markers).values.tolist() ax = ax_fig[0] x = 'SCScore' scat2 = mscatter( d[x], d[y], ax=ax, c=color_values, m=markers, norm=norm, s=S, linewidths=0.7, edgecolors='k', cmap=cmap, ) ax.set_xlabel(x) ax.set_ylabel(y) ax.set_xlim(SCS_LIMS) ax.set_ylim(YLIM) ax.set_yticks(np.arange(*YLIM)) for i, sp in osdas.items(): spiro_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [spiro_data[x]], [spiro_data[y]], c=[spiro_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (spiro_data[x], spiro_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ax = ax_fig[1] x = 'Volume (Angstrom3)' scat1 = mscatter( d[x], d[y], ax=ax, c=color_values, m=markers, s=S, norm=norm, linewidths=0.7, edgecolors='k', cmap=cmap, ) ax.set_xlabel(x) ax.set_xlim(VOL_LIMS) ax.set_ylim(YLIM) ax.set_yticks(np.arange(*YLIM)) ax.set_yticklabels([]) for i, sp in osdas.items(): spiro_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [spiro_data[x]], [spiro_data[y]], c=[spiro_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (spiro_data[x], spiro_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ax = ax_fig[2] cbar = fig.colorbar(scat1, cax=ax) cbar.set_label(color_option) cbar.set_ticks(np.arange(-4.5, 4.6, 1.5)) plt.show() d[COLS_EXPORT].to_csv('../data/figS10/figS10B.csv') ###Output _____no_output_____ ###Markdown Figure S10C: ISV ###Code osdas = { 'c': 'CC1(C)C[C@@H]2C[C@](C)(C1)C[N+]21CCCCC1', 'd': 'CC(C)[C@]12C=C[C@](C)(CC1)[C@@H]1C[N+](C)(C)C[C@H]12', } SIZE = 350 LINEWIDTH = 2.5 color_option = 
'Competition (SiO2)' zeolite = 'ISV' grid_kws = {"width_ratios": (0.45, 0.45, .02), "hspace": .3} fig, ax_fig = plt.subplots(1, 3, figsize=(10, 4), gridspec_kw=grid_kws) y = 'Templating' YLIM = [15, 19.00001] SCS_LIMS = [0.9, 3.2] VOL_LIMS = [150, 400] S = 80 SIZE = 350 LINEWIDTH = 2.5 d = df.loc[ (df['Zeolite'] == zeolite) & (~df['SMILES'].str.contains('O')) & (df['SMILES'].str.contains('+', regex=False)) & (df[y] > YLIM[0]) & (df[y] < YLIM[1]) & (df['SCScore'] > SCS_LIMS[0]) & (df['SCScore'] < SCS_LIMS[1]) & (df['Volume (Angstrom3)'] > VOL_LIMS[0]) & (df['Volume (Angstrom3)'] < VOL_LIMS[1]) ].sort_values('Templating', ascending=False) color_values = (d[color_option]).values.clip(min=-4.5, max=4.5) norm = mpl.colors.Normalize(vmin=-4.5, vmax=4.5) cmap = 'coolwarm_r' # color = cm.coolwarm_r(norm(color_values)) markers = d['In literature?'].apply(get_literature_markers).values.tolist() ax = ax_fig[0] x = 'SCScore' scat2 = mscatter( d[x], d[y], ax=ax, c=color_values, m=markers, norm=norm, s=S, linewidths=0.7, edgecolors='k', cmap=cmap, ) ax.set_xlabel(x) ax.set_ylabel(y) ax.set_xlim(SCS_LIMS) ax.set_ylim(YLIM) ax.set_yticks(np.arange(*YLIM)) for i, sp in osdas.items(): spiro_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [spiro_data[x]], [spiro_data[y]], c=[spiro_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (spiro_data[x], spiro_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ax = ax_fig[1] x = 'Volume (Angstrom3)' scat1 = mscatter( d[x], d[y], ax=ax, c=color_values, m=markers, s=S, norm=norm, linewidths=0.7, edgecolors='k', cmap=cmap, ) ax.set_xlabel(x) ax.set_xlim(VOL_LIMS) ax.set_ylim(YLIM) ax.set_yticks(np.arange(*YLIM)) ax.set_yticklabels([]) for i, sp in osdas.items(): spiro_data = d.loc[d['SMILES'] == sp].iloc[0] ax.scatter( [spiro_data[x]], [spiro_data[y]], c=[spiro_data[color_option]], s=SIZE, norm=norm, linewidths=LINEWIDTH, edgecolors='k', cmap=cmap, marker='s', ) ax.annotate( str(i), (spiro_data[x], spiro_data[y]), zorder=3, ha='center', va='center', fontsize=12, ) ax = ax_fig[2] cbar = fig.colorbar(scat1, cax=ax) cbar.set_label(color_option) cbar.set_ticks(np.arange(-4.5, 4.6, 1.5)) plt.show() d[COLS_EXPORT].to_csv('../data/figS10/figS10C.csv') ###Output _____no_output_____
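###Markdown
The MFI, IFR and ISV cells above repeat the same filtering of `df` with different thresholds. A small helper like the sketch below (the function name and signature are ours, not part of the original analysis code) would let the three panels share one code path.
###Code
def select_binding_data(zeolite, ylim, scs_lims, vol_lims, y='Templating'):
    """Filter the binding table the same way the three figure cells above do."""
    return df.loc[
        (df['Zeolite'] == zeolite)
        & (~df['SMILES'].str.contains('O'))
        & (df['SMILES'].str.contains('+', regex=False))
        & (df[y] > ylim[0]) & (df[y] < ylim[1])
        & (df['SCScore'] > scs_lims[0]) & (df['SCScore'] < scs_lims[1])
        & (df['Volume (Angstrom3)'] > vol_lims[0]) & (df['Volume (Angstrom3)'] < vol_lims[1])
    ].sort_values(y, ascending=False)

# Example: the same frame used for the MFI panel (Fig. S10A) above.
d_mfi = select_binding_data('MFI', ylim=[16, 20.00001], scs_lims=[0.9, 3.0], vol_lims=[100, 300])
d_mfi[COLS_EXPORT].head()
###Output
_____no_output_____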
find-cities.ipynb
###Markdown
- NHGISST - State FIPS Code
- NHGISCTY - County FIPS Code
- GISJOIN2 - Tract ID
###Code
data.head()
codes = np.unique(data.NHGISST.values)
codes
# import matplotlib
import descartes
%matplotlib inline
print(np.unique(data[data.NHGISST == codes[14]].NHGISCTY.values))
data[data.NHGISST == '060']
print(data[data.NHGISCTY == codes[16]].GISJOIN2)
codes[16]
###Output
_____no_output_____
###Markdown
Manually verify that the tract crosswalk matches up:
###Code
import matplotlib.pyplot as plt

polygon = data[data.GISJOIN2 == '18009700136'].geometry.values[0]
polygon10 = data10[data10.GEOID10 == "18097320301"].geometry.values[0].centroid
polygon10_2 = data10[data10.GEOID10 == "18097320304"].geometry.values[0].centroid
polygon10_3 = data10[data10.GEOID10 == "18097320500"].geometry.values[0].centroid

x, y = polygon.exterior.xy
x10, y10 = polygon10.x, polygon10.y
x10_2, y10_2 = polygon10_2.x, polygon10_2.y
x10_3, y10_3 = polygon10_3.x, polygon10_3.y

fig, ax = plt.subplots()
ax.plot(x, y, c='r', alpha=0.5)
ax.scatter(x10, y10, c='blue')
ax.scatter(x10_2, y10_2, c='blue')
ax.scatter(x10_3, y10_3, c='blue')

centers_of_population = gpd.read_file("./nhgis0002_shape/nhgis0002_shape/nhgis0002_shapefile_cenpop2010_us_tract_cenpop_2010/US_tract_cenpop_2010.shp")
centers_of_population
###Output
_____no_output_____
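###Markdown
The crosswalk check above is purely visual. A quick programmatic version of the same check is sketched below: if the parent tract from the earlier vintage really maps onto those 2010 tracts, their centroids should fall inside (or very close to) the parent polygon. The small buffer is an arbitrary tolerance for boundary and projection mismatches, and the IDs are the same ones used above.
###Code
parent = data[data.GISJOIN2 == '18009700136'].geometry.values[0]
children = ["18097320301", "18097320304", "18097320500"]

for geoid in children:
    centroid = data10[data10.GEOID10 == geoid].geometry.values[0].centroid
    # buffer() adds a small tolerance around the parent tract boundary
    print(geoid, parent.buffer(0.001).contains(centroid))
###Output
_____no_output_____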
notebooks/Confidence_Intervals.ipynb
###Markdown ```SPDX-License-Identifier: Apache-2.0Copyright (C) 2021, Arm Limited and contributorsLicensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, softwaredistributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions andlimitations under the License.``` Exploring confidence intervals from model's outputIn this notebook, we will show an example on how to visualise confidence intervals in CIFAR10 outputs, for a stochastic model. ###Code %matplotlib inline import sys sys.path.append('..') from PIL import Image #import os #os.environ["CUDA_VISIBLE_DEVICES"]="0" from datasets import cifar10 from models.resnet20 import model, trainer, parser, inferencer, benchmarker, converter from common import mcdo, brancher, utils import numpy as np import matplotlib.pyplot as plt import scipy.special as sc CIFAR10_LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] ###Output _____no_output_____ ###Markdown Running infence on model and understanding outputs ###Code ## Adapted from `inference()` in `ResNet20.py` # Loading pre-trained model my_model = model.Model() my_model.load('../experiment_models/tf/full_mcdo.h5') # Preparing CIFAR10 dataset with batch size of 1 # (batch size of 1 will make it easier to access individual images later in this notebook, but for better speed you should increase this value) inputs = cifar10.CIFAR10() inputs.configure(1) inputs.prepare() # Run inference on model to get logits inference = inferencer.Inferencer() inference.configure(inputs.get_test_split(), 1, my_model.model_format) predictions, true_labels = inference.run(my_model.model) # Model has 5 branches, corresponding to different samples from the model: (num_branches, num_elements_in_dataset, num_classes) print(f'Shape of `predictions`: {predictions.shape}') print(f'Shape of `true_labels`: {true_labels.shape}') # Applying softmax() on logits output softmax_predictions = sc.softmax(predictions, axis=-1) softmax_predictions.shape print(f'Shape of `softmax_predictions`: {softmax_predictions.shape}') # We provide an off-the-shelf utility function to give useful outputs from the samples # Here, confidence intervals will be calculated for a 95% confidence interval mean_preds, lower_lim, upper_lim, std_preds = utils.calculate_output(predictions, confidence_interval=0.95) # Uncomment and execute this cell to retrieve its documentation #utils.calculate_output? # Checking shapes print(f'Shape of `mean_preds`: {mean_preds.shape}') print(f'Shape of `lower_lim`: {lower_lim.shape}') print(f'Shape of `upper_lim`: {upper_lim.shape}') print(f'Shape of `std_preds`: {std_preds.shape}') ###Output Shape of `mean_preds`: (10000, 10) Shape of `lower_lim`: (10000, 10) Shape of `upper_lim`: (10000, 10) Shape of `std_preds`: (10000, 10) ###Markdown Choosing input examples to analyseWe will be exploring the following 4 images from CIFAR10. 
###Code pil_images = {} pil_labels = {} for i, elem in enumerate(inputs.ds_test): if i in [0, 49, 605, 1009]: print(f'## Input example {i}') img = Image.fromarray((elem[0][0].numpy()* 255).astype(np.uint8), 'RGB') pil_images[i] = img pil_labels[i] = CIFAR10_LABELS[true_labels[i]] img = img.resize((100,100), Image.ANTIALIAS) display(img) ###Output ## Input example 0 ###Markdown Visually inspecting confidence intervalsThe following plots allow us to visually inspect the sampled softmax scores from the stochastic model, as well as the corresponding confidence intervals, for 4 representative examples in CIFAR10.It is possible to see that for the CIFAR10 example with ID 1009 (`input_example=1009`), we have a very high level of uncertainty for classes `airplane`, `dog`, and `truck` because their confidence intervals are very wide (e.g. ranging from around `0.1` to `0.9` for `dog`); on the contrary, for the other classes, the confidence intervals are narrow and mostly close to zero. This indicates that the model is in doubt on whether the output for this example 1009 is `airplane`, `dog`, or `truck`, but not any other class. This level of uncertainty is illustrated by the plot in the middle (i.e. `Outputs for input_example=1009`), as we see different samples having an opposite behaviour when identifying which of `dog` or `truck` should be the correct output. This highlights the utility of uncertainty estimations for what could be an otherwise over-confident model.For `input_example=0` it is very clear that the model thinks the input correspond to class `horse`, as the confidence interval is narrow and convering very high values above `0.9`, while all the other classes are very close to zero. ###Code # Utily function for plotting def plot_sampled_scores(example_id): _, axs = plt.subplots(1, 3, figsize=(15, 5)) # Plotting the corresponding CIFAR10 image axs[0].use_sticky_edges = False axs[0].imshow(np.asarray(pil_images[example_id])) axs[0].margins(0.5) axs[0].axis('off') axs[0].set_title(f'input_example={example_id} \n(gt_label={pil_labels[example_id]})') # Outputted sampled for i in range(5): axs[1].scatter(CIFAR10_LABELS, softmax_predictions[i, example_id, :], label=f'Sample {i+1}') axs[1].legend() axs[1].set_ylim(0, 1) axs[1].set_xlabel('Class') axs[1].set_ylabel('Softmax Score') axs[1].set_title(f'Outputs for input_example={example_id}') for tick in axs[1].get_xticklabels(): tick.set_rotation(45) # Right plot for confidence intervals axs[2].set_title(f'Confidence intervals for input_example={example_id}') axs[2].set_ylim(0, 1) for lower, upper, x in zip(lower_lim[example_id], upper_lim[example_id], CIFAR10_LABELS): axs[2].plot((x, x), (lower, upper), 'b-', zorder=1) axs[2].scatter(CIFAR10_LABELS, lower_lim[example_id], marker='_', c='blue', zorder=2) axs[2].scatter(CIFAR10_LABELS, upper_lim[example_id], marker='_', c='blue', zorder=2) axs[2].scatter(CIFAR10_LABELS, mean_preds[example_id], marker='s', c='red', label='Mean', zorder=3) for tick in axs[2].get_xticklabels(): tick.set_rotation(45) axs[2].legend() plt.tight_layout() plt.show() plt.close() for elem in [0, 49, 605, 1009]: plot_sampled_scores(elem) ###Output _____no_output_____ ###Markdown Checking images from another datasetWe manually downloaded 4 examples from CIFAR100 to evaluate on the model trained on CIFAR10. We will compare how the outputs compare between the stochastic and corresponding deterministic model (i.e. without dropout activated at inference time). 
###Code # Loading deterministic model deterministic_model = model.Model() deterministic_model.load('../experiment_models/tf/vanilla.h5') def calculate_and_plot(output_arr, ax, title, mean_label): mean_ood, lower_ood, upper_ood, std_ood = utils.calculate_output(output_arr, confidence_interval=0.95) for lower, upper, x in zip(lower_ood[0, :], upper_ood[0, :], CIFAR10_LABELS): ax.plot((x, x), (lower, upper), 'b-', zorder=1) ax.scatter(CIFAR10_LABELS, lower_ood[0, :], marker='_', c='blue', zorder=2) ax.scatter(CIFAR10_LABELS, upper_ood[0, :], marker='_', c='blue', zorder=2) ax.scatter(CIFAR10_LABELS, mean_ood[0, :], marker='s', c='red', label=mean_label, zorder=3) for tick in ax.get_xticklabels(): tick.set_rotation(45) ax.legend() ax.set_title(title) #plt.xticks(rotation=45) ax.set_ylim(0, 1) def run_inference_and_plot(model_stochastic, model_deterministic, image_arr): _, axs = plt.subplots(1, 3, figsize=(15, 5)) # Plotting the corresponding CIFAR100 image axs[0].use_sticky_edges = False axs[0].imshow(image_arr[0]) axs[0].margins(0.5) axs[0].axis('off') #axs[0].set_title(f'input_example={example_id}') stochastic_output = np.array(model_stochastic.predict(image_arr)) deterministic_output = np.array(model_deterministic.predict(image_arr))[None] calculate_and_plot(stochastic_output, axs[1], 'Confidence intervals for stochastic model', 'Mean') calculate_and_plot(deterministic_output, axs[2], 'Output from deterministic model', 'Output') plt.tight_layout() plt.show() plt.close() run_inference_and_plot(my_model.model, deterministic_model.model, np.asarray(Image.open('cifar100_lion.png'))[None] / 255.) run_inference_and_plot(my_model.model, deterministic_model.model, np.asarray(Image.open('cifar100_baby.png'))[None] / 255.) run_inference_and_plot(my_model.model, deterministic_model.model, np.asarray(Image.open('cifar100_mountain.png'))[None] / 255.) run_inference_and_plot(my_model.model, deterministic_model.model, np.asarray(Image.open('cifar100_pear.png'))[None] / 255.) ###Output _____no_output_____
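###Markdown
For reference, an interval like the one `utils.calculate_output` returned earlier can be formed directly from the sampled softmax scores with a normal approximation across the sample axis. The sketch below is illustrative only and may differ in detail from the library's implementation (for example, it uses a z-based interval on the mean of the five samples).
###Code
import numpy as np
import scipy.stats as st

samples = softmax_predictions          # shape: (num_samples, num_images, num_classes)
n_samples = samples.shape[0]

mean = samples.mean(axis=0)
std = samples.std(axis=0, ddof=1)

# 95% normal-approximation interval on the mean of the MC-dropout samples.
z = st.norm.ppf(0.5 + 0.95 / 2)        # ~1.96
half_width = z * std / np.sqrt(n_samples)
lower, upper = mean - half_width, mean + half_width

print(mean.shape, lower.shape, upper.shape)  # each (10000, 10)
###Output
_____no_output_____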
workshop/nipype_tutorial/notebooks/basic_data_output.ipynb
###Markdown Data OutputSimilarly important to data input is data output. Using a data output module allows you to restructure and rename computed output and to spatially differentiate relevant output files from the temporary computed intermediate files in the working directory. Nipype provides the following modules to handle data stream output: DataSink JSONFileSink MySQLSink SQLiteSink XNATSinkThis tutorial covers only `DataSink`. For the rest, see the section [``interfaces.io``](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.html) on the official homepage. DataSinkA workflow working directory is like a **cache**. It contains not only the outputs of various processing stages, it also contains various extraneous information such as execution reports, hashfiles determining the input state of processes. All of this is embedded in a hierarchical structure that reflects the iterables that have been used in the workflow. This makes navigating the working directory a not so pleasant experience. And typically the user is interested in preserving only a small percentage of these outputs. The [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) interface can be used to extract components from this `cache` and store it at a different location. For XNAT-based storage, see [XNATSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmlnipype-interfaces-io-xnatsink).Unlike other interfaces, a [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink)'s inputs are defined and created by using the workflow connect statement. Currently disconnecting an input from the [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) does not remove that connection port.Let's assume we have the following workflow.The following code segment defines the [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) node and sets the `base_directory` in which all outputs will be stored. The `container` input creates a subdirectory within the `base_directory`. If you are iterating a workflow over subjects, it may be useful to save it within a folder with the subject id. ```pythondatasink = pe.Node(nio.DataSink(), name='sinker')datasink.inputs.base_directory = '/path/to/output'workflow.connect(inputnode, 'subject_id', datasink, 'container')``` If we wanted to save the realigned files and the realignment parameters to the same place the most intuitive option would be: ```pythonworkflow.connect(realigner, 'realigned_files', datasink, 'motion')workflow.connect(realigner, 'realignment_parameters', datasink, 'motion')``` However, this will not work as only one connection is allowed per input port. So we need to create a second port. We can store the files in a separate folder. ```pythonworkflow.connect(realigner, 'realigned_files', datasink, 'motion')workflow.connect(realigner, 'realignment_parameters', datasink, 'motion.par')``` The period (.) indicates that a subfolder called par should be created. But if we wanted to store it in the same folder as the realigned files, we would use the `.@` syntax. The @ tells the [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) interface to not create the subfolder. 
This will allow us to create different named input ports for [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) and allow the user to store the files in the same folder. ```pythonworkflow.connect(realigner, 'realigned_files', datasink, 'motion')workflow.connect(realigner, 'realignment_parameters', datasink, 'motion.@par')``` The syntax for the input port of [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) takes the following form: string[[.[@]]string[[.[@]]string] ...] where parts between paired [] are optional. MapNodeIn order to use [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) inside a MapNode, its inputs have to be defined inside the constructor using the `infields` keyword arg. ParameterizationAs discussed in [Iterables](basic_iteration.ipynb), one can run a workflow iterating over various inputs using the iterables attribute of nodes. This means that a given workflow can have multiple outputs depending on how many iterables are there. Iterables create working directory subfolders such as `_iterable_name_value`. The `parameterization` input parameter controls whether the data stored using [DataSink](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.htmldatasink) is in a folder structure that contains this iterable information or not. It is generally recommended to set this to `True` when using multiple nested iterables. SubstitutionsThe ``substitutions`` and ``regexp_substitutions`` inputs allow users to modify the output destination path and name of a file. Substitutions are a list of 2-tuples and are carried out in the order in which they were entered. Assuming that the output path of a file is: /root/container/_variable_1/file_subject_realigned.niiwe can use substitutions to clean up the output path.```pythondatasink.inputs.substitutions = [('_variable', 'variable'), ('file_subject_', '')]```This will rewrite the file as: /root/container/variable_1/realigned.niiNote: In order to figure out which substitutions are needed it is often useful to run the workflow on a limited set of iterables and then determine the substitutions. Realistic Example PreparationBefore we can use `DataSink` we first need to run a workflow. For this purpose, let's create a very short preprocessing workflow that realigns and smooths one functional image of one subject. First, let's create a `SelectFiles` node. For an explanation of this step, see the [Data Input](basic_data_input.ipynb) tutorial. ###Code from nipype import SelectFiles, Node # Create SelectFiles node templates={'func': '{subject}/{session}/func/{subject}_{session}_task-fingerfootlips_bold.nii.gz'} sf = Node(SelectFiles(templates), name='selectfiles') sf.inputs.base_directory = '/data/ds000114' sf.inputs.subject = 'sub-01' sf.inputs.session = 'ses-test' ###Output _____no_output_____ ###Markdown Second, let's create the motion correction and smoothing node. For an explanation about this step, see the [Nodes](basic_nodes.ipynb) and [Interfaces](basic_interfaces.ipynb) tutorial. ###Code from nipype.interfaces.fsl import MCFLIRT, IsotropicSmooth # Create Motion Correction Node mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True), name='mcflirt') # Create Smoothing node smooth = Node(IsotropicSmooth(fwhm=4), name='smooth') ###Output _____no_output_____ ###Markdown Third, let's create the workflow that will contain those three nodes. 
For an explanation about this step, see the [Workflow](basic_workflow.ipynb) tutorial. ###Code from nipype import Workflow from os.path import abspath # Create a preprocessing workflow wf = Workflow(name="preprocWF") wf.base_dir = '/output/working_dir' # Connect the three nodes to each other wf.connect([(sf, mcflirt, [("func", "in_file")]), (mcflirt, smooth, [("out_file", "in_file")])]) ###Output _____no_output_____ ###Markdown Now that everything is set up, let's run the preprocessing workflow. ###Code wf.run() ###Output _____no_output_____ ###Markdown After the execution of the workflow we have all the data hidden in the working directory `'working_dir'`. Let's take a closer look at the content of this folder: ###Code ! tree /output/working_dir/preprocWF ###Output _____no_output_____ ###Markdown As we can see, there is way too much content that we might not really care about. To relocate and rename all the files that are relevant to you, you can use `DataSink`. How to use `DataSink``DataSink` is Nipype's standard output module to restructure your output files. It allows you to relocate and rename files that you deem relevant.Based on the preprocessing pipeline above, let's say we want to keep the smoothed functional images as well as the motion correction parameters. To do this, we first need to create the `DataSink` object. ###Code from nipype.interfaces.io import DataSink # Create DataSink object sinker = Node(DataSink(), name='sinker') # Name of the output folder sinker.inputs.base_directory = '/output/working_dir/preprocWF_output' # Connect DataSink with the relevant nodes wf.connect([(smooth, sinker, [('out_file', 'in_file')]), (mcflirt, sinker, [('mean_img', 'mean_img'), ('par_file', 'par_file')]), ]) wf.run() ###Output _____no_output_____ ###Markdown Let's take a look at the `output` folder: ###Code ! tree /output/working_dir/preprocWF_output ###Output _____no_output_____ ###Markdown This looks nice. It is what we asked it to do. But having a specific output folder for each individual output file might be suboptimal. So let's change the code above to save the output in one folder, which we will call `'preproc'`.For this we can use the same code as above. We only have to change the connection part: ###Code wf.connect([(smooth, sinker, [('out_file', 'preproc.@in_file')]), (mcflirt, sinker, [('mean_img', 'preproc.@mean_img'), ('par_file', 'preproc.@par_file')]), ]) wf.run() ###Output _____no_output_____ ###Markdown Let's take a look at the new output folder structure: ###Code ! tree /output/working_dir/preprocWF_output ###Output _____no_output_____ ###Markdown This is already much better. But what if you want to rename the output files to represent something a bit more readable. For this `DataSink` has the `substitution` input field.For example, let's assume we want to get rid of the string `'task-fingerfootlips'` and `'bold_mcf'` and that we want to rename the mean file, as well as adapt the file ending of the motion parameter file: ###Code # Define substitution strings substitutions = [('_task-fingerfootlips', ''), ("_ses-test", ""), ('_bold_mcf', ''), ('.nii.gz_mean_reg', '_mean'), ('.nii.gz.par', '.par')] # Feed the substitution strings to the DataSink node sinker.inputs.substitutions = substitutions # Run the workflow again with the substitutions in place wf.run() ###Output _____no_output_____ ###Markdown Now, let's take a final look at the output folder: ###Code ! tree /output/working_dir/preprocWF_output ###Output _____no_output_____ ###Markdown Cool, much clearer filenames! 
Exercise 1Create a simple workflow for skull-stripping with FSL: the first node should use the `BET` interface and the second node will be a ``DataSink``. Test two methods of connecting the nodes and check the content of the output directory. ###Code # write your solution here from nipype import Node, Workflow from nipype.interfaces.io import DataSink from nipype.interfaces.fsl import BET # Skullstrip process ex1_skullstrip = Node(BET(mask=True), name="ex1_skullstrip") ex1_skullstrip.inputs.in_file = "/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz" # Create DataSink node ex1_sinker = Node(DataSink(), name='ex1_sinker') ex1_sinker.inputs.base_directory = '/output/working_dir/ex1_output' # and a workflow ex1_wf = Workflow(name="ex1", base_dir = '/output/working_dir') # let's try the first method of connecting the BET node to the DataSink node ex1_wf.connect([(ex1_skullstrip, ex1_sinker, [('mask_file', 'mask_file'), ('out_file', 'out_file')]), ]) ex1_wf.run() # and we can check our sinker directory ! tree /output/working_dir/ex1_output # now we can try the other method of connecting the node to DataSink ex1_wf.connect([(ex1_skullstrip, ex1_sinker, [('mask_file', 'bet.@mask_file'), ('out_file', 'bet.@out_file')]), ]) ex1_wf.run() # and check the content of the output directory (you should see a new `bet` subdirectory with both files) ! tree /output/working_dir/ex1_output ###Output _____no_output_____
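###Markdown Besides plain string `substitutions`, `DataSink` also accepts `regexp_substitutions` for renaming patterns that vary across iterables. The sketch below is illustrative only: the regular expressions are assumptions about folder names like those produced by iterables, not patterns taken from the output trees above. ###Code
# A sketch of regular-expression based renaming with DataSink.
# The patterns are hypothetical examples; adapt them to your working-directory layout.
from nipype import Node
from nipype.interfaces.io import DataSink

regexp_sinker = Node(DataSink(), name='regexp_sinker')
regexp_sinker.inputs.base_directory = '/output/working_dir/preprocWF_output'
regexp_sinker.inputs.regexp_substitutions = [
    (r'_subject_id_(sub-\d+)', r'\1'),  # keep only the subject label from an iterable folder
    (r'_fwhm_(\d+)', r'fwhm-\1'),       # hypothetical smoothing iterable
    (r'//+', r'/'),                     # collapse accidental double slashes
]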
4_Clasification_DigitRecognizer/3_DL_Multi_Layer_CNN_for_DigitRecognizer.ipynb
###Markdown Digit RecognizerLearn computer vision fundamentals with the famous MNIST dathttps://www.kaggle.com/c/digit-recognizer Competition DescriptionMNIST ("Modified National Institute of Standards and Technology") is the de facto “hello world” dataset of computer vision. Since its release in 1999, this classic dataset of handwritten images has served as the basis for benchmarking classification algorithms. As new machine learning techniques emerge, MNIST remains a reliable resource for researchers and learners alike.In this competition, your goal is to correctly identify digits from a dataset of tens of thousands of handwritten images. We’ve curated a set of tutorial-style kernels which cover everything from regression to neural networks. We encourage you to experiment with different algorithms to learn first-hand what works well and how techniques compare. Practice SkillsComputer vision fundamentals including simple neural networksClassification methods such as SVM and K-nearest neighbors Acknowledgements More details about the dataset, including algorithms that have been tried on it and their levels of success, can be found at http://yann.lecun.com/exdb/mnist/index.html. The dataset is made available under a Creative Commons Attribution-Share Alike 3.0 license. ###Code import pandas as pd import math import numpy as np import matplotlib.pyplot as plt, matplotlib.image as mpimg from sklearn.model_selection import train_test_split import tensorflow as tf %matplotlib inline from tensorflow import keras from tensorflow.keras import models from tensorflow.keras import losses,optimizers,metrics from tensorflow.keras import layers ###Output _____no_output_____ ###Markdown Data Preparation ###Code from google.colab import drive drive.mount('/content/gdrive') labeled_images = pd.read_csv('gdrive/My Drive/dataML/train.csv') #labeled_images = pd.read_csv('train.csv') images = labeled_images.iloc[:,1:] labels = labeled_images.iloc[:,:1] train_images, test_images,train_labels, test_labels = train_test_split(images, labels, test_size=0.01) print(train_images.shape) print(train_labels.shape) print(test_images.shape) print(test_labels.shape) ###Output (41580, 784) (41580, 1) (420, 784) (420, 1) ###Markdown Keras convert the data to the right type ###Code x_train = train_images.values.reshape(train_images.shape[0],28,28,1)/255 x_test = test_images.values.reshape(test_images.shape[0],28,28,1)/255 y_train = train_labels.values y_test = test_labels.values plt.imshow(x_train[12].squeeze()) x_train.shape ###Output _____no_output_____ ###Markdown convert the data to the right type convert class vectors to binary class matrices - this is for use in the categorical_crossentropy loss below ###Code y_train = keras.utils.to_categorical(y_train) y_test = keras.utils.to_categorical(y_test) ###Output _____no_output_____ ###Markdown Creating the Model ###Code model = models.Sequential() model.add(layers.Conv2D(filters = 12, kernel_size=(6,6), strides=(1,1), padding = 'same', activation = 'relu', input_shape = (28,28,1))) model.add(layers.Conv2D(filters = 24,kernel_size=(5,5),strides=(2,2), padding = 'same', activation = 'relu')) model.add(layers.Conv2D(filters = 48,kernel_size=(4,4),strides=(2,2), padding = 'same', activation = 'relu')) model.add(layers.Flatten()) model.add(layers.Dense(units=200, activation='relu')) model.add(layers.Dropout(0.25)) model.add(layers.Dense(units=10, activation='softmax')) model.summary() adam = keras.optimizers.Adam(lr = 0.0001) 
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=adam, metrics=['accuracy']) H = model.fit(x_train, y_train, batch_size=100, epochs=100, verbose=1, validation_data=(x_test, y_test)) H.history.keys() plt.plot(H.history['acc']) plt.plot(H.history['val_acc'],'r') plt.plot(H.history['loss']) plt.plot(H.history['val_loss'],'r') ###Output _____no_output_____ ###Markdown Predict ###Code unlabeled_images_test = pd.read_csv('gdrive/My Drive/dataML/test.csv') #unlabeled_images_test = pd.read_csv('test.csv') X_unlabeled = unlabeled_images_test.values.reshape(unlabeled_images_test.shape[0],28,28,1)/255 y_pred = model.predict(X_unlabeled) y_label = np.argmax(y_pred, axis=1) ###Output _____no_output_____ ###Markdown Save csv ###Code imageId = np.arange(1,y_label.shape[0]+1).tolist() prediction_pd = pd.DataFrame({'ImageId':imageId, 'Label':y_label}) prediction_pd.to_csv('gdrive/My Drive/dataML/out_cnn08.csv',sep = ',', index = False) ###Output _____no_output_____ ###Markdown Tensorflow Helper functions for batch learning ###Code def one_hot_encode(vec, vals=10): ''' For use to one-hot encode the 10- possible labels ''' n = len(vec) out = np.zeros((n, vals)) out[range(n), vec] = 1 return out class CifarHelper(): def __init__(self): self.i = 0 # Intialize some empty variables for later on self.training_images = None self.training_labels = None self.test_images = None self.test_labels = None def set_up_images(self): print("Setting Up Training Images and Labels") # Vertically stacks the training images self.training_images = train_images.as_matrix() train_len = self.training_images.shape[0] # Reshapes and normalizes training images self.training_images = self.training_images.reshape(train_len,28,28,1)/255 # One hot Encodes the training labels (e.g. [0,0,0,1,0,0,0,0,0,0]) self.training_labels = one_hot_encode(train_labels.as_matrix().reshape(-1), 10) print("Setting Up Test Images and Labels") # Vertically stacks the test images self.test_images = test_images.as_matrix() test_len = self.test_images.shape[0] # Reshapes and normalizes test images self.test_images = self.test_images.reshape(test_len,28,28,1)/255 # One hot Encodes the test labels (e.g. [0,0,0,1,0,0,0,0,0,0]) self.test_labels = one_hot_encode(test_labels.as_matrix().reshape(-1), 10) def next_batch(self, batch_size): # Note that the 100 dimension in the reshape call is set by an assumed batch size of 100 x = self.training_images[self.i:self.i+batch_size] y = self.training_labels[self.i:self.i+batch_size] self.i = (self.i + batch_size) % len(self.training_images) return x, y # Before Your tf.Session run these two lines ch = CifarHelper() ch.set_up_images() # During your session to grab the next batch use this line # (Just like we did for mnist.train.next_batch) # batch = ch.next_batch(100) ###Output Setting Up Training Images and Labels Setting Up Test Images and Labels ###Markdown Creating the Model ** Create 2 placeholders, x and y_true. Their shapes should be: *** X shape = [None,28,28,1]* Y_true shape = [None,10]** Create three more placeholders * lr: learning rate* step:for learning rate decay* drop_rate ###Code X = tf.placeholder(tf.float32, shape=[None,28,28,1]) Y_true = tf.placeholder(tf.float32, shape=[None,10]) lr = tf.placeholder(tf.float32) step = tf.placeholder(tf.int32) drop_rate = tf.placeholder(tf.float32) ###Output _____no_output_____ ###Markdown Initialize Weights and bias neural network structure for this sample:X [batch, 28, 28, 1] Layer 1: conv. 
layer 6x6x1=>6, stride 1 W1 [6, 6, 1, 6] , B1 [6]Y1 [batch, 28, 28, 6]Layer 2: conv. layer 5x5x6=>12, stride 2 W2 [5, 5, 6, 12] , B2 [12]Y2 [batch, 14, 14, 12]Layer 3: conv. layer 4x4x12=>24, stride 2 W3 [4, 4, 12, 24] , B3 [24]Y3 [batch, 7, 7, 24] => reshaped to YY [batch, 7*7*24]Layer 4: fully connected layer (relu+dropout), W4 [7*7*24, 200] B4 [200]Y4 [batch, 200]Layer 5: fully connected layer (softmax) W5 [200, 10] B5 [10]Y [batch, 10] ###Code # three convolutional layers with their channel counts, and a # fully connected layer (the last layer has 10 softmax neurons) K = 12 # first convolutional layer output depth L = 24 # second convolutional layer output depth M = 48 # third convolutional layer N = 200 # fully connected layer W1 = tf.Variable(tf.truncated_normal([6,6,1,K], stddev=0.1)) B1 = tf.Variable(tf.ones([K])/10) W2 = tf.Variable(tf.truncated_normal([5,5,K,L], stddev=0.1)) B2 = tf.Variable(tf.ones([L])/10) W3 = tf.Variable(tf.truncated_normal([4,4,L,M], stddev=0.1)) B3 = tf.Variable(tf.ones([M])/10) W4 = tf.Variable(tf.truncated_normal([7*7*M,N], stddev=0.1)) B4 = tf.Variable(tf.ones([N])/10) W5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1)) B5 = tf.Variable(tf.zeros([10])) ###Output _____no_output_____ ###Markdown layers ###Code Y1 = tf.nn.relu(tf.nn.conv2d(X, W1, strides = [1,1,1,1], padding='SAME') + B1) Y2 = tf.nn.relu(tf.nn.conv2d(Y1,W2, strides = [1,2,2,1], padding='SAME') + B2) Y3 = tf.nn.relu(tf.nn.conv2d(Y2,W3, strides = [1,2,2,1], padding='SAME') + B3) #flat the inputs for the fully connected nn YY3 = tf.reshape(Y3, shape = (-1,7*7*M)) Y4 = tf.nn.relu(tf.matmul(YY3, W4) + B4) Y4d = tf.nn.dropout(Y4,rate = drop_rate) Ylogits = tf.matmul(Y4d, W5) + B5 Y = tf.nn.softmax(Ylogits) ###Output _____no_output_____ ###Markdown Loss Function ###Code #cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_true,logits=Ylogits)) cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels = Y_true, logits = Ylogits) #cross_entropy = -tf.reduce_mean(y_true * tf.log(Ylogits)) * 1000.0 ###Output WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/losses/losses_impl.py:209: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.cast instead. ###Markdown Optimizer ###Code lr = 0.0001 + tf.train.exponential_decay(learning_rate = 0.003, global_step = step, decay_steps = 2000, decay_rate = 1/math.e ) #optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.005) optimizer = tf.train.AdamOptimizer(learning_rate=lr) train = optimizer.minimize(cross_entropy) ###Output _____no_output_____ ###Markdown Intialize Variables ###Code init = tf.global_variables_initializer() ###Output _____no_output_____ ###Markdown Saving the Model ###Code saver = tf.train.Saver() ###Output _____no_output_____ ###Markdown Graph Session** Perform the training and test print outs in a Tf session and run your model! 
** ###Code history = {'acc_train':list(),'acc_val':list(), 'loss_train':list(),'loss_val':list(), 'learning_rate':list()} with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(20000): batch = ch.next_batch(100) sess.run(train, feed_dict={X: batch[0], Y_true: batch[1], step: i, drop_rate: 0.25}) # PRINT OUT A MESSAGE EVERY 100 STEPS if i%100 == 0: # Test the Train Model feed_dict_train = {X: batch[0], Y_true: batch[1], drop_rate: 0.25} feed_dict_val = {X:ch.test_images, Y_true:ch.test_labels, drop_rate: 0} matches = tf.equal(tf.argmax(Y,1),tf.argmax(Y_true,1)) acc = tf.reduce_mean(tf.cast(matches,tf.float32)) history['acc_train'].append(sess.run(acc, feed_dict = feed_dict_train)) history['acc_val'].append(sess.run(acc, feed_dict = feed_dict_val)) history['loss_train'].append(sess.run(cross_entropy, feed_dict = feed_dict_train)) history['loss_val'].append(sess.run(cross_entropy, feed_dict = feed_dict_val)) history['learning_rate'].append(sess.run(lr, feed_dict = {step: i})) print("Iteration {}:\tlearning_rate={:.6f},\tloss_train={:.6f},\tloss_val={:.6f},\tacc_train={:.6f},\tacc_val={:.6f}" .format(i,history['learning_rate'][-1], history['loss_train'][-1], history['loss_val'][-1], history['acc_train'][-1], history['acc_val'][-1])) print('\n') saver.save(sess,'models_saving/my_model.ckpt') plt.plot(history['acc_train'],'b') plt.plot(history['acc_val'],'r') plt.plot(history['loss_train'],'b') plt.plot(history['loss_val'],'r') plt.plot(history['learning_rate']) ###Output _____no_output_____ ###Markdown Loading a Model ###Code unlabeled_images_test = pd.read_csv('gdrive/My Drive/dataML/test.csv') #unlabeled_images_test = pd.read_csv('test.csv') X_unlabeled = unlabeled_images_test.values.reshape(unlabeled_images_test.shape[0],28,28,1)/255 with tf.Session() as sess: # Restore the model saver.restore(sess, 'models_saving/my_model.ckpt') # Fetch Back Results label = sess.run(Y, feed_dict={X:X_unlabeled,drop_rate:0}) label ###Output _____no_output_____ ###Markdown Predict the unlabeled test sets using the model ###Code imageId = np.arange(1,label.shape[0]+1).tolist() prediction_pd = pd.DataFrame({'ImageId':imageId, 'Label':label}) prediction_pd.to_csv('gdrive/My Drive/dataML/out_cnn4.csv',sep = ',', index = False) ###Output _____no_output_____
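###Markdown Note that in the TensorFlow version `label` holds the 10-way softmax probabilities returned by `sess.run(Y, ...)`, not integer class labels, whereas the Keras section applied `np.argmax` before saving. A short follow-up sketch that mirrors that step (the output filename is hypothetical): ###Code
# Convert softmax probabilities to integer class labels before building the submission,
# as in the Keras section above.
y_label = np.argmax(label, axis=1)
imageId = np.arange(1, y_label.shape[0] + 1).tolist()
prediction_pd = pd.DataFrame({'ImageId': imageId, 'Label': y_label})
prediction_pd.to_csv('gdrive/My Drive/dataML/out_cnn4_argmax.csv', sep=',', index=False)  # hypothetical filename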
authorship/Authorship and contribution for IBL behavior paper.ipynb
###Markdown Credit assignment in a large-scale neuroscience collaborationBy Anne Urai, CSHL, 2020To fairly acknowledge work of the 36 authors of the IBL's first overview paper (https://doi.org/10.1101/2020.01.17.909838), we experimented with several ways of defining authorship and contribution statement. Here's an overview of what worked, what didn't work, and what we ultimately settled on. Issue 1: author orderTraditionally, papers in neuroscience list the authors in specific order: the person who lead the project (usually a postdoc or PhD student) goes first, then everyone in decreasing order of importance in the middle, and the laboratory head (PI) goes last. Would this work for a large collaborative paper like the IBL? We quickly realized the answer is *no*, for a couple of reasons:* no single person took the lead on the project* no single person was the head PI on the project* there are a huge number of authors who contributed in very different ways, making it impossible to assign an order (comparing apples to oranges).* even if we allow for shared first authorship (as is becoming increasingly common in the field), we would struggle to determine 1. who gets to be part of that shared-first list, 2. what is the order within that shared-first list, and 3. what's the order of authors after that.Our answer: _the alphabetical author list_. While common in e.g. physics, this idea is met with a lot of suspicion in neuroscience. It has the following advantages:* no more tension, disappointment and negotiation over author order* adding authors at a later stage is easy* the contribution statement (more below) can be used to indicate fine-grained information about each person's role in the project.__Note__: we decided to make 'The International Brain Laboratory' the first author on every IBL Overview Paper. This will lead to all those papers being cited as 'The IBL et al., 2020a, 2020b, 2020c'. Not only does this focus on the collaboration instead of the individuals, it also avoids the situation that Barbara Aaron gets mistaken for the lead author of the paper. Issue 2: the contribution statementWithout author order to convey credit, we moved to the contribution statement. In the following order, we tried a few formats: 1. the free-for-all tableInspired by Nick Steinmetz (https://twitter.com/SteinmetzNeuro/status/1147241128858570752), we created a Google Sheets where everyone could put their name, and each of the contributions they made, into a table. The value (1-5) then showed the level of contribution for that individual.This did not work, for the following reasons:* there was no curation of the tasks that could appear in the first column. The granularity thus ranged a lot, ranging from 'managed the collaboration' to 'ordered mouse food on Wednesday the 3rd of April' (not literally). * everyone worked on the same document simultaneously, so people often updated their statements after they saw what others wrote ('I forgot I did that too, I'd better still add it!').* the levels 1-5 were ill-defined: does '5' mean you did that task all on your own; that you were a regular participant; or just that you worked really hard on it?* adding numbers 1-5 led to a bit of a rat-race in accumulating a high total score, a bit like grade inflation. 2a. the free-form statementWe decided to take a step back, and instead (2 December 2019) gather each individual statement in a form that's as inclusive and free as possible. After Lauren Wool collected these statements, they were too varied to be useful. 
I did mine in bullet points, but others wrote a paragraph. 2b. the structured statementThis quickly led to another attempt (7 January 2020) at a new request for contribution statements, now with a few restrictions:Write in full sentencesVerbs should describe what you did for the project, not who you were._Yes_: X.Y.Z. wrote the abstract and attended WG meetings_No_: X.Y.Z. was a writer for the abstract and was a member of the WGWhile this more structured form helped, there was still a lot of variation in the level of detail that people listed (e.g. 'wrote the paper' vs. 'found literature references for paragraphs 1-4 on page 3'). 3a. the CRediT-based statementMeanwhile, the IBL's Publication WG had further discussions about streamlining contribution statements, leading to the following recommendation on January 17, 2020:These contribution statements were collected in a Google Sheet. Specifying which levels of contribution were available (lead, equal, support) was a good level of granularity. 3b. the curated CRediT-based statementEven this more streamlined, CRediT-based statement suffered from the following remaining problems:* similar tasks were grouped, by different people, under various different CRediT categories (e.g. is 'Building rigs' Methodology or Investigation?)* granularity differences remained: should rig maintenance, surgeries, husbandry, and animal training all be grouped under 'collected behavioral data'?This was solved in the good old-fashioned way of having a huge Zoom meeting, and going one-by-one through each statement to equalize them. Matteo Carandini, Hannah Bayer, Lauren Wool and Gaelle Chapuis took the lead on this, and completed the long list of statements on 3 February 2020. 4. the curated CRediT-based tableWhile working on a revision of the paper in April 2020, I wanted to put our now-curated text statement back into a table to have a better visual way of inspecting each person's contributions. ###Code import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np sns.set() df = pd.read_csv('Behavior paper (2019) - Contribution statement - Written contributions.csv') df = df.rename(columns={'First Name':'first_name', 'Last Name':'last_name', 'credit_proposal':'credit'}) df = df[['first_name', 'last_name', 'credit']] df.dropna() new_df = [] for aidx, author in df.groupby(['last_name', 'first_name']): # tease apart the credit assignment column statement = author['credit'].item() if not isinstance(statement, str): continue statement_split = statement.split("[") for s in statement_split: cat_split = s.split("]") if len(cat_split) > 1: credit_category = cat_split[0] task_split = cat_split[1].split(")") for t in task_split: contrib_split = t.split("(") if len(contrib_split) ==2: # print(contrib_split) task = contrib_split[0] contribution = contrib_split[1] # parse for initials words_last = author.last_name.item().split() letters_last = [word[0] for word in words_last] words_first = author.first_name.item().split() letters_first = [word[0] for word in words_first] initials = '.'.join(letters_first + letters_last) + '.'
# add these new_df.append({'last_name':author.last_name.item(), 'first_name':author.first_name.item(), 'author_initials':initials, 'credit_category': credit_category.lower().replace('–', '-'), 'task': task.lower().replace('\n', '').strip(), 'contribution': contribution.lower()}) new_df = pd.DataFrame(new_df) # add some vars new_df['credit_task'] = new_df.credit_category.str.upper() + '; ' + new_df.task new_df['contribution_level'] = new_df.contribution.map({'lead':3, 'equal':2, 'support':1}) new_df['author'] = new_df.last_name + ', ' + new_df.first_name # ensure initials are unique new_df.groupby(['author_initials'])['author'].unique() sorted(new_df.credit_category.unique()) sorted(new_df.credit_task.unique()) new_df.contribution.unique() assert(all(new_df.groupby(['task'])['credit_category'].nunique() == 1)) matrix = new_df.pivot("credit_task", "author", "contribution_level").fillna(0) matrix.to_csv('contributions_matrixform.csv') cmap = sns.light_palette("purple", n_colors=4) cmap[0] = (0.95, 0.95, 0.95) fig, ax = plt.subplots(1,1,figsize=(14,10)) sns.heatmap(matrix, annot=False, yticklabels=True, cmap=cmap, linewidths=.5, ax=ax, cbar=False) ax.set_xlabel('') ax.set_ylabel('') ax.tick_params(axis="x", bottom=True, top=True, labelbottom=False, labeltop=True, rotation=90) plt.tight_layout() fig.savefig('contributions.png', dpi=400) ## WRITE THIS OUT FOR THE CONTRIBUTION STATEMENT IN THE PAPER new_df['task'] = new_df['credit_task'] + ' (' + new_df['contribution'] + ')' f = open("ContributionStatement.txt", "a") for author, tmpdf in new_df.groupby(['author']): txt = tmpdf['author_initials'].unique()[0] + ': ' + '; '.join(tmpdf.task.values) #print(txt) f.write(txt + "\n") #f.write('\n') #print(tmpdf['author_initials'].unique()[0]) f.close() ###Output _____no_output_____ ###Markdown Issue 3: the structure of contributionsLet's look at the structure of this matrix. 
###Code # get a super rough estimate of each person's total contributions matrix.sum().reset_index().sort_values(0, ascending=False) sns.clustermap(matrix.fillna(0), cmap=cmap, linewidths=.5, cbar_pos=None, figsize=(18,15), yticklabels=True) plt.xlabel('') plt.ylabel('') plt.savefig('clustered_contributions.png') from sklearn import decomposition, metrics, model_selection # read data V = matrix.reset_index().copy() V.set_index('credit_task', inplace=True) # DETERMINE THE NUMBER OF COMPONENTS # https://stackoverflow.com/questions/48148689/how-to-compare-predictive-power-of-pca-and-nmf """ Estimate performance of the model on the data """ def get_score(model, data, scorer=metrics.explained_variance_score): prediction = model.inverse_transform(model.transform(data)) return scorer(data, prediction) # REPEAT THIS, BUT ON HELD-OUT DATA V_train, V_test = model_selection.train_test_split(V, test_size=0.2, random_state=1) perfs_train = [] perfs_test = [] for k in range(1, 30): nmf = decomposition.NMF(n_components=k).fit(V_train) perfs_train.append(get_score(nmf, V_train)) perfs_test.append(get_score(nmf, V_test)) plt.plot(range(1, 30), perfs_train, 'o-', range(1, 30), perfs_test, 'o-') plt.xlabel('# components') plt.ylabel('Fraction variance explained') plt.legend(['Training', 'Testing']) # EYEBALL THIS AND PICK NUMBER OF COMPONENTS n_comp = 7 # can give meaningful names, shown below # now fit the real NMF model # https://medium.com/logicai/non-negative-matrix-factorization-for-recommendation-systems-985ca8d5c16c nmf = decomposition.NMF(n_components=n_comp).fit(V) H = pd.DataFrame(np.round(nmf.components_, 2), columns=V.columns) # give some meaningful-ish names H.index = ['Data collection', 'PI', 'Coordination and management', 'Task development', 'Software', 'Rig testing', 'Paper writing'] W = pd.DataFrame(np.round(nmf.transform(V),2), columns=H.index) W.index = V.index reconstructed = pd.DataFrame(np.round(np.dot(W,H),2), columns=V.columns) reconstructed.index = V.index ## LET's look at those results W.style.background_gradient(cmap='PuRd') ## LET'S VISUALIZE THIS IN A CLEARER WAY from IPython.display import display for col in W.columns: Wtmp = W[[col]] # drop any tasks that are zero Wtmp.drop(Wtmp[ Wtmp[col] < 0.0001 ].index , inplace=True) Wtmp = Wtmp.sort_values(by=col, ascending=False) display(Wtmp.style.background_gradient(cmap='PuRd')) ## WHO IS IN EACH OF THESE GROUPS (AND TO WHICH EXTENT?) for row in H.index: Htmp = H.loc[row].reset_index() # drop any tasks that are zero Htmp.drop(Htmp[ Htmp[row] < 0.0001 ].index , inplace=True) Htmp = Htmp.sort_values(by=row, ascending=False) display(Htmp.style.background_gradient(cmap='PuRd')) ## NOW, WHAT DOES THE NEW CLUSTERING PLOT LOOK LIKE? fig, ax = plt.subplots(2,1,figsize=(12,18)) sns.heatmap(matrix, annot=False, yticklabels=True, cmap=cmap, linewidths=.5, ax=ax[0], cbar=False) ax[0].set_xlabel('') ax[0].set_ylabel('') ax[0].tick_params(axis="x", bottom=True, top=True, labelbottom=False, labeltop=True, rotation=90) ax[0].set_title('Original contribution matrix') sns.heatmap(reconstructed, annot=False, yticklabels=True, cmap=cmap, linewidths=.5, ax=ax[1], cbar=False) ax[1].set_xlabel('') ax[1].set_ylabel('') ax[1].tick_params(axis="x", bottom=True, top=True, labelbottom=False, labeltop=False, rotation=90) ax[1].set_title('Reconstructed from %d NMF components'%n_comp) plt.tight_layout() ###Output _____no_output_____
python/Recipes - doc2vec.ipynb
###Markdown AboutAnalysis of recipes with doc2vec. Prerequesites* Python libraries: * nltk - natural language processing toolkit which includes functions for cleaning text data. * gensim - doc2vec implementation - cython is required for ensuring speedy computations. ###Code import re # Regular Expressions import pandas as pd # DataFrames & Manipulation import nltk.data # Sentence tokenizer from bs4 import BeautifulSoup # HTML processing from gensim.models.doc2vec import LabeledSentence, Doc2Vec train_input = "../data/recipes.tsv.bz2" # keep empty strings (http://pandas-docs.github.io/pandas-docs-travis/io.html#na-values) train = pd.read_csv(train_input, delimiter="\t", quoting=3, encoding="utf-8", keep_default_na=False) # load sentence tokenizer model and initialize for german language nltk.download("punkt") tokenizer = nltk.data.load('tokenizers/punkt/german.pickle') def normalize( text ): """ Remove HTML, non-letter characters, and convert to lower case. Return list of words. """ # remove HTML markup with BeautifulSoup (and keep spaces after removal) plainText = " ".join(BeautifulSoup(text, 'html.parser').strings) # retain only letters (include umlauts) onlyLetters = re.sub(u"[^a-zA-ZäöüÄÖÜß]", " ", plainText) # get lower case words words = onlyLetters.lower().split() return words def split_sentences(text): """ Split text by sentences and clean each sentence. """ return filter(None, [normalize(sentence) for sentence in tokenizer.tokenize(text)]) sentences = [] size = train['instructions'].size for i in xrange ( 0, size ): if (i+1) % 10000 == 0: print "Processing %d of %d recipies." % ( i+1, size ) # either keep complete text or split into sentences but label all parts with the same ID sentences.append(LabeledSentence(normalize(train['instructions'][i]), [i])) #sentences += [LabeledSentence(words, [i]) for words in split_sentences(train['instructions'][i])] print "Total: %d sentences.\n" % len(sentences) # Set values for various parameters num_features = 300 # Word vector dimensionality min_word_count = 40 # Minimum word count num_workers = 4 # Number of threads to run in parallel context = 10 # Context window size downsampling = 1e-3 # Downsample setting for frequent words # Import the built-in logging module and configure it so that Word2Vec creates nice output messages import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) print "Training model..." model = Doc2Vec(sentences, workers=num_workers, \ size=num_features, min_count = min_word_count, \ window = context, sample = downsampling) vec = model.docvecs.most_similar(1) ids = [k for (k,v) in vec] ids vec train.loc[ids] model.most_similar('pasta') ###Output _____no_output_____
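###Markdown The trained model can also be queried with a recipe text that was not in the training set, by inferring a vector for it and searching the document vectors. This is a sketch assuming the gensim version used above; the example string is made up. ###Code
# Infer a vector for an unseen (German) recipe description and find the closest training recipes.
new_text = u"Nudeln kochen und mit frischer Tomatensauce servieren"  # hypothetical example text
new_vec = model.infer_vector(normalize(new_text))

# most_similar can take raw vectors via the `positive` argument
similar = model.docvecs.most_similar(positive=[new_vec], topn=5)
train.loc[[doc_id for doc_id, sim in similar]]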
bayer-test.ipynb
###Markdown This notebook walks through the process of converting from images captured by the Raspberry Pi using the `--raw` command to useful numpy structures of the Bayer grid data.This uses most of the example code from [the `picamera` module's section on Raw Bayer Captures](https://picamera.readthedocs.io/en/release-1.13/recipes2.htmlraw-bayer-data-captures). ###Code import numpy as np import matplotlib.pyplot as plt from PIL import Image import io import time from numpy.lib.stride_tricks import as_strided # load our pickled stream object containing the image data import pickle with open('wall2.pickle','rb') as f: stream = pickle.load(f) # # alternatively, just open the jpeg file (also works) # with open('wall1.jpeg', 'rb') as f: # stream = io.BytesIO(f.read()) assert isinstance(stream, io.BytesIO) ver = 1 # we used a v1 camera module for this image. Use `2` for v2 # Extract the raw Bayer data from the end of the stream, check the # header and strip if off before converting the data into a numpy array offset = { 1: 6404096, 2: 10270208, }[ver] data = stream.getvalue()[-offset:] assert data[:4] == b'BRCM' data = data[32768:] data = np.frombuffer(data, dtype=np.uint8) # For the V1 module, the data consists of 1952 rows of 3264 bytes of data. # The last 8 rows of data are unused (they only exist because the maximum # resolution of 1944 rows is rounded up to the nearest 16). # # For the V2 module, the data consists of 2480 rows of 4128 bytes of data. # There's actually 2464 rows of data, but the sensor's raw size is 2466 # rows, rounded up to the nearest multiple of 16: 2480. # # Likewise, the last few bytes of each row are unused (why?). Here we # reshape the data and strip off the unused bytes. reshape, crop = { 1: ((1952, 3264), (1944, 3240)), 2: ((2480, 4128), (2464, 4100)), }[ver] data = data.reshape(reshape)[:crop[0], :crop[1]] # Horizontally, each row consists of 10-bit values. Every four bytes are # the high 8-bits of four values, and the 5th byte contains the packed low # 2-bits of the preceding four values. In other words, the bits of the # values A, B, C, D and arranged like so: # # byte 1 byte 2 byte 3 byte 4 byte 5 # AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD AABBCCDD # # Here, we convert our data into a 16-bit array, shift all values left by # 2-bits and unpack the low-order bits from every 5th byte in each row, # then remove the columns containing the packed bits data = data.astype(np.uint16) << 2 for byte in range(4): data[:, byte::5] |= ((data[:, 4::5] >> ((4 - byte) * 2)) & 0b11) data = np.delete(data, np.s_[4::5], 1) # Now to split the data up into its red, green, and blue components. The # Bayer pattern of the OV5647 sensor is BGGR. In other words the first # row contains alternating green/blue elements, the second row contains # alternating red/green elements, and so on as illustrated below: # # GBGBGBGBGBGBGB # RGRGRGRGRGRGRG # GBGBGBGBGBGBGB # RGRGRGRGRGRGRG # # Please note that if you use vflip or hflip to change the orientation # of the capture, you must flip the Bayer pattern accordingly rgb = np.zeros(data.shape + (3,), dtype=data.dtype) rgb[1::2, 0::2, 0] = data[1::2, 0::2] # Red rgb[0::2, 0::2, 1] = data[0::2, 0::2] # Green rgb[1::2, 1::2, 1] = data[1::2, 1::2] # Green rgb[0::2, 1::2, 2] = data[0::2, 1::2] # Blue # At this point we now have the raw Bayer data with the correct values # and colors but the data still requires de-mosaicing and # post-processing. If you wish to do this yourself, end the script here! 
rgb plt.imshow(rgb) plt.imshow(rgb.astype(np.uint8)) ###Output _____no_output_____ ###Markdown That looks odd... ###Code rgb[rgb > 255].astype(np.uint16) rgb[rgb > 255].astype(np.uint8) np.max(rgb.astype(np.uint8)) np.max(rgb) ###Output _____no_output_____ ###Markdown ok, `astype` doesn't remap the values to the 0-255 range, it overflows... ###Code np.max(rgb * 255/1023) # remap from 10-bit to 8-bit range plt.imshow((rgb * 255/1023).astype(np.uint8)) ###Output _____no_output_____ ###Markdown still the odd artifacts... ###Code np.max(rgb * 255) 0b1111111111111111 # max value of uint16 np.max(rgb * (255/1023)) np.max(rgb * 255/1023) 65535/1023 ###Output _____no_output_____ ###Markdown looks like the `*255` caused it to overflow _before_ converting to a float, while multiplying it by the fraction `(255/1023)` converts it to a float initially. ###Code uint16_to_uint8 = lambda a: (a * (255/1023)).astype(np.uint8) # note, this only works b/c the values are actually 10-bit # uint16_to_uint8 = lambda a: (a >> 2).astype(np.uint8) # or bit-shift as suggested at the end rgb8 = uint16_to_uint8(rgb) np.max(rgb8) assert rgb8.dtype == np.uint8 plt.imshow(rgb8) ###Output _____no_output_____ ###Markdown that looks much more expected ###Code im = Image.fromarray(rgb8) im.save('mosaic_rgb.png') # save mosaiced image in color ###Output _____no_output_____ ###Markdown now for black and white, we can just collapse the 3-dimensional array by summing the RGB values (since only one of the three will have a value for any given entry) ###Code np.sum(rgb8,axis=2).shape # make sure I'm collapsing the right axis assert np.max(np.sum(rgb8, axis=2)) < 255 # sum returns uint64, I'm making sure here that it won't overflow somehow imbw = Image.fromarray(np.sum(rgb8, axis=2).astype(np.uint8)) imbw.save('mosaic_bw.png') # Below we present a fairly naive de-mosaic method that simply # calculates the weighted average of a pixel based on the pixels # surrounding it. The weighting is provided by a byte representation of # the Bayer filter which we construct first: bayer = np.zeros(rgb.shape, dtype=np.uint8) bayer[1::2, 0::2, 0] = 1 # Red bayer[0::2, 0::2, 1] = 1 # Green bayer[1::2, 1::2, 1] = 1 # Green bayer[0::2, 1::2, 2] = 1 # Blue # Allocate an array to hold our output with the same shape as the input # data. After this we define the size of window that will be used to # calculate each weighted average (3x3). Then we pad out the rgb and # bayer arrays, adding blank pixels at their edges to compensate for the # size of the window when calculating averages for edge pixels. output = np.empty(rgb.shape, dtype=rgb.dtype) window = (3, 3) borders = (window[0] - 1, window[1] - 1) border = (borders[0] // 2, borders[1] // 2) rgb_padded = np.pad(rgb, [ (border[0], border[0]), (border[1], border[1]), (0, 0), ], 'constant') bayer = np.pad(bayer, [ (border[0], border[0]), (border[1], border[1]), (0, 0), ], 'constant') # For each plane in the RGB data, we use a nifty numpy trick # (as_strided) to construct a view over the plane of 3x3 matrices. 
We do # the same for the bayer array, then use Einstein summation on each # (np.sum is simpler, but copies the data so it's slower), and divide # the results to get our weighted average: for plane in range(3): p = rgb_padded[..., plane] b = bayer[..., plane] pview = as_strided(p, shape=( p.shape[0] - borders[0], p.shape[1] - borders[1]) + window, strides=p.strides * 2) bview = as_strided(b, shape=( b.shape[0] - borders[0], b.shape[1] - borders[1]) + window, strides=b.strides * 2) psum = np.einsum('ijkl->ij', pview) bsum = np.einsum('ijkl->ij', bview) output[..., plane] = psum // bsum # At this point output should contain a reasonably "normal" looking # image, although it still won't look as good as the camera's normal # output (as it lacks vignette compensation, AWB, etc). # # If you want to view this in most packages (like GIMP) you'll need to # convert it to 8-bit RGB data. The simplest way to do this is by # right-shifting everything by 2-bits (yes, this makes all that # unpacking work at the start rather redundant...) output = (output >> 2).astype(np.uint8) with open('image.data', 'wb') as f: output.tofile(f) plt.imshow(output) Image.fromarray(output).save('demosaiced.png') # alternatively, convolution? from scipy.signal import convolve bayer = np.zeros(rgb.shape, dtype=np.uint8) bayer[1::2, 0::2, 0] = 1 # Red bayer[0::2, 0::2, 1] = 1 # Green bayer[1::2, 1::2, 1] = 1 # Green bayer[0::2, 1::2, 2] = 1 # Blue Image.fromarray(rgb8[:,:,0]).save('red.png') Image.fromarray(rgb8[:,:,1]).save('green.png') Image.fromarray(rgb8[:,:,2]).save('blue.png') r = convolve(bayer[:,:,0],rgb8[:,:,0]*0.5) r.shape np.max(r) # res = np.zeros(rgb8.shape, dtype=np.uint8) # for i in range(3): # res[::,::,i] = convolve() ###Output _____no_output_____ ###Markdown I'm having difficulty thinking of a way to make this work without restructuring the original rgb array or doing funky summation/boolean filtering.The closest I've gotten is convolving across each r/g/b plane and dividing by the sum of `[[1,1,1],[1,1,1],[1,1,1]]` of `bayer`. ###Code kernel = np.ones((3,3),dtype=np.uint8) kernel bayer_conv = np.zeros(bayer.shape, dtype=np.uint8) for i in range(3): bayer_conv[:,:,i] = convolve(bayer[:,:,i], kernel, mode='same') bayer_conv[:3,:3,0] # peek at top left corner of r plane ###Output _____no_output_____ ###Markdown this is pretty much minesweeper where r, g, and b are mines ###Code np.array([['','','','',''],['','g','b','g','b'],['','r','g','r','g'],['','g','b','g','b']], dtype=str) # top left corner of bayer array rgb8_conv = np.zeros(rgb8.shape, dtype=np.uint16) # max sum here should be 1275 for 5 maxed green sencels for i in range(3): rgb8_conv[:,:,i] = convolve(rgb8[:,:,i].astype(np.uint16), kernel, mode='same') np.max(rgb8_conv) res = rgb8_conv / bayer_conv res.shape np.max(res) res = res.astype(np.uint8) plt.imshow(res) Image.fromarray(res).save('demosaiced_convolution.png') ###Output _____no_output_____
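###Markdown As the comments above note, the de-mosaiced result still lacks the camera's usual post-processing (AWB, vignette correction, etc.). A very rough gray-world white balance — a sketch, not what the camera firmware actually does — can bring the colours a little closer to the normal JPEG output: ###Code
# Gray-world white balance: scale each channel so its mean matches the global mean.
balanced = res.astype(np.float32)
channel_means = balanced.reshape(-1, 3).mean(axis=0)   # mean of R, G, B over all pixels
gains = channel_means.mean() / channel_means           # per-channel gain
balanced = np.clip(balanced * gains, 0, 255).astype(np.uint8)
plt.imshow(balanced)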
Unsupervised_Clustering/Practice_Notebooks/Day1.ipynb
###Markdown Auto MPG Dataset ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.cluster import KMeans import scipy.cluster.hierarchy as sch car = pd.read_csv("car-mpg.csv") car.head() car.info() car.describe() car.shape car.isnull().sum() car[car.hp=="?"] car[car.hp!="?"].hp.median() # Replace the "?" marks (missing hp values) with the median car = car.replace('?',np.nan) a = car['hp'].median() car = car.fillna(a) car['hp'] = car.hp.astype("float64") car.dtypes car.dtypes car.hp.nunique() ks = range(1,11) inertias = [] for k in ks: # Define the model model = KMeans(n_clusters=k) # Fit the model model.fit(car.drop('car_name',axis=1)) # Append the inertias inertias.append(model.inertia_) inertias plt.plot(ks,inertias,"-o") plt.xlabel("No. of Clusters") plt.ylabel("Inertia Values") plt.xticks(ks) plt.show() model = KMeans(n_clusters=4) model.fit(car.drop('car_name',axis=1)) labels = model.predict(car.drop('car_name',axis=1)) labels centroids = model.cluster_centers_ centroids centroid_x = centroids[:,0] # Here we are considering mpg and disp, which have a direct relationship with a car's average mileage centroid_y = centroids[:,3] df1 = car.drop('car_name',axis=1) df1.head() xs = df1.iloc[:,0] ys = df1.iloc[:,3] import matplotlib.pyplot as plt plt.scatter(xs,ys,c=labels,s=35) plt.scatter(centroid_x,centroid_y,marker='D',c='r',s=80) plt.show() dendrogram = sch.dendrogram(sch.linkage(car.drop('car_name',axis=1),method="ward")) plt.title("Dendrogram") plt.xlabel("Car Data") plt.ylabel("Distance Measure") plt.show() ###Output _____no_output_____ ###Markdown Dendrogram for Iris ###Code import scipy.cluster.hierarchy as sch from sklearn.datasets import load_iris iris = load_iris().data # assumed: the Iris feature matrix the heading refers to dendrogram = sch.dendrogram(sch.linkage(iris,method="ward")) plt.title("Dendrogram") plt.xlabel("Iris Data") plt.ylabel("Distance Measure") plt.show() ###Output _____no_output_____ ###Markdown Calculate the Euclidean Distance ###Code x = [[-1,2,3]] y = [[4,0,-3]] from sklearn.metrics.pairwise import euclidean_distances euclidean_distances(x,y) ###Output _____no_output_____
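###Markdown As a quick check of the last cell, the same distance can be computed by hand from the definition (square root of the sum of squared differences): ###Code
import numpy as np

x = np.array([-1, 2, 3])
y = np.array([4, 0, -3])

# (-1-4)**2 + (2-0)**2 + (3-(-3))**2 = 25 + 4 + 36 = 65
print(np.sqrt(np.sum((x - y) ** 2)))   # ~8.0623, matching euclidean_distances(x, y)
print(np.linalg.norm(x - y))           # same value via the vector norm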
competition-analysis.ipynb
###Markdown Random forest ###Code from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import roc_auc_score clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1) clf.fit(d1_training.drop('label', axis=1).values, d1_training['label'].values) roc_auc_score(y_score=clf.predict(d1_testing.drop('label', axis=1).values), y_true=d1_testing['label'].values) from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn.cross_validation import cross_val_score from sklearn.metrics import roc_auc_score training_features = d1_training.drop('label', axis=1).values training_labels = d1_training['label'].values pipeline = make_pipeline(StandardScaler(), PolynomialFeatures(), RandomForestClassifier(n_estimators=1000, n_jobs=-1)) cross_val_score(pipeline, training_features, training_labels, cv=10, scoring='roc_auc') from sklearn.preprocessing import StandardScaler from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn.metrics import roc_auc_score training_features = d1_training.drop('label', axis=1).values training_labels = d1_training['label'].values testing_features = d1_testing.drop('label', axis=1).values testing_labels = d1_testing['label'].values clf = make_pipeline(StandardScaler(), RandomForestClassifier(n_estimators=1000, n_jobs=-1)) clf.fit(training_features, training_labels) roc_auc_score(y_score=clf.predict(d1_testing.drop('label', axis=1).values), y_true=d1_testing['label'].values) ###Output _____no_output_____ ###Markdown Support vector machine ###Code from sklearn.svm import SVC from sklearn.metrics import roc_auc_score clf = SVC(C=1., kernel='linear') clf.fit(d1_training.drop('label', axis=1).values, d1_training['label'].values) roc_auc_score(y_score=clf.predict(d1_testing.drop('label', axis=1).values), y_true=d1_testing['label'].values) from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.svm import SVC from sklearn.pipeline import make_pipeline from sklearn.cross_validation import cross_val_score from sklearn.metrics import roc_auc_score training_features = d1_training.drop('label', axis=1).values training_labels = d1_training['label'].values clf = make_pipeline(StandardScaler(), PolynomialFeatures(), SVC(C=1., kernel='linear')) cross_val_score(clf, training_features, training_labels, cv=10, scoring='roc_auc') from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.pipeline import make_pipeline from sklearn.metrics import roc_auc_score training_features = d1_training.drop('label', axis=1).values training_labels = d1_training['label'].values testing_features = d1_testing.drop('label', axis=1).values testing_labels = d1_testing['label'].values clf = make_pipeline(StandardScaler(), SVC(C=1., kernel='linear')) clf.fit(training_features, training_labels) roc_auc_score(y_score=clf.predict(d1_testing.drop('label', axis=1).values), y_true=d1_testing['label'].values) from sklearn.grid_search import GridSearchCV import numpy as np import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") parameters = {'kernel': ('linear', 'rbf'), 'C': list(np.arange(0.01, 1.0, 0.01)) + list(range(1, 10))} svr = SVC() clf = GridSearchCV(svr, parameters, n_jobs=-1) clf.fit(training_features, training_labels) clf.best_params_, clf.best_score_ clf.best_estimator_.fit(training_features, training_labels) clf.best_estimator_.score(testing_features, 
testing_labels) ###Output _____no_output_____ ###Markdown TPOT ###Code from tpot import TPOT my_tpot = TPOT(generations=1000, verbosity=2) my_tpot.fit(d1_training.drop('label', axis=1).values, d1_training['label'].values) my_tpot.score(d1_testing.drop('label', axis=1).values, d1_testing['label'].values) my_tpot.score(d1_training.drop('label', axis=1).values, d1_training['label'].values) ###Output _____no_output_____
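###Markdown One detail worth flagging in the cells above: `roc_auc_score` was given the hard class labels returned by `predict`. ROC AUC is a ranking metric, so it is usually fed continuous scores instead; here is a sketch of the probability-based variant for the random forest, reusing the same `d1_training` / `d1_testing` frames (binary labels assumed): ###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score

clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
clf.fit(d1_training.drop('label', axis=1).values, d1_training['label'].values)

# column 1 of predict_proba is P(label == 1) for a binary problem
scores = clf.predict_proba(d1_testing.drop('label', axis=1).values)[:, 1]
roc_auc_score(y_score=scores, y_true=d1_testing['label'].values)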
notebooks/federated_learning/federated_learning_evaluation_methodology.ipynb
###Markdown Federated learning: evaluation methodologyIn this notebook, we study the different evaluation methodologies that we can use when we want to evaluate federated learning (FL) simulations. First, we set up the FL configuration (for more information see the notebook [Federated learning basic concepts](./federated_learning_basic_concepts.ipynb)). ###Code import shfl import tensorflow as tf import numpy as np import random random.seed(123) np.random.seed(seed=123) class Reshape(shfl.private.FederatedTransformation): def apply(self, labeled_data): labeled_data.data = np.reshape(labeled_data.data, (labeled_data.data.shape[0], labeled_data.data.shape[1], labeled_data.data.shape[2],1)) class Normalize(shfl.private.FederatedTransformation): def __init__(self, mean, std): self.__mean = mean self.__std = std def apply(self, labeled_data): labeled_data.data = (labeled_data.data - self.__mean)/self.__std def model_builder(): model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1, input_shape=(28, 28, 1))) model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.4)) model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1)) model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.3)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) criterion = tf.keras.losses.CategoricalCrossentropy() optimizer = tf.keras.optimizers.RMSprop() metrics = [tf.keras.metrics.categorical_accuracy] return shfl.model.DeepLearningModel(model=model, criterion=criterion, optimizer=optimizer, metrics=metrics) #Read data database = shfl.data_base.Emnist() train_data, train_labels, test_data, test_labels = database.load_data() #Distribute among clients non_iid_distribution = shfl.data_distribution.NonIidDataDistribution(database) federated_data, test_data, test_labels = non_iid_distribution.get_federated_data(num_nodes=5, percent=10) #Set up aggregation operator aggregator = shfl.federated_aggregator.FedAvgAggregator() federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, aggregator) #Reshape and normalize shfl.private.federated_operation.apply_federated_transformation(federated_data, Reshape()) mean = np.mean(test_data.data) std = np.std(test_data.data) shfl.private.federated_operation.apply_federated_transformation(federated_data, Normalize(mean, std)) ###Output _____no_output_____ ###Markdown Evaluation methodology 1: global test dataset The first evaluation methodology that we propose consists of the federated version of the classical evaluation methods. For this purpose, we use a common test dataset allocated in the server. We show the evaluation metrics (loss and accuracy, in this case) in each round of learning, both in local models and updated global model. The behaviour of this evaluation methodology is as follows. ###Code test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2],1)) federated_government.run_rounds(1, test_data, test_labels) ###Output _____no_output_____ ###Markdown This methodology is the simplest and shows both local and global models. 
The problem with this methodology is that the local evaluation metrics are biased by the distribution of the test set data. That is, the performance of the local models is not properly represented when using a non-IID scenario (see notebook [Federated learning sampling methods](./federated_learning_sampling.ipynb)) because the distribution of training data for each client is different from that of the test data we use. For that reason, we propose the following evaluation methodology. Evaluation methodology 2: global test dataset and local test datasetsIn this evaluation methodology, we consider that there is a global test dataset and that each client has a local test dataset, according to the distribution of their training data. Hence, in each round, we show the evaluation metrics of each client for their global and local tests. This evaluation methodology is more complete as it shows the performance of the local FL models in the global and local distribution of the data, which gives us more information. First, we split each client's data in train and test partitions. You can find this method in [Federated Operation](https://github.com/sherpaai/Sherpa.ai-Federated-Learning-Framework/blob/master/shfl/private/federated_operation.py). ###Code shfl.private.federated_operation.split_train_test(federated_data) ###Output _____no_output_____ ###Markdown After that, each client owns a training set, which is used for training the local learning model and a test set, which is used to evaluate it. We are now ready to show the behaviour of this evaluation methodology. ###Code # We restart federated government federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, aggregator) test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2],1)) federated_government.run_rounds(1, test_data, test_labels) ###Output _____no_output_____ ###Markdown Evaluation Methodology of Federated LearningIn this notebook we study the different evaluation methodologies that we can use when we want to evaluate FL problems. First, we set up the FL configuration (for more information see [Basic Concepts Notebook](./federated_learning_basic_concepts.ipynb)). 
###Code import shfl import tensorflow as tf import numpy as np import random random.seed(123) np.random.seed(seed=123) class Reshape(shfl.private.FederatedTransformation): def apply(self, labeled_data): labeled_data.data = np.reshape(labeled_data.data, (labeled_data.data.shape[0], labeled_data.data.shape[1], labeled_data.data.shape[2],1)) class Normalize(shfl.private.FederatedTransformation): def __init__(self, mean, std): self.__mean = mean self.__std = std def apply(self, labeled_data): labeled_data.data = (labeled_data.data - self.__mean)/self.__std def model_builder(): model = tf.keras.models.Sequential() model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1, input_shape=(28, 28, 1))) model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.4)) model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1)) model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid')) model.add(tf.keras.layers.Dropout(0.3)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]) return shfl.model.DeepLearningModel(model) #Read data database = shfl.data_base.Emnist() train_data, train_labels, test_data, test_labels = database.load_data() #Distribute among clients non_iid_distribution = shfl.data_distribution.NonIidDataDistribution(database) federated_data, test_data, test_labels = non_iid_distribution.get_federated_data(num_nodes=5, percent=10) #Set up aggregation operator aggregator = shfl.federated_aggregator.FedAvgAggregator() federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, aggregator) #Reshape and normalize shfl.private.federated_operation.apply_federated_transformation(federated_data, Reshape()) mean = np.mean(test_data.data) std = np.std(test_data.data) shfl.private.federated_operation.apply_federated_transformation(federated_data, Normalize(mean, std)) ###Output _____no_output_____ ###Markdown Evaluation methodology 1: Global test dataset The first evaluation methodology that we propose consists of the federated version of the classical evaluation methods. For that purpose, we use a common test dataset allocated in the server. We show the evaluation metrics (loss and accuracy in this case) in each round of learning both in local models and global updated model. We show the behaviour of this evaluation methodology as follows. ###Code test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2],1)) federated_government.run_rounds(1, test_data, test_labels) ###Output _____no_output_____ ###Markdown This methodology is the simplest and shows both local and global models. The problem with this methodology is that the local evaluation metrics are biased by the distribution of test set data. That is, the performance of the local models is not properly represented when using a non-iid scenario (see [Federated Sampling](./federated_learning_sampling.ipynb)) because the distribution of training data for each client is different from the data we test on. For that reason, we propose the following evaluation methodology. 
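###Markdown To see concretely why a single global test set can misrepresent clients that hold non-iid shards, here is a tiny numpy-only illustration (synthetic labels, independent of shfl): ###Code
import numpy as np

rng = np.random.default_rng(0)
global_test_labels = rng.integers(0, 10, size=1000)      # roughly balanced over 10 classes

client_labels = [rng.integers(0, 3, size=500),           # client 0 only ever sees classes 0-2
                 rng.integers(5, 10, size=500)]          # client 1 only ever sees classes 5-9

for i, labels in enumerate(client_labels):
    local_classes = sorted(set(np.unique(labels)))
    covered = np.isin(global_test_labels, local_classes).mean()
    print(f"client {i}: trains on classes {local_classes}, "
          f"but only {covered:.0%} of the global test set comes from those classes")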
Evaluation methodology 2: Global test dataset and local test datasets In this evaluation methodology we consider that there is, as in the previous one, a global test dataset and that each client has a local test dataset according to the distribution of their training data. Hence, in each round we show the evaluation metrics of each client on their local and the global test. This evaluation methodology is more complete as it shows the performance of the local FL models on the global and local distributions of the data, which gives us more information. First, we split each client's data in train and test partitions. You can find this method in [Federated Operation](https://github.com/sherpaai/Sherpa.ai-Federated-Learning-Framework/blob/master/shfl/private/federated_operation.py). ###Code shfl.private.federated_operation.split_train_test(federated_data) ###Output _____no_output_____ ###Markdown After that, each client owns a training set, which it uses for training the local learning model, and a test set, which it uses for evaluation. We are now ready to show the behaviour of this evaluation methodology. ###Code # We restart the federated government federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, aggregator) test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2],1)) federated_government.run_rounds(1, test_data, test_labels) ###Output _____no_output_____
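###Markdown The `split_train_test` call above is what gives every node its own held-out data. Stripped of the shfl API, the idea is just a per-client split; a rough numpy-only sketch (the 80/20 ratio and the array layout here are assumptions of the sketch, not shfl's defaults): ###Code
import numpy as np

def split_client(data, labels, test_fraction=0.2, seed=0):
    """Shuffle one client's local arrays and hold out a test share."""
    rng = np.random.default_rng(seed)
    idx = rng.permutation(len(data))
    n_test = int(len(data) * test_fraction)
    test_idx, train_idx = idx[:n_test], idx[n_test:]
    return (data[train_idx], labels[train_idx]), (data[test_idx], labels[test_idx])

# each client then reports metrics on its *own* test split, so the local numbers
# reflect that client's data distribution rather than the global one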
intermediate_notebooks/examples/linear_regression_demo.ipynb
###Markdown Intro to Linear Regression with cuMLCorresponding notebook to [*Beginner’s Guide to Linear Regression in Python with cuML*](http://bit.ly/cuml_lin_reg_friend) story on MediumLinear Regression is a simple machine learning model where the response `y` is modelled by a linear combination of the predictors in `X`. The `LinearRegression` function implemented in the `cuML` library allows users to change the `fit_intercept`, `normalize`, and `algorithm` parameters. Here is a brief on RAPIDS' Linear Regression parameters:- `algorithm`: 'eig' or 'svd' (default = 'eig') - `Eig` uses a eigen decomposition of the covariance matrix, and is much faster - `SVD` is slower, but guaranteed to be stable- `fit_intercept`: boolean (default = True) - If `True`, `LinearRegresssion` tries to correct for the global mean of `y` - If `False`, the model expects that you have centered the data.- `normalize`: boolean (default = False) - If True, the predictors in X will be normalized by dividing by it’s L2 norm - If False, no scaling will be doneMethods that can be used with `LinearRegression` are:- `fit`: Fit the model with `X` and `y`- `get_params`: Sklearn style return parameter state- `predict`: Predicts the `y` for `X`- `set_params`: Sklearn style set parameter state to dictionary of params`cuML`'s `LinearRegression` expects expects either `cuDF` DataFrame or `NumPy` matrix inputs Note: `CuPy` is not installed by default with RAPIDS `Conda` or `Docker` packages, but is needed for visualizing results in this notebook.- install with `pip` via the cell below ###Code # install cupy !pip install cupy ###Output Requirement already satisfied: cupy in /opt/conda/envs/rapids/lib/python3.6/site-packages (7.4.0) Requirement already satisfied: six>=1.9.0 in /opt/conda/envs/rapids/lib/python3.6/site-packages (from cupy) (1.14.0) Requirement already satisfied: numpy>=1.9.0 in /opt/conda/envs/rapids/lib/python3.6/site-packages (from cupy) (1.18.4) Requirement already satisfied: fastrlock>=0.3 in /opt/conda/envs/rapids/lib/python3.6/site-packages (from cupy) (0.4) ###Markdown Load data- for this demo, we will be utilizing the Boston housing dataset from `sklearn` - start by loading in the set and printing a map of the contents ###Code from sklearn.datasets import load_boston # load Boston dataset boston = load_boston() # let's see what's inside print(boston.keys()) ###Output dict_keys(['data', 'target', 'feature_names', 'DESCR', 'filename']) ###Markdown Boston house prices dataset- a description of the dataset is provided in `DESCR` - let's explore ###Code # what do we know about this dataset? print(boston.DESCR) ###Output .. _boston_dataset: Boston house prices dataset --------------------------- **Data Set Characteristics:** :Number of Instances: 506 :Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target. :Attribute Information (in order): - CRIM per capita crime rate by town - ZN proportion of residential land zoned for lots over 25,000 sq.ft. 
- INDUS proportion of non-retail business acres per town - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) - NOX nitric oxides concentration (parts per 10 million) - RM average number of rooms per dwelling - AGE proportion of owner-occupied units built prior to 1940 - DIS weighted distances to five Boston employment centres - RAD index of accessibility to radial highways - TAX full-value property-tax rate per $10,000 - PTRATIO pupil-teacher ratio by town - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town - LSTAT % lower status of the population - MEDV Median value of owner-occupied homes in $1000's :Missing Attribute Values: None :Creator: Harrison, D. and Rubinfeld, D.L. This is a copy of UCI ML housing dataset. https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics ...', Wiley, 1980. N.B. Various transformations are used in the table on pages 244-261 of the latter. The Boston house-price data has been used in many machine learning papers that address regression problems. .. topic:: References - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261. - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. ###Markdown Build Dataframe- Import `cuDF` and input the data into a DataFrame - Then add a `PRICE` column equal to the `target` key ###Code import cudf # build dataframe from data key bos = cudf.DataFrame(list(boston.data)) # set column names to feature_names bos.columns = boston.feature_names # add PRICE column from target bos['PRICE'] = boston.target # let's see what we're working with bos.head() ###Output _____no_output_____ ###Markdown Split Train from Test- For basic Linear Regression, we will predict `PRICE` (Median value of owner-occupied homes) based on `TAX` (full-value property-tax rate per $10,000) - Go ahead and trim data to just these columns ###Code # simple linear regression X and Y X = bos['TAX'] Y = bos['PRICE'] ###Output _____no_output_____ ###Markdown We can now set training and testing sets for our model- Use `cuML`'s `train_test_split` to do this - Train on 70% of data - Test on 30% of data ###Code from cuml.preprocessing.model_selection import train_test_split # train/test split (70:30) sX_train, sX_test, sY_train, sY_test = train_test_split(X, Y, train_size = 0.7) # see what it looks like print(sX_train.shape) print(sX_test.shape) print(sY_train.shape) print(sY_test.shape) ###Output (354,) (152,) (354,) (152,) ###Markdown Predict Values1. fit the model with `TAX` (*X_train*) and corresponding `PRICE` (*y_train*) values - so it can build an understanding of their relationship 2. 
predict `PRICE` (*y_test*) for a test set of `TAX` (*X_test*) values - and compare `PRICE` predictions to actual median house (*y_test*) values - use `sklearn`'s `mean_squared_error` to do this ###Code sX_train.head() from cuml import LinearRegression from sklearn.metrics import mean_squared_error # call Linear Regression model slr = LinearRegression() # train the model slr.fit(sX_train, sY_train) # make predictions for test X values sY_pred = slr.predict(sX_test) # calculate error mse = mean_squared_error(sY_test.to_array(), sY_pred.to_array()) print(mse) ###Output /opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:8: UserWarning: Changing solver from 'eig' to 'svd' as eig solver does not support training data with 1 column currently. ###Markdown 3. visualize prediction accuracy with `matplotlib` ###Code import cupy import matplotlib.pyplot as plt # scatter actual and predicted results plt.scatter(sY_test.to_array(), sY_pred.to_array()) # label graph plt.xlabel("Actual Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") plt.show() ###Output _____no_output_____ ###Markdown Multiple Linear Regression - Our mean squared error for Simple Linear Regression looks kinda high. - Let's try Multiple Linear Regression (predicting based on multiple variables rather than just `TAX`) and see if that produces more accurate predictions1. Set X to contain all values that are not `PRICE` from the unsplit data - i.e. `CRIM`, `ZN`, `INDUS`, `CHAS`, `NOX`, `RM`, `AGE`, `DIS`, `RAD`, `TAX`, `PTRATIO`, `B`, `LSTAT` - Y to still represent just 1 target value (`PRICE`) - also from the unsplit data ###Code # set X to all variables except price mX = bos.drop('PRICE', axis=1) # and, like in the simple Linear Regression, set Y to price mY = bos['PRICE'] ###Output _____no_output_____ ###Markdown 2. Split the data into `multi_X_train`, `multi_X_test`, `Y_train`, and `Y_test` - Use `cuML`'s `train_test_split` - And the same 70:30 train:test ratio ###Code # train/test split (70:30) mX_train, mX_test, mY_train, mY_test = train_test_split(mX, mY, train_size = 0.7) # see what it looks like print(mX_train.shape) print(mX_test.shape) print(mY_train.shape) print(mY_test.shape) ###Output (354, 13) (152, 13) (354,) (152,) ###Markdown 3. fit the model with `multi_X_train` and corresponding `PRICE` (*y_train*) values - so it can build an understanding of their relationships 4. predict `PRICE` (*y_test*) for the test set of independent (*multi_X_test*) values - and compare `PRICE` predictions to actual median house (*y_test*) values - use `sklearn`'s `mean_squared_error` to do this ###Code # call Linear Regression model mlr = LinearRegression() # train the model for multiple regression mlr.fit(mX_train, mY_train) # make predictions for test X values mY_pred = mlr.predict(mX_test) # calculate error mmse = mean_squared_error(mY_test.to_array(), mY_pred.to_array()) print(mmse) ###Output 28.312087834147203 ###Markdown 5. 
visualize with `matplotlib` ###Code # scatter actual and predicted results plt.scatter(mY_test.to_array(), mY_pred.to_array()) # label graph plt.xlabel("Actual Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") plt.show() ###Output _____no_output_____ ###Markdown Intro to Linear Regression with cuMLCorresponding notebook to [*Beginner’s Guide to Linear Regression in Python with cuML*](http://bit.ly/cuml_lin_reg_friend) story on MediumLinear Regression is a simple machine learning model where the response `y` is modelled by a linear combination of the predictors in `X`. The `LinearRegression` function implemented in the `cuML` library allows users to change the `fit_intercept`, `normalize`, and `algorithm` parameters. Here is a brief on RAPIDS' Linear Regression parameters:- `algorithm`: 'eig' or 'svd' (default = 'eig') - `Eig` uses a eigen decomposition of the covariance matrix, and is much faster - `SVD` is slower, but guaranteed to be stable- `fit_intercept`: boolean (default = True) - If `True`, `LinearRegresssion` tries to correct for the global mean of `y` - If `False`, the model expects that you have centered the data.- `normalize`: boolean (default = False) - If True, the predictors in X will be normalized by dividing by it’s L2 norm - If False, no scaling will be doneMethods that can be used with `LinearRegression` are:- `fit`: Fit the model with `X` and `y`- `get_params`: Sklearn style return parameter state- `predict`: Predicts the `y` for `X`- `set_params`: Sklearn style set parameter state to dictionary of params`cuML`'s `LinearRegression` expects expects either `cuDF` DataFrame or `NumPy` matrix inputs Note: `CuPy` is not installed by default with RAPIDS `Conda` or `Docker` packages, but is needed for visualizing results in this notebook.- install with `pip` via the cell below ###Code # install cupy !pip install cupy ###Output WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip. Please see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue. To avoid this problem you can invoke Python with '-m pip' instead of running pip directly. Requirement already satisfied: cupy in /opt/conda/envs/rapids/lib/python3.6/site-packages (7.1.1) Requirement already satisfied: numpy>=1.9.0 in /opt/conda/envs/rapids/lib/python3.6/site-packages (from cupy) (1.17.5) Requirement already satisfied: fastrlock>=0.3 in /opt/conda/envs/rapids/lib/python3.6/site-packages (from cupy) (0.4) Requirement already satisfied: six>=1.9.0 in /opt/conda/envs/rapids/lib/python3.6/site-packages (from cupy) (1.14.0) ###Markdown Load data- for this demo, we will be utilizing the Boston housing dataset from `sklearn` - start by loading in the set and printing a map of the contents ###Code from sklearn.datasets import load_boston # load Boston dataset boston = load_boston() # let's see what's inside print(boston.keys()) ###Output dict_keys(['data', 'target', 'feature_names', 'DESCR', 'filename']) ###Markdown Boston house prices dataset- a description of the dataset is provided in `DESCR` - let's explore ###Code # what do we know about this dataset? print(boston.DESCR) ###Output .. _boston_dataset: Boston house prices dataset --------------------------- **Data Set Characteristics:** :Number of Instances: 506 :Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target. 
:Attribute Information (in order): - CRIM per capita crime rate by town - ZN proportion of residential land zoned for lots over 25,000 sq.ft. - INDUS proportion of non-retail business acres per town - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) - NOX nitric oxides concentration (parts per 10 million) - RM average number of rooms per dwelling - AGE proportion of owner-occupied units built prior to 1940 - DIS weighted distances to five Boston employment centres - RAD index of accessibility to radial highways - TAX full-value property-tax rate per $10,000 - PTRATIO pupil-teacher ratio by town - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town - LSTAT % lower status of the population - MEDV Median value of owner-occupied homes in $1000's :Missing Attribute Values: None :Creator: Harrison, D. and Rubinfeld, D.L. This is a copy of UCI ML housing dataset. https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics ...', Wiley, 1980. N.B. Various transformations are used in the table on pages 244-261 of the latter. The Boston house-price data has been used in many machine learning papers that address regression problems. .. topic:: References - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261. - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. ###Markdown Build Dataframe- Import `cuDF` and input the data into a DataFrame - Then add a `PRICE` column equal to the `target` key ###Code import cudf # build dataframe from data key bos = cudf.DataFrame(list(boston.data)) # set column names to feature_names bos.columns = boston.feature_names # add PRICE column from target bos['PRICE'] = boston.target # let's see what we're working with bos.head() ###Output _____no_output_____ ###Markdown Split Train from Test- For basic Linear Regression, we will predict `PRICE` (Median value of owner-occupied homes) based on `TAX` (full-value property-tax rate per $10,000) - Go ahead and trim data to just these columns ###Code # simple linear regression X and Y X = bos['TAX'] Y = bos['PRICE'] ###Output _____no_output_____ ###Markdown We can now set training and testing sets for our model- Use `cuML`'s `train_test_split` to do this - Train on 70% of data - Test on 30% of data ###Code from cuml.preprocessing.model_selection import train_test_split # train/test split (70:30) sX_train, sX_test, sY_train, sY_test = train_test_split(X, Y, train_size = 0.7) # see what it looks like print(sX_train.shape) print(sX_test.shape) print(sY_train.shape) print(sY_test.shape) ###Output (354,) (152,) (354,) (152,) ###Markdown Predict Values1. fit the model with `TAX` (*X_train*) and corresponding `PRICE` (*y_train*) values - so it can build an understanding of their relationship 2. 
predict `PRICE` (*y_test*) for a test set of `TAX` (*X_test*) values - and compare `PRICE` predictions to actual median house (*y_test*) values - use `sklearn`'s `mean_squared_error` to do this ###Code sX_train.head() from cuml import LinearRegression from sklearn.metrics import mean_squared_error # call Linear Regression model slr = LinearRegression() # train the model slr.fit(sX_train, sY_train) # make predictions for test X values sY_pred = slr.predict(sX_test) # calculate error mse = mean_squared_error(sY_test, sY_pred) print(mse) ###Output 74.39119249959165 ###Markdown 3. visualize prediction accuracy with `matplotlib` ###Code import cupy import matplotlib.pyplot as plt # scatter actual and predicted results plt.scatter(sY_test.to_array(), sY_pred.to_array()) # label graph plt.xlabel("Actual Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") plt.show() ###Output _____no_output_____ ###Markdown Multiple Linear Regression - Our mean squared error for Simple Linear Regression looks kinda high. - Let's try Multiple Linear Regression (predicting based on multiple variables rather than just `TAX`) and see if that produces more accurate predictions1. Set X to contain all values that are not `PRICE` from the unsplit data - i.e. `CRIM`, `ZN`, `INDUS`, `CHAS`, `NOX`, `RM`, `AGE`, `DIS`, `RAD`, `TAX`, `PTRATIO`, `B`, `LSTAT` - Y to still represent just 1 target value (`PRICE`) - also from the unsplit data ###Code # set X to all variables except price mX = bos.drop('PRICE', axis=1) # and, like in the simple Linear Regression, set Y to price mY = bos['PRICE'] ###Output _____no_output_____ ###Markdown 2. Split the data into `multi_X_train`, `multi_X_test`, `Y_train`, and `Y_test` - Use `cuML`'s `train_test_split` - And the same 70:30 train:test ratio ###Code # train/test split (70:30) mX_train, mX_test, mY_train, mY_test = train_test_split(mX, mY, train_size = 0.7) # see what it looks like print(mX_train.shape) print(mX_test.shape) print(mY_train.shape) print(mY_test.shape) ###Output (354, 13) (152, 13) (354,) (152,) ###Markdown 3. fit the model with `multi_X_train` and corresponding `PRICE` (*y_train*) values - so it can build an understanding of their relationships 4. predict `PRICE` (*y_test*) for the test set of independent (*multi_X_test*) values - and compare `PRICE` predictions to actual median house (*y_test*) values - use `sklearn`'s `mean_squared_error` to do this ###Code # call Linear Regression model mlr = LinearRegression() # train the model for multiple regression mlr.fit(mX_train, mY_train) # make predictions for test X values mY_pred = mlr.predict(mX_test) # calculate error mmse = mean_squared_error(mY_test, mY_pred) print(mmse) ###Output 19.26235079557486 ###Markdown 5. visualize with `matplotlib` ###Code # scatter actual and predicted results plt.scatter(mY_test.to_array(), mY_pred.to_array()) # label graph plt.xlabel("Actual Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") plt.show() ###Output _____no_output_____ ###Markdown Intro to Linear Regression with cuMLCorresponding notebook to [*Beginner’s Guide to Linear Regression in Python with cuML*](http://bit.ly/cuml_lin_reg_friend) story on MediumLinear Regression is a simple machine learning model where the response `y` is modelled by a linear combination of the predictors in `X`. 
The `LinearRegression` function implemented in the `cuML` library allows users to change the `fit_intercept`, `normalize`, and `algorithm` parameters. Here is a brief on RAPIDS' Linear Regression parameters:- `algorithm`: 'eig' or 'svd' (default = 'eig') - `Eig` uses a eigendecomposition of the covariance matrix, and is much faster - `SVD` is slower, but guaranteed to be stable- `fit_intercept`: boolean (default = True) - If `True`, `LinearRegresssion` tries to correct for the global mean of `y` - If `False`, the model expects that you have centered the data.- `normalize`: boolean (default = False) - If True, the predictors in X will be normalized by dividing by it’s L2 norm - If False, no scaling will be doneMethods that can be used with `LinearRegression` are:- `fit`: Fit the model with `X` and `y`- `get_params`: Sklearn style return parameter state- `predict`: Predicts the `y` for `X`- `set_params`: Sklearn style set parameter state to dictionary of params`cuML`'s `LinearRegression` expects expects either `cuDF` DataFrame or `NumPy` matrix inputs Note: `CuPy` is not installed by default with RAPIDS `Conda` or `Docker` packages, but is needed for visualizing results in this notebook.- install with `pip` via the cell below ###Code # install cupy !pip install cupy ###Output _____no_output_____ ###Markdown Load data- for this demo, we will be utilizing the Boston housing dataset from `sklearn` - start by loading in the set and printing a map of the contents ###Code from sklearn.datasets import load_boston # load Boston dataset boston = load_boston() # let's see what's inside print(boston.keys()) ###Output dict_keys(['data', 'target', 'feature_names', 'DESCR', 'filename']) ###Markdown Boston house prices dataset- a description of the dataset is provided in `DESCR` - let's explore ###Code # what do we know about this dataset? print(boston.DESCR) ###Output .. _boston_dataset: Boston house prices dataset --------------------------- **Data Set Characteristics:** :Number of Instances: 506 :Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target. :Attribute Information (in order): - CRIM per capita crime rate by town - ZN proportion of residential land zoned for lots over 25,000 sq.ft. - INDUS proportion of non-retail business acres per town - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) - NOX nitric oxides concentration (parts per 10 million) - RM average number of rooms per dwelling - AGE proportion of owner-occupied units built prior to 1940 - DIS weighted distances to five Boston employment centres - RAD index of accessibility to radial highways - TAX full-value property-tax rate per $10,000 - PTRATIO pupil-teacher ratio by town - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town - LSTAT % lower status of the population - MEDV Median value of owner-occupied homes in $1000's :Missing Attribute Values: None :Creator: Harrison, D. and Rubinfeld, D.L. This is a copy of UCI ML housing dataset. https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics ...', Wiley, 1980. N.B. Various transformations are used in the table on pages 244-261 of the latter. 
The Boston house-price data has been used in many machine learning papers that address regression problems. .. topic:: References - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261. - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann. ###Markdown Build Dataframe- Import `cuDF` and input the data into a DataFrame - Then add a `PRICE` column equal to the `target` key ###Code import cudf # build dataframe from data key bos = cudf.DataFrame(list(boston.data)) # set column names to feature_names bos.columns = boston.feature_names # add PRICE column from target bos['PRICE'] = boston.target # let's see what we're working with bos.head() ###Output _____no_output_____ ###Markdown Split Train from Test- For basic Linear Regression, we will predict `PRICE` (Median value of owner-occupied homes) based on `TAX` (full-value property-tax rate per $10,000) - Go ahead and trim data to just these columns ###Code # simple linear regression X and Y X = bos['TAX'] Y = bos['PRICE'] ###Output _____no_output_____ ###Markdown We can now set training and testing sets for our model- Use `cuML`'s `train_test_split` to do this - Train on 70% of data - Test on 30% of data ###Code from cuml.preprocessing.model_selection import train_test_split # train/test split (70:30) sX_train, sX_test, sY_train, sY_test = train_test_split(X, Y, train_size = 0.7) # see what it looks like print(sX_train.shape) print(sX_test.shape) print(sY_train.shape) print(sY_test.shape) ###Output (354,) (152,) (354,) (152,) ###Markdown Predict Values1. fit the model with `TAX` (*X_train*) and corresponding `PRICE` (*y_train*) values - so it can build an understanding of their relationship 2. predict `PRICE` (*y_test*) for a test set of `TAX` (*X_test*) values - and compare `PRICE` predictions to actual median house (*y_test*) values - use `sklearn`'s `mean_squared_error` to do this ###Code from cuml import LinearRegression from sklearn.metrics import mean_squared_error # call Linear Regression model slr = LinearRegression() # train the model slr.fit(sX_train, sY_train) # make predictions for test X values sY_pred = slr.predict(sX_test) # calculate error mse = mean_squared_error(sY_test, sY_pred) print(mse) ###Output 54.32312606491228 ###Markdown 3. visualize prediction accuracy with `matplotlib` ###Code import cupy import matplotlib.pyplot as plt # scatter actual and predicted results plt.scatter(sY_test, sY_pred) # label graph plt.xlabel("Actual Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") plt.show() ###Output _____no_output_____ ###Markdown Multiple Linear Regression - Our mean squared error for Simple Linear Regression looks kinda high.. - Let's try Multiple Linear Regression (predicting based on multiple variables rather than just `TAX`) and see if that produces more accurate predictions1. Set X to contain all values that are not `PRICE` from the unsplit data - i.e. `CRIM`, `ZN`, `INDUS`, `CHAS`, `NOX`, `RM`, `AGE`, `DIS`, `RAD`, `TAX`, `PTRATIO`, `B`, `LSTAT` - Y to still represent just 1 target value (`PRICE`) - also from the unsplit data ###Code # set X to all variables except price mX = bos.drop('PRICE', axis=1) # and, like in the simple Linear Regression, set Y to price mY = bos['PRICE'] ###Output _____no_output_____ ###Markdown 2. 
Split the data into `multi_X_train`, `multi_X_test`, `Y_train`, and `Y_test` - Use `cuML`'s `train_test_split` - And the same 70:30 train:test ratio ###Code # train/test split (70:30) mX_train, mX_test, mY_train, mY_test = train_test_split(mX, mY, train_size = 0.7) # see what it looks like print(mX_train.shape) print(mX_test.shape) print(mY_train.shape) print(mY_test.shape) ###Output (354, 13) (152, 13) (354,) (152,) ###Markdown 3. fit the model with `multi_X_train` and corresponding `PRICE` (*y_train*) values - so it can build an understanding of their relationships 4. predict `PRICE` (*y_test*) for the test set of independent (*multi_X_test*) values - and compare `PRICE` predictions to actual median house (*y_test*) values - use `sklearn`'s `mean_squared_error` to do this ###Code # call Linear Regression model mlr = LinearRegression() # train the model for multiple regression mlr.fit(mX_train, mY_train) # make predictions for test X values mY_pred = mlr.predict(mX_test) # calculate error mmse = mean_squared_error(mY_test, mY_pred) print(mmse) ###Output 16.691811854229723 ###Markdown 5. visualize with `matplotlib` ###Code # scatter actual and predicted results plt.scatter(mY_test, mY_pred) # label graph plt.xlabel("Actual Prices: $Y_i$") plt.ylabel("Predicted prices: $\hat{Y}_i$") plt.title("Prices vs Predicted prices: $Y_i$ vs $\hat{Y}_i$") plt.show() ###Output _____no_output_____ ###Markdown Linear RegressionThis notebook compares a CPU implementation and a GPU implementation of Linear Regression. It includes code example for doing Linear Regression using RAPIDS cuDF and cuML. Notebook Credits**Authorship**Original Author: Unknown Last Edit: Taurean Dyer, 9/25/2019**Test System Specs**Test System Hardware: GV100Test System Software: Ubuntu 18.04RAPIDS Version: 0.10.0a - Docker InstallDriver: 410.79CUDA: 10.0**Known Working Systems**RAPIDS Versions: 0.4, 0.5, 0.5.1, 0.6, 0.6.1, 0.7, 0.8, 0.9, 0.10 Let's Begin: Linear Regression ImportsLet's start with our Imports ###Code import numpy as np import pandas as pd from sklearn import linear_model as sklGLM from cuml import LinearRegression as cumlOLS from cuml import Ridge as cumlRidge import cudf import os ###Output _____no_output_____ ###Markdown Helper Functions ###Code from timeit import default_timer class Timer(object): def __init__(self): self._timer = default_timer def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def start(self): """Start the timer.""" self.start = self._timer() def stop(self): """Stop the timer. 
Calculate the interval in seconds.""" self.end = self._timer() self.interval = self.end - self.start import gzip def load_data(nrows, ncols, cached = '../../data/mortgage/mortgage.npy.gz'): if os.path.exists(cached): print('use mortgage data') with gzip.open(cached) as f: X = np.load(f) # the 4th column is 'adj_remaining_months_to_maturity' # used as the label X = X[:,[i for i in range(X.shape[1]) if i!=4]] y = X[:,4:5] rindices = np.random.randint(0,X.shape[0]-1,nrows) X = X[rindices,:ncols] y = y[rindices] else: print('use random data') X = np.random.rand(nrows,ncols) y = np.random.rand(nrows,1) df_X = pd.DataFrame({'fea%d'%i:X[:,i] for i in range(X.shape[1])}) df_y = pd.DataFrame({'fea%d'%i:y[:,i] for i in range(y.shape[1])}) return df_X, df_y from sklearn.metrics import mean_squared_error def array_equal(a,b,threshold=2e-3,with_sign=True): a = to_nparray(a).ravel() b = to_nparray(b).ravel() if with_sign == False: a,b = np.abs(a),np.abs(b) error = mean_squared_error(a,b) res = error<threshold return res def to_nparray(x): if isinstance(x,np.ndarray) or isinstance(x,pd.DataFrame): return np.array(x) elif isinstance(x,np.float64): return np.array([x]) elif isinstance(x,cudf.DataFrame) or isinstance(x,cudf.Series): return x.to_pandas().values return x ###Output _____no_output_____ ###Markdown Now that we have our Helper functions, lets start to compare the speed and results for SciKit Learn's CPU impletmenation versus RAPIDS cuML GPU impletementation. ###Code %%time nrows = 2**20 ncols = 399 X, y = load_data(nrows,ncols) print('data',X.shape) print('label',y.shape) ###Output _____no_output_____ ###Markdown Even though the OLS interface of cuML is very similar to Scikit-Learn's implemetation, cuML doesn't use some of the parameters such as "copy" and "n_jobs". Also, cuML includes two different implementation of OLS using SVD and Eigen decomposition. Eigen decomposition based implementation is very fast but causes very small errors in the coefficients which is negligible for most of the applications. SVD is stable but slower than eigen decomposition based implementation. Get MSE for SciKit Learn ###Code fit_intercept = True normalize = False algorithm = "eig" # eig: eigen decomposition based method, svd: singular value decomposition based method. %%time reg_sk = sklGLM.LinearRegression(fit_intercept=fit_intercept, normalize=normalize) result_sk = reg_sk.fit(X, y) %%time y_sk = reg_sk.predict(X) error_sk = mean_squared_error(y,y_sk) ###Output _____no_output_____ ###Markdown Get MSE for cuML ###Code %%time X_cudf = cudf.DataFrame.from_pandas(X) y_cudf = y.values y_cudf = y_cudf[:,0] y_cudf = cudf.Series(y_cudf) %%time reg_cuml = cumlOLS(fit_intercept=fit_intercept, normalize=normalize, algorithm=algorithm) result_cuml = reg_cuml.fit(X_cudf, y_cudf) %%time y_cuml = reg_cuml.predict(X_cudf) y_cuml = to_nparray(y_cuml).ravel() error_cuml = mean_squared_error(y,y_cuml) ###Output _____no_output_____ ###Markdown Final Comparison Between SKL and cuMLYour final output should have both MSE results close to 0 (about 1.0e-7 to 1.0e-14). However, despite having similar answers, you should see a **massive reduction to the sys time** when using **RAPIDS cuML** versus **SciKit Learn**. Go RAPIDS! ###Code print("SKL MSE(y):") print(error_sk) print("CUML MSE(y):") print(error_cuml) ###Output _____no_output_____ ###Markdown Linear RegressionThis notebook compares a CPU implementation and a GPU implementation of Linear Regression. It includes code example for doing Linear Regression using RAPIDS cuDF and cuML. 
Notebook Credits AuthorshipOriginal Author: Unknown Last Edit: Taurean Dyer, 2/20/2019 Test System SpecsTest System Hardware: DGX-2 Test System Software: Ubuntu 16.04 RAPIDS Version: 0.5.1 - Docker Install Driver: 410.79 CUDA: 10.0 Known Working SystemsRAPIDS Versions: 0.4, 0.5, 0.5.1 Let's Begin: Linear Regression ImportsLet's start with our Imports ###Code import numpy as np import pandas as pd from sklearn import linear_model as sklGLM from cuml import LinearRegression as cumlOLS from cuml import Ridge as cumlRidge import cudf import os ###Output _____no_output_____ ###Markdown Helper Functions ###Code from timeit import default_timer class Timer(object): def __init__(self): self._timer = default_timer def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() def start(self): """Start the timer.""" self.start = self._timer() def stop(self): """Stop the timer. Calculate the interval in seconds.""" self.end = self._timer() self.interval = self.end - self.start import gzip def load_data(nrows, ncols, cached = '../../data/mortgage/mortgage.npy.gz'): if os.path.exists(cached): print('use mortgage data') with gzip.open(cached) as f: X = np.load(f) # the 4th column is 'adj_remaining_months_to_maturity' # used as the label X = X[:,[i for i in range(X.shape[1]) if i!=4]] y = X[:,4:5] rindices = np.random.randint(0,X.shape[0]-1,nrows) X = X[rindices,:ncols] y = y[rindices] else: print('use random data') X = np.random.rand(nrows,ncols) y = np.random.rand(nrows,1) df_X = pd.DataFrame({'fea%d'%i:X[:,i] for i in range(X.shape[1])}) df_y = pd.DataFrame({'fea%d'%i:y[:,i] for i in range(y.shape[1])}) return df_X, df_y from sklearn.metrics import mean_squared_error def array_equal(a,b,threshold=2e-3,with_sign=True): a = to_nparray(a).ravel() b = to_nparray(b).ravel() if with_sign == False: a,b = np.abs(a),np.abs(b) error = mean_squared_error(a,b) res = error<threshold return res def to_nparray(x): if isinstance(x,np.ndarray) or isinstance(x,pd.DataFrame): return np.array(x) elif isinstance(x,np.float64): return np.array([x]) elif isinstance(x,cudf.DataFrame) or isinstance(x,cudf.Series): return x.to_pandas().values return x ###Output _____no_output_____ ###Markdown Now that we have our Helper functions, lets start to compare the speed and results for SciKit Learn's CPU impletmenation versus RAPIDS cuML GPU impletementation. ###Code %%time nrows = 2**20 ncols = 399 X, y = load_data(nrows,ncols) print('data',X.shape) print('label',y.shape) ###Output _____no_output_____ ###Markdown Even though the OLS interface of cuML is very similar to Scikit-Learn's implemetation, cuML doesn't use some of the parameters such as "copy" and "n_jobs". Also, cuML includes two different implementation of OLS using SVD and Eigen decomposition. Eigen decomposition based implementation is very fast but causes very small errors in the coefficients which is negligible for most of the applications. SVD is stable but slower than eigen decomposition based implementation. Get MSE for SciKit Learn ###Code fit_intercept = True normalize = False algorithm = "eig" # eig: eigen decomposition based method, svd: singular value decomposition based method. 
%%time reg_sk = sklGLM.LinearRegression(fit_intercept=fit_intercept, normalize=normalize) result_sk = reg_sk.fit(X, y) %%time y_sk = reg_sk.predict(X) error_sk = mean_squared_error(y,y_sk) ###Output _____no_output_____ ###Markdown Get MSE for cuML ###Code %%time X_cudf = cudf.DataFrame.from_pandas(X) y_cudf = y.values y_cudf = y_cudf[:,0] y_cudf = cudf.Series(y_cudf) %%time reg_cuml = cumlOLS(fit_intercept=fit_intercept, normalize=normalize, algorithm=algorithm) result_cuml = reg_cuml.fit(X_cudf, y_cudf) %%time y_cuml = reg_cuml.predict(X_cudf) y_cuml = to_nparray(y_cuml).ravel() error_cuml = mean_squared_error(y,y_cuml) ###Output _____no_output_____ ###Markdown Final Comparison Between SKL and cuMLYour final output should have both MSE results close to 0 (about 1.0e-7 to 1.0e-14). However, despite having similar answers, you should see a **massive reduction to the sys time** when using **RAPIDS cuML** versus **SciKit Learn**. Go RAPIDS! ###Code print("SKL MSE(y):") print(error_sk) print("CUML MSE(y):") print(error_cuml) ###Output _____no_output_____
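###Markdown The `eig` / `svd` choice discussed above mirrors the two classical ways of solving a least-squares problem. A small numpy-only sketch on random data (independent of cuML) shows that both routes agree on a well-conditioned problem: ###Code
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(1000, 5)
y = rng.rand(1000)

# normal-equations route (the kind of factorisation eig-style solvers exploit)
w_normal = np.linalg.solve(X.T @ X, X.T @ y)

# SVD route: slower but numerically more stable; np.linalg.lstsq uses it under the hood
w_svd, *_ = np.linalg.lstsq(X, y, rcond=None)

print(np.max(np.abs(w_normal - w_svd)))   # round-off level difference on well-conditioned data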
lessons/Chapter8/20_time_dependent_1d.ipynb
###Markdown 10. Time dependent problem in 1D we consider the time dependent problem\begin{align*} -k_{\perp}^2 \partial_t \phi &= \partial_s J \\ \partial_t A + \mu \partial_t J &= \partial_s \left( n - \phi \right) \\ \partial_t n &= \partial_s J \\ \beta J &= k_{\perp}^2 A% \label{}\end{align*}where $s \in [-\pi, \pi]$, $\beta \sim 10^{-3}$, $\mu \sim 10^{-4}$ and $k_{\perp} \in [10^{-2},10^{1}]$.\\It is easy to check that the eigenvalues related to the previous system are $\{-V_a k_{\parallel}, 0, V_a k_{\parallel} \}$ with $V_a := \frac{1+k_{\perp}^2}{\beta + \mu k_{\perp}^2}$. Wave equation for $A$Multiplying the equation on $n$ by $k_{\perp}^2$ then adding it to the equation on $\phi$, we get$$\left( \beta + \mu k_{\perp}^2 \right) \partial_{tt} A = \left( 1 + k_{\perp}^2 \right) \partial_{ss} A$$which leads to the wave equation$$\partial_{tt} A = \frac{1+k_{\perp}^2}{\beta + \mu k_{\perp}^2} \partial_{ss} A$$ Time discretizationLet's define $\gamma := \frac{k_{\perp}^2}{\beta}$, and replace $J$ in the equation on $A$. We get\begin{align*} \partial_t \phi &= - \frac{1}{\beta} \partial_s A \\ \partial_t A &= \frac{1}{1 + \mu \gamma} \partial_s \left( n - \phi \right) \\ \partial_t n &= \gamma \partial_s A% \label{}\end{align*}Using a full implicit time scheme, we have,\begin{align*} \frac{\phi^{k+1} - \phi^{k}}{\Delta t} &= - \frac{1}{\beta} \partial_s A^{k+1} \\ \frac{A^{k+1} - A^{k}}{\Delta t} &= \frac{1}{1 + \mu \gamma} \partial_s \left( n^{k+1} - \phi^{k+1} \right) \\ \frac{n^{k+1} - n^{k}}{\Delta t} &= \gamma \partial_s A^{k+1}% \label{}\end{align*}finally, \begin{align*} \phi^{k+1} + \frac{\Delta t}{\beta} \partial_s A^{k+1} &= \phi^k \\ \frac{\Delta t}{1+\mu \gamma} \phi^{k+1} + A^{k+1} - \frac{\Delta t}{1+\mu \gamma} \partial_s n^{k+1} &= A^k \\ -\Delta t \gamma \partial_s A^{k+1} + n^{k+1} &= n^k\end{align*} Weak formulationLet $v$ denote a test function, in a Finite Elements space $V \subset H^1(\Omega)$. 
Multiplying all the equations by $v$, then integrating over the whole domain, we get\begin{align*} \langle \phi^{k+1}, v \rangle + \frac{\Delta t}{\beta} \langle \partial_s A^{k+1}, v \rangle &= \langle \phi^k, v \rangle \\ \frac{\Delta t}{1+\mu \gamma} \langle \phi^{k+1}, v \rangle + \langle A^{k+1}, v \rangle - \frac{\Delta t}{1+\mu \gamma} \langle \partial_s n^{k+1}, v \rangle &= \langle A^k, v \rangle \\ -\Delta t \gamma \langle \partial_s A^{k+1}, v \rangle + \langle n^{k+1}, v \rangle &= \langle n^k, v \rangle\end{align*}We use a symmetrized weak formulation, where we assume having periodic boundary conditions: \begin{align*} \langle \phi^{k+1}, v \rangle + \frac{\Delta t}{2 \beta} \langle \partial_s A^{k+1}, v \rangle - \frac{\Delta t}{2 \beta} \langle A^{k+1}, \partial_s v \rangle &= \langle \phi^k, v \rangle \\ \frac{\Delta t}{1+\mu \gamma} \langle \phi^{k+1}, v \rangle + \langle A^{k+1}, v \rangle - \frac{\Delta t}{2+2\mu \gamma} \langle \partial_s n^{k+1}, v \rangle + \frac{\Delta t}{2+2\mu \gamma} \langle n^{k+1}, \partial_s v \rangle &= \langle A^k, v \rangle \\ -\frac{\Delta t \gamma}{2} \langle \partial_s A^{k+1}, v \rangle +\frac{\Delta t \gamma}{2} \langle A^{k+1}, \partial_s v \rangle + \langle n^{k+1}, v \rangle &= \langle n^k, v \rangle\end{align*} In order to simplify the notation, we introduction the following bilinear form $$b( v,u ) := \frac{1}{2} \left( \langle \partial_s u, v \rangle - \langle u, \partial_s v \rangle \right)$$then our weak formulation writes\begin{align*} \langle \phi^{k+1}, v \rangle + \frac{\Delta t}{\beta} b(v,A^{k+1}) &= \langle \phi^k, v \rangle \\ \frac{\Delta t}{1+\mu \gamma} \langle \phi^{k+1}, v \rangle + \langle A^{k+1}, v \rangle - \frac{\Delta t}{1+\mu \gamma} b(v,n^{k+1}) &= \langle A^k, v \rangle \\ -\Delta t \gamma b(v,A^{k+1}) + \langle n^{k+1}, v \rangle &= \langle n^k, v \rangle\end{align*}Finally, let's introduce the weak formulation related to the mass matrix $a_m(v,u) := \langle u, v \rangle$, \begin{align*} a_m(v, \phi^{k+1}) + \frac{\Delta t}{\beta} b(v, A^{k+1}) &= a_m(v, \phi^k) \\ \frac{\Delta t}{1+\mu \gamma} a_m(v, \phi^{k+1}) + a_m(v, A^{k+1}) - \frac{\Delta t}{1+\mu \gamma} b(v, n^{k+1}) &= a_m(v, A^k) \\ - \Delta t \gamma b(v, A^{k+1}) + a_m(v, n^{k+1}) &= a_m(v, n^k)\end{align*} ###Code import numpy as np from numpy import linspace, zeros, pi from sympy.core.containers import Tuple from sympy import symbols from sympy import Symbol from sympy import Lambda from sympy import Function from gelato.glt import glt_symbol from gelato.calculus import (Dot, Cross, Grad, Curl, Rot, Div, dx) from gelato.calculus import Constant from gelato.fem.assembly import assemble_matrix from gelato.fem.utils import compile_kernel from gelato.fem.utils import compile_symbol from spl.fem.splines import SplineSpace from spl.fem.vector import VectorFemSpace from IPython.display import Math from sympy import latex x = Symbol('x') u = Symbol('u') v = Symbol('v') a_m = lambda v,u: u*v b = lambda v,u: 0.5*(dx(u)*v - u*dx(v)) ###Output _____no_output_____ ###Markdown \begin{align*} a_m(v, \phi^{k+1}) + \frac{\Delta t}{\beta} b(v, A^{k+1}) &= a_m(v, \phi^k) \\ \frac{\Delta t}{1+\mu \gamma} a_m(v, \phi^{k+1}) + a_m(v, A^{k+1}) - \frac{\Delta t}{1+\mu \gamma} b(v, n^{k+1}) &= a_m(v, A^k) \\ - \Delta t \gamma b(v, A^{k+1}) + a_m(v, n^{k+1}) &= a_m(v, n^k)\end{align*} ###Code phi, A, n = symbols('phi A n') v0, v1, v2 = symbols('v0 v1 v2') dt = Constant('dt') beta = Constant('beta') mu = Constant('mu') gamma = Constant('gamma') a = 
Lambda((x,v0,v1,v2,phi,A,n), a_m(v0, phi) + dt/beta * b(v0, A) + dt/(1+mu * gamma) * a_m(v1, phi) + a_m(v1, A) - dt/(1+mu * gamma) * b(v1, n) - dt * gamma * b(v2, A) + a_m(v2, n)) # create a finite element space p = 3 ne = 64 grid = linspace(0., 1., ne+1) W = SplineSpace(p, grid=grid) V = VectorFemSpace(W, W, W) symbol = glt_symbol(a, space=V) Math(latex(symbol)) eigen = symbol.eigenvals() eigen = list(eigen.keys()) Math(latex(eigen)) # compute the symbol of the mass symbol_m = glt_symbol(Lambda((x,v,u), u*v), space=V) symbol_a = glt_symbol(Lambda((x,v,u), dx(u)*v), space=V) eigen_normalized = [e/symbol_m for e in eigen] Math(latex(eigen_normalized)) e = eigen_normalized[1] from sympy import simplify, cancel, collect, expand Math(latex(cancel(e-1))) Math(latex(symbol_a)) print(symbol_a.is_complex) from IPython.core.display import HTML def css_styling(): styles = open("../../styles/custom.css", "r").read() return HTML(styles) css_styling() ###Output _____no_output_____
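###Markdown As a quick sanity check of the eigenvalues $\{-V_a k_{\parallel}, 0, V_a k_{\parallel}\}$ quoted at the top of this section, one can insert plane waves $e^{i(k_{\parallel} s - \omega t)}$ into the continuous system and solve the resulting dispersion relation symbolically. This is a minimal sympy sketch, independent of the GeLaTo/SPL machinery used above: ###Code
import sympy as sp

# Plane-wave ansatz exp(i*(k_par*s - omega*t)): d/dt -> -i*omega, d/ds -> i*k_par,
# and beta*J = k_perp^2*A eliminates J.  Rows are the coefficients of (phi, A, n).
omega = sp.symbols('omega', real=True)
kpar, kperp, beta, mu = sp.symbols('k_par k_perp beta mu', positive=True)
gamma = kperp**2 / beta

M = sp.Matrix([
    [sp.I*omega*kperp**2, -sp.I*kpar*gamma,            0],            # -k_perp^2 dt phi = ds J
    [sp.I*kpar,           -sp.I*omega*(1 + mu*gamma), -sp.I*kpar],    # dt A + mu dt J = ds (n - phi)
    [0,                   -sp.I*kpar*gamma,           -sp.I*omega],   # dt n = ds J
])

# Non-trivial solutions require det(M) = 0; the roots give the advection speeds times k_par.
roots = sp.solve(sp.Eq(M.det(), 0), omega)
print([sp.simplify(r) for r in roots])
# expected: 0 and +/- k_par*sqrt((1 + k_perp**2)/(beta + mu*k_perp**2))
###Output _____no_output_____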
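###Markdown The fully discrete problem is a 3x3 block linear system per time step, coupling the three fields through the mass form $a_m$ and the antisymmetric form $b$. The following standalone sketch illustrates that block structure with a periodic centred finite-difference matrix standing in for the spline mass and derivative operators assembled above; the parameter values and initial data are illustrative assumptions, not taken from the notebook. ###Code
import numpy as np
from scipy.sparse import identity, diags, csc_matrix, bmat
from scipy.sparse.linalg import splu

# Illustrative parameters (assumptions for this sketch only).
ns, dt, beta, mu, kperp = 256, 1.0e-3, 1.0e-3, 1.0e-4, 1.0
gamma = kperp**2 / beta

s = np.linspace(-np.pi, np.pi, ns, endpoint=False)
h = s[1] - s[0]

I = identity(ns, format='csc')
# Periodic centred difference: a simple stand-in for the antisymmetric form b(v, u).
D = diags([np.ones(ns - 1), -np.ones(ns - 1)], [1, -1]).tolil()
D[0, -1], D[-1, 0] = -1.0, 1.0
D = csc_matrix(D) / (2.0 * h)

# Block system for (phi, A, n) at time level k+1, mirroring the implicit scheme above.
K = bmat([[I,                         dt / beta * D,        None],
          [dt / (1 + mu * gamma) * I, I,                    -dt / (1 + mu * gamma) * D],
          [None,                      -dt * gamma * D,      I]], format='csc')
lu = splu(K)

phi, A, n = np.zeros(ns), np.sin(s), np.zeros(ns)   # illustrative initial data
for _ in range(100):
    phi, A, n = np.split(lu.solve(np.concatenate([phi, A, n])), 3)
print(float(np.abs(A).max()))
###Output _____no_output_____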
aws_sagemaker_studio/frameworks/mxnet_onnx_ei/mxnet_onnx_ei.ipynb
###Markdown Hosting ONNX models with Amazon Elastic Inference*(This notebook was tested with the "Python 3 (MXNet CPU Optimized)" kernel.)*Amazon Elastic Inference (EI) is a resource you can attach to your Amazon EC2 instances to accelerate your deep learning (DL) inference workloads. EI allows you to add inference acceleration to an Amazon SageMaker hosted endpoint or Jupyter notebook and reduce the cost of running deep learning inference by up to 75%, when compared to using GPU instances. For more information, please visit: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.htmlAmazon EI provides support for a variety of frameworks, including Apache MXNet and ONNX models. The [Open Neural Network Exchange](https://onnx.ai/) (ONNX) is an open standard format for deep learning models that enables interoperability between deep learning frameworks such as Apache MXNet, Microsoft Cognitive Toolkit (CNTK), PyTorch and more. This means that we can use any of these frameworks to train the model, export these pretrained models in ONNX format and then import them in MXNet for inference.In this example, we use the ResNet-152v1 model from [Deep residual learning for image recognition](https://arxiv.org/abs/1512.03385). This model, alongside many others, can be found at the [ONNX Model Zoo](https://github.com/onnx/models).We use the SageMaker Python SDK to host this ONNX model in SageMaker and perform inference requests. SetupFirst, we get the IAM execution role from our notebook environment, so that SageMaker can access resources in your AWS account later in the example. ###Code from sagemaker import get_execution_role role = get_execution_role() ###Output _____no_output_____ ###Markdown The inference scriptWe need to provide an inference script that can run on the SageMaker platform. This script is invoked by SageMaker when we perform inference.The script we're using here implements two functions:* `model_fn()` - loads the model* `transform_fn()` - uses the model to take the input and produce the output ###Code !pygmentize resnet152.py ###Output _____no_output_____ ###Markdown Preparing the modelTo create a SageMaker Endpoint, we first need to prepare the model to be used in SageMaker. Downloading the modelFor this example, we use a pre-trained ONNX model from the [ONNX Model Zoo](https://github.com/onnx/models), where you can find a collection of pre-trained models to work with. Here, we download the [ResNet-152v1 model](https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet152v1/resnet152v1.onnx) trained on ImageNet dataset. ###Code import mxnet as mx mx.test_utils.download( "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet152v1/resnet152v1.onnx" ) ###Output _____no_output_____ ###Markdown Compressing the model dataNow that we have the model data locally, we need to compress it, and then upload it to S3. ###Code import tarfile from sagemaker import s3, session with tarfile.open("onnx_model.tar.gz", mode="w:gz") as archive: archive.add("resnet152v1.onnx") bucket = session.Session().default_bucket() model_data = s3.S3Uploader.upload( "onnx_model.tar.gz", "s3://{}/mxnet-onnx-resnet152-example/model".format(bucket) ) ###Output _____no_output_____ ###Markdown Creating a SageMaker Python SDK Model instanceWith the model data uploaded to S3, we now have everything we need to instantiate a SageMaker Python SDK Model. 
We provide the constructor the following arguments:* `model_data`: the S3 location of the model data* `entry_point`: the script for model hosting that we looked at above* `role`: the IAM role used* `framework_version`: the MXNet version in use, in this case '1.4.1'For more about creating an `MXNetModel` object, see the [SageMaker Python SDK API docs](https://sagemaker.readthedocs.io/en/latest/sagemaker.mxnet.htmlmxnet-model). ###Code from sagemaker.mxnet import MXNetModel mxnet_model = MXNetModel( model_data=model_data, entry_point="resnet152.py", role=role, py_version="py3", framework_version="1.4.1", ) ###Output _____no_output_____ ###Markdown Creating an inference endpoint and attaching an Elastic Inference(EI) acceleratorNow we can use our `MXNetModel` object to build and deploy an `MXNetPredictor`. This creates a SageMaker Model and Endpoint, the latter of which we can use for performing inference. We pass the following arguments to the `deploy()` method:* `instance_count` - how many instances to back the endpoint.* `instance_type` - which EC2 instance type to use for the endpoint.* `accelerator_type` - which EI accelerator type to attach to each of our instances.For information on supported instance types and accelerator types, please see [the AWS documentation](https://aws.amazon.com/sagemaker/pricing/instance-types). How our models are loadedBy default, the predefined SageMaker MXNet containers have a default `model_fn`, which loads the model. The default `model_fn` loads an MXNet Module object with a context based on the instance type of the endpoint.This applies for EI as well. If an EI accelerator is attached to your endpoint and a custom `model_fn` isn't provided, then the default `model_fn` loads the MXNet Module object with an EI context, `mx.eia()`. This default `model_fn` works with the default save function provided by the pre-built SageMaker MXNet Docker image for training. If the model is saved in a different manner, then a custom `model_fn` implementation may be needed. For more information on `model_fn`, see [the SageMaker documentation](https://sagemaker.readthedocs.io/en/stable/using_mxnet.htmlload-a-model). Choosing instance typesHere, we deploy our model with instance type `ml.m5.xlarge` and `ml.eia1.medium`. For this model, we found that it requires more CPU memory and thus chose an M5 instance, which has more memory than C5 instances, making it more cost effective. With other models, you may want to experiment with other instance types and accelerators based on your model requirements. ###Code %%time predictor = mxnet_model.deploy( initial_instance_count=1, instance_type="ml.m5.xlarge", accelerator_type="ml.eia1.medium" ) ###Output _____no_output_____ ###Markdown Performing inferenceWith our Endpoint deployed, we can now send inference requests to it. We use one image as an example here. Preparing the imageFirst, we download the image (and view it). ###Code import matplotlib.pyplot as plt img_path = mx.test_utils.download("https://s3.amazonaws.com/onnx-mxnet/examples/mallard_duck.jpg") img = mx.image.imread(img_path) plt.imshow(img.asnumpy()) ###Output _____no_output_____ ###Markdown Next, we preprocess inference image. We resize it to 256x256, take center crop of 224x224, normalize image, and add a dimension to batchify the image. 
###Code from mxnet.gluon.data.vision import transforms def preprocess(img): transform_fn = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ] ) img = transform_fn(img) img = img.expand_dims(axis=0) return img input_image = preprocess(img) ###Output _____no_output_____ ###Markdown Sending the inference requestNow we can use the predictor object to classify the input image: ###Code scores = predictor.predict(input_image.asnumpy()) ###Output _____no_output_____ ###Markdown To see the inference result, let's download and load `synset.txt` file containing class labels for ImageNet. The top 5 classes generated in order, along with the probabilities are: ###Code import numpy as np mx.test_utils.download("https://s3.amazonaws.com/onnx-model-zoo/synset.txt") with open("synset.txt", "r") as f: labels = [l.rstrip() for l in f] a = np.argsort(scores)[::-1] for i in a[0:5]: print("class=%s; probability=%f" % (labels[i], scores[i])) ###Output _____no_output_____ ###Markdown Deleting the EndpointSince we've reached the end, we delete the SageMaker Endpoint to release the instance associated with it. ###Code predictor.delete_endpoint() ###Output _____no_output_____
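###Markdown Depending on the SDK version, the SageMaker Model and endpoint configuration created by `deploy()` may still exist after `delete_endpoint()`. The following optional sketch checks the remaining endpoints and shows how the other resources could be cleaned up with boto3; the resource names in the commented calls are illustrative placeholders, not names produced by this notebook. ###Code
import boto3

# Optional cleanup check (a sketch).
sm = boto3.client("sagemaker")

# The deleted endpoint should no longer appear in this listing.
for ep in sm.list_endpoints(MaxResults=10)["Endpoints"]:
    print(ep["EndpointName"], ep["EndpointStatus"])

# Hypothetical names: substitute the resources actually created in your account.
# sm.delete_endpoint_config(EndpointConfigName="mxnet-onnx-resnet152-endpoint-config")
# sm.delete_model(ModelName="mxnet-onnx-resnet152-model")
###Output _____no_output_____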
Decision Tree.ipynb
###Markdown Author: Ankit Kumar The Sparks Foundation Prediction using Decision Tree Algorithm Objective: Create the Decision Tree classifier and visualize it graphically. Importing Libraries ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.metrics import confusion_matrix, classification_report from sklearn.tree import DecisionTreeClassifier import seaborn as sns from sklearn.metrics import accuracy_score ###Output _____no_output_____ ###Markdown Reading Data File ###Code df=pd.read_csv("C:\\Users\\ankit\\Downloads\\Iris.csv") df.head() df.isnull().sum() df.drop(columns=["Id"],inplace=True) df["Species"].value_counts() ###Output _____no_output_____ ###Markdown Data Visualization for Selecting Algorithm ###Code sns.pairplot(df,hue='Species') # Labeling data by using LabelEncoder label=LabelEncoder() df["Species"]=label.fit_transform(df["Species"]) df.head() ###Output _____no_output_____ ###Markdown Dividing the data into Attributes and Labels ###Code X=df.drop("Species",axis=1) y=df["Species"] ###Output _____no_output_____ ###Markdown Split Data into Train and Test ###Code X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0) ###Output _____no_output_____ ###Markdown Predicting using DecisionTreeClassifier ###Code dt=DecisionTreeClassifier() dt.fit(X_train,y_train) predict_dt=dt.predict(X_test) print(classification_report(y_test,predict_dt)) print(confusion_matrix(y_test,predict_dt)) ## Comparison b/w Actual data and Predicted data pre=pd.DataFrame({"Species":y_test,"Prediction":predict_dt}) pre.head() ###Output _____no_output_____ ###Markdown Accuracy of Model ###Code accuracy=accuracy_score(y_test,predict_dt) accuracy ###Output _____no_output_____ ###Markdown Visualizing DecisionTreeClassifier ###Code from six import StringIO from IPython.display import Image from sklearn.tree import export_graphviz import pydotplus # Visualize the graph new_column=df.select_dtypes(include=float).columns dot_data = StringIO() export_graphviz(dt, out_file=dot_data, feature_names=new_column, filled=True, rounded=True, special_characters=True) graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) Image(graph.create_png()) ###Output _____no_output_____ ###Markdown Decision Trees A decision tree is a classification algorithm. We will use this algorithm to build a model from historical data from patients and their response to different medications. Then we'll use this decision tree to predict the class of an unknown patient or to find a proper drug for a new patient. ###Code import pandas as pd import numpy as np from sklearn.tree import DecisionTreeClassifier ###Output _____no_output_____ ###Markdown About datasetImagine that you are a medical researcher compiling data for a study. You have collected data about a set of patients, all of whom suffered from the same illness. During their course of treatment, each patient responded to one of 5 medications: Drug A, Drug B, Drug C, Drug X, or Drug Y. Part of your job is to build a model to find out which drug might be appropriate for a future patient with the same illness. The features of this dataset are the Age, Sex, Blood Pressure, and Cholesterol of the patients, and the target is the drug that each patient responded to. It is a sample of a multiclass classifier, and you can use the training part of the dataset to build a decision tree, and then use it to predict the class of an unknown patient, or to prescribe it to a new patient. 
Download dataset ###Code !wget -O drug200.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/drug200.csv my_data = pd.read_csv('drug200.csv', delimiter=',') my_data my_data.head() ###Output _____no_output_____ ###Markdown Pre-processing Using my_data as the Drug.csv data read by pandas, declare the following variables: X as the Feature Matrix (data of my_data) y as the response vector (target) ###Code # pd.set_option('display.max_rows',200) pd.reset_option('display.max_rows',15) my_data X = my_data[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values X ###Output _____no_output_____ ###Markdown As some featurs in this dataset are catergorical such as __Sex__ or __BP__. Unfortunately, Sklearn Decision Trees do not handle categorical variables. But still we can convert these features to numerical values. __pandas.get_dummies()__Convert categorical variable into dummy/indicator variables. ###Code from sklearn import preprocessing le_sex = preprocessing.LabelEncoder() le_sex.fit(['F', 'M']) X[:, 1] = le_sex.transform(X[:,1]) X[:5] le_BP = preprocessing.LabelEncoder() le_BP.fit(['LOW', 'NORMAL', 'HIGH']) X[:,2] = le_BP.transform(X[:,2]) le_Chol = preprocessing.LabelEncoder() le_Chol.fit([ 'NORMAL', 'HIGH']) X[:,3] = le_Chol.transform(X[:,3]) X[0:5] ###Output _____no_output_____ ###Markdown Now we can fill the target variable. ###Code y = my_data['Drug'] y[:5] ###Output _____no_output_____ ###Markdown --- Setting up the Decision TreeWe will be using train/test split on our decision tree. Let's import train_test_split from sklearn.cross_validation. Now train_test_split will return 4 different parameters. We will name them:X_trainset, X_testset, y_trainset, y_testset The train_test_split will need the parameters: X, y, test_size=0.3, and random_state=3. The X and y are the arrays required before the split, the test_size represents the ratio of the testing dataset, and the random_state ensures that we obtain the same splits. ###Code from sklearn.model_selection import train_test_split X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3) X_trainset.shape X_testset.shape y_trainset y_testset.shape ###Output _____no_output_____ ###Markdown ModelingWe will first create an instance of the DecisionTreeClassifier called drugTree.Inside of the classifier, specify criterion="entropy" so we can see the information gain of each node. ###Code from sklearn.tree import DecisionTreeClassifier drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4) drugTree # it shows the default parameters drugTree.fit(X_trainset,y_trainset) ###Output _____no_output_____ ###Markdown predictionLet's make some predictions on the testing dataset and store it into a variable called predTree. ###Code predTree = drugTree.predict(X_testset) ###Output _____no_output_____ ###Markdown print out predTree and y_testset as we want to visually compare the prediction to the actual values. 
###Code print(predTree) print(predTree.shape) print(y_testset) ###Output ['drugY' 'drugX' 'drugX' 'drugX' 'drugX' 'drugC' 'drugY' 'drugA' 'drugB' 'drugA' 'drugY' 'drugA' 'drugY' 'drugY' 'drugX' 'drugY' 'drugX' 'drugX' 'drugB' 'drugX' 'drugX' 'drugY' 'drugY' 'drugY' 'drugX' 'drugB' 'drugY' 'drugY' 'drugA' 'drugX' 'drugB' 'drugC' 'drugC' 'drugX' 'drugX' 'drugC' 'drugY' 'drugX' 'drugX' 'drugX' 'drugA' 'drugY' 'drugC' 'drugY' 'drugA' 'drugY' 'drugY' 'drugY' 'drugY' 'drugY' 'drugB' 'drugX' 'drugY' 'drugX' 'drugY' 'drugY' 'drugA' 'drugX' 'drugY' 'drugX'] (60,) 40 drugY 51 drugX 139 drugX 197 drugX 170 drugX 82 drugC 183 drugY 46 drugA 70 drugB 100 drugA 179 drugY 83 drugA 25 drugY 190 drugY 159 drugX 173 drugY 95 drugX 3 drugX 41 drugB 58 drugX 14 drugX 143 drugY 12 drugY 6 drugY 182 drugX 161 drugB 128 drugY 122 drugY 101 drugA 86 drugX 64 drugB 47 drugC 158 drugC 34 drugX 38 drugX 196 drugC 4 drugY 72 drugX 67 drugX 145 drugX 156 drugA 115 drugY 155 drugC 15 drugY 61 drugA 175 drugY 120 drugY 130 drugY 23 drugY 153 drugX 31 drugB 103 drugX 89 drugY 132 drugX 109 drugY 126 drugY 17 drugA 30 drugX 178 drugY 162 drugX Name: Drug, dtype: object ###Markdown EvaluationNow check accuracy of our model with metrics of sklearn ###Code import sklearn from sklearn import metrics import matplotlib.pyplot as plt print("Accuracy of Decision tree is: ", metrics.accuracy_score(predTree, y_testset)) sklearn.__version__ ###Output _____no_output_____ ###Markdown __Accuracy classification score__ computes subset accuracy: the set of labels predicted for a sample must exactly match the corresponding set of labels in y_true. In multilabel classification, the function returns the subset accuracy. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0. 
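###Markdown To make the subset-accuracy definition above concrete, here is a tiny toy check; the labels are made up purely for illustration and are not taken from the predictions above. ###Code
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

# Made-up labels to illustrate subset accuracy: 4 of the 5 predictions match exactly.
y_true = np.array(['drugY', 'drugX', 'drugA', 'drugX', 'drugC'])
y_hat = np.array(['drugY', 'drugX', 'drugB', 'drugX', 'drugC'])

print(accuracy_score(y_true, y_hat))  # 0.8

# The confusion matrix shows where the single miss went (a drugA case predicted as drugB).
print(confusion_matrix(y_true, y_hat, labels=['drugA', 'drugB', 'drugC', 'drugX', 'drugY']))
###Output _____no_output_____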
###Code from sklearn.externals.six import StringIO import pydotplus import matplotlib.image as mapimg from sklearn import tree %matplotlib inline !pip install python-graphviz dot_data = StringIO() filename = "drugtree.png" featureNames = my_data.columns[0:5] targetNames = my_data["Drug"].unique().tolist() out=tree.export_graphviz(drugTree,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_trainset), filled=True, special_characters=True,rotate=False) graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) graph.write_png(filename) img = mapimg.imread(filename) plt.figure(figsize=(100, 200)) plt.imshow(img,interpolation='nearest') conda info --envs ###Output _____no_output_____ ###Markdown Image Classification with SciKit-Learn (Decision Tree) ###Code from scipy import ndimage, misc def prep_data (folder): # iterate through folders, assembling feature, label, and classname data objects import os import numpy as np import matplotlib.pyplot as plt class_id = 0 features = [] labels = np.array([]) classnames = [] for root, dirs, filenames in os.walk(folder): for d in sorted(dirs): classnames.append(d) files = os.listdir(os.path.join(root,d)) for f in files: # Load the image file imgFile = os.path.join(root,d, f) img = plt.imread(imgFile) img = misc.imresize(img, (128, 128)) features.append(img.ravel()) labels = np.append(labels, class_id ) class_id += 1 features = np.array(features) return features, labels, classnames training_folder_name = '../data/faces/training' features, labels, classnames = prep_data(training_folder_name) print(len(features), 'features') print(len(labels), 'labels') print(len(classnames), 'classes:', classnames) print('Feature Shape:',features.shape) print('Labels Shape:',labels.shape) from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(features, labels, test_size=0.30) print('Training records:',Y_train.size) print('Test records:',Y_test.size) # Train the model from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler from sklearn.tree import DecisionTreeClassifier X_train_float = X_train.astype('float64') img_pipeline = Pipeline([('norm', MinMaxScaler()), ('classify', DecisionTreeClassifier()), ]) clf = img_pipeline.fit(X_train_float, Y_train) # Evaluate classifier from sklearn import metrics from sklearn.metrics import accuracy_score, confusion_matrix import numpy as np import matplotlib.pyplot as plt %matplotlib inline X_test_float = X_test.astype('float64') predictions = clf.predict(X_test) cm = confusion_matrix(Y_test, np.round(predictions, 0)) plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues) plt.colorbar() tick_marks = np.arange(len(classnames)) plt.xticks(tick_marks, classnames, rotation=85) plt.yticks(tick_marks, classnames) plt.xlabel("Predicted Label") plt.ylabel("True Label") plt.show() ###Output Classifier Metrics: precision recall f1-score support 1 0.86 0.86 0.86 7 10 1.00 1.00 1.00 5 11 1.00 0.83 0.91 6 12 1.00 1.00 1.00 3 13 0.20 1.00 0.33 1 14 1.00 0.50 0.67 2 15 1.00 0.50 0.67 2 16 1.00 0.75 0.86 4 17 0.60 0.75 0.67 4 18 1.00 0.67 0.80 3 19 1.00 0.67 0.80 6 2 0.50 0.75 0.60 4 20 1.00 0.50 0.67 8 3 0.43 0.50 0.46 6 4 0.75 0.75 0.75 4 5 1.00 0.33 0.50 6 6 0.83 0.83 0.83 6 7 0.40 0.67 0.50 3 8 1.00 1.00 1.00 3 9 0.17 1.00 0.29 1 micro avg 0.71 0.71 0.71 84 macro avg 0.79 0.74 0.71 84 weighted avg 0.84 0.71 0.74 84 Accuracy: 71.43% Confusion Matrix: ###Markdown Decision tree of fruits It's a decision tree model that shows what question ask to find 
each fruits Method **print_tree** print qestions and fruits that mach to those questions eg: Is size >= 2?--> True: Predict {'Apple': 2}--> False: Predict {'Grape': 1}Below data is predicted with 100% of accuracy for each fruit. And model gives what question ask to distinguish each fruit ###Code # Traingin data training_data = [ ['Green', 10, "Round", 'Mango'], ['Yellow', 12, "Round", 'Mango'], ['Red', 7, "Round", 'Apple'], ['Violet', 0.6, "Round", 'Grape'], ['Green', 0.8, "Round", 'Grape'], ['Yellow', 5, "Round", 'Lemon'], ['Red', 2, "Round", 'Cherry'], ['Yellow', 17, "Long", 'Banana'], ] header = ["color", "size", "shape", "label"] def unique_values(rows, col): return set([row[col] for row in rows]) def class_counts(rows): counts = {} for row in rows: label = row[-1] if label not in counts: counts[label] = 0 counts[label] += 1 return counts def is_numeric(value): return isinstance(value, int) or isinstance(value, float) class Question: def __init__(self, column, value): self.column = column self.value = value def match(self, example): val = example[self.column] if is_numeric(val): return val >= self.value else: return val == self.value def __repr__(self): condition = "==" if is_numeric(self.value): condition = ">=" return "Is %s %s %s?" % (header[self.column], condition, str(self.value)) def partition(rows, question): true_rows, false_rows = [], [] for row in rows: if question.match(row): true_rows.append(row) else: false_rows.append(row) return true_rows, false_rows def gini(rows): counts = class_counts(rows) impurity = 1 for lbl in counts: prob_of_lbl = counts[lbl] / float(len(rows)) impurity -= prob_of_lbl**2 return impurity def info_gain(left, right, current_uncertainty): p = float(len(left)) / (len(left) + len(right)) return current_uncertainty - p *gini(left) - (1-p) * gini(right) def find_best_split(rows): best_gain = 0 # keep the best information gain best_question = None current_uncertainty = gini(rows) n_features = len(rows[0]) - 1 for col in range(n_features): values = set([row[col] for row in rows]) for val in values: question = Question(col, val) true_rows, false_rows = partition(rows, question) if len(true_rows) == 0 or len(false_rows) == 0: continue gain = info_gain(true_rows, false_rows, current_uncertainty) if gain >= best_gain: best_gain, best_question = gain, question return best_gain, best_question class Leaf: def __init__(self, rows): self.predictions = class_counts(rows) class Decision_Node: def __init__(self, question, true_branch, false_branch): self.question = question self.true_branch = true_branch self.false_branch = false_branch def build_tree(rows): gain, question = find_best_split(rows) if gain == 0: return Leaf(rows) true_rows, false_rows = partition(rows, question) true_branch = build_tree(true_rows) false_branch = build_tree(false_rows) return Decision_Node(question, true_branch, false_branch) def print_tree(node, spacing=""): if isinstance(node, Leaf): print(spacing + "Predict", node.predictions) return print(spacing + str(node.question)) print(spacing + "--> True:") print_tree(node.true_branch, spacing + " ") print(spacing + "--> False: ") print_tree(node.false_branch, spacing + " ") def classify(row, node): if isinstance(node, Leaf): return node.predictions if node.question.match(row): return classify(row, node.true_branch) else: return classify(row, node.false_branch) def print_leaf(counts): total = sum(counts.values()) * 1.0 probs = {} for lbl in counts.keys(): probs[lbl] = str(int(counts[lbl] / total * 100)) + "%" return probs ########### 
testing_data = [ ['Green', 11, "Round", 'Mango'], ['Red', 8, "Round", 'Apple'], ['Violet', 0.6, "Round", 'Grape'], ['Green', 0.8, "Round", 'Grape'], ['Yellow', 5, "Round", 'Lemon'], ['Red', 2, "Round", 'Cherry'], ['Yellow', 17.5, "Long", 'Banana'], ] my_tree = build_tree(training_data) print_tree(my_tree) for row in testing_data: print("Actual: %s. Predicted: %s" % (row[-1], print_leaf(classify(row, my_tree)))) ###Output Is size >= 2? --> True: Is size >= 10? --> True: Is shape == Round? --> True: Predict {'Mango': 2} --> False: Predict {'Banana': 1} --> False: Is size >= 5? --> True: Is size >= 7? --> True: Predict {'Apple': 1} --> False: Predict {'Lemon': 1} --> False: Predict {'Cherry': 1} --> False: Predict {'Grape': 2} Actual: Mango. Predicted: {'Mango': '100%'} Actual: Apple. Predicted: {'Apple': '100%'} Actual: Grape. Predicted: {'Grape': '100%'} Actual: Grape. Predicted: {'Grape': '100%'} Actual: Lemon. Predicted: {'Lemon': '100%'} Actual: Cherry. Predicted: {'Cherry': '100%'} Actual: Banana. Predicted: {'Banana': '100%'} ###Markdown QUESTION 3(a) ###Code dataset = pd.read_csv("iris.csv", header=None) dataset.head() ###Output _____no_output_____ ###Markdown Rename Column Names ###Code dataset.columns = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width', 'Class'] dataset.head() ###Output _____no_output_____ ###Markdown Check for missing values ###Code sum(dataset.isnull().values.ravel()) ###Output _____no_output_____ ###Markdown QUESTION 3(b) Decision Tree ###Code from sklearn.cross_validation import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score, classification_report from sklearn import tree x = dataset.drop(['Class'], axis=1) y = dataset.Class x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.40, random_state=123) dtree = DecisionTreeClassifier() dtree.fit(x_train, y_train) y_pred = dtree.predict(x_test) print("\n Accuracy: ", accuracy_score(y_test,y_pred)) ###Output Accuracy: 0.966666666667 ###Markdown GridSearchCV for Decision Tree ###Code from sklearn.model_selection import GridSearchCV clf = GridSearchCV(dtree, {'criterion': ["gini","entropy"], 'max_depth': [4,6,8], 'min_samples_split': [2, 3, 4, 5, 6]}, verbose=1) clf.fit(x_train, y_train) print(clf.best_score_) print(clf.best_params_) dtree = DecisionTreeClassifier(criterion = "gini", max_depth = 6, min_samples_split = 2, random_state = 123) ###Output _____no_output_____ ###Markdown Classification ###Code dtree.fit(x_train, y_train) y_pred = dtree.predict(x_test) y_pred print("Accuracy: ", accuracy_score(y_test,y_pred)) ###Output Accuracy: 0.966666666667 ###Markdown Graphviz ###Code import graphviz dot_data = tree.export_graphviz(dtree, feature_names=['Sepal Length','Sepal Width','Petal Length','Petal Width'], class_names=['Iris-setosa','Iris-veriscolor','Iris-virginica'], out_file=None, filled=True) graph = graphviz.Source(dot_data) graph ###Output _____no_output_____ ###Markdown Classification Report ###Code print(classification_report(y_test,y_pred)) ###Output precision recall f1-score support Iris-setosa 1.00 1.00 1.00 22 Iris-versicolor 0.89 1.00 0.94 16 Iris-virginica 1.00 0.91 0.95 22 avg / total 0.97 0.97 0.97 60 ###Markdown QUESTION 3(c) Adaboost ###Code from sklearn.ensemble import AdaBoostClassifier dtree_ada = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(criterion = "gini", max_depth = 4, min_samples_split = 2, random_state = 123), algorithm="SAMME", learning_rate = 0.1, n_estimators=200) 
dtree_ada.fit(x_train, y_train) y_pred = dtree_ada.predict(x_test) print("Accuracy: ", accuracy_score(y_test,y_pred)) ###Output Accuracy: 0.933333333333 ###Markdown GridsearchCV for Adaboost ###Code dtree_ada_gs = GridSearchCV(dtree_ada, {'learning_rate': [0.1,0.2,0.3], 'n_estimators': [1,2,3,4]}, verbose=1) dtree_ada_gs.fit(x, y) print(dtree_ada_gs.best_score_) print(dtree_ada_gs.best_params_) dtree_ada = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(criterion = "gini", max_depth = 6, min_samples_split = 2, random_state = 123), algorithm="SAMME", learning_rate = 0.1, n_estimators=2) ###Output _____no_output_____ ###Markdown Classification ###Code dtree_ada.fit(x_train, y_train) y_pred = dtree_ada.predict(x_test) print("Accuracy: ", accuracy_score(y_test,y_pred)) ###Output Accuracy: 0.966666666667 ###Markdown Classification Report of Best Parameters ###Code print(classification_report(y_test,y_pred)) ###Output precision recall f1-score support Iris-setosa 1.00 1.00 1.00 22 Iris-versicolor 0.89 1.00 0.94 16 Iris-virginica 1.00 0.91 0.95 22 avg / total 0.97 0.97 0.97 60 ###Markdown Projeto Opening file ###Code file = pd.read_csv('data.csv') file.shape ###Output _____no_output_____ ###Markdown Calculating Rolling Mean with (10,150) windows ###Code file['MASmall'] = file['Close'].rolling(window=10).mean() file['MABig'] = file['Close'].rolling(window=150).mean() file.tail(5) ###Output _____no_output_____ ###Markdown Price x Moving Average 10 x Moving Average 150 ###Code plt.plot(file['Close']) plt.plot(file['MASmall']) plt.plot(file['MABig']) ###Output _____no_output_____ ###Markdown Calculating binary to show buy oportunities ###Code file['Strat1'] = file['MABig'] > file['MASmall'] file['Strat1'] = file['Strat1'].astype(int) plt.plot(file['Strat1']) file['Strat1'].value_counts() ###Output _____no_output_____ ###Markdown Backtest ###Code historico = [] montante = 100 tem = 0 for i, n in enumerate(file['Strat1']): if n==1 and tem==0: compra = file['Close'][i] tem = 1 elif n==0 and tem==1: venda = file['Close'][i] tem = 0 montante = montante*(venda/compra) historico.append(montante) montante plt.plot(historico) file['Close'][len(file)-1]/file['Close'][0] file.tail(5) ###Output _____no_output_____ ###Markdown Machine Learning Creating Target: Variation of next 10 days ###Code variacao = [] for i in range(len(file['Close'])-10): variacao.append(1 - file['Close'][i+10]/file['Close'][i]) plt.plot(variacao) file['target'] = pd.DataFrame(variacao) matrix = file.as_matrix() ###Output c:\users\isabel\appdata\local\programs\python\python37-32\lib\site-packages\ipykernel_launcher.py:1: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead. """Entry point for launching an IPython kernel. 
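###Markdown The FutureWarning above is raised by `DataFrame.as_matrix()`, which newer pandas releases remove, and the 10-day variation loop can also be written without Python-level indexing. A short alternative sketch operating on the same `file` DataFrame: ###Code
# Vectorised equivalent of the variation loop: 1 - Close[i+10] / Close[i].
# The last 10 rows are NaN by construction, matching the loop above.
file['target'] = 1 - file['Close'].shift(-10) / file['Close']

# Warning-free replacement for the deprecated DataFrame.as_matrix().
matrix = file.to_numpy()  # use file.values on older pandas versions
print(matrix.shape)
###Output _____no_output_____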
###Markdown m is the data matrix with everything except the date column, starting at row 149 (because of the 150-day moving average) and ending 10 rows before the last (because the target is the variation over the next 10 days) ###Code m = np.array(matrix[149:len(matrix)-10,1:]) ###Output _____no_output_____ ###Markdown X is m without the variation column ###Code X = m[:,:len(m[0])-1] ###Output _____no_output_____ ###Markdown Y is only the variation column ###Code Y = m[:,-1] Y X ###Output _____no_output_____ ###Markdown Splitting dataset into train and test ###Code size = int(0.8*len(X)) X_train = X[:size] Y_train = Y[:size] X_test = X[size:] Y_test = Y[size:] ###Output _____no_output_____ ###Markdown Decision Tree Model ###Code clf = tree.DecisionTreeRegressor() X_train[0] clf = clf.fit(X_train,Y_train) predict = clf.predict(X_test) ###Output _____no_output_____ ###Markdown Root Mean Squared Error $$RMSE = \sqrt{\frac{1}{n}\sum_{i=1}^{n}{(Predict_i - Ytest_i)^2}}$$ ###Code error = 0 for i, n in enumerate(predict): error += (n - Y_test[i])**2 error = error/len(predict) import math math.sqrt(error) Y_test.mean() Y_test.std() ###Output _____no_output_____ ###Markdown Decision TreeDecision trees are formed from very simple components. Decision trees are particularly popular because it is often very easy to understand exactly why a specific prediction was made. Decision trees work by recursively splitting an input dataset into two subgroups. The split is decided using some metric which is comparable to a loss function in other settings. Metrics used to decide on the split include Gini impurity, Information Gain, Variance Reduction, and measures of goodness. Decision trees are often referred to as Classification And Regression Trees (CART), introduced by Breiman in 1984. CART models form the underlying components of ensemble methods such as Random Forests, AdaBoost and Rotation Forests.Notable decision tree algorithms include:* CART - A nonparametric decision tree learning technique that produces either classification or regression trees, depending on whether the dependent variable is categorical or numeric.* CHAID (Chi-squared Automatic Interaction Detection) - Often used in the context of direct marketing to select groups of consumers and predict how their responses to some variables affect other variables.* MARS - Multivariate Adaptive Regression Splines, a form of regression analysis that works by fitting piecewise linear regressions. It can automatically model nonlinearities, but the algorithm is computationally very expensive.For the purposes of these notes we are going to focus on CART. MathematicsCART works by recursively partitioning the training set $T$. The aim is to find a partition of the training set that minimises a loss function $L$. Each node in the tree is associated with the generation of a particular subset $T_i \subset T = \{(x_n,y_n)\}_{n=1}^{k}$, where $x_n$ is a vector of independent variables and $y_n$ is the dependent variable. 
The partition is then defined by a function capable of splitting the data according to the value of a particular variable from the input dataset $x_i \in T$.
Considering a feature $j$ from an input set $T$ and taking $A$ as an arbitrary value, a split can be defined by two subsets:
$$T_l = \{t \in T : x_j \leq A\}$$
and
$$T_r = \{t \in T : x_j > A\}$$
A categorical feature can be split by using
$$T_l = \{t \in T : x_j = A\}$$
and
$$T_r = \{t \in T : x_j \neq A\}$$
When partitioning a dataset the decision tree considers all possible partitions, tests each one, and selects the one that minimises the defined loss function $L$. The loss function $L$ used to compare the quality of different splits tends to be different for continuous and categorical variables, and there exist many loss functions that can be used in both cases. In the continuous setting a square loss can be calculated for each subset using:
$$L = \sum_{t \in T}(y_t - f(x_t))^2$$
There are many other continuous loss functions that could be used in place of an L2 loss.
In the case of CART the Gini impurity is used, which provides an estimate of how pure (homogeneous) a subgroup is. The Gini impurity is calculated from the probability of selecting a sample that corresponds to a given class $c_i \in C$ among the samples in the split:
$$L = \sum_{c_i \in C}p(c_i)(1-p(c_i))$$
This leads to an impurity of 0 when all samples belong to the same class, and the impurity increases as the homogeneity of the node decreases.
The process of partitioning continues for each generated node until some stopping criterion has been met. The final tree is then returned and can be used for prediction.
Python
In this section we will break down decision trees into their various components. We treat the decision tree as a function that creates a tree by recursively splitting nodes until the model has been trained. The model includes several parts: the first is the initialisation of a node, which is given a subset of the data. Once a node has been initialised we find the optimal variable to split on using `find_varsplit`. Inside `find_varsplit` we use `find_better_split`, which assesses the efficacy of a candidate split against the current best split. This assessment is based on a score equivalent to the RMSE, which can be seen as the loss function. Once the best split has been found, the data is split between `lhs` and `rhs`, which both initialise their own nodes. This recursion continues until the stopping criterion is met.
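###Markdown
Before walking through the implementation, a quick numeric check of the Gini impurity formula from the previous section (a minimal sketch with arbitrary toy labels): for two samples of one class and three of the other, $p = 0.4$ and $0.6$, so the impurity is $0.4 \cdot 0.6 + 0.6 \cdot 0.4 = 0.48$, while a perfectly pure node gives $0$.
###Code
# Toy check of the Gini impurity formula (illustrative labels only)
labels = [1, 1, 0, 0, 0]
p1 = labels.count(1) / float(len(labels))   # probability of class 1
p0 = 1 - p1                                 # probability of class 0
gini_toy = p1 * (1 - p1) + p0 * (1 - p0)    # 0.48; a pure node would give 0.0
###Output
_____no_output_____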
###Code
import pandas as pd
from sklearn.metrics import mean_squared_error
from math import sqrt

# Minimal regression-tree wrapper so the model can be used as DecisionTreeRegressor().fit(...)
class DecisionTreeRegressor:
    # Fit the model given training data and parameters
    def fit(self, X, y, min_leaf = 5):
        self.dtree = Node(X, y, np.array(np.arange(len(y))), min_leaf)
        return self

    # Make a prediction
    def predict(self, X):
        return self.dtree.predict(X.values)

# Node constructor
class Node:
    # Initialise the necessary aspects of a node
    def __init__(self, x, y, idxs, min_leaf=5):
        self.x = x
        self.y = y
        self.idxs = idxs
        self.min_leaf = min_leaf
        self.row_count = len(idxs)
        self.col_count = x.shape[1]
        self.val = np.mean(y[idxs])
        self.score = float('inf')
        self.find_varsplit()

    # Find the correct splitpoint
    def find_varsplit(self):
        # Go through each of the features looking for best split
        for c in range(self.col_count):
            self.find_better_split(c)
        # Return without action if leaf is found
        if self.is_leaf:
            return
        # Split the values into lhs and rhs and recurse
        x = self.split_col
        lhs = np.nonzero(x <= self.split)[0]
        rhs = np.nonzero(x > self.split)[0]
        self.lhs = Node(self.x, self.y, self.idxs[lhs], self.min_leaf)
        self.rhs = Node(self.x, self.y, self.idxs[rhs], self.min_leaf)

    # Using the variable index to assess split on feature
    def find_better_split(self, var_idx):
        # Get all the values from the index in question
        x = self.x.values[self.idxs, var_idx]
        # Iterate through the rows of the input matrix and split on each observed value
        for r in range(self.row_count):
            lhs = x <= x[r]
            rhs = x > x[r]
            # Early check to see if there are too few samples in the leaf
            if rhs.sum() < self.min_leaf or lhs.sum() < self.min_leaf:
                continue
            # Get the score for the current split
            curr_score = self.find_score(lhs, rhs)
            # Replace best split if better
            if curr_score < self.score:
                self.var_idx = var_idx
                self.score = curr_score
                self.split = x[r]

    # Calculate the score of the split; this is equivalent to the RMSE
    def find_score(self, lhs, rhs):
        y = self.y[self.idxs]
        lhs_std = y[lhs].std()
        rhs_std = y[rhs].std()
        return lhs_std * lhs.sum() + rhs_std * rhs.sum()

    @property
    def split_col(self):
        return self.x.values[self.idxs,self.var_idx]

    @property
    def is_leaf(self):
        return self.score == float('inf')

    def predict(self, x):
        return np.array([self.predict_row(xi) for xi in x])

    def predict_row(self, xi):
        if self.is_leaf:
            return self.val
        node = self.lhs if xi[self.var_idx] <= self.split else self.rhs
        return node.predict_row(xi)

# Import training dataset
import numpy as np
from sklearn.datasets import load_diabetes
import matplotlib.pyplot as plt
data = load_diabetes()
X_train = pd.DataFrame(data=data.data)
y_train = pd.DataFrame(data=data.target).iloc[:,0]
regressor = DecisionTreeRegressor().fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Decision Tree
###Code
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
###Output
_____no_output_____
###Markdown
Dataset
A medical researcher compiling data for a study has collected some data about a set of patients, all of whom suffered from the same illness. During their course of treatment each patient responded to one of 5 medications: Drug A, Drug B, Drug C, Drug X and Drug Y. We have to build a model to find out which drug might be appropriate for a future patient with the same illness.
The features of this dataset are Age, Sex, Blood Pressure and the Cholesterol of the patients and the target is the drug that each patient responded to.It is a sample of multiclass classifier and we can use the training part of the dataset to build a decision tree and then use it to predict the class of an unknown patient or to prescribe a drug to a new patient. ###Code !wget -O drug200.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/data/drug200.csv ###Output --2021-12-11 21:20:41-- https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%203/data/drug200.csv Resolving cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)... 169.63.118.104 Connecting to cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud (cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud)|169.63.118.104|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 5827 (5.7K) [text/csv] Saving to: ‘drug200.csv’ drug200.csv 100%[===================>] 5.69K --.-KB/s in 0s 2021-12-11 21:20:41 (167 MB/s) - ‘drug200.csv’ saved [5827/5827] ###Markdown Now, read the data using pandas dataframe: ###Code my_data = pd.read_csv("drug200.csv", delimiter=",") my_data[0:5] # my_data.head() ###Output _____no_output_____ ###Markdown Size of the dataset ###Code my_data.shape ###Output _____no_output_____ ###Markdown Pre processing Removing the column containing the target name since it doesn't contain numeric values. ###Code X = my_data[['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K']].values X[0:5] ###Output _____no_output_____ ###Markdown As we may figure out, some features in this dataset are categorical, such as **Sex** or **BP**. Unfortunately, Sklearn Decision Trees does not handle categorical variables. We can still convert these features to numerical values using **pandas.get_dummies()**to convert the categorical variable into dummy/indicator variables. ###Code from sklearn import preprocessing le_sex = preprocessing.LabelEncoder() le_sex.fit(['F','M']) X[:,1] = le_sex.transform(X[:,1]) le_BP = preprocessing.LabelEncoder() le_BP.fit([ 'LOW', 'NORMAL', 'HIGH']) X[:,2] = le_BP.transform(X[:,2]) le_Chol = preprocessing.LabelEncoder() le_Chol.fit([ 'NORMAL', 'HIGH']) X[:,3] = le_Chol.transform(X[:,3]) X[0:5] ###Output _____no_output_____ ###Markdown Now we can fill the target variable. ###Code y = my_data["Drug"] y[0:5] ###Output _____no_output_____ ###Markdown Setting up the Decision Tree ###Code from sklearn.model_selection import train_test_split ###Output _____no_output_____ ###Markdown Now train_test_split will return 4 different parameters. We will name them:X_trainset, X_testset, y_trainset, y_testset The train_test_split will need the parameters: X, y, test_size=0.3, and random_state=3. The X and y are the arrays required before the split, the test_size represents the ratio of the testing dataset and the random_state ensures that we obtain the same splits. ###Code X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3) ###Output _____no_output_____ ###Markdown Print the shape of X_trainset and y_trainset. Ensure that the dimensions match. 
###Code print('Shape of X training set {}'.format(X_trainset.shape),' Size of Y training set {}'.format(y_trainset.shape)) ###Output Shape of X training set (140, 5) Size of Y training set (140,) ###Markdown Print the shape of X_testset and y_testset. Ensure that the dimensions match. ###Code print( f"shape of X testset {X_testset.shape} and shape of y testset is {y_testset.shape}") ###Output shape of X testset (60, 5) and shape of y testset is (60,) ###Markdown Modeling We will first create an instance of the DecisionTreeClassifier called drugTree.Inside of the classifier, specify criterion="entropy" so we can see the information gain of each node. ###Code drugTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4) drugTree # it shows the default parameters ###Output _____no_output_____ ###Markdown Next, we will fit the data with the training feature matrix X_trainset and training response vector y_trainset ###Code drugTree.fit(X_trainset,y_trainset) ###Output /home/jupyterlab/conda/envs/python/lib/python3.7/site-packages/sklearn/tree/tree.py:149: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations y_encoded = np.zeros(y.shape, dtype=np.int) ###Markdown Prediction Let's make some predictions on the testing dataset and store it into a variable called predTree. ###Code predTree = drugTree.predict(X_testset) ###Output _____no_output_____ ###Markdown We can print out predTree and y_testset if we want to visually compare the predictions to the actual values. ###Code print (predTree [0:5]) print (y_testset [0:5]) ###Output ['drugY' 'drugX' 'drugX' 'drugX' 'drugX'] 40 drugY 51 drugX 139 drugX 197 drugX 170 drugX Name: Drug, dtype: object ###Markdown EvaluationNext, let's import metrics from sklearn and check the accuracy of our model. 
###Code from sklearn import metrics import matplotlib.pyplot as plt print("DecisionTrees's Accuracy: ", metrics.accuracy_score(y_testset, predTree)) ###Output DecisionTrees's Accuracy: 0.9833333333333333 ###Markdown VisualizationLet's visualize the tree ###Code # Notice: one might need to uncomment and install the pydotplus and graphviz libraries if one has not installed these before #!conda install -c conda-forge pydotplus -y #!conda install -c conda-forge python-graphviz -y from io import StringIO import pydotplus import matplotlib.image as mpimg from sklearn import tree %matplotlib inline dot_data = StringIO() filename = "drugtree.png" featureNames = my_data.columns[0:5] out=tree.export_graphviz(drugTree,feature_names=featureNames, out_file=dot_data, class_names= np.unique(y_trainset), filled=True, special_characters=True,rotate=False) graph = pydotplus.graph_from_dot_data(dot_data.getvalue()) graph.write_png(filename) img = mpimg.imread(filename) plt.figure(figsize=(100, 200)) plt.imshow(img,interpolation='nearest') ###Output _____no_output_____ ###Markdown Decision Tree Author: Kirti Gupta ###Code import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt iris=pd.read_csv('F:\The Spark Foundation/Iris (1).csv') iris iris.info() iris.Species.value_counts() iris['Species_class']=np.where(iris.Species=='Iris-virginica',1,np.where(iris.Species=='Iris-versicolor',2,3)) iris.Species_class.value_counts() iris.columns cols=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'] ###Output _____no_output_____ ###Markdown Model Preparation ###Code from sklearn.model_selection import train_test_split train_X, test_X, train_y, test_y = train_test_split( iris[cols], iris['Species_class'], test_size = 0.2, random_state = 123 ) ###Output _____no_output_____ ###Markdown Model Building ###Code param_grid = {'max_depth': np.arange(2, 8), 'max_features': np.arange(2,5)} from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier, export_graphviz, export tree = GridSearchCV(DecisionTreeClassifier(), param_grid, cv = 10,verbose=1,n_jobs=-1) tree.fit( train_X, train_y ) tree.best_score_ tree.best_estimator_ tree.best_params_ train_pred = tree.predict(train_X) test_pred = tree.predict(test_X) import sklearn.metrics as metrics print(metrics.classification_report(test_y, test_pred)) ###Output precision recall f1-score support 1 1.00 0.91 0.95 11 2 0.86 1.00 0.92 6 3 1.00 1.00 1.00 13 accuracy 0.97 30 macro avg 0.95 0.97 0.96 30 weighted avg 0.97 0.97 0.97 30 ###Markdown The problem we are going to address is To model a classifier for evaluating balance tip’s direction. 
###Code import numpy as np import pandas as pd from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn import tree balance_data = pd.read_csv( 'https://archive.ics.uci.edu/ml/machine-learning-databases/balance-scale/balance-scale.data', sep= ',', header= None) balance_data X = balance_data.values[:,1:5] y = balance_data.values[:,0] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =0.3, random_state=100) clf_gini = DecisionTreeClassifier(criterion = "gini", random_state=100, max_depth=3, min_samples_leaf=5) clf_gini.fit(X_train, y_train) clf_entropy = DecisionTreeClassifier(criterion="entropy", random_state=100, max_depth=3, min_samples_leaf=5) clf_entropy.fit(X_train, y_train) clf_gini.predict([[4,4,3,3]]) y_pred = clf_gini.predict(X_test) y_pred y_pred_en = clf_entropy.predict(X_test) y_pred_en print("Accuracy is ", accuracy_score(y_test,y_pred)*100) print("Accuracy of entropy ", accuracy_score(y_test, y_pred_en)*100) ###Output Accuracy of entropy 70.74468085106383 ###Markdown COMPSCI 589 HW1 Name: Haochen Wang SECTION 0: Load Libraries ###Code import sklearn.model_selection import scipy import numpy as np import csv import math import matplotlib.pyplot as plt from operator import itemgetter from collections import Counter ###Output _____no_output_____ ###Markdown SECTION 2: Evaluating Decision Tree ###Code # Import Document house_file = open('house_votes_84.csv', encoding='utf-8-sig') csvreader = csv.reader(house_file) houserows = [] for row in csvreader: houserows.append(row) hvcat = houserows[0] # print(housevotecat) def catindex(categoryname: str): # a small helper function return hvcat.index(categoryname) #housevotedat = np.array([[int(element) for element in row] for row in houserows[1:]]).T housevotedat = np.array(houserows[1:]).astype(int) # print(len(housevotedat)) # Split to test and training data. def split_test_train(data, rand): housetra, housetes = sklearn.model_selection.train_test_split(data, train_size=0.8, test_size=0.2, random_state=rand, shuffle=True) return housetra.T, housetes.T # housetrain, housetest = split_test_train(housevotedat, 11589) # Node Class class Treenode: type = "" label = None testattribute = "" edge = {} majority = -1 # threshold = -1 We don't have numerical here. def __init__(self, label, type): self.label = label self.type = type # self.left = left # self.right = right # Define helper functions that I use in decision tree. def same(column): return all(item == column[0] for item in column) def majority(column): return np.argmax(np.bincount(column)) def entropy(col): values = list(Counter(col).values()) ent = 0 for value in values: k = (value/sum(values)) ent += -k*math.log(k,2) return ent def gini(col): values = list(Counter(col).values()) ginivalue = 1 for value in values: prob = (value/sum(values)) ginivalue -= prob**2 return ginivalue # test = np.array([1,1,3,3,11,11]) # same(test) # majority(test) # test1 = np.array([1,1,0,0,0]) # should get entropy .971, gini .48 # test2 = np.array([1,1,1,1,1,0,0,0,0,0,0,0,0,0]) # should get entropy .940, gini.459 # entropy(test2) # gini(test2) # Define three test criteria: # ID3 - Entropy Gain def id3(collist, listattribution): original_ent = entropy(collist[-1]) smallest_ent = 1 i = 0 # bestindex = i best = listattribution[i] for attributes in listattribution[:-1]: # I keep the last column: the target/label. 
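        # For each distinct value of this attribute, collect the target labels of the matching rows
        # and weight that subset's entropy by its share of the node's rows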
liskey = list(Counter(collist[i]).keys()) listofcategory = [] for value in liskey: index = [idx for idx, element in enumerate(collist[i]) if element == value] category = np.array(collist[-1][index]) listofcategory.append(category) # list of nparrays of target/label/categories. ent = 0 for cat in listofcategory: a = len(cat)/len(collist[i]) # This is probability ent += a * entropy(cat) # probability multiple by entropy if ent < smallest_ent: smallest_ent = ent best = attributes # bestindex = i i+=1 return best, original_ent-ent # C4.5 - Entrophy Ratio # CART - Gini Impurity def cart(collist, listattribution): smallest_gini = 1 i = 0 # bestindex = i best = listattribution[i] for attributes in listattribution[:-1]: # I keep the last column: the target/label. liskey = list(Counter(collist[i]).keys()) listofcategory = [] for value in liskey: index = [idx for idx, element in enumerate(collist[i]) if element == value] category = np.array(collist[-1][index]) listofcategory.append(category) # list of nparrays of target/label/categories. gin = 0 for cat in listofcategory: a = len(cat)/len(collist[i]) # This is probability gin += a * gini(cat) # probability multiple by gini if gin < smallest_gini: smallest_gini = gin best = attributes # bestindex = i i+=1 return best, gin # Decision Tree def decisiontree(dataset: np.array, listattributes: list, algortype: str ='id3'): def processbest(algor): if algor == "id3" or algor == "infogain": return id3(datasetcopy, listattricopy) elif algor == "cart" or algor == "gini": return cart(datasetcopy, listattricopy) else: return cart(datasetcopy, listattricopy) datasetcopy = np.copy(dataset) listattricopy = listattributes.copy() node = Treenode(label=-1,type="decision") node.majority = majority(datasetcopy[-1]) if same(datasetcopy[-1]): node.type = "leaf" node.label = datasetcopy[-1][0] return node if len(listattricopy) == 0: node.type = "leaf" node.label = majority(datasetcopy[-1]) return node bestattribute = processbest(algortype)[0] node.testattribute = bestattribute bindex = listattricopy.index(bestattribute) bigv = list(Counter(datasetcopy[bindex]).keys()) subdatalists = [] for smallv in bigv: index = [idx for idx, element in enumerate(datasetcopy[bindex]) if element == smallv] subdatav = np.array(datasetcopy.T[index]).T subdatav = np.delete(subdatav,bindex,0) # I delete the column I already used using bindex as reference. # Then, later, pop the same index from list attribute. subdatalists.append(subdatav) # list of nparrays of target/label/categories. listattricopy.pop(bindex) edge = {} sdindex = 0 for subvdata in subdatalists: if subvdata.size == 0: node.type = "leaf" node.label = majority(subdatav[-1]) subtree = decisiontree(subvdata, listattricopy, algortype) attributevalue = bigv[sdindex] edge[attributevalue] = subtree sdindex += 1 node.edge = edge return node # Prediction using tree def prediction(tree: Treenode, instance): # note that the instance if by row. 
(I formerly used by column) predict = tree.majority correct = instance[-1] if tree.type == 'leaf': predict = tree.label return predict==correct, predict, correct testindex = catindex(tree.testattribute) if instance[testindex] not in tree.edge: return predict==correct, predict, correct nexttree = tree.edge[instance[testindex]] return prediction(nexttree, instance) # Test with one instance # instance1 = np.array([1,2,2,1,1,1,2,2,2,1,1,1,2,1,0,0,0]) # print(firsttree.edge[0].edge[2].edge[2].type) # print(prediction(firsttree,instance1)) # print(len(housetest.T)) def oneaccurcy(data,treeuse): yescount = 0 for ins in data.T: if prediction(treeuse,ins)[0]: yescount+=1 return yescount/len(data.T) # housetrain, housetest = split_test_train(housevotedat, 608) # firsttree = decisiontree(housetrain, hvcat, 'id3') # print(oneaccurcy(housetest,firsttree)) # housetrain, housetest = split_test_train(housevotedat, 201589) # secondtree = decisiontree(housetrain, hvcat, 'id3') # print(oneaccurcy(housetest,secondtree)) def manyaccuarcy(datause: str, algorithm: str, rand2number): accuracylist = [] count = 1 while count <= 100: # print(count) housetrain, housetest = split_test_train(housevotedat, 589+rand2number*count) traintree = decisiontree(housetrain, hvcat, algorithm) # print(traintree.edge[0].edge) if datause == 'traindata' or datause == 'train': oc = oneaccurcy(housetrain,traintree) elif datause == 'testdata' or datause == 'test': oc = oneaccurcy(housetest,traintree) accuracylist.append(oc) count+=1 return accuracylist # Plot the graphs def plothist(testortrain,algor,rand2,clor= 'purple'): if algor != 'all' and not (testortrain == 'train' or testortrain == 'traindata'): plotlist = np.array(manyaccuarcy(testortrain,algor,rand2)) print('The mean accuracy is ' + str(plotlist.mean()) + ', and the std is '+ str(plotlist.std())) # print(plotlist) plt.hist(plotlist,density=1, bins=10, color=clor, alpha=0.5) plt.axis([0.65, 1.1, 0, 30]) #[xmin,xmax,ymin,ymax] plt.ylabel('Accuarcy Frequency On '+ testortrain +' Data') plt.xlabel('Accuracy') plt.title("Decision Tree Using "+testortrain+ " Data With " + algor + " Algorithm") plt.show() return elif algor == 'all': plotlistid3 = np.array(manyaccuarcy(testortrain,'id3',rand2)) plotlistcart = np.array(manyaccuarcy(testortrain,'cart',rand2)) print('For ID3/infogain, The mean accuracy is ' + str(plotlistid3.mean()) + ', and the std is '+ str(plotlistid3.std())) print('For CART/gini, The mean accuracy is ' + str(plotlistcart.mean()) + ', and the std is '+ str(plotlistcart.std())) # print(plotlist) plt.hist(plotlistid3,density=1, bins=10, color=clor, alpha=0.4, label="id3") plt.hist(plotlistcart,density=1, bins=10, color='yellow', alpha=0.4, label='cart') plt.legend() plt.axis([0.65, 1.1, 0, 30]) #[xmin,xmax,ymin,ymax] plt.ylabel('Accuarcy Frequency On '+ testortrain +' Data') plt.xlabel('Accuracy') plt.title("Decision Tree Using "+testortrain+ " Data With Comparasion of Two Algorithms") plt.show() return if testortrain == 'train' or testortrain == 'traindata': plotlist = np.array(manyaccuarcy(testortrain,algor,rand2)) print('The mean accuracy is ' + str(plotlist.mean()) + ', and the std is '+ str(plotlist.std())) # print(plotlist) plt.hist(plotlist,density=1, bins=100, color=clor, alpha=0.5) plt.axis([0.8, 1.2, 0, 150]) #[xmin,xmax,ymin,ymax] plt.ylabel('Accuarcy Frequency On '+ testortrain +' Data') plt.xlabel('Accuracy') plt.title("Decision Tree Using "+testortrain+ " Data With " + algor + " Algorithm") plt.show() return # plothist('test', 'cart', 197, 'blue') # 
plothist('test', 'all', 397) ###Output _____no_output_____ ###Markdown Q2.1 (12 Points) In the first histogram, you should show the accuracy distribution when the algorithm was evaluated over training data. The horizontal axis should show different accuracy values, and the vertical axis should show the frequency with which that accuracy was observed while conducting these 100 experiments/training processes. The histogram should look like the one in Figure 3 (though the “shape” of the histogram you obtain may be different, of course). You should also report the mean accuracy and its standard deviation. ###Code plothist('train', 'id3', 197, 'blue') # plothist('test', 'all', 397) ###Output The mean accuracy is 1.0, and the std is 0.0 ###Markdown Q2.2 (12 Points) In the second histogram, you should show the accuracy distribution when the algorithm was evaluated over testing data. The horizontal axis should show different accuracy values, and the vertical axis should show the frequency with which that accuracy was observed while conducting these 100 experiments/training processes. You should also report the mean accuracy and its standard deviation. ###Code plothist('test', 'id3', 397, 'green') ###Output The mean accuracy is 0.9410344827586203, and the std is 0.02392655119376817 ###Markdown Q2.3 (12 Points) Explain intuitively why each of these histograms look the way they do. Is there more variance in one of the histograms? If so, why do you think that is the case? Does one histogram show higher average accuracy than the other? If so, why do you think that is the case? Answer:It is very apparent that, comparing the result of testing and the training data vs. the accuracy. In the aspect of variance, it's clear that the traverse of training data using decision tree model trained using training dataset is less variant, with mean accuracy is 1.0, and the std is 0.0. That means for all instance in training item, we are able to get the correct prediction. While for the accuracy, the training data is very accurate, indeed, it's 100% accurate. This is the case because since the instance of training data is always on the list, so the way we traverse the tree is exactly the same the way we trained it, hence 1.00 accuracy.And for the testing data instance is The mean accuracy is 0.9410344827586203, and the std is 0.02392655119376817. Obviously more variance because larget std, but still not very big: range between 0.89 to 0.99, mean around 0.94, which is a very acceptable prediction result for me. It's less accurate because there are probably some instance that are outliers, or the tree running out of the subnode so we use the 'majority' vote way to determine the label. Q2.4 (8 Points) By comparing the two histograms, would you say that the Decision Trees algorithm, when used in this dataset, is underfitting, overfitting, or performing reasonably well? Explain your reasoning. Answer: For the training dataset, I would say it is definitely overfitting, since we have 100% accuracy, which is almost impossible in real world. The decision tree fit too many attributes of the tree which would cause larger space complexity if the data is more than 17 attribute we are having in the example mini dataset. 
For the testing dataset, though I'd like to say it's performing reasonably well, there's still a little portion that the result is overfitting: if we restrict the layer of the decision tree, so we might have sub....treenode entropy not equal zero, that would be more realistic in realworld, since if there is one branch/one leaf node that have very deep layer, even though that might give us the correct result, but the process could consider to be redundant, hence might be overfitting. Q2.5 (6 Points) In class, we discussed how Decision Trees might be non-robust. Is it possible to experimentally confirm this property/tendency via these experiments, by analyzing the histograms you generated and their corresponding average accuracies and standard deviations? Explain your reasoning. Answer:In fact, if check on the gini part and the compariasion part below, we can find out the result varies with std around 0.02±0.005. Even though that might be small, but since we hav eall the data from the same 435 datas, the only difference is how we shuffle them again and again, so we can tell there's no large difference between all the reshuffles (Not like, for example trees in Europe and Trees in America and trees in Asia, which might have huge difference). All the data are from same 435 instance and say if we reshuffle them perfectly, for the training data, the tree would have at least 261 instances being the same as last shuffle. In average there should be only about 49 instance that are difference from last shuffle. But giving this, we can still find out that the result range from 0.89 to 0.99, hence that could show us that decision trees might be non-robust or less abstracly decribe, non-stable. Extra points (15 Points) Repeat the experiment above but now using the Gini criterion for node splitting, instead of the Information Gain criterion. ###Code plothist('train', 'gini', 197, 'blue') plothist('test', 'gini', 397, 'green') plothist('test', 'all', 691) print('Now try compare Gini/CART with InfoGain/ID3 with another random value: 245.') plothist('test', 'all', 246) print('Now try compare Gini/CART with InfoGain/ID3 with another random value: 589.') plothist('test', 'all', 589) ###Output The mean accuracy is 1.0, and the std is 0.0 ###Markdown BT2101 Introduction to Decision Tree 1 GoalIn this notebook, we will explore **Decision Tree** including:* User-defined functions* Open-source package: `scikit-learn`For the **Decision Tree** method, you will:* Use numpy to write functions* Write binary recursive splitting functions* Write decision functions* Write pruning functions* Use open-source package to do classification ###Code # -*- coding:utf-8 -*- import numpy as np import pandas as pd import matplotlib.pyplot as plt from math import sqrt, log from __future__ import division from collections import defaultdict %matplotlib inline ###Output _____no_output_____ ###Markdown 2 Summary of Classification Tree Classification TreeA typical classification tree looks like this: Steps for Binary Splitting (E.g., Entropy)1. Compute the entropy for data-set;2. For every attribute/feature, calculate information gain for this attribute;3. Pick the feature with highest information gain;4. 
Repeat until we get the tree we desired; Entropy and Information Gain Alternative Criterion for Binary SplittingThere are a few possible criteria we can use for selecting features and making the binary splits of classification decision tree:* Classification Error Rate* Gini Index 3 Case: Kaggle Competition - Lending Club Loan Status 3.1 Data OverviewThe file "LoanStats_2018Q1.csv" contains complete loan data for all loans issued through the 2018 Quarter-1, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. The file containing loan data through the "present" contains complete loan data for all loans issued through the previous completed calendar quarter. Additional features include credit scores, number of finance inquiries, address including zip codes, and state, and collections among others. Please see https://www.kaggle.com/wendykan/lending-club-loan-data/home. AttributesThe dataset can be downloaded [here](https://www.lendingclub.com/info/download-data.action). Information on the columns and features can be found in data dictionary. A data dictionary is provided in a separate file "LCDataDictionary.xlsx". GoalOur goal is to show how to do binary splitting and tree pruning for a classification tree. Selected FeaturesFor the sake of simplicity, We only select 3 categorical variables as features. We will further transform these categorical variables into binary ones. You need to learn how to fit decision trees when features are continuous variables. 3.2 Build Tree Function 1. Calculating entropy value of a given tree node with labels of samples. ###Code def entropy(sample_labels): '''This function is used to calculate entropy value of a given tree node, in which there are samples with labels (0, 1) or (-1, 1). Inputs: 1) sample_labels: Labels for samples in the current tree node, such as (1, 0, 0, 1, 0) or (1, -1, -1, 1, 0) Outputs: 1) entropy: Entropy value of labels in the current tree node. ''' # Assert np.array sample_labels = np.array(sample_labels) # What if sample_labels are empty if sample_labels.size == 0: return 0 # What if all the labels are the same class_values = np.unique(sample_labels) # Sample labels/classes; Usually (0,1), sometimes (-1,1) num0 = len(filter(lambda x:x==class_values[0], sample_labels)) # Number of samples with one label num1 = len(filter(lambda x:x==class_values[1], sample_labels)) if class_values.size > 1 else 0 # Number of samples with another label if sample_labels.size == num0 or sample_labels.size == num1: return 0 # Calculate entropy value p0 = num0 / (num0+num1) # Probability of class 0 labels p1 = 1 - p0 # Probability of class 1 labels entropy = -(p0*log(p0,2) + p1*log(p1,2)) return entropy ###Output _____no_output_____ ###Markdown Function 2. Calculating information gain when a given tree node is splitted by a given feature ###Code def info_gain(samples, output, feature): '''This function is used to calculate information gain when a given tree node is splitted by a given feature. Inputs: 1) samples: Samples in the current tree node before making split on the feature (Pandas Dataframe) 1) output: Name of the output column 2) feature: Name of the feature used to split the current tree node. Remember the features we selected in this case are binary. 
Outputs: 1) information_gain: How much reduction in entropy value if the current tree node is splitted by the feature 2) subsamples[0]: Data samples where feature values are one label (e.g., 0 or -1) 3) subsamples[1]: Data samples where feature values are another label (e.g., 1) ''' # Split samples by feature values into subsamples subsamples = defaultdict() entropy_after = 0 # Entropy value after splitting for feature_value in np.unique(samples[feature]): subsamples[feature_value] = samples[samples[feature] == feature_value] # add entire row where feature = feature value temp = subsamples[feature_value] # Store a temporary copy p = len(temp) / len(samples) # Proportion of this subsample entropy_after += p * entropy(temp[output]) # Calculate information gain information_gain = entropy(samples[output]) - entropy_after # Left or Right subtree may be None return (information_gain, subsamples[0] if 0 in subsamples else None, subsamples[1] if 1 in subsamples else None) # [1] is left split, [2] is right split # Let us have a test a = np.array([[1,0,0,1],[0,1,1,0],[1,1,1,1],[0,0,0,0],[1,1,0,0]]) data = pd.DataFrame(a, columns=['x1','x2','x3','y']) info_gain(data, 'y', 'x1')[1] # Let us have a test a = np.array([[1,0,0,1],[1,1,1,0],[1,1,1,1],[1,0,0,0],[1,1,0,0]]) data = pd.DataFrame(a, columns=['x1','x2','x3','y']) info_gain(data, 'y', 'x1') ###Output _____no_output_____ ###Markdown Why is information gain nonnegative?Math Proof: https://www.cs.cmu.edu/~ggordon/780-fall07/fall06/homework/15780f06-hw4sol.pdf Function 3. Decide the best feature to split on: Using information gain and entropy as criterion1. Loop over each feature in the feature list;2. For each loop (feature f), split the data into 2 groups: In group 1 (left split), all samples' feature f has value 0. In group 2 (right split), all samples' feature f has value 1;3. Calculate the information gain for this split;4. If the information gain for this split using this feature is highest, then pick this feature. ###Code def best_feature_split(samples, output, features): '''This function is used to determine the best feature to split based on maximized information gain. 
Inputs: 1) samples: Samples in the current tree node before making split on the feature (Pandas Dataframe) 2) output: Name of the output column 3) features: A list of feature names Outputs: 1) best_feature: The best feature which is used to do binary splitting 2) best_left_split: Data samples where the best feature's values are 0 3) best_right_split: Data samples where the best feature's values are 1 ''' # Initialize best feature, best information gain value, best left/right split samples best_feature = None best_information_gain = 0 best_left_split = None best_right_split = None samples_row = float(len(samples)) # Number of rows in the data samples # Loop through features and find the best feature for feature in features: # Splitting the data samples current_split = info_gain(samples, output, feature) information_gain = current_split[0] left_split = current_split[1] right_split = current_split[2] # Check if this feature is better if information_gain >= best_information_gain: best_feature, best_information_gain, best_left_split, best_right_split = feature, information_gain, left_split, right_split return (best_feature, best_information_gain, best_left_split, best_right_split) # Let us have a test a = np.array([[1,0,0,1],[0,1,1,0],[1,1,1,1],[0,0,0,0],[1,1,0,0]]) data = pd.DataFrame(a, columns=['x1','x2','x3','y']) best_feature_split(data, 'y', ['x1','x2','x3']) a = np.array([[1,0,0,1],[1,1,1,0],[1,1,1,1],[1,0,0,0],[1,1,0,0]]) data = pd.DataFrame(a, columns=['x1','x2','x3','y']) best_feature_split(data, 'y', ['x1','x2','x3']) ###Output _____no_output_____ ###Markdown Function 4. Build our classification tree and do pre-pruningWe need to decide stopping conditions (i.e., pre-pruning):1. The samples' labels in the current node are the same (either 0 or 1);2. All the features have already been used for split;3. The current tree has already reached maximum depth **max_depth**;4. The number of samples in the current node is lower than minimum number **min_number**;5. The information gain for the current split is lower than a threshold **min_infogain** Stopping Condition 1: The samples' labels in the current node are the same (either 0/-1 or 1) ###Code def stop_1(node_labels): '''This function is used to verify whether stopping condition 1 is satisfied. Inputs: 1) node_labels: The samples' labels in the current node Outputs: 1) True if they are all the same, False if otherwise ''' # numpy array node_labels = np.array(node_labels) # Empty labels if len(node_labels) == 0: return True if len(np.unique(node_labels)) == 1: print ("Stopping Condition 1: The samples' labels in the current node are the same (either 0/-1 or 1)") return True else: return False ###Output _____no_output_____ ###Markdown Stopping Condition 2: All the features have already been used for split ###Code def stop_2(features): '''This function is used to verify whether stopping condition 2 is satisfied. Inputs: 1) features: A list of feature names Outputs: 1) True if the feature list is empty, False if otherwise ''' if len(features) == 0 or features == None: print ("Stopping Condition 2: All the features have already been used for split") return True else: return False ###Output _____no_output_____ ###Markdown Stopping Condition 3: The current tree has already reached maximum depth **max_depth** ###Code def stop_3(tree_depth, max_depth): '''This function is used to verify whether stopping condition 3 is satisfied. 
Inputs: 1) tree_depth: The depth of the current tree 2) max_depth: Maximum tree depth Outputs: 1) True if the current depth reaches maximum depth, False if otherwise ''' if tree_depth >= max_depth: print ("Stopping Condition 3: The current tree has already reached maximum depth") return True else: return False ###Output _____no_output_____ ###Markdown Stopping Condition 4: The number of samples in the current node is lower than minimum number **min_number** ###Code def stop_4(samples, min_number): '''This function is used to verify whether stopping condition 4 is satisfied. Inputs: 1) samples: Data samples in the current node (Pandas DataFrame) 2) min_number: Minimum number of node size Outputs: 1) True if sample size is smaller than the minimum number, False if otherwise ''' if samples.size <= min_number: print ("Stopping Condition 4: The number of samples in the current node is lower than minimum number") return True else: return False ###Output _____no_output_____ ###Markdown Stopping Condition 5: The information gain for the current split is lower than a threshold **min_infogain** ###Code # info_gain(samples, output, feature) -> information gain, left, right # best_feature_split(samples, output, features) -> feature, information gain, left, right def stop_5(info_gain, min_infogain): '''This function is used to verify whether stopping condition 5 is satisfied. Inputs: 1) info_gain: Information gain after this best split 2) min_infogain: Minimum information gain Outputs: 1) True if information gain after this best splitting is smaller than the minimum number, False if otherwise ''' if info_gain <= min_infogain: print ("Stopping Condition 5: The information gain for the current split is lower than a threshold") return True else: return False ###Output _____no_output_____ ###Markdown Build classification treeThe data structure for the nested tree structure (including temporary tree nodes, and leaf nodes) is shown as:{ 'label': None for temporary node, or predicted label at the leaf node (e.g., "Majority Voting" criterion) for leaf node; 'left_tree': Left tree after the selected feature (=0 or -1) is splitted for temporary node, None for leaf node; 'right_tree': Right tree after the selected feature (=1) is splitted for temporary node, None for leaf node; 'best_feature': The feature that is selected to do binary split for temporary node, None for leaf node. } ###Code def majority_vote(output_labels): '''This function is used to get predicted label based on "Majority Voting" criterion for the current leaf node. Inputs: 1) output_labels: Outputs (labels) in this leaf node, such as [1, 0, 0, 1, 1] Outputs: 1) prediction: Predicted label for this leaf node (e.g., 0/-1, or 1) ''' # numpy array output_labels = np.array(output_labels) # Empty label if output_labels.size == 0: return None # Count output labels (0/-1 or 1) values = np.unique(output_labels) if len(values) == 1: return values[0] else: num0 = len(output_labels[output_labels == values[0]]) num1 = len(output_labels[output_labels == values[1]]) return values[1] if num1 >= num0 else values[0] # Prediction based on "Majority Voting" criterion def ClassificationTree(samples, output, features, step, tree_depth, max_depth, min_number, min_infogain): '''This function is used to build a classification tree in a recursive way. Remember how you build a binary tree in the previous C++ and Data Structure courses). 
Inputs: 1) samples: Samples in the current tree node before making split on the feature (Pandas Dataframe) 2) output: Name of the output column 3) features: A list of feature names 4) step: The current binary split step 5) tree_depth: The depth of the current tree 6) max_depth: Maximum depth this tree can grow 7) min_number: Minimum number of node size 8) min_infogain: Minimum information gain Outputs: 1) tree_nodes: Nested tree nodes, which are stored and shown in nested dictionary type ''' # If samples are empty, return None if samples is None or len(samples)==0: return None current_features = features # Current feature list labels = samples[output] # Output labels in the current tree node print "----------------------------------------------------------------------------" print "----------------------------------------------------------------------------" print "Step %s: Current tree depth is %s. Current tree node has %s data points" % (step, tree_depth, len(samples)) # Sample size # Verify whether stopping conditions 1-4 are satisfied. If satisfied, return a leaf_node if stop_1(labels) or stop_2(current_features) or stop_3(tree_depth, max_depth) or stop_4(samples, min_number): return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None } # If pass stopping conditions 1-4, then do best splitting best_split = best_feature_split(samples, output, current_features) best_feature, best_infogain, best_left, best_right = best_split[0], best_split[1], best_split[2], best_split[3] # Verify whether stopping condition 5 is satisfied. If satisfied, return a leaf node if stop_5(best_infogain, min_infogain): return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None } # If pass stopping condition 5, then move on step += 1 print ("Step %s: Binary split on %s. Size of Left and Right tree is (%s, %s)" % \ (step, best_feature, len(best_left) if best_left is not None else 0, len(best_right) if best_right is not None else 0)) current_features.remove(best_feature) # Remove this feature if this feature is used for split # Do binary split on left tree and right tree in a recursive way left_split = ClassificationTree(best_left, output, current_features, step+1, tree_depth+1, max_depth, min_number, min_infogain) right_split = ClassificationTree(best_right, output, current_features, step+1, tree_depth+1, max_depth, min_number, min_infogain) return { 'label': None, 'left_tree': left_split, 'right_tree': right_split, 'best_feature': best_feature } ###Output _____no_output_____ ###Markdown 3.3 Data CleaningWe need to do some simple data cleaning work for original lend club loan data. 
###Code %pwd loan_data = pd.read_csv("./LoanStats_2018Q1.csv", low_memory=False, header=1) loan_data.head(n=10) loan_data.shape loan_data.describe() loan_data["loan_status"].value_counts() # Select features and output features = ['grade', 'term', 'home_ownership'] output = 'risky' loan_data = loan_data[loan_data['loan_status'] != 'Current'] loan_data[output] = loan_data['loan_status'].map(lambda x: 1 if x in ['Late (31-120 days)', 'Late (16-30 days)', 'Charged Off'] else 0) dataset = loan_data[features+[output]] dataset.head() # Transform categorical features to binary features grade_dummy = pd.get_dummies(dataset['grade'], prefix='grade') # For every unique variable in grade, create a new column term_dummy = pd.get_dummies(dataset['term'], prefix='term') # 1 indicated found, 0 indicates not found home_ownership_dummy = pd.get_dummies(dataset['home_ownership'], prefix='home_ownership') # column name starts with home_ownership dataset = dataset.join([grade_dummy, term_dummy, home_ownership_dummy]) # Add in all the newly created columns dataset = dataset.drop(features, axis=1) # Remove categorical columns dataset = dataset.dropna() # Remove all missing values dataset = dataset.reset_index() print dataset[output].value_counts() # Very unbalanced dataset dataset.head(n=5) dataset.shape ###Output _____no_output_____ ###Markdown Note that you need to do oversampling on rare outputs (which is '1' in this example) Otherwise the tree nodes will always predict 0 (according to majority voting method) That will be problematic. ###Code # Update our features and output import copy dataset_copy = copy.deepcopy(dataset) # Please note the difference between shallow copy and deep copy in Python features = list(dataset_copy.columns[2:]) # Index 0 is index, index 1 is output (risky). 
Features start from index 2 output = dataset_copy.columns[1] # Index 1 is output (risky) dataset_copy.head() # Address Unbalanced data: Oversampling # You may need to set seed first, otherwise if you use different training data to train the model, you will get # different tree model and you may get different post-pruning results import random random.seed(12345) # Class count count_class_0, count_class_1 = dataset_copy[output].value_counts() # Divide by class df_class_0 = dataset_copy[dataset_copy[output] == 0] df_class_1 = dataset_copy[dataset_copy[output] == 1] # Address unblanced data issue: Oversampling on data samples with rare outputs (which is '1' in this example) df_class_1_over = df_class_1.sample(count_class_0, replace=True, random_state=12345) # Random sample of items # (number of data (same as class 0), sample with replacement, seed) dataset_copy = pd.concat([df_class_1_over, df_class_0], axis=0) dataset_copy[output].value_counts() dataset_copy.to_csv('./dataset.csv', index=None) # Store a copy: Export dataset to csv file dataset_copy.head() ###Output _____no_output_____ ###Markdown 3.4 Classification and Simple Visualization ###Code # Suppose max_depth = 6; min_infogain=5e-4 features = list(dataset.columns[2:]) output = dataset.columns[1] tree_model = ClassificationTree(dataset_copy, output, features, step=0, tree_depth=0, max_depth=7, min_number=5, min_infogain=5e-4) tree_model ###Output _____no_output_____ ###Markdown Visualize the TreeThe data structure for the nested tree structure (including temporary tree nodes, and leaf nodes) is shown as:{ 'label': None for temporary node, or predicted label at the leaf node (e.g., "Majority Voting" criterion) for leaf node; 'left_tree': Left tree after the selected feature (=0 or -1) is splitted for temporary node, None for leaf node; 'right_tree': Right tree after the selected feature (=1) is splitted for temporary node, None for leaf node; 'best_feature': The feature that is selected to do binary split for temporary node, None for leaf node. } ###Code def print_tree(tree, depth=0, LR=0): '''This function is used to visualize the tree model Inputs: 1) tree: tree model 2) depth: 3) LR: Left_subtree: feature=0; Right_subtree: feature=1 Outputs: 1) Print the tree model structure (i.e., nested dictionary) ''' if depth==0: # Root node print tree['best_feature'] print_tree(tree['left_tree'], depth+1, 0) print_tree(tree['right_tree'], depth+1, 1) else: if tree['best_feature'] is not None: # Not leaf node print "\t" * depth, "=%s :" %(LR), tree['best_feature'] try: print_tree(tree['left_tree'], depth+1, 0) except: pass try: print_tree(tree['right_tree'], depth+1, 1) except: pass else: print "\t" * depth, "=%s : -" %(LR), "(Predict %s)" %(tree['label']) # Leaf node print_tree(tree_model, depth=0, LR=0) ###Output grade_A =0 : grade_B =0 : home_ownership_MORTGAGE =0 : grade_C =0 : - (Predict 1) =1 : term_ 60 months =0 : home_ownership_OWN =0 : home_ownership_RENT =0 : - (Predict 0) =1 : - (Predict 1) =1 : - (Predict 1) =1 : - (Predict 1) =1 : grade_G =0 : grade_F =0 : grade_E =0 : - (Predict 0) =1 : term_ 36 months =0 : - (Predict 1) =1 : - (Predict 0) =1 : - (Predict 1) =1 : - (Predict 1) =1 : - (Predict 0) =1 : - (Predict 0) ###Markdown Assignments:Set your own stopping conditions for pre-pruning, For example: * You can adjust initial parameter values for these stopping conditions.* Limiting the number of binary-split.Choose one of them and describe or write your function/code. 
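###Markdown
The most direct of the suggested options is simply to tighten the initial parameter values of the existing stopping conditions. The call below is a minimal sketch of that idea (the parameter values are illustrative), while the next cell takes the other route and limits the total number of binary splits.
###Code
# Pre-pruning via stricter parameters: shallower tree, larger leaves, higher information-gain bar
# (illustrative values; reuses the ClassificationTree defined in Section 3.2)
shallow_tree = ClassificationTree(dataset_copy, output, list(dataset_copy.columns[2:]),
                                  step=0, tree_depth=0, max_depth=3,
                                  min_number=50, min_infogain=5e-3)
###Output
_____no_output_____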
###Code '''def stop_6(data): ''This function is used to verify whether stopping condition 6 is satisfied. Inputs: 1) data: The dataset Outputs: 1) True if there are no more data, False if otherwise '' if len(data) == 0: print ("Stopping Condition 6: The current tree has no more data") return True else: return False''' # Suppose: Limiting the number of binary-split # Note that python cannot define a static variable (unless you define it in python "Class" method) # You can also count how many variables have already been used for splitting. This number is equal to the number of splits def ClassificationTree(samples, output, features, step, tree_depth, max_depth, min_number, min_infogain, counter, max_split): '''This function is used to build a classification tree in a recursive way. Remember how you build a binary tree in the previous C++ and Data Structure courses). Inputs: 1) samples: Samples in the current tree node before making split on the feature (Pandas Dataframe) 2) output: Name of the output column 3) features: A list of feature names 4) step: The current binary split step 5) tree_depth: The depth of the current tree 6) max_depth: Maximum depth this tree can grow 7) min_number: Minimum number of node size 8) min_infogain: Minimum information gain 9) counter: Indicate this is nth split (List of number) 10) max_split: Maximum number of splits Outputs: 1) tree_nodes: Nested tree nodes, which are stored and shown in nested dictionary type ''' # If samples are empty, return None if samples is None or len(samples)==0: return None current_features = features # Current feature list labels = samples[output] # Output labels in the current tree node print "----------------------------------------------------------------------------" print "----------------------------------------------------------------------------" print "Step %s: Current tree depth is %s. Current tree node has %s data points" % (step, tree_depth, len(samples)) # Sample size # Verify whether stopping conditions 1-4 are satisfied. If satisfied, return a leaf_node if stop_1(labels) or stop_2(current_features) or stop_3(tree_depth, max_depth) or stop_4(samples, min_number): return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None } # If pass stopping conditions 1-4 , then do best splitting best_split = best_feature_split(samples, output, current_features) best_feature, best_infogain, best_left, best_right = best_split[0], best_split[1], best_split[2], best_split[3] # Verify whether stopping condition 5 is satisfied. If satisfied, return a leaf node if stop_5(best_infogain, min_infogain): return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None } # If pass stopping condition 5, then move on # If reach maximum number of splits split_counter = counter.pop(0) # Indicate this is nth split. counter is a list of numbers in a sequence if split_counter >= max_split: print "Stopping Condition: You already reach maximum number of splits." return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None } else: step += 1 print "Step %s: Binary split on %s. Size of Left and Right tree is (%s, %s)" % \ (step, best_feature, len(best_left) if best_left is not None else 0, len(best_right) if best_right is not None else 0) current_features.remove(best_feature) # Remove this feature if this feature is used for split print "Note: You have already done %s splits." 
% (split_counter) # Do binary split on left tree and right tree in a recursive way left_split = ClassificationTree(best_left, output, current_features, step+1, tree_depth+1, max_depth, min_number, min_infogain, \ counter, max_split) right_split = ClassificationTree(best_right, output, current_features, step+1, tree_depth+1, max_depth, min_number, min_infogain, \ counter, max_split) return { 'label': None, 'left_tree': left_split, 'right_tree': right_split, 'best_feature': best_feature } # Suppose max_depth = 6; min_infogain=5e-4 features = list(dataset_copy.columns[2:]) output = dataset_copy.columns[1] maximum_split = 7 counters = [x for x in range(1,101)] tree_model = ClassificationTree(dataset_copy, output, features, step=0, tree_depth=0, max_depth=10, min_number=5, min_infogain=-1, \ counter=counters, max_split=maximum_split) ###Output ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 0: Current tree depth is 0. Current tree node has 11578 data points Step 1: Binary split on grade_A. Size of Left and Right tree is (9844, 1734) Note: You have already done 1 splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 2: Current tree depth is 1. Current tree node has 9844 data points Step 3: Binary split on grade_B. Size of Left and Right tree is (7280, 2564) Note: You have already done 2 splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 4: Current tree depth is 2. Current tree node has 7280 data points Step 5: Binary split on home_ownership_MORTGAGE. Size of Left and Right tree is (4317, 2963) Note: You have already done 3 splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 6: Current tree depth is 3. Current tree node has 4317 data points Step 7: Binary split on grade_C. Size of Left and Right tree is (2397, 1920) Note: You have already done 4 splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 8: Current tree depth is 4. Current tree node has 2397 data points Step 9: Binary split on grade_G. Size of Left and Right tree is (2389, 8) Note: You have already done 5 splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 10: Current tree depth is 5. Current tree node has 2389 data points Step 11: Binary split on term_ 60 months. Size of Left and Right tree is (1289, 1100) Note: You have already done 6 splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 12: Current tree depth is 6. Current tree node has 1289 data points Stopping Condition: You already reach maximum number of splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 12: Current tree depth is 6. Current tree node has 1100 data points Stopping Condition: You already reach maximum number of splits. 
---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 10: Current tree depth is 5. Current tree node has 8 data points Stopping Condition: You already reach maximum number of splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 8: Current tree depth is 4. Current tree node has 1920 data points Stopping Condition: You already reach maximum number of splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 6: Current tree depth is 3. Current tree node has 2963 data points Stopping Condition: You already reach maximum number of splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 4: Current tree depth is 2. Current tree node has 2564 data points Stopping Condition: You already reach maximum number of splits. ---------------------------------------------------------------------------- ---------------------------------------------------------------------------- Step 2: Current tree depth is 1. Current tree node has 1734 data points Stopping Condition: You already reach maximum number of splits. ###Markdown 3.5 Predictions Suppose you want to predict new samples' labels. Remember our tree structure is like: { 'label': None for temporary node, or predicted label at the leaf node (e.g., "Majority Voting" criterion) for leaf node; 'left_tree': Left tree after the selected feature (=0 or -1) is splitted for temporary node, None for leaf node; 'right_tree': Right tree after the selected feature (=1) is splitted for temporary node, None for leaf node; 'best_feature': The feature that is selected to do binary split for temporary node, None for leaf node. } ###Code def predict_label(new_sample, train_tree): '''This function is used to predict the label of one new sample. Inputs: 1) new_sample: A new sample, we would like to predict its label (Pandas DataFrame) 2) train_tree: The classification tree we have just trained Outputs: 1) predict_label: The predicted label for this new sample ''' # If move to the leaf node if train_tree['best_feature']==None: # Leaf node has no best feature return train_tree['label'] # return the label (e.g. 0 or 1) # If still stay at temporary node else: # Find the value of the best feature in the current node # If value is 0, then go to left tree # If value is 1, then go to right tree # Remember what your have learned in Data Structure course, about binary tree best_feature = train_tree['best_feature'] return predict_label(new_sample, train_tree['left_tree']) if new_sample[best_feature]==0 \ else predict_label(new_sample, train_tree['right_tree']) # You need to learn partial and apply function. They are powerful. from functools import partial prediction = partial(predict_label, train_tree=tree_model) # A partial function. 
Any parameter passed to predicton will # take train_tree as the 2nd parameter predicted_labels = dataset_copy.apply(lambda x: prediction(x), axis=1) # Find the predicted label for every row # Concatenate predicted_labels into our dataset dataset_copy['prediction'] = predicted_labels dataset_copy.head() ###Output _____no_output_____ ###Markdown Assignments: * Write functions to calculate `Gini index` and `misclassification error rate` metrics. ###Code def gini(sample_labels): '''This function is used to calculate gini index of a given tree node, in which there are samples with labels (0, 1) or (-1, 1). Inputs: 1) sample_labels: Labels for samples in the current tree node, such as (1, 0, 0, 1, 0) or (1, -1, -1, 1, 0) Outputs: 1) gini: Gini index of labels in the current tree node. ''' # Assert np.array sample_labels = np.array(sample_labels) # What if sample_labels are empty if sample_labels.size == 0: return 0 # What if all the labels are the same class_values = np.unique(sample_labels) # Sample labels/classes; Usually (0,1), sometimes (-1,1) num0 = len(tuple(filter(lambda x:x==class_values[0], sample_labels))) # Number of samples with one label num1 = len(tuple(filter(lambda x:x==class_values[1], sample_labels))) if class_values.size > 1 else 0 # Number of samples with another label if sample_labels.size == num0 or sample_labels.size == num1: return 0 # Calculate entropy value p0 = num0 / (num0+num1) # Probability of class 0 labels p1 = 1 - p0 # Probability of class 1 labels gini = p0*(1-p0) + p1*(1-p1) return gini def error(sample_labels): '''This function is used to calculate misclassification error of a given tree node, in which there are samples with labels (0, 1) or (-1, 1). Inputs: 1) sample_labels: Labels for samples in the current tree node, such as (1, 0, 0, 1, 0) or (1, -1, -1, 1, 0) Outputs: 1) error: misclassification error rate of labels in the current tree node. ''' # Assert np.array sample_labels = np.array(sample_labels) # What if sample_labels are empty if sample_labels.size == 0: return 0 # What if all the labels are the same class_values = np.unique(sample_labels) # Sample labels/classes; Usually (0,1), sometimes (-1,1) num0 = len(tuple(filter(lambda x:x==class_values[0], sample_labels))) # Number of samples with one label num1 = len(tuple(filter(lambda x:x==class_values[1], sample_labels))) if class_values.size > 1 else 0 # Number of samples with another label error = min(num0,num1) return error def error_rate(samples, output, feature): '''This function is used to calculate misclassification error rate when a given tree node is splitted by a given feature. Inputs: 1) samples: Samples in the current tree node before making split on the feature (Pandas Dataframe) 1) output: Name of the output column 2) feature: Name of the feature used to split the current tree node. Remember the features we selected in this case are binary. 
Outputs: 1) error_rate: The misclassification error rate if the current tree node is splitted by the feature ''' # Split samples by feature values into subsamples subsamples = defaultdict() N_errors = 0 # Initialize total misclassification errors for feature_value in np.unique(samples[feature]): subsamples[feature_value] = samples[samples[feature] == feature_value] temp = subsamples[feature_value] # Store a temporary copy N_errors += error(temp[output]) # Total misclassification errors # Calculate error rate error_rate = N_errors / float(len(samples)) return error_rate # Let us have a test a = np.array([[1,0,0,1],[0,1,1,0],[1,1,1,1],[0,0,0,0],[1,1,0,0]]) data = pd.DataFrame(a, columns=['x1','x2','x3','y']) #best_feature_split(data, 'y', ['x1','x2','x3'])[0] print "Gini index:", gini(data['y']) print "Misclassification error:", error(data['y']) print "Misclassification error rate of one split:", error_rate(data, 'y', 'x1') ###Output Gini index: 0.48 Misclassification error: 2 Misclassification error rate of one split: 0.2 ###Markdown Hints: Assignments: Post-pruning See textbook Chapter 9.4 pp.130 We need to split the original dataset into training dataset (used for training model) and pruning dataset (used for post-pruning). ###Code # Set a % of tree for training model(70) and pruning(30) # 1) build tree # 2) compare error rate before pruning and after pruning. if error smaller, prune # error_rate # prune_Data label # N_prune data # traverse each node and calculate error rate of each node # in the updated train node, calculate the error node (train model is from ClassificationTree()) # until tree model remains unchanged # show structure before pruning and after pruning from sklearn.utils import shuffle import copy import random random.seed(12345) # To get the same training and pruning data copy_of_dataset = copy.deepcopy(dataset_copy) features = list(dataset.columns[2:]) output = dataset.columns[1] num_rows = copy_of_dataset.shape[0] copy_of_dataset = shuffle(copy_of_dataset, random_state=12345) # Randomly shuffle the dataset with the seed train_data = copy_of_dataset.iloc[:int(num_rows*0.7),:] # Get all rows from start to 0.7 of total rows. Get all columns prune_data = copy_of_dataset.iloc[int(num_rows*0.7):,:] # Get all rows from 0.7 of total rows to the end. Get all columns print train_data.shape[0], prune_data.shape[0] # Get information about train and prune data assert train_data.shape[0]+prune_data.shape[0] == copy_of_dataset.shape[0] # Test and ensure data is correctly split #Store a copy: Export to csv file train_data.to_csv('./traindata_postpruning.csv', index=None) prune_data.to_csv('./prunedata_postpruning.csv', index=None) print train_data[output].value_counts() print prune_data[output].value_counts() # Import the csv of training and pruning data train_data=pd.read_csv('./traindata_postpruning.csv') prune_data=pd.read_csv('./prunedata_postpruning.csv') train_data.head() # Return the first 5 items prune_data.head() # Prune data ###Output _____no_output_____ ###Markdown Redefine the tree function and tree structure:Remember we plan to calculate error rate of each node. 
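As a quick illustration of that per-node error rate (a minimal sketch with made-up numbers, not part of the tree code itself): a node whose majority-vote prediction is 1 and which receives four pruning samples with actual labels [1, 0, 1, 1] misclassifies one of them, so its error rate is 0.25.

###Code
# Sketch of the per-node error rate computed from pruning data.
# node_label and prune_labels are made-up values for illustration only.
node_label = 1                # majority-vote prediction stored in this node
prune_labels = [1, 0, 1, 1]   # actual labels of the pruning samples that reach this node
error_rate_example = sum(1 for y in prune_labels if y != node_label) / float(len(prune_labels))
print(error_rate_example)     # 0.25
###Output
_____no_output_____
###Markdown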
Revise our tree structure is like: { 'label': Predicted label at the current node (e.g., "Majority Voting" criterion) for leaf node; 'left_tree': Left tree after the selected feature (=0 or -1) is splitted for temporary node, None for leaf node; 'right_tree': Right tree after the selected feature (=1) is splitted for temporary node, None for leaf node; 'best_feature': The feature that is selected to do binary split for temporary node, None for leaf node; 'error_rate': error rate of this node; calculated using pruning dataset; Need initialization; 'depth': How deep is this node; 'prune_data_labels': A list of actual labels of pruning dataset; 'N_prune_data': Number of pruning data samples in current node. } ###Code # Redefine the tree function: Add 3 more keys in node dictionary, for conveniently calculating prediction error rate of each node. # Now the intermediate node should also have predicted output label (using majority voting) def ClassificationTree(samples, output, features, step, tree_depth, max_depth, min_number, min_infogain, counter, max_split): '''This function is used to build a classification tree in a recursive way. Remember how you build a binary tree in the previous C++ and Data Structure courses). Inputs: 1) samples: Samples in the current tree node before making split on the feature (Pandas Dataframe) 2) output: Name of the output column 3) features: A list of feature names 4) step: The current binary split step 5) tree_depth: The depth of the current tree 6) max_depth: Maximum depth this tree can grow 7) min_number: Minimum number of node size 8) min_infogain: Minimum information gain 9) counter: Indicate this is nth split 10) max_split: Maximum number of splits Outputs: 1) tree_nodes: Nested tree nodes, which are stored and shown in nested dictionary type ''' # If samples are empty, return None if samples is None or len(samples)==0: return None current_features = features # Current feature list labels = samples[output] # Output labels in the current tree node print "----------------------------------------------------------------------------" print "----------------------------------------------------------------------------" print "Step %s: Current tree depth is %s. Current tree node has %s data points" % (step, tree_depth, len(samples)) # Sample size # Verify whether stopping conditions 1-4 are satisfied. If satisfied, return a leaf_node if stop_1(labels) or stop_2(current_features) or stop_3(tree_depth, max_depth) or stop_4(samples, min_number): return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None, 'error_rate': -float('inf'), # Minimum error rate (low error, keep this node, cause haven't prune yet) 'depth': tree_depth, 'prune_data_labels': [], # No label yet 'N_prune_data': 0 # Not pruned yet } # If pass stopping conditions 1-4 , then do best splitting best_split = best_feature_split(samples, output, current_features) best_feature, best_infogain, best_left, best_right = best_split[0], best_split[1], best_split[2], best_split[3] # Verify whether stopping condition 5 is satisfied. 
If satisfied, return a leaf node if stop_5(best_infogain, min_infogain): return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None, 'error_rate': -float('inf'), 'depth': tree_depth, 'prune_data_labels': [], 'N_prune_data': 0 } # If pass stopping condition 5, then move on # If reach maximum number of splits split_counter = counter.pop(0) # Indicate this is nth split, counter is a list of numbers in sequence if split_counter >= max_split: print "Stopping Condition: You already reach maximum number of splits." return { 'label': majority_vote(labels), 'left_tree': None, 'right_tree': None, 'best_feature': None, 'error_rate': -float('inf'), 'depth': tree_depth, 'prune_data_labels': [], 'N_prune_data': 0 } else: step += 1 print "Step %s: Binary split on %s. Size of Left and Right tree is (%s, %s)" % \ (step, best_feature, len(best_left) if best_left is not None else 0, len(best_right) if best_right is not None else 0) current_features.remove(best_feature) # Remove this feature if this feature is used for split print "Note: You have already done %s splits." % (split_counter) # Do binary split on left tree and right tree in a recursive way left_split = ClassificationTree(best_left, output, current_features, step+1, tree_depth+1, max_depth, min_number, min_infogain, \ counter, max_split) right_split = ClassificationTree(best_right, output, current_features, step+1, tree_depth+1, max_depth, min_number, min_infogain, \ counter, max_split) return { 'label': majority_vote(labels),# Now the intermediate node should also have predicted output label (using majority voting) 'left_tree': left_split, 'right_tree': right_split, 'best_feature': best_feature, 'error_rate': float('inf'), 'depth': tree_depth, 'prune_data_labels': [], 'N_prune_data': 0 } # Use pruning dataset to get error_rate of each node # Each pruning data sample should traverse the train_model, store actual labels of pruning data into respective nodes features = list(dataset.columns[2:]) output = dataset.columns[1] def traverse(new_sample, train_tree, output_name): '''This function is used to traverse the train tree model of each sample in pruning data, and update the node values in the train tree model. 
Inputs: 1) new_sample: A new sample, we would like to predict its label (Pandas DataFrame) 2) train_tree: The classification tree we have just trained 3) output_name: output variable name of this new_sample Outputs: 1) Update the tree node values of the training tree model we have obtained, such as 'prune_data_labels' and 'error_rate' ''' # Store this actual label into this node train_tree['prune_data_labels'].append(new_sample[output_name]) # Append into the list # Update current error_rate temp = train_tree['prune_data_labels'] train_tree['error_rate'] = len(filter(lambda x: x != train_tree['label'], temp)) / len(temp) # Update the number of pruning data samples in the current tree node train_tree['N_prune_data'] = len(temp) # If in the leaf node, then stop if train_tree['best_feature']==None: return # If in a temporary node, then need further traverse else: # Find the value of the best feature in the current node # If value is 0, then go to left tree # If value is 1, then go to right tree best_feature = train_tree['best_feature'] traverse(new_sample, train_tree['left_tree'], output_name) if new_sample[best_feature]==0 \ else traverse(new_sample, train_tree['right_tree'], output_name) features = list(dataset.columns[2:]) output = dataset.columns[1] maximum_split = 20 counters = [x for x in range(1,101)] train_model = ClassificationTree(train_data, output, features, step=0, tree_depth=0, max_depth=4, min_number=5, min_infogain=5e-4, \ counter=counters, max_split=maximum_split) for i in range(0, len(prune_data)): row = prune_data.iloc[i] traverse(row, train_model, output) # Post-pruning the training tree model: From bottom to up # Recursively pruning on the branch that the parent node's left+right child nodes are both leaf nodes import copy # Post-pruning # Idea: (by recursing down the tree) Find the father node whose left and right child are both leaf nodes (stopping case) -> # Compare parent node's error rate with weighted error rate (still in stoping case) -> # Repeat until nothing to do def post_prune(train_tree): '''This function is used to do post-pruning for one tree model. 
Inputs: 1) train_tree: The classification tree we have just trained Outputs: 1) Update the train_tree ''' # In the current node, if left and right child are both leaf nodes, then decide whether to prune if train_tree['left_tree']['best_feature'] == None and train_tree['right_tree']['best_feature'] == None: # both leaf p_left = train_tree['left_tree']['N_prune_data'] / train_tree['N_prune_data'] # proportion of samples in the left leaf node p_right = train_tree['right_tree']['N_prune_data'] / train_tree['N_prune_data'] # proportion of samples in the right leaf node weight_error = p_left * train_tree['left_tree']['error_rate'] + p_right * train_tree['right_tree']['error_rate'] # weighted error rate print train_tree['best_feature'], ':', 'Parent node error_rate:', train_tree['error_rate'], ',', 'Weighted error_rate:', weight_error # If weighted error_rate of left+right child nodes is higher than error_rate of parent node, then do pruning if weight_error >= train_tree['error_rate']: # set the 2 leaf nodes to none, best feature to none also (leaf have no best feature) train_tree['left_tree'], train_tree['right_tree'], train_tree['best_feature'] = None, None, None return else: # Pruning left subtree and then right subtree in a recursive way # In the current node, if left subtree or right subtree is None (i.e., current node is leaf node), then pass try: post_prune(train_tree['left_tree']) except: pass try: post_prune(train_tree['right_tree']) except: pass # Make a deepcopy of the original model # Please note the difference between shallow copy and deep copy in Python prune_model = copy.deepcopy(train_model) # Do post-pruning multiple times, until the tree structure does not change flag = True while flag: old = copy.deepcopy(prune_model) # Make a copy of old tree post_prune(prune_model) if prune_model == old: # If new tree is the same as the old tree, stop flag = False # Tree structure before pruning print_tree(train_model, depth=0, LR=0) # Tree structure after pruning print_tree(prune_model, depth=0, LR=0) ###Output grade_A =0 : grade_B =0 : - (Predict 1) =1 : home_ownership_OWN =0 : - (Predict 0) =1 : term_ 60 months =0 : - (Predict 0) =1 : - (Predict 1) =1 : - (Predict 0) ###Markdown 4 Open-Source Packages Take a break and let us use open-source package to run decision tree models. Use `Scikit-learn`to make classification trees and make predictions: http://scikit-learn.org/stable/modules/tree.html. 
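One caveat before running the cell below: it imports `train_test_split` from `sklearn.cross_validation`, a module that was deprecated in scikit-learn 0.18 and removed in 0.20. On a recent scikit-learn installation the equivalent import (everything else stays the same) is:

###Code
# On newer scikit-learn versions, train_test_split lives in model_selection
from sklearn.model_selection import train_test_split
###Output
_____no_output_____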
###Code # Import libraries from sklearn import datasets from sklearn.cross_validation import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn import tree from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix # Update our features and output features = list(dataset.columns[2:]) output = dataset.columns[1] # Split dataset to do validation X = dataset[features] y = dataset[output] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) # Fit the model on train data decision_tree = DecisionTreeClassifier() decision_tree_model = decision_tree.fit(X_train, y_train) decision_tree_model.classes_ # Get predicted labels for test data y_pred = decision_tree_model.predict(X_test) # Create confusion matrix cm = confusion_matrix(y_test, y_pred) TN, FP, FN, TP = cm.ravel() print (cm) print (TN, FP, FN, TP) # Performance of decision tree model print ("Accuracy: ", accuracy_score(y_test, y_pred)) print ("Sensitivity: ", recall_score(y_test, y_pred)) print ("Precision: ", precision_score(y_test, y_pred)) ###Output ('Accuracy: ', 0.8254196642685852) ('Sensitivity: ', 0.005509641873278237) ('Precision: ', 0.4) ###Markdown How to calculate:1. Accuracy2. Misclassification rate3. Precision4. Sensitivity ###Code # ROC and AUC from sklearn.metrics import roc_curve, auc # Get predicted scores Pr(y=1): Used as thresholds for calculating TP Rate and FP Rate score = decision_tree_model.predict_proba(X_test)[:, 1] # Plot ROC Curve fpr, tpr, thresholds = roc_curve(y_test, score) # fpr: FP Rate, tpr: TP Rate, thresholds: Pr(y=1) roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, label='AUC = %0.2f'% roc_auc) plt.legend(loc='lower right') plt.plot([0,1],[0,1],'r--') plt.xlim([-0.1,1.1]) plt.ylim([-0.1,1.1]) plt.title('Receiver operating characteristic') plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show() # Plot the decision tree # Remember you should install package graphviz first import graphviz dot_data = tree.export_graphviz(decision_tree_model, out_file=None, feature_names=features, class_names=output, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph # Store in .pdf graph.render("Lending Club Loan Status") ###Output _____no_output_____ ###Markdown Assignments: * Use Titanic data (“train.csv”); Fit the model using `scikit-learn` with different metrics (e.g., information gain, gini index)* Observe and report the differences (e.g., best features for splitting, tree structure, performance, etc.)* There are no right or wrong answers. Don't worry. Just report what you've seen. **Note:** You may need to do simple data cleaning by yourself, such as binarizing output variable "survived", and transforming categorical variables to dummy variables. 
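Before switching to the Titanic data, here is how the four quantities asked about above (accuracy, misclassification rate, precision, sensitivity) fall out of the confusion-matrix counts `TN, FP, FN, TP` computed earlier; a short sketch:

###Code
# Metrics derived from the confusion-matrix counts unpacked above
accuracy = (TP + TN) / float(TP + TN + FP + FN)
misclassification_rate = (FP + FN) / float(TP + TN + FP + FN)   # = 1 - accuracy
precision = TP / float(TP + FP)
sensitivity = TP / float(TP + FN)                               # also called recall / true positive rate
print(accuracy, misclassification_rate, precision, sensitivity)
###Output
_____no_output_____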
###Code %pwd titanic = pd.read_csv("./Train.csv", low_memory=False) titanic.head(n=10) # Transform categorical features to binary features age_dummy = pd.get_dummies(titanic['Age Class'], prefix='Ageclass') passenger_dummy = pd.get_dummies(titanic['Passenger Class'], prefix='Passenger') siblings_dummy = pd.get_dummies(titanic['No of Siblings or Spouses on Board'], prefix='Siblings') parents_dummy = pd.get_dummies(titanic['No of Parents or Children on Board'], prefix='Parents') titanic['Output'] = titanic['Survived'].map(lambda x: 1 if x == 'Yes' else 0) # Since only 2 possibilities titanic['Gender'] = titanic['Gender'].map(lambda x: 1 if x == 'Male' else 0) # Since only 2 gender titanic.head() # Select features and output features = ['Age'] output = ['Output'] dataset = titanic[features+output] dataset.head() dataset = dataset.join([age_dummy, passenger_dummy,siblings_dummy,parents_dummy]) dataset.rename(columns={'Siblings_>=2': 'Siblings_2'}, inplace=True) dataset.rename(columns={'Parents_>=2': 'Parents_2'}, inplace=True) dataset.head() # Update our features and output features = dataset.columns[2:] output = dataset.columns[1] features # Fit the model on train data X = dataset[features] y = dataset[output] decision_tree = DecisionTreeClassifier(criterion='entropy') decision_tree_model = decision_tree.fit(X, y) decision_tree_model.classes_ # You can also do cross-validation import graphviz dot_data = tree.export_graphviz(decision_tree_model, out_file=None, feature_names=features, class_names=output, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph # Store in .pdf graph.render("Titanic_entropy") # Fit the model on train data decision_tree = DecisionTreeClassifier(criterion='gini') decision_tree_model = decision_tree.fit(X, y) dot_data = tree.export_graphviz(decision_tree_model, out_file=None, feature_names=features, class_names=output, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph # Store in .pdf graph.render("Titanic_gini") ###Output _____no_output_____ ###Markdown Observations:1) Both metrics (gini and entropy) produce very similar trees (same depth, number of leaf and internal nodes, sample size at each node, number of instances that fall into each category in each node2) Best features for splitting are mostly the same except 2 nodes (gini -> parents_2 to ageClass_childhood vs entropy -> ageClass_youngAdulthood to parents_2 5 Questions (Just think about them) 5.1 What if features are continuous?* Please refer to text book chapter 8 pp.93 5.2 What if output is continuous? * Regression Tree ###Code # 5.1 - Split the continuous variables into different range of numbers. Each of this range is now a category # 5.2 - Use regression tree instead to predict outcomes instead of classifying them ###Output _____no_output_____ ###Markdown Decision Tree Step 0: IMPORTANT! Intall one more dependencyBefore running the code below, you need to install one more dependency, graphviz. First open command line window and run these two commands: ###Code pip install graphviz conda install graphviz ###Output _____no_output_____ ###Markdown Step 1: Understand the dataset and our taskDataset bc_wisc.csv contains data on breast cancer tumors from a cohort of patients in Wisconsin. The rows are tumor samples, and the columns are different features/variables/attributes about the tumor. The first column contains sample ID, which we don't need to train our model. 
The second column, 'diagnosis', says whether the given tumor was benign = 0 or malignant = 1. The third column + the rest have measurement of the tumor morphology (e.g. size measured by radius_mean). We will use the features on tumor morphology to train a decision tree that will predict whether the tumor was benign or malignant. Step 2: Load the dataset ###Code import numpy as np data = np.loadtxt('bc_wisc.csv',delimiter=',',skiprows=1) #skipping header row which contains column names X = data[:,2:] # Use 3rd, 4th, ..., last column, as our features, X. # Note that we start counting at 0, so that's why it is 2: rather than 3: y = data[:,1] # Use 2nd column (diagnosis) as our response variable, the thing we're trying to predict. n = X.shape[0] # n = number of rows/samples in the dataset ###Output _____no_output_____ ###Markdown Step 3: Split the dataset to trainset and testset ###Code n_train = int(n*0.6) # We'll use roughly 60% of our dataset to train, the rest to test the model's performance X_train = X[0:n_train,:] y_train = y[0:n_train] X_test = X[n_train:,:] y_test = y[n_train:] ###Output _____no_output_____ ###Markdown Step 4: Train a decision tree to predict diagnosis from tumor morphology ###Code from sklearn import tree model = tree.DecisionTreeClassifier() model = model.fit(X_train, y_train) ###Output _____no_output_____ ###Markdown Step 5: Visualize the decision tree ###Code import graphviz # First, some logistical things.. # Load in the feature/column names: f = open('bc_wisc.csv') column_names = f.readline().strip().split(',') f.close() feature_names = column_names[2:] # Specify what 0 and 1 means in our response variable so that # the rendered decision tree is more readable target_names = ['benign','malignant'] # 0 = benign tumor, 1 = malignant tumor # Render the decision tree into a PDF file model_info = tree.export_graphviz(model, out_file=None, feature_names=feature_names, class_names=target_names, filled=True, rounded=True,special_characters=True) graph = graphviz.Source(model_info) graph.render("decision_tree_bc_wisc",view=True) ###Output _____no_output_____ ###Markdown Step 6: Make predictions on the testset, see how well the model performs ###Code from sklearn.metrics import confusion_matrix # Now we make predictions: y_predict = model.predict(X_test) # See how many we got 'right'. # Note that negative = benign, positive = malignant, even though malignant tumor doesn't seem like a 'positive' thing. 
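# A note on the line below: confusion_matrix(y_test, y_predict) returns a 2x2 array
#   [[TN, FP],
#    [FN, TP]]
# for the label order (0 = benign, 1 = malignant), and .ravel() flattens it row by
# row, which is why the four counts unpack as TN, FP, FN, TP.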
true_negative, false_positive, false_negative, true_positive = confusion_matrix(y_test, y_predict).ravel() print('True negative: '+ str(true_negative)) print('False positive: ' + str(false_positive)) print('False negative: ' + str(false_negative)) print('True_positive: ' + str(true_positive)) ###Output True negative: 131 False positive: 11 False negative: 8 True_positive: 74 ###Markdown Step 7: Plot the test results ###Code import seaborn as sns import matplotlib.pyplot as plt test_result = np.array([[true_negative, false_positive],[false_negative, true_positive]]) ax = sns.heatmap(test_result,cmap='Blues',annot=True,fmt='d') ax.set_xlabel('Prediction') ax.set_ylabel('True value') ax.set_xticklabels(target_names) ax.set_yticklabels(target_names,va='center') plt.show() ###Output _____no_output_____ ###Markdown This is a summary on working with Decision trees ###Code import pandas as pd HousingData = pd.read_csv("Melbourne_housing_FULL.csv") #Drop rows with missing values HousingData = HousingData.dropna(axis =0) #Display column names print(HousingData.columns) #Extract target variable and Features y = HousingData.Price features = ['Landsize', 'Rooms', 'Bathroom', 'BuildingArea'] #Just select few non categorical features X = HousingData[features] X.head(5) #View first few feature #Split to training and test set from sklearn.model_selection import train_test_split # split data into training and validation data, for both features and target. Set random_state for reproducibality train_X, val_X, train_y, val_y = train_test_split(X, y,random_state = 0) ###Output _____no_output_____ ###Markdown We will use Decision Tree Regressor. To find the ideal leaf nodes, use a for loop ###Code from sklearn.metrics import mean_absolute_error from sklearn.tree import DecisionTreeRegressor def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y): model = DecisionTreeRegressor(max_leaf_nodes= max_leaf_nodes,random_state=0) model.fit(train_X,train_y) pred = model.predict(val_X) mae = mean_absolute_error(pred,val_y) return mae for max_leaf_nodes in [5,25,50,100,250,500,750,1000]: mae = get_mae(max_leaf_nodes, train_X,val_X,train_y,val_y) print("Max leaf nodes: %d \t\t Mean Absolute Error: %f" %(max_leaf_nodes, mae)) ###Output Max leaf nodes: 5 Mean Absolute Error: 403888.085106 Max leaf nodes: 25 Mean Absolute Error: 375460.744184 Max leaf nodes: 50 Mean Absolute Error: 384288.785164 Max leaf nodes: 100 Mean Absolute Error: 387866.435969 Max leaf nodes: 250 Mean Absolute Error: 398606.923908 Max leaf nodes: 500 Mean Absolute Error: 420635.817301 Max leaf nodes: 750 Mean Absolute Error: 438046.116887 Max leaf nodes: 1000 Mean Absolute Error: 450015.212028 ###Markdown We notice that A Leaf node of 25 seems to produce the lowest error. 
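If you would rather not read that off the printout, the same comparison can be done programmatically; this is a minimal sketch that reuses the `get_mae` helper and the train/validation splits defined above:

###Code
# Pick the max_leaf_nodes value with the lowest validation MAE
candidate_leaf_nodes = [5, 25, 50, 100, 250, 500, 750, 1000]
scores = {n: get_mae(n, train_X, val_X, train_y, val_y) for n in candidate_leaf_nodes}
best_leaf_nodes = min(scores, key=scores.get)
print(best_leaf_nodes)  # expected to be 25 for this split
###Output
_____no_output_____
###Markdown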
So Build a final model around that ###Code DecisionTree = DecisionTreeRegressor(max_leaf_nodes= 25,random_state=0) DecisionTree.fit(train_X,train_y) Predictions = DecisionTree.predict(val_X) print("The Mean Absoulte Error: %f" %mean_absolute_error(Predictions,val_y)) ###Output The Mean Absoulte Error: 375460.744184 ###Markdown One-hot-Encoding ###Code train['Sex'].value_counts() train['Gender'] = (train['Sex'] == 'female') train['Gender'] train['Embarked'].value_counts() train['Embarked'].isnull().sum() test['Embarked'].value_counts() test['Embarked'].isnull().sum() train['Embarked_S'] = train['Embarked'] == "S" train['Embarked_C'] = train['Embarked'] == "C" train['Embarked_Q'] = train['Embarked'] == "Q" train['Embarked_S'].sum() train['Embarked_C'].head() train['Embarked_Q'].head() # boolean 데이터도 사용가능 # int 로 수치형 데이터로 바꾸기 가능 train[['Embarked', 'Embarked_S', 'Embarked_C', 'Embarked_Q']].head() test['Embarked_S'] = test['Embarked'] == "S" test['Embarked_C'] = test['Embarked'] == "C" test['Embarked_Q'] = test['Embarked'] == "Q" ###Output _____no_output_____ ###Markdown 가족수 구하기 ###Code train.columns train['FamilySize'] = train['SibSp'] + train['Parch'] + 1 # 가족 수에 자신도 포함되게 하여 1명 이상이 되도록 만들어준다. train['FamilySize'].value_counts # 혼자 탄 사람이 많다. train['Family'] = train['FamilySize'] train.loc[train['FamilySize'] == 1, 'Family'] = 'S' train.loc[(train['FamilySize'] > 1) & (train['FamilySize'] < 5), 'Family'] = 'M' train.loc[train['FamilySize'] > 4, 'Family'] = 'L' train[['Family', 'FamilySize']].head() train['Family_S'] = train['Family'] == "S" train['Family_M'] = train['Family'] == "M" train['Family_L'] = train['Family'] == "L" train[['Family', 'Family_S', 'Family_M', 'Family_L', 'FamilySize']].head() test['FamilySize'] = test['SibSp'] + test['Parch'] + 1 test['Family'] = test['FamilySize'] test.loc[test['FamilySize'] == 1, 'Family'] = 'S' test.loc[(test['FamilySize'] >1) & (test['FamilySize'] < 4), 'Family'] = 'M' test.loc[test['FamilySize'] > 4, 'Family'] = 'L' test[['Family', 'FamilySize']].head() test['Family_S'] = test['Family'] == "S" test['Family_M'] = test['Family'] == "M" test['Family_L'] = test['Family'] == "L" test[['Family', 'Family_S', 'Family_M', 'Family_L', 'FamilySize']].head() ###Output _____no_output_____ ###Markdown feature selection ###Code feature_names = ['Gender', 'Age_mean', 'Embarked_S', 'Embarked_C', 'Embarked_Q', 'Family_S', 'Family_M', 'Family_L'] x_train = train[feature_names] x_train.head() y_label = train['Survived'] print(y_label.shape) y_label.head() !pip install graphviz import os os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/' from sklearn.tree import export_graphviz import graphviz export_graphviz(model, feature_names=feature_names, class_names=["Perish", "Survived"], out_file="decision-tree.dot") with open("decision-tree.dot") as f: dot_graph = f.read() graphviz.Source(dot_graph) from sklearn.tree import DecisionTreeClassifier model = DecisionTreeClassifier(max_depth=3, random_state=2018) model model.fit(x_train, y_label) # prediction = model.predict(x_test) test['Gender'] = (test['Sex'] == 'female') x_test = test[feature_names] prediction = model.predict(x_test) prediction[:10] test['Survived'] = prediction test.columns submission = test[['PassengerId','Survived']] submission.head() submission.to_csv("submission_ML.csv", index=False) pd.read_csv("submission_ML.csv").head() from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score k_fold = KFold(n_splits=9, shuffle=True, random_state=2018) 
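# cross_val_score below splits x_train into the 9 folds defined by k_fold, fits the
# decision tree on 8 folds and scores accuracy on the held-out fold, repeating until
# every fold has served as the validation set once; it returns the 9 accuracy values.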
scoring = 'accuracy' score = cross_val_score(model, x_train, y_label, cv=k_fold, n_jobs=-1, scoring=scoring) print(score) round(np.mean(score)*100, 2) prediction = model.predict(x_test) prediction ###Output _____no_output_____ ###Markdown Data Science and Business Analytics Intern Paras Bhirud @ GRIP - The Sparks Foundation Task 6: Prediction using decision tree algorithm Create the Decision Tree classifier and visualize it graphically. The purpose is, if we feed any new data to this classifier, it would be able to predict the right class accordingly. ###Code import sklearn.datasets as datasets import pandas as pd iris=datasets.load_iris() X = pd.DataFrame(iris.data, columns=iris.feature_names) X.head() X.tail() X.info() X.describe() X.isnull().sum() Y = iris.target Y from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42) from sklearn.tree import DecisionTreeClassifier dtc = DecisionTreeClassifier() dtc.fit(X_train,y_train) print('Decision Tree Classifer Created Successfully') y_predict = dtc.predict(X_test) from sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_predict) from sklearn import tree import matplotlib.pyplot as plt fn=['sepal length (cm)','sepal width (cm)','petal length (cm)','petal width (cm)'] cn=['setosa','versicolor','virginica'] fig, axes = plt.subplots(nrows = 1, ncols = 1, figsize = (4,4), dpi = 300) tree.plot_tree(dtc, feature_names = fn, class_names = cn, filled = True); ###Output _____no_output_____ ###Markdown Decision Tree AlgorithmDecision trees are a powerful prediction method and extremely popular.They are popular because the final model is so easy to understand by practitioners and domain experts alike. The final decision tree can explain exactly why a specific prediction was made, making it very attractive for operational use.Decision trees also provide the foundation for more advanced ensemble methods such as bagging, random forests and gradient boosting.Reference: [How to implement decision tree from scratch in python](https://machinelearningmastery.com/implement-decision-tree-algorithm-scratch-python/) The various decision tree algorithms following as: - ID3 (Only apply on classification problems and only allow to input discrete data) - C4.5 (Improve ID3 algorithm can apply on regression and classification problems, it also accept discrete data and continuous data) - C5 - CART (Classification and Regression Tree)(skit-learn use) 決策樹演算法概念: - 決策數的生成: - 依據屬性選擇指標(Information Gain, Gini Index or Chi-square, etc)尋找最佳節點(node)變數,並從節點變數中尋找最佳切割值 - 目的: - 切割後同分支資料的同質性(Homogeneous)愈高愈好,不純度(Impurity)愈低愈好 - 常見的屬性選擇指標: 1. Information Gain(based on Entropy) $\to$ ID3, C4, C4.5, C5 2. Gini Index $\to$ CART 3. 
Chi-square test of Independence $\to$ CHAID ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt #import matplotlib #print(np.__version__) #print(pd.__version__) #print(matplotlib.__version__) ###Output _____no_output_____ ###Markdown Entropy - 如果資料集合S具有n個不同的類別,那麼資料集合S的 *Entropy* 計算方式為: - $Entropy(s) = \sum_{i = 1}^{n}(-p_{i}log_{2}p_{i})$ ###Code import math def Entropy(Prob): if sum(Prob) != 1: raise ValueError("The summation of probability must equal to 1.") entropy = 0 for p in Prob: entropy += -p * math.log(p, 2) return entropy prob = np.linspace(0, 1, 10000) prob = np.delete(prob, [0, len(prob)-1]) complement_prob = np.abs(1 - prob) prob_arr = np.c_[prob, complement_prob] entropy_list = [] for arr in prob_arr: entropy_list.append(Entropy(arr)) plt.style.use("ggplot") plt.plot(prob, entropy_list, 'r') plt.xlabel(s = 'Probability') plt.ylabel(s = 'Entropy') plt.show() ###Output _____no_output_____ ###Markdown 資訊獲取量 (Inforamtion Gain) - ID3 是利用 Information Gain 來衡量資料集中任一變數在特定值之下切割資料分類的好壞 - 變數 A 在資料集 S 的資訊獲取量為: - $Gain(S, A) = Entropy(s) - \sum_{j = 1}^{v}\frac{\mid S_{j}\mid}{\mid S \mid} Entropy(S_{j})$ - 假設變數$A$中有$v$個不同值$\{a1 , a2 ,…, av \}$,而資料集合$S$會因為這些不同值而產生(分割)出$v$個不同的資料子集合$\{S_{1}, S_{2},…, S_{v}\}$ - $Entropy(s)$: 資料$S$整體的亂度 - $Entropy(s_{j})$: 資料子集合 $S_{j}$ 中的亂度, 其中 $j = 1, 2, 3,.., v$ - $\frac{\mid S_{j} \mid}{\mid S \mid}$: 第 j 個子集合之資料個數佔總資料集合的比例(權重) - $Gain(S, A)$: 利用變數$A$對資料集合$S$進行分割的獲利 - $Gain$值愈大,表示利用變數$A$來切割資料**亂度愈小**,用來**分類資料較佳** - $Gain$值愈小,表示利用變數$A$來切割資料**亂度愈大**,用來**分類資料較差** ###Code play_or_not = [0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0] play_or_not = [bool(ele) for ele in play_or_not] wind = ['weak', 'strong', 'weak', 'weak', 'weak', 'strong', 'weak', 'weak', 'weak', 'strong', 'strong', 'strong', "weak",'strong'] humidity = ["High", "High", "High", "High", 'Normal', 'Normal', 'Normal', 'High', 'Normal', 'Normal', 'Normal', 'High', 'Normal', 'High'] dat = {"humidity": humidity, 'wind_strength': wind, "play": play_or_not} df = pd.DataFrame(dat, columns = ['humidity', 'wind_strength', 'play']) df.head(3) def Information_Gain(df, target_variable, class_variable): total_df = df[target_variable].count() prob_target = df[target_variable].sum()/total_df entropy_s = Entropy([prob_target, 1 - prob_target]) # calculate Entropy of original dataset class_info = {} for class_type in df[class_variable].unique(): # calculate Entropy of subset subset = df[target_variable][df[class_variable] == class_type] if subset.count() == 0: prob_subset = 0 else: prob_subset = subset.sum()/subset.count() entropy_si = Entropy([prob_subset, 1 - prob_subset]) class_info[class_type] = [subset.count()/total_df, entropy_si] weight_entropy = 0 for sub_entropy in class_info.values(): weight_entropy += sub_entropy[0]*sub_entropy[1] gain = entropy_s - weight_entropy return gain IG_humidity = Information_Gain(df, target_variable = 'play', class_variable = 'humidity') IG_wind = Information_Gain(df, target_variable = 'play', class_variable = 'wind_strength') print("Humidity Information Gain: {:.5f}".format(IG_humidity)) print("wind strength Information Gain: {:.5f}".format(IG_wind)) ###Output Humidity Information Gain: 0.15184 wind strength Information Gain: 0.04813 ###Markdown C4.5 - ID3演算法使用的資訊獲量會傾向選擇擁有較多類別的變數做為節點,而C4.5則是利用了正規化(Normalize)的概念改進了ID3的缺點,在計算資訊獲取量時,尚須計算該變數的分割資訊值(Split Information),進而計算獲利比例(Gain Ratio) - $SplitInfo_{A}(S) = -\sum_{j = 1}^{v} \frac{\mid S_{j} \mid}{\mid S \mid} log_{2}{(\frac{\mid S_{j} \mid}{\mid S \mid})}$ - $GainRatio(A) = Gain(S, A) \ / \ 
SplitInfo_{A}(S)$ - $GainRatio$愈大,表示利用變數A來切割資料亂度愈小,用來分類資料較佳 ###Code def GainRatio(df, target_variable, class_variable): # modify Information_Gain function total_df = df[target_variable].count() prob_target = df[target_variable].sum()/total_df entropy_s = Entropy([prob_target, 1 - prob_target]) # calculate Entropy of original dataset class_info = {} for class_type in df[class_variable].unique(): # calculate Entropy of subset subset = df[target_variable][df[class_variable] == class_type] if subset.count() == 0: prob_subset = 0 else: prob_subset = subset.sum()/subset.count() entropy_si = Entropy([prob_subset, 1 - prob_subset]) class_info[class_type] = [subset.count()/total_df, entropy_si] weight_entropy = 0 split_info = 0 for sub_entropy in class_info.values(): split_info += (-sub_entropy[0])*np.log2(sub_entropy[0]) # calculate SplitInfo weight_entropy += sub_entropy[0]*sub_entropy[1] gain_ratio = (entropy_s - weight_entropy)/split_info return gain_ratio GainR_humidity = GainRatio(df, target_variable = 'play', class_variable = 'humidity') GainR_wind = GainRatio(df, target_variable = 'play', class_variable = 'wind_strength') print("Humidity GainRatio: {:.5f}".format(GainR_humidity)) print("wind strength GainRatio: {:.5f}".format(GainR_wind)) ###Output Humidity GainRatio: 0.15184 wind strength GainRatio: 0.04885 ###Markdown CART - CART與ID3、C4.5與C5.0演算法的最大差異為在每一個節點上都是採用二分法,也就是每個節點只能有兩個分支,而 ID3、C4.5與 C5.0則在每個節點上能有多個不同的分支 - 假設資料集合 S 中包含 n 個類別,則 - $Gini(S) = 1 - \sum_{j = 1}^{n} P_{j}^{2}$, 其中 $P_{j}$ 為在S中屬於 j 類別的機率 - 利用變數 A 切割資料集合 $S$ 為 $S_{1}$ 和 $S_{2}$(二元切割),則利用此變數分割資料的 Gini Index 如下所示: - $Gini_{A}(S) = \frac{\mid S_{1}\mid}{\mid S \mid} Gini(S_{1}) + \frac{\mid S_{2}\mid}{\mid S \mid} Gini(S_{2})$ - 不純度(Impurity)的降低值: - $\Delta Gini(A) = Gini(S) - Gini_{A}(S)$ - 挑選擁有最大不純度的降低值,或是令 $Gini_{A}(S)$ 最小的變數做為切割資料的變數 ###Code def Gini_score(Prob): if sum(Prob) != 1: raise ValueError("The summation of probability must equal to 1.") temp_p = 0 for p in Prob: temp_p += p**2 gini = 1 - temp_p return gini def Gini_Index(df, target_variable, class_variable): total_df = df[target_variable].count() prob_target = df[target_variable].sum()/total_df gini_s = Gini_score(Prob = [prob_target, 1 - prob_target]) class_info = {} for class_type in df[class_variable].unique(): subset = df[target_variable][df[class_variable] == class_type] if subset.count() == 0: prob_subset = 0 else: prob_subset = subset.sum()/subset.count() gini_si = Gini_score([prob_subset, 1 - prob_subset]) class_info[class_type] = (subset.count()/total_df)*gini_si delta_gini = gini_s - sum(class_info.values()) return delta_gini gini_wind = Gini_Index(df, target_variable = 'play', class_variable = 'wind_strength') gini_humidity = Gini_Index(df, target_variable = 'play', class_variable = 'humidity') print("Humidity Gini-Index: {:.5f}".format(gini_humidity)) print("wind strength Gini-Index: {:.5f}".format(gini_wind)) ###Output Humidity Gini-Index: 0.09184 wind strength Gini-Index: 0.03061 ###Markdown scikit-learn uses an optimised version of the CART algorithm. 
[(skit-learn official Website)](http://scikit-learn.org/stable/modules/tree.html) ###Code from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn import metrics #print(sklearn.__version__) iris = datasets.load_iris() iris_arr = np.concatenate((iris.data, iris.target.reshape(-1, 1)), axis = 1) col_names = iris.feature_names.copy() col_names.append('species') iris_df = pd.DataFrame(data = iris_arr, columns = col_names) iris_df.head(3) map_dic = {class_num : name for class_num, name in zip(np.unique(iris.target), iris.target_names)} map_dic def plot_iris(df, x_axis, y_axis, class_name): % matplotlib inline plt.figure() #plt.style.use('seaborn') subset = iris_df[[x_axis, y_axis, class_name]] class_num = subset[class_name].unique().astype(np.int64) for num in class_num: x = subset[x_axis][subset[class_name] == num] y = subset[y_axis][subset[class_name] == num] plt.scatter(x, y, color = ['r', 'g', 'b'][num], label = map_dic[num], s = 40) plt.legend(loc = 'best', prop={'size': 10}) plt.xlabel(x_axis) plt.ylabel(y_axis) plt.show() #print(plt.style.available) # can select plot style plot_iris(df = iris_df, x_axis = 'sepal length (cm)', y_axis = 'petal length (cm)', class_name = 'species') ###Output _____no_output_____ ###Markdown DecisionTreeClassifier [參數說明:](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.htmlsklearn.tree.DecisionTreeClassifier) - criterion: 用哪種指標當作分割節點的標準($Gini \ \to \ Gini \ Impurity$, $entropy \ \to \ Information \ Gain$) - splitter: 如何決定節點,有 `best`和`random`兩種,`best`指的是選取特徵中最好的分割點,`random`為特徵中隨機的分割點, default 為 `best` - max_depth: 樹的最大深度,達到樹的最大深度樹即停止生長,預設為`None`,若為`None`樹則會一直成長,直到所有葉節點的資料都一樣,或是直到所有葉節點的資料筆數小於`min_samples_split`所設定的大小 - min_samples_split: 內部節點(internal node)能被切割的最少資料個數,預設值為2,若內部節點資料個數小於設定值,則停止生長 - min_sample_leaf: 葉節點中最少的資料個數,如果小於等於設定值,則數會停止生長 - max_features: 幾個特徵要被搜尋最佳切割點,預設值為`None`,即為所有特徵 - max_leaf_nodes:最多有幾個葉節點,但若定設定過小的`max_leaf_nodes`則葉節點的impurity不會下降太多 - min_impurity_decrease: 若**impurity的下降值**低於此設定值,則不會再繼續進行切割 - min_impurity_split: 讓樹能夠提早訓練結束的閥值,如果切割後的impurity高於設定值,則會進行切割(version 0.21 sklearn 將會用,`min_impurity_decrease`代替) - presort: 是否要對資料進行排序,預設值為`Fasle`,排序後對於樣本數不多的資料集訓練的速度或許會比較快 ###Code x, y = iris.data, iris.target x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3) tree_clf = DecisionTreeClassifier(criterion = 'gini', splitter = 'best', max_depth = 3 , min_samples_split = 2, max_features = None, min_impurity_decrease = 0.01) tree_clf.fit(X = x_train, y = y_train) prediction = tree_clf.predict(X = x_test) prob_arr = tree_clf.predict_proba(X = x_test) # the probabilities of belonging to each class prediction = prob_arr.argmax(axis = 1) print("accuracy: {:.2f}".format(tree_clf.score(X = x_test, y = y_test))) print("-"*25) print(metrics.classification_report(y_true = y_test, y_pred = prediction, target_names = map_dic.values())) print("-"*25) print(metrics.confusion_matrix(y_true = y_test, y_pred = prediction)) ###Output accuracy: 0.96 ------------------------- precision recall f1-score support setosa 1.00 1.00 1.00 16 versicolor 0.93 0.93 0.93 15 virginica 0.93 0.93 0.93 14 avg / total 0.96 0.96 0.96 45 ------------------------- [[16 0 0] [ 0 14 1] [ 0 1 13]] ###Markdown Feature importances 利用Gini Index 或是 Entropy 來進行判斷特徵重要性 ###Code feature_importance_dic = {'feature names': iris.feature_names, 'importances': tree_clf.feature_importances_} feature_df = pd.DataFrame(data = feature_importance_dic) feature_df = 
feature_df[feature_df['importances'] != 0] # delete feature importances equal 0 feature_df = feature_df.sort_values(by = 'importances', ascending = False).reset_index(drop = True) feature_df['feature names'] = feature_df['feature names'].str.replace(pat = ' \([a-z]{,2}\)', repl = "") feature_df def adding_text(rectangle): # attach rectangle class to get axis and plotting information for rect in rectangle: height = rect.get_height() temp_x = rect.get_x() width = rect.get_width() plt.text(x = temp_x + width/2, y = height*1.03 , s = "%f"%(height), ha = 'center', va = 'center') # ha = horizontalaligment, va = verticalaligment rect = plt.bar(x = feature_df.index, height = feature_df['importances'], align = 'center') plt.xticks(feature_df.index, feature_df['feature names'].values) plt.xlabel('feature') plt.ylabel('feature importances') adding_text(rectangle = rect) plt.show() ###Output _____no_output_____ ###Markdown Visualize decision process - 必須先安裝 [graphviz](https://graphviz.gitlab.io/)和 graphviz 套件 ###Code from sklearn.tree import export_graphviz import graphviz import matplotlib.image as mpimg dot_data = export_graphviz(decision_tree = tree_clf, out_file = None, feature_names = iris.feature_names, filled = True, class_names = iris.target_names, special_characters=True, rounded = True) graph = graphviz.Source(dot_data) graph ###Output _____no_output_____ ###Markdown Output to Image ###Code export_graphviz(decision_tree = tree_clf, out_file = './picture/tree.dot', feature_names = iris.feature_names, filled = True, class_names = iris.target_names, special_characters=True, rounded = True) ###Output _____no_output_____ ###Markdown transform dot file to png file ###Code !dot -Tpng ./picture/tree.dot -o ./picture/tree.png %matplotlib inline img=mpimg.imread('./picture/tree.png') plt.figure(figsize = (10, 8)) plt.imshow(img) ###Output _____no_output_____ ###Markdown Task 3For the given ‘Iris’ dataset, create the Decision Tree classifier and visualize it graphically. Abhishek singh Libraries ###Code import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier # Load Data df = pd.read_csv('C:/Users/sony/Desktop/Spark_Foundation/Task 3/Iris.csv') df.head() df.describe().T #proper distributed data df.isna().sum().sum() ###Output _____no_output_____ ###Markdown Visualization ###Code from pandas_visual_analysis import VisualAnalysis VisualAnalysis(df.iloc[:,1:6]) ###Output _____no_output_____ ###Markdown Split the Data ###Code X = df.iloc[:,1:5] y = df.iloc[:,-1] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1010) clf = DecisionTreeClassifier(random_state=0) clf.fit(X_train,y_train) pred=clf.predict(X_test) from sklearn.metrics import accuracy_score accuracy_score(y_test, pred) from sklearn import tree plt.figure(figsize=(15,10)) tree.plot_tree(clf,filled=True) ###Output _____no_output_____ ###Markdown if we feed any new data to this classifier, it would be able to predict the right class accordingly. 
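For instance, a single made-up measurement can be passed straight to `clf.predict`; the four numbers below stand for sepal length, sepal width, petal length and petal width in cm and are chosen to look like a typical setosa, so the predicted class should come back as setosa:

###Code
# Hypothetical new flower (illustrative values, not taken from the dataset)
new_flower = [[5.1, 3.5, 1.4, 0.2]]   # sepal length, sepal width, petal length, petal width (cm)
clf.predict(new_flower)
###Output
_____no_output_____
###Markdown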
we use pred to predict the right class with 89 % Accuracy ###Code New_Data = [] pred=clf.predict(New_Data) ###Output _____no_output_____ ###Markdown Decision Tree ###Code import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn import tree from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import warnings warnings.filterwarnings('ignore') df = pd.read_csv("insurance.csv") df.head() df.isnull().sum() plt.figure(figsize=(7,7)) df["insuranceclaim"].value_counts().plot.pie(autopct="%1.1f%%") plt.show() plt.figure(figsize=(7,7)) sns.countplot(data=df,x="insuranceclaim",hue="sex") plt.show() plt.figure(figsize=(7,7)) sns.countplot(data=df,x="insuranceclaim",hue="smoker") plt.show() plt.figure(figsize=(7,7)) sns.countplot(data=df,x="insuranceclaim",hue="children") plt.show() plt.figure(figsize=(7,7)) sns.countplot(data=df,x="insuranceclaim",hue="region") plt.show() plt.figure(figsize=(7,7)) sns.scatterplot(data=df,x="bmi",y="insuranceclaim") plt.show() plt.figure(figsize=(7,7)) sns.scatterplot(data=df,x="charges",y="insuranceclaim") plt.show() plt.figure(figsize=(7,7)) sns.scatterplot(data=df,x="age",y="insuranceclaim") plt.show() cols = ["age","bmi","charges"] for col in cols: plt.figure(figsize=(6,6)) sns.distplot(df[col]) plt.show() def create_model(model,X,y): X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=1) model.fit(X_train,y_train) y_pred = model.predict(X_test) print(classification_report(y_test,y_pred)) return model X = df.iloc[:,:-1] y = df.iloc[:,-1] lr = LogisticRegression() lr = create_model(lr,X,y) dt = DecisionTreeClassifier() dt = create_model(dt,X,y) dt.feature_importances_ ###Output _____no_output_____ ###Markdown Decision Tree with max_depth ###Code dt2 = DecisionTreeClassifier(max_depth=5) dt2 = create_model(dt2,X,y) dt2.feature_importances_ ###Output _____no_output_____ ###Markdown Decision Tree with min_sample_leaf ###Code dt3 = DecisionTreeClassifier(min_samples_leaf=90) dt3 = create_model(dt3,X,y) dt3.feature_importances_ ###Output _____no_output_____ ###Markdown Building Final Decision Tree ###Code clf_tree = DecisionTreeClassifier( max_depth = 4, max_features=2) clf_tree.fit( train_X, train_y ) tree_test_pred = pd.DataFrame( { 'actual': test_y, 'predicted': clf_tree.predict( test_X ) } ) tree_test_pred.sample( n = 10 ) metrics.accuracy_score( tree_test_pred.actual, tree_test_pred.predicted ) tree_cm = metrics.confusion_matrix( tree_test_pred.predicted, tree_test_pred.actual, [1,0] ) sns.heatmap(tree_cm, annot=True, fmt='.2f', xticklabels = ["Yes", "No"] , yticklabels = ["Yes", "No"] ) plt.ylabel('True label') plt.xlabel('Predicted label') ###Output _____no_output_____ ###Markdown Graphical Representation of Decision Tree ###Code from sklearn import tree fn=['sepal length (cm)','sepal width (cm)','petal length (cm)','petal width (cm)'] cn=['setosa', 'versicolor', 'virginica'] fig, axes = plt.subplots(nrows = 1,ncols = 1,figsize = (15,10), dpi=300) tree.plot_tree(clf_tree, feature_names = fn, class_names=cn, filled = True); fig.savefig('imagename.png') ###Output _____no_output_____ ###Markdown Changing the max_depth ###Code train_accuracy = [] validation_accuracy = [] for depth in range(1,10): dt_model = DecisionTreeClassifier(max_depth=depth, random_state=10) dt_model.fit(X_train, y_train) train_accuracy.append(dt_model.score(X_train, 
y_train)) validation_accuracy.append(dt_model.score(X_valid, y_valid)) frame = pd.DataFrame({'max_depth':range(1,10), 'train_acc':train_accuracy, 'valid_acc':validation_accuracy}) frame.head() plt.figure(figsize=(12,6)) plt.plot(frame['max_depth'], frame['train_acc'], marker='o') plt.plot(frame['max_depth'], frame['valid_acc'], marker='o') plt.xlabel('Depth of tree') plt.ylabel('performance') plt.legend() ###Output _____no_output_____ ###Markdown * max_leaf_nodes* min_samples_split* min_samples_leaf ###Code dt_model = DecisionTreeClassifier(max_depth=8, max_leaf_nodes=25, random_state=10) #fitting the model dt_model.fit(X_train, y_train) #Training score dt_model.score(X_train, y_train) #Validation score dt_model.score(X_valid, y_valid) from sklearn import tree !pip install graphviz decision_tree = tree.export_graphviz(dt_model,out_file='tree.dot',feature_names=X_train.columns,max_depth=2,filled=True) !dot -Tpng tree.dot -o tree.png image = plt.imread('tree.png') plt.figure(figsize=(15,15)) plt.imshow(image) ###Output _____no_output_____ ###Markdown Implementação da Decision Tree ###Code import numpy as np import pandas as pd import math df = pd.read_csv('tenis.csv') df ###Output _____no_output_____ ###Markdown Entropia e Montagem da Arvore ###Code def uncertainty(df, given): return entropy(df, None) - entropy(df, given) def entropy(df, given): result = 0 label = df.columns[len(df.columns)-1] distinct_label_values = df[label].unique() if given == None: for value in distinct_label_values: prob = df[df[label] == value].shape[0]/df.shape[0] x = df[df[label] == value].shape[0]/df.shape[0] log2 = 0 if x == 0 else math.log(x, 2) result += -prob*log2 else: distinct_values = [distinct_label_values, df[given].unique()] for info_label in distinct_values[0]: for info_given in distinct_values[1]: prob = df[(df[label] == info_label) & (df[given] == info_given)].shape[0]/df.shape[0] x = df[(df[label] == info_label) & (df[given] == info_given)].shape[0]/df[df[given] == info_given].shape[0] log2 = 0 if x == 0 else math.log(x, 2) result += -prob*log2 return result def decision_tree(df): return ['root', decision_tree_recursive(df, None)] def decision_tree_recursive(df, parent): df = df if parent is None else df.drop([parent], axis=1) lowest_uncertainty = 0 next_child = None for attr in df.columns[0:len(df.columns)-1]: u = uncertainty(df, attr) if u > lowest_uncertainty: lowest_uncertainty = u next_child = attr if lowest_uncertainty == 0: return df.iloc[0,len(df.columns)-1] child_values = df[next_child].unique() return [next_child, list(map(lambda c: [c, decision_tree_recursive(df[df[next_child] == c], next_child)], child_values))] ###Output _____no_output_____ ###Markdown Imprimir arvore ###Code dec_test = decision_tree(df) ###Output _____no_output_____ ###Markdown Formato:Dados de Array dentro de Array ###Code dec_test ###Output _____no_output_____ ###Markdown Decision Trees Agenda1. Introduction to Decision Trees2. Algorithm : Decision Trees3. Decision Tree for Classification4. Decision Tree for Regression5. Advantages & Limitations of Decision Trees 1. Introduction to Decision Trees- Its a tree like data structure to make a model of the data- uses if-else at every node of the tree- can be used for both classification and regression analysis Terminology: Root Node: It represents entire population or sample and this further gets divided into two or more homogeneous sets. 
Splitting: The process of dividing a node into two or more sub-nodes.
Pruning: Why? The splitting process keeps growing the tree until the stopping criteria are reached, but a fully grown tree is likely to overfit the data, leading to poor accuracy on unseen data. Pruning removes decision nodes, starting from the leaves, in a way that does not disturb the overall accuracy:
- split the training set into a training dataset D and a validation dataset V
- create the decision tree using the training dataset D
- keep trimming the tree so as to optimise the accuracy on the validation dataset V

2. Algorithm: Decision Trees
1. ID3 (Entropy and Information Gain)
2. Gini Index
3. Chi Square
4. Reduction in Variance

ID3 (Entropy and Information Gain)
* The scikit-learn Decision Tree is based on CART, an advancement of ID3 (developed in 1986 by Ross Quinlan)
* ID3 works when both the feature data and the target data are categorical in nature
* The objective of CART is to maximise the information gain at each split

Entropy
- A decision tree involves partitioning the data into subsets that contain instances with similar values (homogeneous).
- The homogeneity of a sample is calculated by the ID3 algorithm using entropy.
- Entropy zero (minimum): the sample is completely homogeneous
- Entropy one (maximum): the sample is equally divided between the classes

$ Entropy_{max} = -0.5\log_2 0.5 - 0.5\log_2 0.5 = 1 $

$ Entropy_{min} = -0\log_2 0 - 1\log_2 1 = 0 $ ###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

play_data = pd.read_csv('Data/Classification/tennis.csv')
play_data
###Output
_____no_output_____
###Markdown
* A decision tree for the above data

Entropy of play
* $ Entropy(Play) = -p(Yes)\log_2 p(Yes) - p(No)\log_2 p(No) $ ###Code
play_data.play.value_counts()

Entropy_Play = -(9/14)*np.log2(9/14) - (5/14)*np.log2(5/14)
Entropy_Play

play_data[play_data.outlook == 'sunny']

# Entropy(Play|Outlook=sunny)
Entropy_Play_Outlook_Sunny = -(3/5)*np.log2(3/5) - (2/5)*np.log2(2/5)
Entropy_Play_Outlook_Sunny

play_data[play_data.outlook == 'overcast']

# Entropy(Play|Outlook=overcast)
# Since this subset is completely homogeneous, its entropy is 0

play_data[play_data.outlook == 'rainy']

# Entropy(Play|Outlook=rainy)
Entropy_Play_Outlook_Rain = -(2/5)*np.log2(2/5) - (3/5)*np.log2(3/5)
Entropy_Play_Outlook_Rain
###Output
_____no_output_____
###Markdown
Information Gain
- The information gain is based on the decrease in entropy after a dataset is split on an attribute.
- Constructing a decision tree is all about finding the attribute that returns the highest information gain (i.e., the most homogeneous branches).

$ Gain(A, B) = Entropy(A) - Entropy(A,B) $

* We choose the attribute whose split gives the highest information gain
* The next step is to calculate the information gain for every attribute

Information Gain on splitting by Outlook

$ Entropy(Play,Outlook) = P(Sunny)\cdot Entropy(Play,Outlook=Sunny) + P(Overcast)\cdot Entropy(Play,Outlook=Overcast) + P(Rainy)\cdot Entropy(Play,Outlook=Rainy) $

$ Entropy(Play,Outlook) = P(Sunny)\cdot E(3,2) + P(Overcast)\cdot E(4,0) + P(Rainy)\cdot E(2,3) $

$ Entropy(Play,Outlook) = (\frac{5}{14})*0.971 + (\frac{4}{14})*0 + (\frac{5}{14})*0.971 = 0.693 $

$ Gain(Play,Outlook) = Entropy(Play) - Entropy(Play,Outlook) $

$ Gain(Play,Outlook) = 0.94 - 0.693 = 0.247 $

Other gains
* Gain(Play, Temperature) - 0.029
* Gain(Play, Humidity) - 0.151
* Gain(Play, Wind) - 0.048

Conclusion - Outlook is the winner and thus becomes the root of the tree

Time to find the next splitting criterion ###Code
play_data[play_data.outlook == 'overcast']
###Output
_____no_output_____
###Markdown
Conclusion - if the outlook is overcast, play is always true

The algorithm is run recursively on the non-leaf branches ###Code
play_data[play_data.outlook == 'sunny']
###Output
_____no_output_____
###Markdown
3. Decision Tree for Classification
* The leaf nodes of the decision tree decide the class
* CART converts features with continuous values into categorical splits
* A different tree may be generated when the same data is given in a different order ###Code
# Import libraries
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Import dataset
dataset = pd.read_csv('Data/Classification/Apply_Job.csv')
X = dataset.iloc[:, 0:2].values
y = dataset.iloc[:, 2].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

# Building our decision tree classifier and fitting the model
from sklearn.tree import DecisionTreeClassifier
dt_c = DecisionTreeClassifier()
dt_c.fit(X_train, y_train)

from sklearn.metrics import accuracy_score
pred_train = dt_c.predict(X_train)
pred_test = dt_c.predict(X_test)
train_accuracy = accuracy_score(y_train, pred_train)
test_accuracy = accuracy_score(y_test, pred_test)
print(train_accuracy)
print(test_accuracy)

# Visualising the Training set results
from matplotlib.colors import ListedColormap
import numpy as np

# Define variables
clf = dt_c
h = 0.01
X_plot, z_plot = X, y

# Standard template to draw the graph
x_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1
y_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# Plot the decision boundary.
For that, we will assign a color to each
# point in the mesh
Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha = 0.7, cmap = ListedColormap(('blue', 'red')))

for i, j in enumerate(np.unique(z_plot)):
    plt.scatter(X_plot[z_plot == j, 0], X_plot[z_plot == j, 1],
                c = ['blue', 'red'][i],
                cmap = ListedColormap(('blue', 'red')), label = j)

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Decision Tree Classification')
plt.xlabel('Experience in Years')
plt.ylabel('Salary in lakhs')
plt.legend()
plt.show()
###Output
1.0
1.0
###Markdown
Decision Tree on iris dataset ###Code
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_graphviz, ExtraTreeClassifier

iris = load_iris()
X = iris.data
y = iris.target

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size = 0.33, random_state = 42, shuffle = True)

# building our decision tree classifier and fitting the model
dt = DecisionTreeClassifier(criterion='entropy')
dt.fit(X_train, y_train)

# predicting on the train and the test data and assessing the accuracies
from sklearn.metrics import accuracy_score
pred_train = dt.predict(X_train)
pred_test = dt.predict(X_test)
train_accuracy = accuracy_score(y_train, pred_train)
test_accuracy = accuracy_score(y_test, pred_test)
print('Training accuracy is: {0}'.format(train_accuracy))
print('Testing accuracy is: {0}'.format(test_accuracy))

export_graphviz(dt, 'dt.tree')
###Output
_____no_output_____
###Markdown
Visualizing the tree
* http://www.webgraphviz.com/
* Criteria - Entropy ###Code
dt.predict(X_test)
###Output
_____no_output_____
###Markdown
Feature Importances
* Important features will be higher up the tree
* We can use this technique to identify important features ###Code
dt.feature_importances_
###Output
_____no_output_____
###Markdown
Visualizing the Decision Boundary ###Code
from sklearn.datasets import make_blobs

X, Y = make_blobs(n_features=2, n_samples=1000, cluster_std=.8, centers=4, random_state=6)
plt.scatter(X[:,0], X[:,1], c=Y, s=5, cmap='viridis')

dt = DecisionTreeClassifier()
dt.fit(X, Y)

plot_step = 0.2
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))
outcome = dt.predict(np.c_[xx.ravel(), yy.ravel()])
xx.shape

plt.scatter(X[:,0], X[:,1], c=Y, s=5, cmap='viridis')
plt.scatter(xx.ravel(), yy.ravel(), c=outcome, s=1, alpha=1, cmap='viridis')
plt.show()
###Output
_____no_output_____
###Markdown
4.
Decision Tree for Regression
* A continuous target is predicted with the tree.
* ID3 is modified for regression by replacing Information Gain with Mean Squared Error.
* The decision tree tries to partition the data into subsets with homogeneous contents (minimising the mean squared error) ###Code
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Data/Regression/Job_Exp.csv')
X = dataset.iloc[:, [0]].values
y = dataset.iloc[:, 1].values

# Applying the Decision Tree Regressor
from sklearn.tree import DecisionTreeRegressor
dt_r = DecisionTreeRegressor(random_state = 0)
dt_r.fit(X, y)

# Predicting a new result
y_pred = dt_r.predict([[27]])

# Visualising
X_grid = np.arange(X.min(), X.max(), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'green')
plt.plot(X_grid, dt_r.predict(X_grid), color = 'red')
plt.ylabel('Chance of getting the job (%)')
plt.xlabel('Years of Exp.')
###Output
_____no_output_____
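###Markdown
To get a feel for how well the regression tree generalises - a minimal sketch, assuming only the X and y loaded from Job_Exp.csv above - we can hold out part of the data and compare trees of different depths by their test mean squared error: ###Code
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# hold out 25% of the rows for testing
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)

for depth in [2, 4, None]:   # None lets the tree grow fully
    reg = DecisionTreeRegressor(max_depth=depth, random_state=0)
    reg.fit(X_tr, y_tr)
    mse = mean_squared_error(y_te, reg.predict(X_te))
    print("max_depth={}: test MSE = {:.3f}".format(depth, mse))
###Output
_____no_output_____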
python/notebooks/Story Data Exploration.ipynb
###Markdown Story Data Exploration ###Code #Import required libraries import matplotlib.pyplot as plt import pandas as pd import numpy as np import seaborn as sns #Load the merged file storycontentdata = pd.read_csv("stories_content.csv") #View structure of the file storycontentdata.info() #View summary statistics for the numeric variables. Variable of interest is #reads. storycontentdata.describe() #Observations: Average number of reads is 119 for stories.#Max is 27K, Min is 0 reads. #Only data of published stories is present in the corpus <stories_status=1> #View initial rows from the file to get an idea of the content storycontentdata.head() #Observations: Stories are in multiple languages. Story category information seems to be missing. #View the number of stories published in various languages #<For a count, uncomment below line of code.> #storycontentdata.language_name.value_counts() #<For graphical view, use below lines of code.> plt.figure(figsize = (30,10)) sns.set(style="darkgrid") # Plot sns.countplot(x='language_name', data=storycontentdata).set_title('Language') # Setting labels plt.xticks(rotation = 45) plt.show() #Observation: Most of the stories are in English language and Hindi so would be a good place to start #analysis and tag generation as we would have sufficient corpus. #View the %distribution of stories created by children v/s adults. plt.figure(figsize = (5,4)) temp = pd.DataFrame(storycontentdata.is_child_created_story.value_counts()/sum(storycontentdata.is_child_created_story.value_counts())*100) fig = sns.barplot(temp.index,temp.is_child_created_story) fig.set_title('Children Stories (Percentage)') fig.set_xlabel('Is child created story') fig.set_ylabel('Percentage') #Setting y limits plt.ylim([0,100]) #plt.xlabel='Is child created story' #plt.ylabel='Percentage' plt.show() #Observation: A very few stories are child created. While on the portal, there are many #child created stories. Needs to be checked if we are missing these. #View the % distribution of stories which have been recommended. plt.figure(figsize = (5,4)) temp = pd.DataFrame(storycontentdata.is_recommended_story.value_counts()/sum(storycontentdata.is_recommended_story.value_counts())*100) fig = sns.barplot(temp.index,temp.is_recommended_story) fig.set_title('Recommended Stories (Percentage)') fig.set_xlabel('Is recommended story') fig.set_ylabel('Percentage') #Setting y limits plt.ylim([0,100]) plt.show() #Observation: There are a significant number of stories which are recommended to others for reading. # To get numeric count in place of % distribution, uncomment below lines of code # plt.figure(figsize = (5,10)) # sns.set(style="darkgrid") # # Plot # sns.countplot(x='is_recommended_story', data=storycontentdata).set_title('Recommended Stories') # # Setting labels # plt.show() #View the story derivation type distribution to see what % of stories are original v/s translated. plt.figure(figsize = (5,4)) temp = pd.DataFrame(storycontentdata.story_derivation_type.value_counts()/sum(storycontentdata.story_derivation_type.value_counts())*100) fig = sns.barplot(temp.index,temp.story_derivation_type) fig.set_title('Stories Derivation Type(Percentage)') fig.set_xlabel('Story Derivation Type') fig.set_ylabel('Percentage') #Setting y limits plt.ylim([0,100]) plt.show() #Observation: A large number of stories are translated. It may be worthwhile exploring how tagging #original v/s translated stories works. Are tags of translated stories similar to those in original? 
#Perhaps we can generate tags for original stories and then use them for translated ones in #respective languages? #View distribution of organization name to check which other publishers are involved plt.figure(figsize = (10,10)) temp = pd.DataFrame(storycontentdata.organization_name.value_counts()/sum(storycontentdata.organization_name.value_counts())*100) fig = sns.barplot(temp.index,temp.organization_name) fig.set_title('Organization Name(Percentage)') fig.set_xlabel('Organization Name') fig.set_ylabel('Percentage') plt.xticks(rotation=90) #Setting y limits plt.ylim([0,100]) plt.show() #Observation: There are nearly 11 publisher organizations. However, a large % of the stories #on the website are those from Pratham books. #View how many reading levels are present in the data and distribution of stories amongst those. plt.figure(figsize = (5,7)) temp = pd.DataFrame(storycontentdata.reading_level_cat.value_counts()/sum(storycontentdata.reading_level_cat.value_counts())*100) fig = sns.barplot(temp.index,temp.reading_level_cat) fig.set_title('Reading Level (Percentage)') fig.set_xlabel('Reading Level') fig.set_ylabel('Percentage') #Setting y limits plt.ylim([0,100]) plt.show() #Observation: Close to 50% of the stories are of reading level 1. #View the number of reads for the stories to get an idea of readership of the books. #Besides being best books, perhaps these highly read books may be well tagged and hence #easier to find than others plt.figure(figsize = (10,7)) sns.boxplot(storycontentdata.reads) #Observation: Several books have not even been read once. Several have been read about 2-3K times. #Some books enjoy very high readership. ###Output _____no_output_____
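###Markdown
As a possible next step - a small sketch that assumes only the storycontentdata frame loaded above, with its language_name, reading_level_cat and reads columns - we can summarise readership per language and per reading level to see where tagging effort might matter most: ###Code
# mean and median reads per language, most-read languages first
reads_by_language = (storycontentdata
                     .groupby("language_name")["reads"]
                     .agg(["count", "mean", "median"])
                     .sort_values("mean", ascending=False))
reads_by_language.head(10)

# the same summary by reading level
storycontentdata.groupby("reading_level_cat")["reads"].agg(["count", "mean", "median"])
###Output
_____no_output_____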
pyspark/pysparkTJ/pysparkTJhw01.ipynb
###Markdown pysparkTJhw01

Set up the development environment, write a word count program, and take a screenshot. ###Code
import pyspark
from pyspark.sql import SparkSession

# create a SparkSession instance
spark = SparkSession.builder \
    .master("local[*]") \
    .appName("Word Count") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()

# get the SparkContext from the SparkSession
sc = spark.sparkContext

rdd1 = sc.textFile("/test.py")
result = rdd1.flatMap(lambda x: x.split())\
             .map(lambda x: (x, 1))\
             .reduceByKey(lambda a, b: a + b)\
             .collect()
result
###Output
_____no_output_____
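###Markdown
Collecting every (word, count) pair can get unwieldy on larger inputs; a small follow-up sketch using the same RDD sorts by count, keeps only the ten most frequent words, and then stops the session: ###Code
# top 10 most frequent words, without collecting the full result
top10 = (rdd1.flatMap(lambda x: x.split())
             .map(lambda x: (x, 1))
             .reduceByKey(lambda a, b: a + b)
             .sortBy(lambda kv: kv[1], ascending=False)
             .take(10))
print(top10)

# release resources when finished
spark.stop()
###Output
_____no_output_____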
src/Random tests.ipynb
###Markdown Random tests Comparing types ###Code type(5) type('hello') type('hello') == str type('hello') is str ###Output _____no_output_____ ###Markdown Dataframe attributes ###Code import pandas as pd data_dict = {"A":[1,2,3]} df = pd.DataFrame(data = data_dict) df df.attrs['thing'] = 'stuff' df.attrs ###Output _____no_output_____ ###Markdown Slicing arrays ###Code import numpy as np test_slice = np.s_[0:512,0:512] test_array = np.ones((512,512)) test_array[None] test_slice[0].stop ###Output _____no_output_____ ###Markdown Initializing a GaussianResult ###Code from data_analysis.signal_calculators import GaussianResult GaussianResult(1,1) ###Output _____no_output_____ ###Markdown Pandas dataframe setting on copy of a slice ###Code import pandas as pd df = pd.DataFrame(data = {'a':[1,2,3], 'b':[1,2,3]}) df['c'] = df.a/df.b df['c'] ###Output _____no_output_____ ###Markdown Saving dataframe to hdf ###Code import pandas as pd df_test = pd.DataFrame({"a" : [1,2,3]}) df_test.attrs["test"] = 1 df_test.attrs df_test.to_hdf("test.hdf", "test_key", 'a') df_load = pd.read_hdf("test.hdf") df_load.attrs ###Output _____no_output_____ ###Markdown Reading data from hdf ###Code fname = f"D:\Google Drive\CeNTREX Oskari\State preparation\SPA\Data analysis\Analyzed Data\SPA_test_11_9_2021_analyzed.hdf" df = pd.read_hdf(fname) df.attrs df ###Output _____no_output_____
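###Markdown
One footnote on the "Comparing types" cells at the top: an exact type(...) comparison ignores subclasses, whereas isinstance accepts them, which is usually what you want - a tiny sketch: ###Code
class MyStr(str):
    pass

s = MyStr("hello")
print(type(s) == str)       # False: exact type comparison rejects the subclass
print(isinstance(s, str))   # True: isinstance accepts subclasses of str
###Output
_____no_output_____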
ch00python/029structures.ipynb
###Markdown Data structures Nested Lists and Dictionaries In research programming, one of our most common tasks is building an appropriate *structure* to model our complicateddata. Later in the course, we'll see how we can define our own types, with their own attributes, properties, and methods. But probably the most common approach is to use nested structures of lists, dictionaries, and sets to model our data. For example, an address might be modelled as a dictionary with appropriately named fields: ###Code UCL={ 'City': 'London', 'Street': 'Gower Street', 'Postcode': 'WC1E 6BT' } James={ 'City': 'London', 'Street': 'Waterson Street', 'Postcode': 'E2 8HH' } ###Output _____no_output_____ ###Markdown A collection of people's addresses is then a list of dictionaries: ###Code addresses=[UCL, James] addresses ###Output _____no_output_____ ###Markdown A more complicated data structure, for example for a census database, might have a list of residents or employees at each address: ###Code UCL['people']=['Clare','James', 'Owain'] James['people']=['Sue', 'James'] addresses ###Output _____no_output_____ ###Markdown Which is then a list of dictionaries, with keys which are strings or lists. We can go further, e.g.: ###Code UCL['Residential']=False ###Output _____no_output_____ ###Markdown And we can write code against our structures: ###Code leaders = [place['people'][0] for place in addresses] leaders ###Output _____no_output_____ ###Markdown Data structures Nested Lists and Dictionaries In research programming, one of our most common tasks is building an appropriate *structure* to model our complicateddata. Later in the course, we'll see how we can define our own types, with their own attributes, properties, and methods. But probably the most common approach is to use nested structures of lists, dictionaries, and sets to model our data. For example, an address might be modelled as a dictionary with appropriately named fields: ###Code UCL = { 'City': 'London', 'Street': 'Gower Street', 'Postcode': 'WC1E 6BT' } Chapman = { 'City': 'London', 'Street': 'Southwood ln', 'Postcode': 'N6 5TB' } ###Output _____no_output_____ ###Markdown A collection of people's addresses is then a list of dictionaries: ###Code addresses = [UCL, Chapman] addresses ###Output _____no_output_____ ###Markdown A more complicated data structure, for example for a census database, might have a list of residents or employees at each address: ###Code UCL['people'] = ['Jeremy','Leonard', 'James', 'Henry'] Chapman['people'] = ['Graham', 'David'] addresses ###Output _____no_output_____ ###Markdown Which is then a list of dictionaries, with keys which are strings or lists. We can go further, e.g.: ###Code UCL['Residential'] = False ###Output _____no_output_____ ###Markdown And we can write code against our structures: ###Code leaders = [place['people'][0] for place in addresses] leaders ###Output _____no_output_____ ###Markdown Data structures Nested Lists and Dictionaries In research programming, one of our most common tasks is building an appropriate *structure* to model our complicateddata. Later in the course, we'll see how we can define our own types, with their own attributes, properties, and methods. But probably the most common approach is to use nested structures of lists, dictionaries, and sets to model our data. 
For example, an address might be modelled as a dictionary with appropriately named fields: ###Code
UCL = {
    'City': 'London',
    'Street': 'Gower Street',
    'Postcode': 'WC1E 6BT'
}

James = {
    'City': 'London',
    'Street': 'Waterson Street',
    'Postcode': 'E2 8HH'
}
###Output
_____no_output_____
###Markdown
A collection of people's addresses is then a list of dictionaries: ###Code
addresses = [UCL, James]
addresses
###Output
_____no_output_____
###Markdown
A more complicated data structure, for example for a census database, might have a list of residents or employees at each address: ###Code
UCL['people'] = ['Clare','James', 'Owain']
James['people'] = ['Sue', 'James']
addresses
###Output
_____no_output_____
###Markdown
Which is then a list of dictionaries, with keys which are strings or lists. We can go further, e.g.: ###Code
UCL['Residential'] = False
###Output
_____no_output_____
###Markdown
And we can write code against our structures: ###Code
leaders = [place['people'][0] for place in addresses]
leaders
###Output
_____no_output_____
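###Markdown
We can query the same nested structure in other ways too - an illustrative sketch using only the objects defined above - for example listing every person recorded at a London address, or serialising the whole thing to JSON: ###Code
# everyone listed at a London address
londoners = [person
             for place in addresses
             if place['City'] == 'London'
             for person in place['people']]
londoners

# the nested lists and dictionaries serialise directly to JSON
import json
print(json.dumps(addresses, indent=2))
###Output
_____no_output_____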